• Assorted small Python demos


from email.mime.text import MIMEText
# The first argument is the mail body and the second is the MIME subtype;
# 'plain' means plain text, so the final MIME type is 'text/plain'. Always use utf-8 to keep multi-language text intact.
msg = MIMEText('Hello Send By Python123', 'plain', 'utf-8')

import smtplib
server = smtplib.SMTP_SSL('smtp.qq.com', 465)
server.set_debuglevel(1)
# essplrlkpwcvbfci
# server.ehlo()
# server.starttls()
server.login('912549963@qq.com', 'xxxx')  # 'xxxx' stands for the authorization code issued by the mail provider, not the account password
server.sendmail('912549963@qq.com', '18437963713@163.com', msg.as_string())
server.quit()

# from email.mime.text import MIMEText
# msg = MIMEText('hello, send by Python5656', 'plain', 'utf-8')
# import smtplib
# server = smtplib.SMTP_SSL('smtp.qq.com', 465)  # the default SMTP port is 25; 465 is the SSL port
# server.set_debuglevel(1)
# server.ehlo()
# server.starttls()
# server.login('912549963@qq.com', 'nrabssgxathrbfhh')
# server.sendmail('912549963@qq.com', '18437963713@163.com', msg.as_string())
# server.quit()
Email
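The demo above sends only a bare body, so many clients show it without a subject or a readable sender. A minimal sketch below adds Subject/From/To headers via the standard email.header module; the addresses and the authorization code are placeholders, not values from the original post.

    from email.mime.text import MIMEText
    from email.header import Header
    import smtplib

    msg = MIMEText('Hello from Python', 'plain', 'utf-8')
    msg['Subject'] = Header('Test mail', 'utf-8')  # subject line shown by the mail client
    msg['From'] = 'sender@example.com'             # placeholder sender address
    msg['To'] = 'receiver@example.com'             # placeholder recipient address

    # The sending steps are the same as above; kept commented so the sketch does not try to connect.
    # server = smtplib.SMTP_SSL('smtp.qq.com', 465)
    # server.login('sender@example.com', 'authorization-code')  # placeholder credentials
    # server.sendmail('sender@example.com', ['receiver@example.com'], msg.as_string())
    # server.quit()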
# import urllib.request
# import re
# import os
# import urllib
# # Fetch the page at the given URL; the returned html is the page's source code
# def getHtml(url):
#     page = urllib.request.urlopen(url)
#     html = page.read()
#     return html.decode('UTF-8')
#
# def getImg(html):
#     reg = r'src="(.+?\.jpg)" pic_ext'
#     imgre = re.compile(reg)
#     imglist = imgre.findall(html)  # collect every image address found in the page into imglist
#     x = 0
#     path = r'D:\test'  # raw string, otherwise \t would be read as a tab
#     # save the images into D:\test; create the folder if it does not exist
#     if not os.path.isdir(path):
#         os.makedirs(path)
#     paths = path + '\\'  # save under the test path
#
#     for imgurl in imglist:
#         urllib.request.urlretrieve(imgurl, '{}{}.jpg'.format(paths, x))  # download each image URL in imglist and save it locally; format builds the file name
#         x = x + 1
#     return imglist
# html = getHtml("http://tieba.baidu.com/p/2460150866")  # fetch the page source for this URL
# print(getImg(html))  # parse the page source and download the images
Crawl images and save them to a local folder
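A small sketch of the same download step using the requests library and os.path.join, which avoids the backslash-escaping pitfalls noted above. The image URL and target folder in the usage line are made up for illustration.

    import os
    import requests

    def download_images(img_urls, dest_dir):
        # create the target folder if needed, then save the images as 0.jpg, 1.jpg, ...
        os.makedirs(dest_dir, exist_ok=True)
        for i, img_url in enumerate(img_urls):
            resp = requests.get(img_url, timeout=10)
            resp.raise_for_status()
            with open(os.path.join(dest_dir, '{}.jpg'.format(i)), 'wb') as f:
                f.write(resp.content)

    # download_images(['http://example.com/a.jpg'], r'D:\test')  # hypothetical URL and path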
from flask import Flask  # import the package

app = Flask(__name__)  # create a Web application

@app.route('/')  # define a route (view); think of it as the URL of a page
def index():
    return "这是用Python + Flask 搞出来的。"  # render the page

if __name__ == "__main__":
    app.run(host='127.0.0.1', port=8080)  # run the app, listening on 127.0.0.1:8080
# Opening http://127.0.0.1:8080/ returns the string above.
Create a web application
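Routes can also carry a variable part of the URL. A minimal sketch, assuming the same local host and port as above; the /hello/<name> route is added only for illustration.

    from flask import Flask

    app = Flask(__name__)

    @app.route('/hello/<name>')  # <name> is taken from the URL and passed to the view function
    def hello(name):
        return 'Hello, {}!'.format(name)

    if __name__ == '__main__':
        app.run(host='127.0.0.1', port=8080)
    # http://127.0.0.1:8080/hello/Python  ->  Hello, Python!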
if __name__ == '__main__':  # When a module is run directly, __name__ is '__main__'; when the module is imported by another module, __name__ is that module's own name.
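A quick way to see this: put the lines below in a file (the file name demo_module.py is hypothetical) and compare running it directly with importing it.

    # demo_module.py
    print('__name__ in this module is:', __name__)

    if __name__ == '__main__':
        print('Run directly: this block executes.')

    # python demo_module.py   ->  __name__ is '__main__', so the block runs
    # import demo_module      ->  __name__ is 'demo_module', so the block is skipped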
    

      

import urllib.request

url = 'http://www.sogou.com/'
local_path = r'E:\download.html'  # raw string so the backslash stays part of the path
urllib.request.urlretrieve(url, local_path)
Download an entire web page
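The same page can also be saved with urlopen and an ordinary file write, which makes it explicit that raw bytes land on disk. A sketch assuming the same URL and an E:\ save path.

    import urllib.request

    url = 'http://www.sogou.com/'
    with urllib.request.urlopen(url) as resp:
        html = resp.read()                   # raw bytes of the page
    with open(r'E:\page.html', 'wb') as f:   # assumed save path
        f.write(html)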
# a = lambda x: x + 2  # the lambda expression here is treated as a function
# print(a(1))
# print(a(2))
#
# add = lambda x, y: x + y
# print(add(1, 2))  # result is 3

# y = lambda a, b, c: (a + b) / c
# print(str(y(1, 5, 9))[0:5])  # keep only the first five characters of the result
    Lambda
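Lambdas are most useful when passed to higher-order functions such as sorted, map and filter; a short sketch with made-up data.

    scores = [('Tom', 82), ('Amy', 95), ('Bob', 67)]
    scores_sorted = sorted(scores, key=lambda pair: pair[1], reverse=True)  # sort by score, descending
    print(scores_sorted)  # [('Amy', 95), ('Tom', 82), ('Bob', 67)]

    print(list(map(lambda x: x * x, [1, 2, 3])))         # [1, 4, 9]
    print(list(filter(lambda x: x % 2 == 0, range(6))))  # [0, 2, 4]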
# from bs4 import BeautifulSoup
# import requests
# from datetime import datetime
# import json
# import re

# news_url = 'http://news.sina.com.cn/c/nd/2017-05-08/doc-ifyeycfp9368908.shtml'
# web_data = requests.get(news_url)
# web_data.encoding = 'utf-8'
# soup = BeautifulSoup(web_data.text, 'lxml')
# # print(soup.select('a'))
#
# links = soup.findAll('a')                      # every <a> tag in the page
# href = soup.select('.side-tool to-top')
# print(links)
# print(href)
# print(soup.a.string)                           # text of the first <a> tag
# print(soup.a.attrs)                            # attribute dict of the first <a> tag
# title = soup.select('#artibodyTitle')[0].text  # text of the element with id artibodyTitle
# a1 = soup.select('a')
# a = soup.select('a')[0]
# aa = soup.select('a')[0]['href']
# print(title)
# print('----------------------------------------------------')
#
# for hrefs in a1:
#     print(hrefs.text)
#     print(hrefs)
#     a = a1[2]['href']  # href of the third link
#     print(a)
#     # print(hrefs.select('.href'))
# print(a1)
# print(a)
# print(aa)
Scrape a web page with BeautifulSoup
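The selectors used above can be tried offline on a small HTML string, which makes their behaviour easier to see than against a live news page. The markup below is made up, and html.parser is used so the sketch does not depend on lxml.

    from bs4 import BeautifulSoup

    html = '''
    <html><body>
      <h1 id="artibodyTitle">Sample title</h1>
      <a class="news" href="http://example.com/1">first</a>
      <a class="news" href="http://example.com/2">second</a>
    </body></html>
    '''
    soup = BeautifulSoup(html, 'html.parser')

    print(soup.select('#artibodyTitle')[0].text)  # select by id   -> Sample title
    for a in soup.select('a.news'):               # select by tag + class
        print(a.text, a['href'])                  # link text and its href attribute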
from bs4 import BeautifulSoup
import requests
from datetime import datetime
import json
import re

url = 'http://news.sina.com.cn/c/nd/2017-05-08/doc-ifyeycfp9368908.shtml'
web_Content = requests.get(url)
web_Content.encoding = 'utf-8'
soup = BeautifulSoup(web_Content.text, 'lxml')
title = soup.select('#artibodyTitle')[0].text  # text of the article title element

print(title)

time = soup.select('.time-source')[0].contents[0].strip()  # first child of the time-source element, whitespace stripped
dt = datetime.strptime(time, '%Y年%m月%d日%H:%M')  # parse the Chinese-formatted timestamp into a datetime
print(dt)
Time formatting
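strptime also works without fetching the page, and strftime turns the datetime back into a string. The date literal below is made up but follows the same Chinese format as the news page.

    from datetime import datetime

    dt = datetime.strptime('2017年05月08日09:13', '%Y年%m月%d日%H:%M')  # string -> datetime
    print(dt)                             # 2017-05-08 09:13:00
    print(dt.strftime('%Y-%m-%d %H:%M'))  # datetime -> string: 2017-05-08 09:13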
class Foo:

    def __init__(self, name, age):
        self.name = name
        self.age = age

    def detail(self):
        print(self.name)
        print(self.age)

obj1 = Foo('chengd', 18)
obj1.detail()  # Python passes obj1 as the self argument, i.e. the call is equivalent to Foo.detail(obj1); inside the method self is obj1, so self.name is 'chengd' and self.age is 18

# obj2 = Foo('python', 99)
# obj2.detail()  # Python passes obj2 as self, equivalent to Foo.detail(obj2); inside the method self is obj2, so self.name is 'python' and self.age is 99
Classes and self
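The comment above can be checked directly: calling through the instance is just shorthand for passing the instance as self, so the two calls below print the same thing (the class is repeated here only to keep the sketch self-contained).

    class Foo:
        def __init__(self, name, age):
            self.name = name
            self.age = age

        def detail(self):
            print(self.name, self.age)

    obj = Foo('chengd', 18)
    obj.detail()     # the usual call: Python supplies obj as self
    Foo.detail(obj)  # explicit form of the same call: self is obj, same output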
# In this function, yield hands a value back to the caller, much like return
# def addlist(alist):
#     for i in alist:
#         yield i + 1
#
# alist = [1, 2, 3]
# for x in addlist(alist):
#     print(x)
print('----------12121212--------------------------------------------')
def h():
    print('To be brave')
    m = yield 5656
    print('Fighting!')
cc = h()
mm = cc.__next__()  # runs the generator up to the first yield and returns its value
# print(cc.__next__())
print(mm)

print('------------------78979887--------------------------')
def h():
    print('Wen Chuan')
    mm = yield 5555
    print('Fighting!')

c = h()
m = c.__next__()
print('mm value is ', m)

print('-------------------------------------------------------------------')
def h():
    print('Wen Chuan')
    m = yield 5  # will receive 'Fighting!'
    print(m)
    d = yield 12
    print('We are together!')
c = h()
c.__next__()  # equivalent to c.send(None)
c.send('Fighting!')  # the (yield 5) expression evaluates to 'Fighting!'

print('-----------rrrrrrrrrrrrrrrrrrrrrrr----------------------------------------')
def h():
    print('Wen Chuan')
    mm = yield 5  # will receive 'Fighting!'
    print(mm)
    ddd = yield 12
    print('We are together!')
c = h()
m = c.__next__()  # m gets 5, the value of the first yield the generator reaches
d = c.send('Fighting!')  # the sent value becomes the result of the suspended (first) yield; send() then returns 12, the value of the second yield
print('We will never forget the date', m, '.', d)
# send(msg) and __next__() have return values: they return the value of the next yield expression the generator reaches
    yield Next Send
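A compact way to see that every send() returns the value of the next yield: a running-average coroutine, written as a sketch (the averager itself is not from the original post).

    def averager():
        total = 0.0
        count = 0
        average = None
        while True:
            value = yield average  # send(value) lands here; the caller gets back the current average
            total += value
            count += 1
            average = total / count

    avg = averager()
    avg.send(None)       # prime the coroutine (same as next(avg)): run to the first yield
    print(avg.send(10))  # 10.0
    print(avg.send(30))  # 20.0
    print(avg.send(5))   # 15.0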
• Original post: https://www.cnblogs.com/ZkbFighting/p/9638732.html