from datetime import datetime
import json
import requests
import urllib.request # 需要安装 urllib 库
from bs4 import BeautifulSoup # 需要安装 bs4 库
import os
from apscheduler.schedulers.blocking import BlockingScheduler
def get_iciba_everyday_chicken_soup():
    """Fetch iciba's daily sentence ("chicken soup") in English and Chinese.

    Returns:
        str: the English sentence and its Chinese translation, joined
        by a single newline.
    """
    url = 'http://open.iciba.com/dsapi/'
    r = requests.get(url)
    # The endpoint returns JSON; json.loads turns the str body into a dict.
    all_content = json.loads(r.text)
    english = all_content['content']  # English sentence of the day
    chinese = all_content['note']     # its Chinese translation
    # BUGFIX: the original contained a raw line break inside the string
    # literal (a syntax error); it must be the '\n' escape sequence.
    everyday_soup = english + '\n' + chinese
    return everyday_soup
# print(get_iciba_everyday_chicken_soup())
def get_weather(city_pinyin):
    """Scrape today's weather summary for a city from tianqi.com.

    Args:
        city_pinyin (str): city name in pinyin, e.g. 'zhengzhou'.

    Returns:
        str: space-joined non-blank text of the page's <dd> nodes
        (also printed to stdout).
    """
    # Spoof a browser User-Agent so the site does not reject the request as
    # an obvious bot. (The original shipped a redacted "*******" placeholder,
    # which defeats the stated purpose of the header.)
    header = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64)"}
    # Build the city's forecast URL from the pinyin name, e.g.
    # https://www.tianqi.com/zhengzhou/
    website = "https://www.tianqi.com/" + city_pinyin + "/"
    req = urllib.request.Request(url=website, headers=header)
    # BUGFIX: close the HTTP response deterministically instead of leaking it.
    with urllib.request.urlopen(req) as page:
        html = page.read()
    # html.parser is the parser BeautifulSoup uses for the document.
    soup = BeautifulSoup(html.decode("utf-8"), "html.parser")
    nodes = soup.find_all('dd')
    today_weather = [node.text.strip() for node in nodes]
    # Trim the first field to its leading two characters (drops extra text);
    # guard against an empty scrape so we don't raise IndexError.
    if today_weather:
        today_weather[0] = today_weather[0][:2]
    # BUGFIX: the original split the "\n" literal across two physical lines
    # (a syntax error). Drop blank entries and join the rest with spaces.
    tianqi = " ".join(s for s in today_weather if s.strip("\n"))
    print(tianqi)
    return tianqi
# Call the wrapped function to get the forecast; 'zhengzhou' is the
# pinyin for Zhengzhou:
# print(get_weather("zhengzhou"))
if __name__ == '__main__':
    # Demo: re-run the weather scraper every 3 seconds until interrupted.
    scheduler = BlockingScheduler(timezone="Asia/Shanghai")
    scheduler.add_job(get_weather, 'interval', seconds=3, args=['zhengzhou'])
    exit_key = 'Break' if os.name == 'nt' else 'C '
    print('Press Ctrl+{0} to exit'.format(exit_key))
    try:
        # BlockingScheduler.start() blocks this thread until interrupted.
        scheduler.start()
    except (KeyboardInterrupt, SystemExit):
        pass