from urllib import request,parse
# Fetch a page over HTTP GET and save the response body to a local file.
url = 'http://www.xxx.cn'
# Context managers guarantee the socket and the file handle are closed even
# if an exception occurs (the original leaked both).
with request.urlopen(url) as req:  # opening a URL with no data issues a GET
    content = req.read().decode()
with open('baidu.html', 'w', encoding='utf-8') as fw:
    fw.write(content)
# Web scraping: fetch useful content from other sites.
import json
# Call a JSON API and check the error code in its response.
url = 'http://api.xxx.xx/api/user/stu_info?stu_name=xxx'
# `with` closes the underlying socket when done (original never closed it).
with request.urlopen(url) as req:
    content = req.read().decode()
res_dic = json.loads(content)
# NOTE(review): error_code == 0 appears to mean success -- inferred from the
# original check; confirm against the API documentation.
if res_dic.get('error_code') == 0:
    print('测试通过')
else:
    print('测试失败', res_dic)
# Send a POST request with url-encoded form fields in the request body.
url = 'http://api.xxx.xx/api/user/lxxin'
data = {
    'username': 'xxxxxxx',
    'passwd': 'aA123456'
}
data = parse.urlencode(data)  # builds 'username=...&passwd=...' automatically
# Supplying a (bytes) data payload makes urlopen issue a POST instead of a GET.
# `with` ensures the response socket is closed (original leaked it).
with request.urlopen(url, data.encode()) as req:
    print(req.read().decode())