1. Key points
""" logging : scrapy: settings中设置LOG_LEVEL="WARNING" settings中设置LOG_FILE="./log.log" #设置日志保存的位置,设置后在终端不会显示日志内容 import logging 实例化一个logger的方式在任何文件中使用logger输出内容 logger = logging.getLogger(__name__) #实例化 普通项目中: import logging logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s', datefmt='%a, %d %b %Y %H:%M:%S', filename='myapp.log', filemode='w') #设置日志输出格式 实例化一个ogger = logging.getLogger(__name__) 在任何py文件中调用logger即可 """
2. Using logging in a Scrapy project
# -*- coding: utf-8 -*-
import scrapy
import logging

logger = logging.getLogger(__name__)


class JulyeduSpider(scrapy.Spider):
    name = 'julyedu'
    allowed_domains = ['julyedu.com']
    start_urls = ['http://julyedu.com/']

    # The name of this parse method must not be changed
    def parse(self, response):
        """
        Crawl the instructor list from julyedu.com
        :param response:
        :return:
        """
        list_li = response.xpath("//div[@class='swiper-wrapper']//li")
        # print(list_li)
        for li in list_li:
            # Create a fresh dict for each instructor so earlier yielded
            # items are not overwritten by later mutations
            item = {}
            item["name"] = li.xpath(".//h3/text()").extract_first()
            item["content"] = li.xpath(".//p[@class='teacherBrief']/text()").extract_first()
            # item["content"] = li.xpath(".//p[@class='teacherIntroduction']/text()").extract_first()
            # print(item)
            # Pass the data on to the pipelines; yield here accepts only
            # four types: Request, BaseItem, dict, or None
            logger.warning(item)  # log the item
            yield item
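Since each yielded dict ends up in the pipelines, the same getLogger(__name__) pattern works there too. Below is a minimal sketch of a hypothetical pipelines.py; the class name JulyeduPipeline and the choice of warning-level logging are assumptions, not part of the original project:

# pipelines.py
import logging

logger = logging.getLogger(__name__)  # logger named after this module


class JulyeduPipeline(object):
    def process_item(self, item, spider):
        # Log the received item; the record shows it came from this module
        logger.warning(item)
        return item  # return the item so any later pipeline can process it

Remember that a pipeline only runs if it is enabled under ITEM_PIPELINES in settings.py.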
3. Using logging in an ordinary project
a) Create a reusable log_a.py
# -*- coding: utf-8 -*-
import logging

logging.basicConfig(level=logging.DEBUG,
                    format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
                    datefmt='%a, %d %b %Y %H:%M:%S',
                    filename='myapp.log',
                    filemode='w')

logger = logging.getLogger(__name__)

if __name__ == '__main__':
    logger.info("this is a log")
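Note that filemode='w' truncates myapp.log every time the program starts; change it to filemode='a' if you want records from successive runs to accumulate.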
b) Use the shared log_a.py from log_b.py
# -*- coding: utf-8 -*-
from log_a import logger

if __name__ == '__main__':
    logger.warning("message from log_b")
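With the format and datefmt strings configured in log_a.py, the record written to myapp.log should look roughly like the line below (the timestamp and line number are illustrative):

Mon, 01 Jan 2024 12:00:00 log_b.py[line:5] WARNING message from log_b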