先建立es的mapping,也就是在es中建立一个空的Index,代码如下:执行后就会在es中建立lagou这个index。
# es_type.py -- declarative Elasticsearch mapping for the "lagou" index,
# built with elasticsearch-dsl's Document/DocType API.
from datetime import datetime
from elasticsearch_dsl import DocType, Date, Nested, Boolean, analyzer, InnerDoc, Completion, Keyword, Text, Integer
from elasticsearch_dsl.connections import connections

# Register the default connection to the local Elasticsearch server.
connections.create_connection(hosts=["localhost"])


class LagouType(DocType):
    """Document type describing one Lagou job posting.

    Text fields are analyzed with the ik_max_word Chinese tokenizer so
    they support full-text search; Keyword fields are stored verbatim
    (exact match / aggregation only, no analysis).
    """

    # url_object_id = Keyword()
    url = Keyword()
    title = Text(analyzer="ik_max_word")
    salary = Keyword()
    job_city = Keyword()
    work_years = Text(analyzer="ik_max_word")
    degree_need = Keyword()
    job_type = Text(analyzer="ik_max_word")
    publish_time = Date()
    tags = Text(analyzer="ik_max_word")
    job_advantage = Text(analyzer="ik_max_word")
    job_desc = Text(analyzer="ik_max_word")
    job_addr = Text(analyzer="ik_max_word")
    company_url = Keyword()
    company_name = Text(analyzer="ik_max_word")
    crawl_time = Date()
    # min_salary = Integer()
    # max_salary = Integer()

    class Meta:
        index = 'lagou'      # index name (analogous to a database)
        doc_type = "jobs"    # document type name (analogous to a table)


if __name__ == "__main__":
    # Running this module directly creates the index and mapping in ES.
    LagouType.init()
接着在items中定义保存到es的代码,代码如下:
from lagou.models.es_type import LagouType

# Item fields that are copied verbatim into the ES document by save_to_es().
_ES_FIELDS = (
    "url", "title", "salary", "job_city", "work_years", "degree_need",
    "job_type", "publish_time", "tags", "job_advantage", "job_desc",
    "job_addr", "company_url", "company_name", "crawl_time",
)


class LagouJobItem(scrapy.Item):
    """Scrapy item holding one scraped Lagou job posting."""

    url_object_id = scrapy.Field()
    url = scrapy.Field()
    title = scrapy.Field()
    salary = scrapy.Field()
    job_city = scrapy.Field()
    work_years = scrapy.Field()
    degree_need = scrapy.Field()
    job_type = scrapy.Field()
    publish_time = scrapy.Field()
    tags = scrapy.Field()
    job_advantage = scrapy.Field()
    job_desc = scrapy.Field()
    job_addr = scrapy.Field()
    company_url = scrapy.Field()
    company_name = scrapy.Field()
    crawl_time = scrapy.Field()
    min_salary = scrapy.Field()
    max_salary = scrapy.Field()

    def save_to_es(self):
        """Copy this item's fields into a LagouType document and index it.

        Raises KeyError if any of the copied fields is missing from the item.
        """
        doc = LagouType()
        for field in _ES_FIELDS:
            setattr(doc, field, self[field])
        # url_object_id is used as the ES document id (presumably a hash of
        # the url so re-crawls overwrite the same document -- confirm).
        doc.meta.id = self["url_object_id"]
        doc.save()
        return
接下来就是在pipelines文件中定义保存到es的pipeline
class ElasticsearchPipline(object):
    """Scrapy pipeline that persists each crawled item to Elasticsearch.

    Indexing is delegated to the item's own ``save_to_es`` method; the
    item is then returned unchanged so later pipelines can process it.
    """

    def process_item(self, item, spider):
        # The item knows how to persist itself; `spider` is unused here.
        item.save_to_es()
        return item
之后就是到settings中进行设置,把这个pipeline加入到ITEM_PIPELINES中
'lagou.pipelines.ElasticsearchPipline':300
这样就可以将爬取到的数据保存到es中
详细说明:
elasticsearch官方也提供了一个python操作elasticsearch(搜索引擎)的接口包,就像sqlalchemy操作数据库一样的ORM框架,这样我们操作elasticsearch就不用写命令了,用elasticsearch-dsl-py这个模块来操作,也就是用python的方式操作一个类即可
elasticsearch-dsl-py下载
下载地址:https://github.com/elastic/elasticsearch-dsl-py
文档说明:http://elasticsearch-dsl.readthedocs.io/en/latest/
首先安装好elasticsearch-dsl-py模块
1、elasticsearch-dsl模块使用说明
create_connection(hosts=['127.0.0.1']):连接elasticsearch(搜索引擎)服务器方法,可以连接多台服务器
class Meta:设置索引名称和表名称
索引类名称.init(): 生成索引和表以及字段
实例化索引类.save():将数据写入elasticsearch(搜索引擎)
# Import the helper that manages connections to the Elasticsearch server.
from elasticsearch_dsl.connections import connections

# Connect to the local node (multiple hosts may be listed).
connections.create_connection(hosts=['127.0.0.1'])


class lagouType(DocType):  # subclass DocType to get the declarative mapping API
    # Text fields are analyzed (tokenized for the inverted index);
    # ik_max_word is the Chinese analyzer used for tokenization.
    title = Text(analyzer="ik_max_word")
    description = Text(analyzer="ik_max_word")
    keywords = Text(analyzer="ik_max_word")
    url = Keyword()  # Keyword: plain string, stored verbatim, never analyzed
    riqi = Date()    # Date field ("riqi" is Chinese for "date")

    class Meta:  # fixed name recognized by elasticsearch-dsl
        index = "lagou"     # index name (analogous to a database name)
        doc_type = 'jobs'   # document type name (analogous to a table name)


if __name__ == "__main__":
    # Only build the index/mapping when this file is run as a script,
    # not when it is imported by other modules.
    lagouType.init()