1. Import the required JARs
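The original does not list the exact JARs; the sketch below is one plausible Maven setup, assuming Lucene 4.10.x (which matches the `IndexWriterConfig(Version.LATEST, analyzer)` API used later) plus a separately obtained IK Analyzer jar. Adjust artifacts and versions to whatever your project actually uses.

```xml
<!-- Assumed versions; align these with your own project. -->
<dependencies>
    <dependency>
        <groupId>org.apache.lucene</groupId>
        <artifactId>lucene-core</artifactId>
        <version>4.10.3</version>
    </dependency>
    <dependency>
        <groupId>org.apache.lucene</groupId>
        <artifactId>lucene-analyzers-common</artifactId>
        <version>4.10.3</version>
    </dependency>
    <dependency>
        <groupId>org.apache.lucene</groupId>
        <artifactId>lucene-queryparser</artifactId>
        <version>4.10.3</version>
    </dependency>
    <!-- The IK Analyzer (org.wltea.analyzer.lucene.IKAnalyzer) is not part of Lucene;
         add its jar (e.g. IKAnalyzer2012FF) to the classpath separately. -->
</dependencies>
```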
2. Creating the index
2.1 First, define an analyzer.
```java
Analyzer analyzer = new IKAnalyzer();
// Officially recommended default: Analyzer analyzer = new StandardAnalyzer();
```
2.2 Second, decide where the index files are stored. Lucene offers two options:
2.2.1 Local file storage
```java
Directory directory = FSDirectory.open(new File("D:\\JavaWeb\\Lucene"));
```
2.2.2 In-memory storage
```java
Directory directory = new RAMDirectory();
```
2.3 Third, create an IndexWriter to write the index files.
```java
IndexWriterConfig config = new IndexWriterConfig(Version.LATEST, analyzer);
IndexWriter indexWriter = new IndexWriter(directory, config);
```
2.4 Fourth, extract the content and write it into the index.
```java
Document doc = new Document();                    // A Document is roughly a row in a database table.
String text = "This is the text to be indexed.";  // The string to be indexed
Field fileNameField = new TextField("fileName", text, Store.YES);
doc.add(fileNameField);                           // Store the string in the document
indexWriter.addDocument(doc);                     // Add the document to the index
indexWriter.close();                              // Close the IndexWriter and commit what was written
```
Common Lucene Field types

| Field | Description |
| --- | --- |
| IntField | Stores int values. To sort on an IntField, compare with SortField.Type.INT; for range queries or filters, use NumericRangeQuery.newIntRange(). |
| LongField | Stores long values. Sort with SortField.Type.LONG; for range queries or filters, use NumericRangeQuery.newLongRange(). Commonly used for timestamps, e.g. storing System.currentTimeMillis(). |
| FloatField | Stores float values. Sort with SortField.Type.FLOAT; for range queries, use NumericRangeQuery.newFloatRange(). |
| BinaryDocValuesField | Stores per-document values without sharing them; use SortedDocValuesField if values should be shared. |
| NumericDocValuesField | Used for (pre-)sorting numeric fields; add a NumericDocValuesField with the same name alongside the field to be sorted. |
| SortedDocValuesField | Used for sorting String fields; add a SortedDocValuesField with the same name alongside the StringField. |
| StringField | Stores String values; a StringField is indexed but not tokenized. |
| TextField | Stores String values; unlike StringField, a TextField is both indexed and tokenized. |
| StoredField | Stores a field's value; the field and its stored value can be retrieved with IndexSearcher.doc and IndexReader.document. |
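As the table notes, sorting on a field requires a doc-values field with the same name next to the indexed field. Below is a minimal sketch of that pairing and of a sorted search; the field name and values are illustrative, it reuses the `indexWriter`/`indexSearcher` from the surrounding sections, and it additionally needs `NumericDocValuesField`, `Sort`, and `SortField` imports.

```java
// At index time: pair the stored/indexed field with a same-named doc-values field.
Document doc = new Document();
doc.add(new LongField("WordsCount", 660000L, Store.YES));   // indexed + stored
doc.add(new NumericDocValuesField("WordsCount", 660000L));  // enables (pre-)sorting
indexWriter.addDocument(doc);

// At search time: sort the results on that field.
Sort sort = new Sort(new SortField("WordsCount", SortField.Type.LONG));
TopDocs topDocs = indexSearcher.search(new MatchAllDocsQuery(), 10, sort);
```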
Working example
```java
@Test
public void testIndex() throws Exception {
    ApplicationContext ac = new ClassPathXmlApplicationContext("applicationContext.xml");
    BooksMapper booksMapper = ac.getBean(BooksMapper.class);
    /*Books book = booksMapper.selectByPrimaryKey(4939);
    System.out.println(book.getTitle());*/
    List<Books> listBooks = booksMapper.selectBookList();
    System.out.println(listBooks.size());
    Analyzer analyzer = new IKAnalyzer();
    Directory directory = FSDirectory.open(new File("D:\\JavaWeb\\Lucene"));
    IndexWriterConfig config = new IndexWriterConfig(Version.LATEST, analyzer);
    IndexWriter indexWriter = new IndexWriter(directory, config);
    for (int i = 0; i < listBooks.size(); i++) {
        Document doc = new Document(); // A Document is roughly a row in a database table.
        String title = listBooks.get(i).getTitle();
        Field filedTitle = new TextField("title", title, Store.YES);
        doc.add(filedTitle);
        String isbn = listBooks.get(i).getIsbn();
        Field filedISBN = new TextField("isbn", isbn, Store.YES);
        doc.add(filedISBN);
        int wordsCount = listBooks.get(i).getWordscount();
        Field filedWordsCount = new LongField("WordsCount", wordsCount, Store.YES);
        doc.add(filedWordsCount);
        indexWriter.addDocument(doc); // Add the document to the index
    }
    indexWriter.close(); // Close the IndexWriter and commit what was written
}
```
Inspect the resulting index with luke-5.0 (change to the directory containing luke-5.0 and run `java -jar luke-5.0.jar`).
3. Querying the index
```java
@Test
public void testSearch() throws Exception {
    // Step 1: create a Directory object, i.e. where the index library is stored.
    Directory directory = FSDirectory.open(new File("D:\\temp\\index")); // on disk
    // Step 2: create an IndexReader from the Directory.
    IndexReader indexReader = DirectoryReader.open(directory);
    // Step 3: create an IndexSearcher from the IndexReader.
    IndexSearcher indexSearcher = new IndexSearcher(indexReader);
    // Step 4: create a TermQuery, specifying the field and the search term.
    Query query = new TermQuery(new Term("fileName", "lucene"));
    // Step 5: execute the query.
    TopDocs topDocs = indexSearcher.search(query, 10);
    // Step 6: iterate over the results and print them.
    ScoreDoc[] scoreDocs = topDocs.scoreDocs;
    for (ScoreDoc scoreDoc : scoreDocs) {
        int doc = scoreDoc.doc;
        Document document = indexSearcher.doc(doc);
        // File name
        String fileName = document.get("fileName");
        System.out.println(fileName);
        // File content
        String fileContent = document.get("fileContent");
        System.out.println(fileContent);
        // File size
        String fileSize = document.get("fileSize");
        System.out.println(fileSize);
        // File path
        String filePath = document.get("filePath");
        System.out.println(filePath);
        System.out.println("------------");
    }
    // Step 7: close the IndexReader.
    indexReader.close();
}
```
4. Viewing an analyzer's tokenization output
```java
@Test
public void testTokenStream() throws Exception {
    // Create an analyzer
    //Analyzer analyzer = new StandardAnalyzer();
    //Analyzer analyzer = new CJKAnalyzer();
    //Analyzer analyzer = new SmartChineseAnalyzer();
    Analyzer analyzer = new IKAnalyzer();
    // Obtain a TokenStream
    // Parameter 1: field name (any value works here)
    // Parameter 2: the text to analyze
    //TokenStream tokenStream = analyzer.tokenStream("test",
    //        "The Spring Framework provides a comprehensive programming and configuration model.");
    TokenStream tokenStream = analyzer.tokenStream("test", "高富帅可以用二维表结构来逻辑表达实现的数据");
    // Add an attribute that exposes each token's text
    CharTermAttribute charTermAttribute = tokenStream.addAttribute(CharTermAttribute.class);
    // Add an attribute that records each token's start and end offsets
    OffsetAttribute offsetAttribute = tokenStream.addAttribute(OffsetAttribute.class);
    // Reset the stream to the beginning
    tokenStream.reset();
    // incrementToken() returns false when the token list is exhausted
    while (tokenStream.incrementToken()) {
        // Token start offset
        System.out.println("start->" + offsetAttribute.startOffset());
        // Token text
        System.out.println(charTermAttribute);
        // Token end offset
        System.out.println("end->" + offsetAttribute.endOffset());
    }
    tokenStream.close();
}
```
5. IKAnalyzer word segmentation
5.1 IKAnalyzer.cfg.xml
```xml
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE properties SYSTEM "http://java.sun.com/dtd/properties.dtd">
<properties>
    <comment>IK Analyzer extension configuration</comment>
    <!-- Configure your own extension dictionary here -->
    <entry key="ext_dict">ext.dic;</entry>
    <!-- Configure your own extension stop-word dictionary here -->
    <entry key="ext_stopwords">stopword.dic;</entry>
</properties>
```
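A note on file locations (an assumption based on common IK Analyzer setups, not stated in the original): IKAnalyzer.cfg.xml, ext.dic, and stopword.dic are usually placed at the classpath root, and the dictionary files should be saved as UTF-8 without a BOM so the entries are picked up.

```
src/main/resources/
├── IKAnalyzer.cfg.xml
├── ext.dic
└── stopword.dic
```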
5.2 Extension dictionary ext.dic
高富帅
二维表
5.3 Stop-word dictionary stopword.dic
我
是
用
的
二
维
表
来
a
an
and
are
as
at
be
but
by
for
if
in
into
is
it
no
not
of
on
or
such
that
the
their
then
there
these
they
this
to
was
will
with
6. Encapsulating a LuceneHelper
```java
package com.mf.lucene;

import java.io.File;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.TextField;
import org.apache.lucene.document.Field.Store;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
import org.apache.lucene.queryparser.classic.MultiFieldQueryParser;
import org.apache.lucene.queryparser.classic.QueryParser;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.NumericRangeQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.Version;
import org.junit.Test;
import org.wltea.analyzer.lucene.IKAnalyzer;

public class LuceneHelper {

    public IndexWriter getIndexWriter() throws Exception {
        Directory directory = FSDirectory.open(new File("D:\\JavaWeb\\Lucene"));
        // Directory directory = new RAMDirectory(); // keep the index in memory (in-memory index library)
        Analyzer analyzer = new IKAnalyzer();
        IndexWriterConfig config = new IndexWriterConfig(Version.LATEST, analyzer);
        return new IndexWriter(directory, config);
    }

    // Delete everything
    @Test
    public void testAllDelete() throws Exception {
        IndexWriter indexWriter = getIndexWriter();
        indexWriter.deleteAll();
        indexWriter.close();
    }

    // Delete by query
    @Test
    public void testDelete() throws Exception {
        IndexWriter indexWriter = getIndexWriter();
        Query query = new TermQuery(new Term("title", "c#"));
        indexWriter.deleteDocuments(query);
        indexWriter.close();
    }

    // Update
    @Test
    public void testUpdate() throws Exception {
        IndexWriter indexWriter = getIndexWriter();
        Document doc = new Document();
        doc.add(new TextField("fileN", "测试文件名", Store.YES));
        doc.add(new TextField("fileC", "测试文件内容", Store.YES));
        indexWriter.updateDocument(new Term("isbn", "9787115155108"), doc, new IKAnalyzer());
        indexWriter.close();
    }

    // IndexReader / IndexSearcher
    public IndexSearcher getIndexSearcher() throws Exception {
        // Step 1: create a Directory object, i.e. where the index library is stored.
        Directory directory = FSDirectory.open(new File("D:\\JavaWeb\\Lucene")); // on disk
        // Step 2: create an IndexReader from the Directory.
        IndexReader indexReader = DirectoryReader.open(directory);
        // Step 3: create an IndexSearcher from the IndexReader.
        return new IndexSearcher(indexReader);
    }

    // Print the results of a query
    public void printResult(IndexSearcher indexSearcher, Query query) throws Exception {
        // Step 5: execute the query.
        TopDocs topDocs = indexSearcher.search(query, 10);
        // Step 6: iterate over the results and print them.
        ScoreDoc[] scoreDocs = topDocs.scoreDocs;
        for (ScoreDoc scoreDoc : scoreDocs) {
            int doc = scoreDoc.doc;
            Document document = indexSearcher.doc(doc);
            // Title
            String title = document.get("title");
            System.out.println(title);
            // Word count
            String wordsCount = document.get("WordsCount");
            System.out.println(wordsCount);
            System.out.println("------------");
        }
    }

    // Match all documents
    @Test
    public void testMatchAllDocsQuery() throws Exception {
        IndexSearcher indexSearcher = getIndexSearcher();
        Query query = new MatchAllDocsQuery();
        System.out.println(query);
        printResult(indexSearcher, query);
        // Release resources
        indexSearcher.getIndexReader().close();
    }

    // Query by numeric range
    @Test
    public void testNumericRangeQuery() throws Exception {
        IndexSearcher indexSearcher = getIndexSearcher();
        Query query = NumericRangeQuery.newLongRange("WordsCount", 0L, 10000L, true, true);
        System.out.println(query);
        printResult(indexSearcher, query);
        // Release resources
        indexSearcher.getIndexReader().close();
    }

    // Combine query clauses
    @Test
    public void testBooleanQuery() throws Exception {
        IndexSearcher indexSearcher = getIndexSearcher();
        BooleanQuery booleanQuery = new BooleanQuery();
        Query query1 = new TermQuery(new Term("title", "c#"));
        Query query2 = new TermQuery(new Term("WordsCount", "660000"));
        // Roughly: select * from books where title = 'c#' and WordsCount = '660000'
        booleanQuery.add(query1, Occur.MUST);
        booleanQuery.add(query2, Occur.MUST);
        System.out.println(booleanQuery);
        printResult(indexSearcher, booleanQuery);
        // Release resources
        indexSearcher.getIndexReader().close();
    }

    // Query through a query parser
    @Test
    public void testQueryParser() throws Exception {
        IndexSearcher indexSearcher = getIndexSearcher();
        // Parameter 1: default field to query
        // Parameter 2: analyzer to use
        QueryParser queryParser = new QueryParser("title", new IKAnalyzer());
        // Syntax: field:value
        Query query = queryParser.parse("title:c#");
        printResult(indexSearcher, query);
        // Release resources
        indexSearcher.getIndexReader().close();
    }

    // Query parser with multiple default fields
    @Test
    public void testMultiFieldQueryParser() throws Exception {
        IndexSearcher indexSearcher = getIndexSearcher();
        String[] fields = {"title", "isbn"};
        // Parameter 1: default fields to query
        // Parameter 2: analyzer to use
        MultiFieldQueryParser queryParser = new MultiFieldQueryParser(fields, new IKAnalyzer());
        Query query = queryParser.parse("c#");
        printResult(indexSearcher, query);
        // Release resources
        indexSearcher.getIndexReader().close();
    }
}
```