• Lucene 3.3 unigram (single-character CJK) tokenization query example


    import java.io.File;
    import java.io.IOException;
    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.Field;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.index.IndexWriterConfig.OpenMode;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.FSDirectory;
    import org.apache.lucene.util.Version;


    public class test2 {
        private static String indexPath = "D:\\test\\index"; // directory where the index is stored
        /**
         * @param args
         */
        public static void main(String[] args) {
            // TODO Auto-generated method stub
            try {


                Directory dir = FSDirectory.open(new File(indexPath));
                Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_31);
                IndexWriterConfig iwc = new IndexWriterConfig(Version.LUCENE_31, analyzer);

                // To rebuild the index from scratch, removing any previously
                // indexed documents, use OpenMode.CREATE instead:
                // iwc.setOpenMode(OpenMode.CREATE);

                // Add new documents to an existing index (creating it if it does not exist yet):
                iwc.setOpenMode(OpenMode.CREATE_OR_APPEND);

                // Optional: for better indexing performance, if you
                // are indexing many documents, increase the RAM
                // buffer.  But if you do this, increase the max heap
                // size to the JVM (eg add -Xmx512m or -Xmx1g):
                //
                // iwc.setRAMBufferSizeMB(256.0);

                IndexWriter writer = new IndexWriter(dir, iwc);
                Document doc = new Document();
                // Store the title and analyze it; since Lucene 3.1 StandardAnalyzer
                // splits CJK text into single-character (unigram) tokens.
                Field f = new Field("title", "诺基亚返乡贴补n95", Field.Store.YES, Field.Index.ANALYZED);
                doc.add(f);
                writer.addDocument(doc);
                // NOTE: if you want to maximize search performance,
                // you can optionally call optimize here.  This can be
                // a costly operation, so generally it's only worth
                // it when your index is relatively static (ie you're
                // done adding documents to it):
                //
                // writer.optimize();

                writer.close();

                System.out.println(" caught b " );


            } catch (IOException e) {
                System.out.println("caught IOException while indexing: " + e.getMessage());
            }
        }

    }
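
Why this counts as unigram segmentation: since Lucene 3.1, StandardAnalyzer breaks CJK text into one token per character, so the indexed title above becomes the terms 诺 / 基 / 亚 / 返 / 乡 / 贴 / 补 plus the alphanumeric token n95. A minimal sketch to verify this, assuming the same Lucene 3.x jars as the example (the class name TokenDemo is made up for illustration):

    import java.io.IOException;
    import java.io.StringReader;
    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
    import org.apache.lucene.util.Version;

    public class TokenDemo {
        public static void main(String[] args) throws IOException {
            Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_31);
            // Tokenize the same title text that the indexing example stores.
            TokenStream ts = analyzer.tokenStream("title", new StringReader("诺基亚返乡贴补n95"));
            CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
            ts.reset();
            while (ts.incrementToken()) {
                // Expected output: one line per CJK character, then "n95".
                System.out.println(term.toString());
            }
            ts.end();
            ts.close();
        }
    }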


    import java.io.File;
    import java.io.IOException;

    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.document.Document;
    import org.apache.lucene.index.CorruptIndexException;
    import org.apache.lucene.queryParser.ParseException;
    import org.apache.lucene.queryParser.QueryParser;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.Query;
    import org.apache.lucene.search.ScoreDoc;
    import org.apache.lucene.search.TopScoreDocCollector;
    import org.apache.lucene.store.FSDirectory;
    import org.apache.lucene.util.Version;


    public class query {
        private static String indexPath = "D:\\test\\index"; // directory where the index is stored
        /**
         * @param args
         * @throws IOException
         * @throws CorruptIndexException
         * @throws ParseException
         */
        public static void main(String[] args) throws CorruptIndexException, IOException, ParseException {
            // TODO Auto-generated method stub
            IndexSearcher searcher = new IndexSearcher(FSDirectory.open(new File(indexPath)));
            Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_31);
            QueryParser parser = new QueryParser(Version.LUCENE_31, "title", analyzer); // changed API: the constructor now takes a Version argument
            Query query = parser.parse("诺基亚95"); 
           
            TopScoreDocCollector collector = TopScoreDocCollector.create(100, false); // changed API: use the static create() factory
            searcher.search(query, collector); 
            ScoreDoc[] hits = collector.topDocs().scoreDocs;           
           
            System.out.println(hits.length); 
            for (int i = 0; i < hits.length; i++) { 
                Document doc = searcher.doc(hits[i].doc); // load the stored document for this hit
                System.out.println(doc.getField("title") + "   " + hits[i]);
            } 
         
            System.out.println("Found " + collector.getTotalHits());         

           }

    }
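
To see why the query "诺基亚95" matches the stored title even though it is not a literal substring, it helps to print the parsed query: the analyzer splits the input into the unigram terms 诺, 基, 亚 and the number 95, and QueryParser's default OR operator turns any matching term into a hit. A small sketch, assuming the same setup as above (the class name CheckParse is made up for illustration):

    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.queryParser.ParseException;
    import org.apache.lucene.queryParser.QueryParser;
    import org.apache.lucene.search.Query;
    import org.apache.lucene.util.Version;

    public class CheckParse {
        public static void main(String[] args) throws ParseException {
            QueryParser parser = new QueryParser(Version.LUCENE_31, "title",
                    new StandardAnalyzer(Version.LUCENE_31));
            Query q = parser.parse("诺基亚95");
            // Expected to print something like: title:诺 title:基 title:亚 title:95
            // (separate unigram terms combined with the parser's default OR operator)
            System.out.println(q);
        }
    }

The term 95 does not occur in the index (the stored title was analyzed to n95), but the three unigram CJK terms do, which is enough for the OR query to return the document.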


    Reference:
    http://cumtfirefly.iteye.com/blog/543664


