
Suppose a Lucene index with two fields: date and content. I want to get all term values and their frequencies for the documents whose date is yesterday. The date field is a keyword field; the content field is analyzed and indexed.

Please help me with some sample code.
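For reference, the index is built roughly like the minimal sketch below (Lucene 4.x API; the field names follow the description above, the sample values are illustrative, and content is stored as well as analyzed so its raw text remains retrievable):

import java.io.IOException;

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StringField;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.Version;

public class IndexSketch {
    public static void main(String[] args) throws IOException {
        Directory dir = new RAMDirectory(); // in-memory index for illustration
        IndexWriterConfig cfg = new IndexWriterConfig(Version.LUCENE_43,
                new StandardAnalyzer(Version.LUCENE_43));
        IndexWriter writer = new IndexWriter(dir, cfg);

        Document doc = new Document();
        // keyword field: indexed as a single un-analyzed term
        doc.add(new StringField("date", "20130723120000", Field.Store.YES));
        // analyzed and indexed; stored too, so the raw text stays available
        doc.add(new TextField("content", "the quick brown fox", Field.Store.YES));
        writer.addDocument(doc);
        writer.close();
    }
}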


1 Answer


My solution is below. It runs a TermRangeQuery over the date field to collect the IDs of all matching documents into a BitSet, re-analyzes the stored content of each hit to count term frequencies, and returns the ten most frequent terms as a space-separated string:

// Imports needed (Lucene 4.3):
import java.io.IOException;
import java.io.StringReader;
import java.util.ArrayList;
import java.util.BitSet;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.search.Collector;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.TermRangeQuery;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.Version;

/**
 * Returns the ten most frequent terms among documents whose date falls in
 * [fromDateTime, toDateTime).
 *
 * @param searcher
 *            searcher over the index to scan
 * @param fromDateTime
 *            inclusive lower bound, formatted yyyymmddhhmmss
 * @param toDateTime
 *            exclusive upper bound, formatted yyyymmddhhmmss
 * @return the top ten terms as a space-separated string
 */
static public String top10(IndexSearcher searcher, String fromDateTime,
        String toDateTime) {
    String top10Query = "";
    try {
        // half-open range over the keyword date field ("tweetDate" here):
        // fromDateTime inclusive, toDateTime exclusive
        Query query = new TermRangeQuery("tweetDate", new BytesRef(
                fromDateTime), new BytesRef(toDateTime), true, false);
        // mark every matching doc ID in a BitSet; we never need scores
        final BitSet bits = new BitSet(searcher.getIndexReader().maxDoc());
        searcher.search(query, new Collector() {

            private int docBase;

            @Override
            public void setScorer(Scorer scorer) throws IOException {
            }

            @Override
            public void setNextReader(AtomicReaderContext context)
                    throws IOException {
                // collect() receives segment-relative doc IDs; remember the
                // segment base to map them to index-wide IDs
                this.docBase = context.docBase;
            }

            @Override
            public void collect(int doc) throws IOException {
                bits.set(doc + docBase);
            }

            @Override
            public boolean acceptsDocsOutOfOrder() {
                return false;
            }
        });

        // analyzer used to re-tokenize the stored content of each hit;
        // EnglishStopWords is the author's own stop-word helper class
        Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_43,
                EnglishStopWords.getEnglishStopWords());

        // term -> occurrence count accumulated over all matched documents
        HashMap<String, Long> wordFrequency = new HashMap<>();
        for (int wx = 0; wx < bits.length(); ++wx) {
            if (bits.get(wx)) {
                Document wd = searcher.doc(wx); // load stored fields
                // re-analyze the stored content; the field name passed to
                // tokenStream() ("temp") only selects the analysis chain
                TokenStream tokenStream = analyzer.tokenStream("temp",
                        new StringReader(wd.get("content")));
                CharTermAttribute charTermAttribute = tokenStream
                        .addAttribute(CharTermAttribute.class);
                tokenStream.reset();
                while (tokenStream.incrementToken()) {
                    String term = charTermAttribute.toString();
                    if (term.length() < 2)
                        continue; // skip single-character tokens
                    Long wl = wordFrequency.get(term);
                    wordFrequency.put(term, wl == null ? 1L : wl + 1);
                }
                tokenStream.end();
                tokenStream.close();
            }
        }
        analyzer.close();

        // sort by frequency, descending: each term is prefixed with its
        // zero-padded count so lexicographic order matches numeric order
        List<String> occurterm = new ArrayList<String>();
        for (String ws : wordFrequency.keySet()) {
            occurterm.add(String.format("%06d\t%s", wordFrequency.get(ws),
                    ws));
        }
        Collections.sort(occurterm, Collections.reverseOrder());

        // build the query string from the ten most frequent terms
        int topCount = 10;
        for (String ws : occurterm) {
            if (topCount-- == 0)
                break;
            String[] tks = ws.split("\\t");
            top10Query += tks[1] + " ";
        }
        top10Query = top10Query.trim(); // trim() returns a new string
    } catch (IOException e) {
        e.printStackTrace();
    }
    // return top10 word string
    return top10Query;
}
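A hedged usage sketch for the "yesterday" case: since the TermRangeQuery above is half-open (lower bound inclusive, upper exclusive), yesterday's documents are exactly those in [yesterday 00:00:00, today 00:00:00). This assumes it runs in the same class as top10, and /path/to/index is a placeholder:

import java.io.File;
import java.text.SimpleDateFormat;
import java.util.Calendar;

import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public static void main(String[] args) throws IOException {
    Directory dir = FSDirectory.open(new File("/path/to/index")); // placeholder
    IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(dir));

    SimpleDateFormat fmt = new SimpleDateFormat("yyyyMMdd");
    Calendar cal = Calendar.getInstance();
    String todayStart = fmt.format(cal.getTime()) + "000000";
    cal.add(Calendar.DAY_OF_MONTH, -1);
    String yesterdayStart = fmt.format(cal.getTime()) + "000000";

    // [yesterday 00:00:00, today 00:00:00) matches the query's
    // includeLower=true / includeUpper=false flags
    System.out.println(top10(searcher, yesterdayStart, todayStart));
}

Note that re-analyzing the stored content of every hit is simple but touches all of the matched text; if the content field were indexed with term vectors instead, the per-document frequencies could be read straight from the index.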
answered 2013-07-24T13:00:59.507