code
stringlengths
1
2.01M
repo_name
stringlengths
3
62
path
stringlengths
1
267
language
stringclasses
231 values
license
stringclasses
13 values
size
int64
1
2.01M
package org.apache.lucene.search; /** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import java.io.IOException; /** * This abstract class defines methods to iterate over a set of non-decreasing * doc ids. Note that this class assumes it iterates on doc Ids, and therefore * {@link #NO_MORE_DOCS} is set to {@value #NO_MORE_DOCS} in order to be used as * a sentinel object. Implementations of this class are expected to consider * {@link Integer#MAX_VALUE} as an invalid value. */ public abstract class DocIdSetIterator { private int doc = -1; /** * When returned by {@link #nextDoc()}, {@link #advance(int)} and * {@link #docID()} it means there are no more docs in the iterator. */ public static final int NO_MORE_DOCS = Integer.MAX_VALUE; /** * Returns the following: * <ul> * <li>-1 or {@link #NO_MORE_DOCS} if {@link #nextDoc()} or * {@link #advance(int)} were not called yet. * <li>{@link #NO_MORE_DOCS} if the iterator has exhausted. * <li>Otherwise it should return the doc ID it is currently on. 
* </ul> * <p> * * @since 2.9 */ public abstract int docID(); /** * Advances to the next document in the set and returns the doc it is * currently on, or {@link #NO_MORE_DOCS} if there are no more docs in the * set.<br> * * <b>NOTE:</b> after the iterator has exhausted you should not call this * method, as it may result in unpredicted behavior. * * @since 2.9 */ public abstract int nextDoc() throws IOException; /** * Advances to the first beyond the current whose document number is greater * than or equal to <i>target</i>. Returns the current document number or * {@link #NO_MORE_DOCS} if there are no more docs in the set. * <p> * Behaves as if written: * * <pre> * int advance(int target) { * int doc; * while ((doc = nextDoc()) &lt; target) { * } * return doc; * } * </pre> * * Some implementations are considerably more efficient than that. * <p> * <b>NOTE:</b> certain implementations may return a different value (each * time) if called several times in a row with the same target. * <p> * <b>NOTE:</b> this method may be called with {@value #NO_MORE_DOCS} for * efficiency by some Scorers. If your implementation cannot efficiently * determine that it should exhaust, it is recommended that you check for that * value in each call to this method. * <p> * <b>NOTE:</b> after the iterator has exhausted you should not call this * method, as it may result in unpredicted behavior. * <p> * * @since 2.9 */ public abstract int advance(int target) throws IOException; }
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/search/DocIdSetIterator.java
Java
art
3,446
package org.apache.lucene.search; /** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import org.apache.lucene.index.FieldInvertState; import org.apache.lucene.index.Term; import org.apache.lucene.search.Explanation.IDFExplanation; import org.apache.lucene.util.SmallFloat; import java.io.IOException; import java.io.Serializable; import java.util.Collection; import java.util.IdentityHashMap; /** * Expert: Scoring API. * * <p>Similarity defines the components of Lucene scoring. * Overriding computation of these components is a convenient * way to alter Lucene scoring. * * <p>Suggested reading: * <a href="http://nlp.stanford.edu/IR-book/html/htmledition/queries-as-vectors-1.html"> * Introduction To Information Retrieval, Chapter 6</a>. * * <p>The following describes how Lucene scoring evolves from * underlying information retrieval models to (efficient) implementation. * We first brief on <i>VSM Score</i>, * then derive from it <i>Lucene's Conceptual Scoring Formula</i>, * from which, finally, evolves <i>Lucene's Practical Scoring Function</i> * (the latter is connected directly with Lucene classes and methods). 
* * <p>Lucene combines * <a href="http://en.wikipedia.org/wiki/Standard_Boolean_model"> * Boolean model (BM) of Information Retrieval</a> * with * <a href="http://en.wikipedia.org/wiki/Vector_Space_Model"> * Vector Space Model (VSM) of Information Retrieval</a> - * documents "approved" by BM are scored by VSM. * * <p>In VSM, documents and queries are represented as * weighted vectors in a multi-dimensional space, * where each distinct index term is a dimension, * and weights are * <a href="http://en.wikipedia.org/wiki/Tfidf">Tf-idf</a> values. * * <p>VSM does not require weights to be <i>Tf-idf</i> values, * but <i>Tf-idf</i> values are believed to produce search results of high quality, * and so Lucene is using <i>Tf-idf</i>. * <i>Tf</i> and <i>Idf</i> are described in more detail below, * but for now, for completion, let's just say that * for given term <i>t</i> and document (or query) <i>x</i>, * <i>Tf(t,x)</i> varies with the number of occurrences of term <i>t</i> in <i>x</i> * (when one increases so does the other) and * <i>idf(t)</i> similarly varies with the inverse of the * number of index documents containing term <i>t</i>. 
* * <p><i>VSM score</i> of document <i>d</i> for query <i>q</i> is the * <a href="http://en.wikipedia.org/wiki/Cosine_similarity"> * Cosine Similarity</a> * of the weighted query vectors <i>V(q)</i> and <i>V(d)</i>: * * <br>&nbsp;<br> * <table cellpadding="2" cellspacing="2" border="0" align="center"> * <tr><td> * <table cellpadding="1" cellspacing="0" border="1" align="center"> * <tr><td> * <table cellpadding="2" cellspacing="2" border="0" align="center"> * <tr> * <td valign="middle" align="right" rowspan="1"> * cosine-similarity(q,d) &nbsp; = &nbsp; * </td> * <td valign="middle" align="center"> * <table> * <tr><td align="center"><small>V(q)&nbsp;&middot;&nbsp;V(d)</small></td></tr> * <tr><td align="center">&ndash;&ndash;&ndash;&ndash;&ndash;&ndash;&ndash;&ndash;&ndash;</td></tr> * <tr><td align="center"><small>|V(q)|&nbsp;|V(d)|</small></td></tr> * </table> * </td> * </tr> * </table> * </td></tr> * </table> * </td></tr> * <tr><td> * <center><font=-1><u>VSM Score</u></font></center> * </td></tr> * </table> * <br>&nbsp;<br> * * * Where <i>V(q)</i> &middot; <i>V(d)</i> is the * <a href="http://en.wikipedia.org/wiki/Dot_product">dot product</a> * of the weighted vectors, * and <i>|V(q)|</i> and <i>|V(d)|</i> are their * <a href="http://en.wikipedia.org/wiki/Euclidean_norm#Euclidean_norm">Euclidean norms</a>. * * <p>Note: the above equation can be viewed as the dot product of * the normalized weighted vectors, in the sense that dividing * <i>V(q)</i> by its euclidean norm is normalizing it to a unit vector. * * <p>Lucene refines <i>VSM score</i> for both search quality and usability: * <ul> * <li>Normalizing <i>V(d)</i> to the unit vector is known to be problematic in that * it removes all document length information. * For some documents removing this info is probably ok, * e.g. a document made by duplicating a certain paragraph <i>10</i> times, * especially if that paragraph is made of distinct terms. 
* But for a document which contains no duplicated paragraphs, * this might be wrong. * To avoid this problem, a different document length normalization * factor is used, which normalizes to a vector equal to or larger * than the unit vector: <i>doc-len-norm(d)</i>. * </li> * * <li>At indexing, users can specify that certain documents are more * important than others, by assigning a document boost. * For this, the score of each document is also multiplied by its boost value * <i>doc-boost(d)</i>. * </li> * * <li>Lucene is field based, hence each query term applies to a single * field, document length normalization is by the length of the certain field, * and in addition to document boost there are also document fields boosts. * </li> * * <li>The same field can be added to a document during indexing several times, * and so the boost of that field is the multiplication of the boosts of * the separate additions (or parts) of that field within the document. * </li> * * <li>At search time users can specify boosts to each query, sub-query, and * each query term, hence the contribution of a query term to the score of * a document is multiplied by the boost of that query term <i>query-boost(q)</i>. * </li> * * <li>A document may match a multi term query without containing all * the terms of that query (this is correct for some of the queries), * and users can further reward documents matching more query terms * through a coordination factor, which is usually larger when * more terms are matched: <i>coord-factor(q,d)</i>. 
* </li> * </ul> * * <p>Under the simplifying assumption of a single field in the index, * we get <i>Lucene's Conceptual scoring formula</i>: * * <br>&nbsp;<br> * <table cellpadding="2" cellspacing="2" border="0" align="center"> * <tr><td> * <table cellpadding="1" cellspacing="0" border="1" align="center"> * <tr><td> * <table cellpadding="2" cellspacing="2" border="0" align="center"> * <tr> * <td valign="middle" align="right" rowspan="1"> * score(q,d) &nbsp; = &nbsp; * <font color="#FF9933">coord-factor(q,d)</font> &middot; &nbsp; * <font color="#CCCC00">query-boost(q)</font> &middot; &nbsp; * </td> * <td valign="middle" align="center"> * <table> * <tr><td align="center"><small><font color="#993399">V(q)&nbsp;&middot;&nbsp;V(d)</font></small></td></tr> * <tr><td align="center">&ndash;&ndash;&ndash;&ndash;&ndash;&ndash;&ndash;&ndash;&ndash;</td></tr> * <tr><td align="center"><small><font color="#FF33CC">|V(q)|</font></small></td></tr> * </table> * </td> * <td valign="middle" align="right" rowspan="1"> * &nbsp; &middot; &nbsp; <font color="#3399FF">doc-len-norm(d)</font> * &nbsp; &middot; &nbsp; <font color="#3399FF">doc-boost(d)</font> * </td> * </tr> * </table> * </td></tr> * </table> * </td></tr> * <tr><td> * <center><font=-1><u>Lucene Conceptual Scoring Formula</u></font></center> * </td></tr> * </table> * <br>&nbsp;<br> * * <p>The conceptual formula is a simplification in the sense that (1) terms and documents * are fielded and (2) boosts are usually per query term rather than per query. * * <p>We now describe how Lucene implements this conceptual scoring formula, and * derive from it <i>Lucene's Practical Scoring Function</i>. * * <p>For efficient score computation some scoring components * are computed and aggregated in advance: * * <ul> * <li><i>Query-boost</i> for the query (actually for each query term) * is known when search starts. 
* </li> * * <li>Query Euclidean norm <i>|V(q)|</i> can be computed when search starts, * as it is independent of the document being scored. * From search optimization perspective, it is a valid question * why bother to normalize the query at all, because all * scored documents will be multiplied by the same <i>|V(q)|</i>, * and hence documents ranks (their order by score) will not * be affected by this normalization. * There are two good reasons to keep this normalization: * <ul> * <li>Recall that * <a href="http://en.wikipedia.org/wiki/Cosine_similarity"> * Cosine Similarity</a> can be used find how similar * two documents are. One can use Lucene for e.g. * clustering, and use a document as a query to compute * its similarity to other documents. * In this use case it is important that the score of document <i>d3</i> * for query <i>d1</i> is comparable to the score of document <i>d3</i> * for query <i>d2</i>. In other words, scores of a document for two * distinct queries should be comparable. * There are other applications that may require this. * And this is exactly what normalizing the query vector <i>V(q)</i> * provides: comparability (to a certain extent) of two or more queries. * </li> * * <li>Applying query normalization on the scores helps to keep the * scores around the unit vector, hence preventing loss of score data * because of floating point precision limitations. * </li> * </ul> * </li> * * <li>Document length norm <i>doc-len-norm(d)</i> and document * boost <i>doc-boost(d)</i> are known at indexing time. * They are computed in advance and their multiplication * is saved as a single value in the index: <i>norm(d)</i>. * (In the equations below, <i>norm(t in d)</i> means <i>norm(field(t) in doc d)</i> * where <i>field(t)</i> is the field associated with term <i>t</i>.) * </li> * </ul> * * <p><i>Lucene's Practical Scoring Function</i> is derived from the above. 
* The color codes demonstrate how it relates * to those of the <i>conceptual</i> formula: * * <P> * <table cellpadding="2" cellspacing="2" border="0" align="center"> * <tr><td> * <table cellpadding="" cellspacing="2" border="2" align="center"> * <tr><td> * <table cellpadding="2" cellspacing="2" border="0" align="center"> * <tr> * <td valign="middle" align="right" rowspan="1"> * score(q,d) &nbsp; = &nbsp; * <A HREF="#formula_coord"><font color="#FF9933">coord(q,d)</font></A> &nbsp;&middot;&nbsp; * <A HREF="#formula_queryNorm"><font color="#FF33CC">queryNorm(q)</font></A> &nbsp;&middot;&nbsp; * </td> * <td valign="bottom" align="center" rowspan="1"> * <big><big><big>&sum;</big></big></big> * </td> * <td valign="middle" align="right" rowspan="1"> * <big><big>(</big></big> * <A HREF="#formula_tf"><font color="#993399">tf(t in d)</font></A> &nbsp;&middot;&nbsp; * <A HREF="#formula_idf"><font color="#993399">idf(t)</font></A><sup>2</sup> &nbsp;&middot;&nbsp; * <A HREF="#formula_termBoost"><font color="#CCCC00">t.getBoost()</font></A>&nbsp;&middot;&nbsp; * <A HREF="#formula_norm"><font color="#3399FF">norm(t,d)</font></A> * <big><big>)</big></big> * </td> * </tr> * <tr valigh="top"> * <td></td> * <td align="center"><small>t in q</small></td> * <td></td> * </tr> * </table> * </td></tr> * </table> * </td></tr> * <tr><td> * <center><font=-1><u>Lucene Practical Scoring Function</u></font></center> * </td></tr> * </table> * * <p> where * <ol> * <li> * <A NAME="formula_tf"></A> * <b><i>tf(t in d)</i></b> * correlates to the term's <i>frequency</i>, * defined as the number of times term <i>t</i> appears in the currently scored document <i>d</i>. * Documents that have more occurrences of a given term receive a higher score. 
* Note that <i>tf(t in q)</i> is assumed to be <i>1</i> and therefore it does not appear in this equation, * However if a query contains twice the same term, there will be * two term-queries with that same term and hence the computation would still be correct (although * not very efficient). * The default computation for <i>tf(t in d)</i> in * {@link org.apache.lucene.search.DefaultSimilarity#tf(float) DefaultSimilarity} is: * * <br>&nbsp;<br> * <table cellpadding="2" cellspacing="2" border="0" align="center"> * <tr> * <td valign="middle" align="right" rowspan="1"> * {@link org.apache.lucene.search.DefaultSimilarity#tf(float) tf(t in d)} &nbsp; = &nbsp; * </td> * <td valign="top" align="center" rowspan="1"> * frequency<sup><big>&frac12;</big></sup> * </td> * </tr> * </table> * <br>&nbsp;<br> * </li> * * <li> * <A NAME="formula_idf"></A> * <b><i>idf(t)</i></b> stands for Inverse Document Frequency. This value * correlates to the inverse of <i>docFreq</i> * (the number of documents in which the term <i>t</i> appears). * This means rarer terms give higher contribution to the total score. * <i>idf(t)</i> appears for <i>t</i> in both the query and the document, * hence it is squared in the equation. 
* The default computation for <i>idf(t)</i> in * {@link org.apache.lucene.search.DefaultSimilarity#idf(int, int) DefaultSimilarity} is: * * <br>&nbsp;<br> * <table cellpadding="2" cellspacing="2" border="0" align="center"> * <tr> * <td valign="middle" align="right"> * {@link org.apache.lucene.search.DefaultSimilarity#idf(int, int) idf(t)}&nbsp; = &nbsp; * </td> * <td valign="middle" align="center"> * 1 + log <big>(</big> * </td> * <td valign="middle" align="center"> * <table> * <tr><td align="center"><small>numDocs</small></td></tr> * <tr><td align="center">&ndash;&ndash;&ndash;&ndash;&ndash;&ndash;&ndash;&ndash;&ndash;</td></tr> * <tr><td align="center"><small>docFreq+1</small></td></tr> * </table> * </td> * <td valign="middle" align="center"> * <big>)</big> * </td> * </tr> * </table> * <br>&nbsp;<br> * </li> * * <li> * <A NAME="formula_coord"></A> * <b><i>coord(q,d)</i></b> * is a score factor based on how many of the query terms are found in the specified document. * Typically, a document that contains more of the query's terms will receive a higher score * than another document with fewer query terms. * This is a search time factor computed in * {@link #coord(int, int) coord(q,d)} * by the Similarity in effect at search time. * <br>&nbsp;<br> * </li> * * <li><b> * <A NAME="formula_queryNorm"></A> * <i>queryNorm(q)</i> * </b> * is a normalizing factor used to make scores between queries comparable. * This factor does not affect document ranking (since all ranked documents are multiplied by the same factor), * but rather just attempts to make scores from different queries (or even different indexes) comparable. * This is a search time factor computed by the Similarity in effect at search time. 
* * The default computation in * {@link org.apache.lucene.search.DefaultSimilarity#queryNorm(float) DefaultSimilarity} * produces a <a href="http://en.wikipedia.org/wiki/Euclidean_norm#Euclidean_norm">Euclidean norm</a>: * <br>&nbsp;<br> * <table cellpadding="1" cellspacing="0" border="0" align="center"> * <tr> * <td valign="middle" align="right" rowspan="1"> * queryNorm(q) &nbsp; = &nbsp; * {@link org.apache.lucene.search.DefaultSimilarity#queryNorm(float) queryNorm(sumOfSquaredWeights)} * &nbsp; = &nbsp; * </td> * <td valign="middle" align="center" rowspan="1"> * <table> * <tr><td align="center"><big>1</big></td></tr> * <tr><td align="center"><big> * &ndash;&ndash;&ndash;&ndash;&ndash;&ndash;&ndash;&ndash;&ndash;&ndash;&ndash;&ndash;&ndash;&ndash; * </big></td></tr> * <tr><td align="center">sumOfSquaredWeights<sup><big>&frac12;</big></sup></td></tr> * </table> * </td> * </tr> * </table> * <br>&nbsp;<br> * * The sum of squared weights (of the query terms) is * computed by the query {@link org.apache.lucene.search.Weight} object. 
* For example, a {@link org.apache.lucene.search.BooleanQuery boolean query} * computes this value as: * * <br>&nbsp;<br> * <table cellpadding="1" cellspacing="0" border="0"n align="center"> * <tr> * <td valign="middle" align="right" rowspan="1"> * {@link org.apache.lucene.search.Weight#sumOfSquaredWeights() sumOfSquaredWeights} &nbsp; = &nbsp; * {@link org.apache.lucene.search.Query#getBoost() q.getBoost()} <sup><big>2</big></sup> * &nbsp;&middot;&nbsp; * </td> * <td valign="bottom" align="center" rowspan="1"> * <big><big><big>&sum;</big></big></big> * </td> * <td valign="middle" align="right" rowspan="1"> * <big><big>(</big></big> * <A HREF="#formula_idf">idf(t)</A> &nbsp;&middot;&nbsp; * <A HREF="#formula_termBoost">t.getBoost()</A> * <big><big>) <sup>2</sup> </big></big> * </td> * </tr> * <tr valigh="top"> * <td></td> * <td align="center"><small>t in q</small></td> * <td></td> * </tr> * </table> * <br>&nbsp;<br> * * </li> * * <li> * <A NAME="formula_termBoost"></A> * <b><i>t.getBoost()</i></b> * is a search time boost of term <i>t</i> in the query <i>q</i> as * specified in the query text * (see <A HREF="../../../../../../queryparsersyntax.html#Boosting a Term">query syntax</A>), * or as set by application calls to * {@link org.apache.lucene.search.Query#setBoost(float) setBoost()}. * Notice that there is really no direct API for accessing a boost of one term in a multi term query, * but rather multi terms are represented in a query as multi * {@link org.apache.lucene.search.TermQuery TermQuery} objects, * and so the boost of a term in the query is accessible by calling the sub-query * {@link org.apache.lucene.search.Query#getBoost() getBoost()}. 
* <br>&nbsp;<br> * </li> * * <li> * <A NAME="formula_norm"></A> * <b><i>norm(t,d)</i></b> encapsulates a few (indexing time) boost and length factors: * * <ul> * <li><b>Document boost</b> - set by calling * {@link org.apache.lucene.document.Document#setBoost(float) doc.setBoost()} * before adding the document to the index. * </li> * <li><b>Field boost</b> - set by calling * {@link org.apache.lucene.document.Fieldable#setBoost(float) field.setBoost()} * before adding the field to a document. * </li> * <li>{@link #lengthNorm(String, int) <b>lengthNorm</b>(field)} - computed * when the document is added to the index in accordance with the number of tokens * of this field in the document, so that shorter fields contribute more to the score. * LengthNorm is computed by the Similarity class in effect at indexing. * </li> * </ul> * * <p> * When a document is added to the index, all the above factors are multiplied. * If the document has multiple fields with the same name, all their boosts are multiplied together: * * <br>&nbsp;<br> * <table cellpadding="1" cellspacing="0" border="0"n align="center"> * <tr> * <td valign="middle" align="right" rowspan="1"> * norm(t,d) &nbsp; = &nbsp; * {@link org.apache.lucene.document.Document#getBoost() doc.getBoost()} * &nbsp;&middot;&nbsp; * {@link #lengthNorm(String, int) lengthNorm(field)} * &nbsp;&middot;&nbsp; * </td> * <td valign="bottom" align="center" rowspan="1"> * <big><big><big>&prod;</big></big></big> * </td> * <td valign="middle" align="right" rowspan="1"> * {@link org.apache.lucene.document.Fieldable#getBoost() f.getBoost}() * </td> * </tr> * <tr valigh="top"> * <td></td> * <td align="center"><small>field <i><b>f</b></i> in <i>d</i> named as <i><b>t</b></i></small></td> * <td></td> * </tr> * </table> * <br>&nbsp;<br> * However the resulted <i>norm</i> value is {@link #encodeNorm(float) encoded} as a single byte * before being stored. 
* At search time, the norm byte value is read from the index * {@link org.apache.lucene.store.Directory directory} and * {@link #decodeNorm(byte) decoded} back to a float <i>norm</i> value. * This encoding/decoding, while reducing index size, comes with the price of * precision loss - it is not guaranteed that <i>decode(encode(x)) = x</i>. * For instance, <i>decode(encode(0.89)) = 0.75</i>. * <br>&nbsp;<br> * Compression of norm values to a single byte saves memory at search time, * because once a field is referenced at search time, its norms - for * all documents - are maintained in memory. * <br>&nbsp;<br> * The rationale supporting such lossy compression of norm values is that * given the difficulty (and inaccuracy) of users to express their true information * need by a query, only big differences matter. * <br>&nbsp;<br> * Last, note that search time is too late to modify this <i>norm</i> part of scoring, e.g. by * using a different {@link Similarity} for search. * <br>&nbsp;<br> * </li> * </ol> * * @see #setDefault(Similarity) * @see org.apache.lucene.index.IndexWriter#setSimilarity(Similarity) * @see Searcher#setSimilarity(Similarity) */ public abstract class Similarity implements Serializable { /** * The Similarity implementation used by default. **/ private static Similarity defaultImpl = new DefaultSimilarity(); public static final int NO_DOC_ID_PROVIDED = -1; /** Set the default Similarity implementation used by indexing and search * code. * * @see Searcher#setSimilarity(Similarity) * @see org.apache.lucene.index.IndexWriter#setSimilarity(Similarity) */ public static void setDefault(Similarity similarity) { Similarity.defaultImpl = similarity; } /** Return the default Similarity implementation used by indexing and search * code. * * <p>This is initially an instance of {@link DefaultSimilarity}. 
* * @see Searcher#setSimilarity(Similarity) * @see org.apache.lucene.index.IndexWriter#setSimilarity(Similarity) */ public static Similarity getDefault() { return Similarity.defaultImpl; } /** Cache of decoded bytes. */ private static final float[] NORM_TABLE = new float[256]; static { for (int i = 0; i < 256; i++) NORM_TABLE[i] = SmallFloat.byte315ToFloat((byte)i); } /** Decodes a normalization factor stored in an index. * @see #encodeNorm(float) */ public static float decodeNorm(byte b) { return NORM_TABLE[b & 0xFF]; // & 0xFF maps negative bytes to positive above 127 } /** Returns a table for decoding normalization bytes. * @see #encodeNorm(float) */ public static float[] getNormDecoder() { return NORM_TABLE; } /** * Compute the normalization value for a field, given the accumulated * state of term processing for this field (see {@link FieldInvertState}). * * <p>Implementations should calculate a float value based on the field * state and then return that value. * * <p>For backward compatibility this method by default calls * {@link #lengthNorm(String, int)} passing * {@link FieldInvertState#getLength()} as the second argument, and * then multiplies this value by {@link FieldInvertState#getBoost()}.</p> * * <p><b>WARNING</b>: This API is new and experimental and may * suddenly change.</p> * * @param field field name * @param state current processing state for this field * @return the calculated float norm */ public float computeNorm(String field, FieldInvertState state) { return (float) (state.getBoost() * lengthNorm(field, state.getLength())); } /** Computes the normalization value for a field given the total number of * terms contained in a field. These values, together with field boosts, are * stored in an index and multipled into scores for hits on each field by the * search code. 
* * <p>Matches in longer fields are less precise, so implementations of this * method usually return smaller values when <code>numTokens</code> is large, * and larger values when <code>numTokens</code> is small. * * <p>Note that the return values are computed under * {@link org.apache.lucene.index.IndexWriter#addDocument(org.apache.lucene.document.Document)} * and then stored using * {@link #encodeNorm(float)}. * Thus they have limited precision, and documents * must be re-indexed if this method is altered. * * @param fieldName the name of the field * @param numTokens the total number of tokens contained in fields named * <i>fieldName</i> of <i>doc</i>. * @return a normalization factor for hits on this field of this document * * @see org.apache.lucene.document.Field#setBoost(float) */ public abstract float lengthNorm(String fieldName, int numTokens); /** Computes the normalization value for a query given the sum of the squared * weights of each of the query terms. This value is multiplied into the * weight of each query term. While the classic query normalization factor is * computed as 1/sqrt(sumOfSquaredWeights), other implementations might * completely ignore sumOfSquaredWeights (ie return 1). * * <p>This does not affect ranking, but the default implementation does make scores * from different queries more comparable than they would be by eliminating the * magnitude of the Query vector as a factor in the score. * * @param sumOfSquaredWeights the sum of the squares of query term weights * @return a normalization factor for query weights */ public abstract float queryNorm(float sumOfSquaredWeights); /** Encodes a normalization factor for storage in an index. * * <p>The encoding uses a three-bit mantissa, a five-bit exponent, and * the zero-exponent point at 15, thus * representing values from around 7x10^9 to 2x10^-9 with about one * significant decimal digit of accuracy. Zero is also represented. * Negative numbers are rounded up to zero. 
Values too large to represent * are rounded down to the largest representable value. Positive values too * small to represent are rounded up to the smallest positive representable * value. * * @see org.apache.lucene.document.Field#setBoost(float) * @see org.apache.lucene.util.SmallFloat */ public static byte encodeNorm(float f) { return SmallFloat.floatToByte315(f); } /** Computes a score factor based on a term or phrase's frequency in a * document. This value is multiplied by the {@link #idf(int, int)} * factor for each term in the query and these products are then summed to * form the initial score for a document. * * <p>Terms and phrases repeated in a document indicate the topic of the * document, so implementations of this method usually return larger values * when <code>freq</code> is large, and smaller values when <code>freq</code> * is small. * * <p>The default implementation calls {@link #tf(float)}. * * @param freq the frequency of a term within a document * @return a score factor based on a term's within-document frequency */ public float tf(int freq) { return tf((float)freq); } /** Computes the amount of a sloppy phrase match, based on an edit distance. * This value is summed for each sloppy phrase match in a document to form * the frequency that is passed to {@link #tf(float)}. * * <p>A phrase match with a small edit distance to a document passage more * closely matches the document, so implementations of this method usually * return larger values when the edit distance is small and smaller values * when it is large. * * @see PhraseQuery#setSlop(int) * @param distance the edit distance of this sloppy phrase match * @return the frequency increment for this match */ public abstract float sloppyFreq(int distance); /** Computes a score factor based on a term or phrase's frequency in a * document. 
This value is multiplied by the {@link #idf(int, int)} * factor for each term in the query and these products are then summed to * form the initial score for a document. * * <p>Terms and phrases repeated in a document indicate the topic of the * document, so implementations of this method usually return larger values * when <code>freq</code> is large, and smaller values when <code>freq</code> * is small. * * @param freq the frequency of a term within a document * @return a score factor based on a term's within-document frequency */ public abstract float tf(float freq); /** * Computes a score factor for a simple term and returns an explanation * for that score factor. * * <p> * The default implementation uses: * * <pre> * idf(searcher.docFreq(term), searcher.maxDoc()); * </pre> * * Note that {@link Searcher#maxDoc()} is used instead of * {@link org.apache.lucene.index.IndexReader#numDocs() IndexReader#numDocs()} because also * {@link Searcher#docFreq(Term)} is used, and when the latter * is inaccurate, so is {@link Searcher#maxDoc()}, and in the same direction. * In addition, {@link Searcher#maxDoc()} is more efficient to compute * * @param term the term in question * @param searcher the document collection being searched * @return an IDFExplain object that includes both an idf score factor and an explanation for the term. * @throws IOException */ public IDFExplanation idfExplain(final Term term, final Searcher searcher) throws IOException { final int df = searcher.docFreq(term); final int max = searcher.maxDoc(); final float idf = idf(df, max); return new IDFExplanation() { @Override public String explain() { return "idf(docFreq=" + df + ", maxDocs=" + max + ")"; } @Override public float getIdf() { return idf; }}; } /** * Computes a score factor for a phrase. * * <p> * The default implementation sums the idf factor for * each term in the phrase. 
* * @param terms the terms in the phrase * @param searcher the document collection being searched * @return an IDFExplain object that includes both an idf * score factor for the phrase and an explanation * for each term. * @throws IOException */ public IDFExplanation idfExplain(Collection<Term> terms, Searcher searcher) throws IOException { final int max = searcher.maxDoc(); float idf = 0.0f; final StringBuilder exp = new StringBuilder(); for (final Term term : terms ) { final int df = searcher.docFreq(term); idf += idf(df, max); exp.append(" "); exp.append(term.text()); exp.append("="); exp.append(df); } final float fIdf = idf; return new IDFExplanation() { @Override public float getIdf() { return fIdf; } @Override public String explain() { return exp.toString(); } }; } /** Computes a score factor based on a term's document frequency (the number * of documents which contain the term). This value is multiplied by the * {@link #tf(int)} factor for each term in the query and these products are * then summed to form the initial score for a document. * * <p>Terms that occur in fewer documents are better indicators of topic, so * implementations of this method usually return larger values for rare terms, * and smaller values for common terms. * * @param docFreq the number of documents which contain the term * @param numDocs the total number of documents in the collection * @return a score factor based on the term's document frequency */ public abstract float idf(int docFreq, int numDocs); /** Computes a score factor based on the fraction of all query terms that a * document contains. This value is multiplied into scores. * * <p>The presence of a large portion of the query terms indicates a better * match with the query, so implementations of this method usually return * larger values when the ratio between these parameters is large and smaller * values when the ratio between them is small. 
* * @param overlap the number of query terms matched in the document * @param maxOverlap the total number of terms in the query * @return a score factor based on term overlap with the query */ public abstract float coord(int overlap, int maxOverlap); /** * Calculate a scoring factor based on the data in the payload. Overriding implementations * are responsible for interpreting what is in the payload. Lucene makes no assumptions about * what is in the byte array. * <p> * The default implementation returns 1. * * @param docId The docId currently being scored. If this value is {@link #NO_DOC_ID_PROVIDED}, then it should be assumed that the PayloadQuery implementation does not provide document information * @param fieldName The fieldName of the term this payload belongs to * @param start The start position of the payload * @param end The end position of the payload * @param payload The payload byte array to be scored * @param offset The offset into the payload array * @param length The length in the array * @return An implementation dependent float to be used as a scoring factor * */ public float scorePayload(int docId, String fieldName, int start, int end, byte [] payload, int offset, int length) { return 1; } }
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/search/Similarity.java
Java
art
35,698
package org.apache.lucene.search;

/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import java.io.IOException;

import org.apache.lucene.index.*;

/**
 * Position of a term in a document that takes into account the term offset within the phrase.
 */
final class PhrasePositions {
  int doc;              // current doc
  int position;         // position in doc
  int count;            // remaining pos in this doc
  int offset;           // position in phrase
  TermPositions tp;     // stream of positions
  PhrasePositions next; // used to make lists
  boolean repeats;      // there's other pp for same term (e.g. query="1st word 2nd word"~1)

  PhrasePositions(TermPositions t, int o) {
    tp = t;
    offset = o;
  }

  /**
   * Advances to the next document that contains this term.
   * On exhaustion the underlying positions stream is closed and
   * {@code doc} is set to {@link Integer#MAX_VALUE} as a sentinel.
   *
   * @return true if positioned on a new document, false if there are no more
   */
  final boolean next() throws IOException {  // increments to next doc
    if (!tp.next()) {
      tp.close();               // close stream
      doc = Integer.MAX_VALUE;  // sentinel value
      return false;
    }
    doc = tp.doc();
    position = 0;
    return true;
  }

  /**
   * Skips to the first document whose number is &gt;= {@code target}.
   * Same exhaustion behavior as {@link #next()}.
   *
   * @return true if positioned on a matching document, false if exhausted
   */
  final boolean skipTo(int target) throws IOException {
    if (!tp.skipTo(target)) {
      tp.close();               // close stream
      doc = Integer.MAX_VALUE;  // sentinel value
      return false;
    }
    doc = tp.doc();
    position = 0;
    return true;
  }

  /**
   * Initializes position iteration within the current document:
   * loads the remaining-positions counter from the term frequency,
   * then reads the first position.
   */
  final void firstPosition() throws IOException {
    count = tp.freq();  // read first pos
    nextPosition();
  }

  /**
   * Go to next location of this term current document, and set
   * <code>position</code> as <code>location - offset</code>, so that a
   * matching exact phrase is easily identified when all PhrasePositions
   * have exactly the same <code>position</code>.
   */
  final boolean nextPosition() throws IOException {
    if (count-- > 0) {  // read subsequent pos's
      position = tp.nextPosition() - offset;
      return true;
    } else
      return false;
  }
}
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/search/PhrasePositions.java
Java
art
2,609
package org.apache.lucene.search;

/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.util.NumericUtils;
import org.apache.lucene.util.RamUsageEstimator;
import org.apache.lucene.document.NumericField; // for javadocs
import org.apache.lucene.analysis.NumericTokenStream; // for javadocs

import java.io.IOException;
import java.io.Serializable;
import java.io.PrintStream;
import java.text.DecimalFormat;

/**
 * Expert: Maintains caches of term values.
 *
 * <p>Created: May 19, 2004 11:13:14 AM
 *
 * @since   lucene 1.4
 * @see org.apache.lucene.util.FieldCacheSanityChecker
 */
public interface FieldCache {

  // Placeholder stored in the cache while a value is being computed,
  // so concurrent lookups for the same key can detect work in progress.
  public static final class CreationPlaceholder {
    Object value;
  }

  /** Indicator for StringIndex values in the cache. */
  // NOTE: the value assigned to this constant must not be
  // the same as any of those in SortField!!
  public static final int STRING_INDEX = -1;

  /** Expert: Stores term text values and document ordering data. */
  public static class StringIndex {

    /**
     * Binary search for {@code key} in {@link #lookup}.
     * A null key maps to index 0 (slot 0 is reserved for "no value"); this
     * special case is the reason that Arrays.binarySearch() isn't useful.
     *
     * @return the index of the key, or {@code -(insertionPoint + 1)} if absent
     */
    public int binarySearchLookup(String key) {
      // this special case is the reason that Arrays.binarySearch() isn't useful.
      if (key == null)
        return 0;
      int low = 1;                      // slot 0 is reserved for null
      int high = lookup.length-1;
      while (low <= high) {
        int mid = (low + high) >>> 1;   // unsigned shift avoids (low+high) overflow
        int cmp = lookup[mid].compareTo(key);
        if (cmp < 0)
          low = mid + 1;
        else if (cmp > 0)
          high = mid - 1;
        else
          return mid; // key found
      }
      return -(low + 1);  // key not found.
    }

    /** All the term values, in natural order. */
    public final String[] lookup;

    /** For each document, an index into the lookup array. */
    public final int[] order;

    /** Creates one of these objects */
    public StringIndex (int[] values, String[] lookup) {
      this.order = values;
      this.lookup = lookup;
    }
  }

  /**
   * Marker interface as super-interface to all parsers. It
   * is used to specify a custom parser to {@link
   * SortField#SortField(String, FieldCache.Parser)}.
   */
  public interface Parser extends Serializable {
  }

  /** Interface to parse bytes from document fields.
   * @see FieldCache#getBytes(IndexReader, String, FieldCache.ByteParser)
   */
  public interface ByteParser extends Parser {
    /** Return a single Byte representation of this field's value. */
    public byte parseByte(String string);
  }

  /** Interface to parse shorts from document fields.
   * @see FieldCache#getShorts(IndexReader, String, FieldCache.ShortParser)
   */
  public interface ShortParser extends Parser {
    /** Return a short representation of this field's value. */
    public short parseShort(String string);
  }

  /** Interface to parse ints from document fields.
   * @see FieldCache#getInts(IndexReader, String, FieldCache.IntParser)
   */
  public interface IntParser extends Parser {
    /** Return an integer representation of this field's value. */
    public int parseInt(String string);
  }

  /** Interface to parse floats from document fields.
   * @see FieldCache#getFloats(IndexReader, String, FieldCache.FloatParser)
   */
  public interface FloatParser extends Parser {
    /** Return a float representation of this field's value. */
    public float parseFloat(String string);
  }

  /** Interface to parse long from document fields.
   * @see FieldCache#getLongs(IndexReader, String, FieldCache.LongParser)
   */
  public interface LongParser extends Parser {
    /** Return a long representation of this field's value. */
    public long parseLong(String string);
  }

  /** Interface to parse doubles from document fields.
   * @see FieldCache#getDoubles(IndexReader, String, FieldCache.DoubleParser)
   */
  public interface DoubleParser extends Parser {
    /** Return a double representation of this field's value. */
    public double parseDouble(String string);
  }

  /** Expert: The cache used internally by sorting and range query classes. */
  public static FieldCache DEFAULT = new FieldCacheImpl();

  /** The default parser for byte values, which are encoded by {@link Byte#toString(byte)} */
  public static final ByteParser DEFAULT_BYTE_PARSER = new ByteParser() {
    public byte parseByte(String value) {
      return Byte.parseByte(value);
    }
    // Serialization hook: keeps the parser a singleton across deserialization.
    protected Object readResolve() {
      return DEFAULT_BYTE_PARSER;
    }
    @Override
    public String toString() {
      return FieldCache.class.getName()+".DEFAULT_BYTE_PARSER";
    }
  };

  /** The default parser for short values, which are encoded by {@link Short#toString(short)} */
  public static final ShortParser DEFAULT_SHORT_PARSER = new ShortParser() {
    public short parseShort(String value) {
      return Short.parseShort(value);
    }
    protected Object readResolve() {
      return DEFAULT_SHORT_PARSER;
    }
    @Override
    public String toString() {
      return FieldCache.class.getName()+".DEFAULT_SHORT_PARSER";
    }
  };

  /** The default parser for int values, which are encoded by {@link Integer#toString(int)} */
  public static final IntParser DEFAULT_INT_PARSER = new IntParser() {
    public int parseInt(String value) {
      return Integer.parseInt(value);
    }
    protected Object readResolve() {
      return DEFAULT_INT_PARSER;
    }
    @Override
    public String toString() {
      return FieldCache.class.getName()+".DEFAULT_INT_PARSER";
    }
  };

  /** The default parser for float values, which are encoded by {@link Float#toString(float)} */
  public static final FloatParser DEFAULT_FLOAT_PARSER = new FloatParser() {
    public float parseFloat(String value) {
      return Float.parseFloat(value);
    }
    protected Object readResolve() {
      return DEFAULT_FLOAT_PARSER;
    }
    @Override
    public String toString() {
      return FieldCache.class.getName()+".DEFAULT_FLOAT_PARSER";
    }
  };

  /** The default parser for long values, which are encoded by {@link Long#toString(long)} */
  public static final LongParser DEFAULT_LONG_PARSER = new LongParser() {
    public long parseLong(String value) {
      return Long.parseLong(value);
    }
    protected Object readResolve() {
      return DEFAULT_LONG_PARSER;
    }
    @Override
    public String toString() {
      return FieldCache.class.getName()+".DEFAULT_LONG_PARSER";
    }
  };

  /** The default parser for double values, which are encoded by {@link Double#toString(double)} */
  public static final DoubleParser DEFAULT_DOUBLE_PARSER = new DoubleParser() {
    public double parseDouble(String value) {
      return Double.parseDouble(value);
    }
    protected Object readResolve() {
      return DEFAULT_DOUBLE_PARSER;
    }
    @Override
    public String toString() {
      return FieldCache.class.getName()+".DEFAULT_DOUBLE_PARSER";
    }
  };

  /**
   * A parser instance for int values encoded by {@link NumericUtils#intToPrefixCoded(int)}, e.g. when indexed
   * via {@link NumericField}/{@link NumericTokenStream}.
   */
  public static final IntParser NUMERIC_UTILS_INT_PARSER=new IntParser(){
    public int parseInt(String val) {
      final int shift = val.charAt(0)-NumericUtils.SHIFT_START_INT;
      // shift>0 marks a lower-precision trie term: stop filling the cache,
      // only full-precision (shift==0) terms carry exact values.
      if (shift>0 && shift<=31)
        throw new FieldCacheImpl.StopFillCacheException();
      return NumericUtils.prefixCodedToInt(val);
    }
    protected Object readResolve() {
      return NUMERIC_UTILS_INT_PARSER;
    }
    @Override
    public String toString() {
      return FieldCache.class.getName()+".NUMERIC_UTILS_INT_PARSER";
    }
  };

  /**
   * A parser instance for float values encoded with {@link NumericUtils}, e.g. when indexed
   * via {@link NumericField}/{@link NumericTokenStream}.
   */
  public static final FloatParser NUMERIC_UTILS_FLOAT_PARSER=new FloatParser(){
    public float parseFloat(String val) {
      final int shift = val.charAt(0)-NumericUtils.SHIFT_START_INT;
      if (shift>0 && shift<=31)
        throw new FieldCacheImpl.StopFillCacheException();
      return NumericUtils.sortableIntToFloat(NumericUtils.prefixCodedToInt(val));
    }
    protected Object readResolve() {
      return NUMERIC_UTILS_FLOAT_PARSER;
    }
    @Override
    public String toString() {
      return FieldCache.class.getName()+".NUMERIC_UTILS_FLOAT_PARSER";
    }
  };

  /**
   * A parser instance for long values encoded by {@link NumericUtils#longToPrefixCoded(long)}, e.g. when indexed
   * via {@link NumericField}/{@link NumericTokenStream}.
   */
  public static final LongParser NUMERIC_UTILS_LONG_PARSER = new LongParser(){
    public long parseLong(String val) {
      final int shift = val.charAt(0)-NumericUtils.SHIFT_START_LONG;
      if (shift>0 && shift<=63)
        throw new FieldCacheImpl.StopFillCacheException();
      return NumericUtils.prefixCodedToLong(val);
    }
    protected Object readResolve() {
      return NUMERIC_UTILS_LONG_PARSER;
    }
    @Override
    public String toString() {
      return FieldCache.class.getName()+".NUMERIC_UTILS_LONG_PARSER";
    }
  };

  /**
   * A parser instance for double values encoded with {@link NumericUtils}, e.g. when indexed
   * via {@link NumericField}/{@link NumericTokenStream}.
   */
  public static final DoubleParser NUMERIC_UTILS_DOUBLE_PARSER = new DoubleParser(){
    public double parseDouble(String val) {
      final int shift = val.charAt(0)-NumericUtils.SHIFT_START_LONG;
      if (shift>0 && shift<=63)
        throw new FieldCacheImpl.StopFillCacheException();
      return NumericUtils.sortableLongToDouble(NumericUtils.prefixCodedToLong(val));
    }
    protected Object readResolve() {
      return NUMERIC_UTILS_DOUBLE_PARSER;
    }
    @Override
    public String toString() {
      return FieldCache.class.getName()+".NUMERIC_UTILS_DOUBLE_PARSER";
    }
  };

  /** Checks the internal cache for an appropriate entry, and if none is
   * found, reads the terms in <code>field</code> as a single byte and returns an array
   * of size <code>reader.maxDoc()</code> of the value each document
   * has in the given field.
   * @param reader  Used to get field values.
   * @param field   Which field contains the single byte values.
   * @return The values in the given field for each document.
   * @throws IOException  If any error occurs.
   */
  public byte[] getBytes (IndexReader reader, String field)
  throws IOException;

  /** Checks the internal cache for an appropriate entry, and if none is found,
   * reads the terms in <code>field</code> as bytes and returns an array of
   * size <code>reader.maxDoc()</code> of the value each document has in the
   * given field.
   * @param reader  Used to get field values.
   * @param field   Which field contains the bytes.
   * @param parser  Computes byte for string values.
   * @return The values in the given field for each document.
   * @throws IOException  If any error occurs.
   */
  public byte[] getBytes (IndexReader reader, String field, ByteParser parser)
  throws IOException;

  /** Checks the internal cache for an appropriate entry, and if none is
   * found, reads the terms in <code>field</code> as shorts and returns an array
   * of size <code>reader.maxDoc()</code> of the value each document
   * has in the given field.
   * @param reader  Used to get field values.
   * @param field   Which field contains the shorts.
   * @return The values in the given field for each document.
   * @throws IOException  If any error occurs.
   */
  public short[] getShorts (IndexReader reader, String field)
  throws IOException;

  /** Checks the internal cache for an appropriate entry, and if none is found,
   * reads the terms in <code>field</code> as shorts and returns an array of
   * size <code>reader.maxDoc()</code> of the value each document has in the
   * given field.
   * @param reader  Used to get field values.
   * @param field   Which field contains the shorts.
   * @param parser  Computes short for string values.
   * @return The values in the given field for each document.
   * @throws IOException  If any error occurs.
   */
  public short[] getShorts (IndexReader reader, String field, ShortParser parser)
  throws IOException;

  /** Checks the internal cache for an appropriate entry, and if none is
   * found, reads the terms in <code>field</code> as integers and returns an array
   * of size <code>reader.maxDoc()</code> of the value each document
   * has in the given field.
   * @param reader  Used to get field values.
   * @param field   Which field contains the integers.
   * @return The values in the given field for each document.
   * @throws IOException  If any error occurs.
   */
  public int[] getInts (IndexReader reader, String field)
  throws IOException;

  /** Checks the internal cache for an appropriate entry, and if none is found,
   * reads the terms in <code>field</code> as integers and returns an array of
   * size <code>reader.maxDoc()</code> of the value each document has in the
   * given field.
   * @param reader  Used to get field values.
   * @param field   Which field contains the integers.
   * @param parser  Computes integer for string values.
   * @return The values in the given field for each document.
   * @throws IOException  If any error occurs.
   */
  public int[] getInts (IndexReader reader, String field, IntParser parser)
  throws IOException;

  /** Checks the internal cache for an appropriate entry, and if
   * none is found, reads the terms in <code>field</code> as floats and returns an array
   * of size <code>reader.maxDoc()</code> of the value each document
   * has in the given field.
   * @param reader  Used to get field values.
   * @param field   Which field contains the floats.
   * @return The values in the given field for each document.
   * @throws IOException  If any error occurs.
   */
  public float[] getFloats (IndexReader reader, String field)
  throws IOException;

  /** Checks the internal cache for an appropriate entry, and if
   * none is found, reads the terms in <code>field</code> as floats and returns an array
   * of size <code>reader.maxDoc()</code> of the value each document
   * has in the given field.
   * @param reader  Used to get field values.
   * @param field   Which field contains the floats.
   * @param parser  Computes float for string values.
   * @return The values in the given field for each document.
   * @throws IOException  If any error occurs.
   */
  public float[] getFloats (IndexReader reader, String field,
                            FloatParser parser) throws IOException;

  /**
   * Checks the internal cache for an appropriate entry, and if none is
   * found, reads the terms in <code>field</code> as longs and returns an array
   * of size <code>reader.maxDoc()</code> of the value each document
   * has in the given field.
   *
   * @param reader Used to get field values.
   * @param field  Which field contains the longs.
   * @return The values in the given field for each document.
   * @throws java.io.IOException If any error occurs.
   */
  public long[] getLongs(IndexReader reader, String field)
          throws IOException;

  /**
   * Checks the internal cache for an appropriate entry, and if none is found,
   * reads the terms in <code>field</code> as longs and returns an array of
   * size <code>reader.maxDoc()</code> of the value each document has in the
   * given field.
   *
   * @param reader Used to get field values.
   * @param field  Which field contains the longs.
   * @param parser Computes long for string values.
   * @return The values in the given field for each document.
   * @throws IOException If any error occurs.
   */
  public long[] getLongs(IndexReader reader, String field, LongParser parser)
          throws IOException;

  /**
   * Checks the internal cache for an appropriate entry, and if none is
   * found, reads the terms in <code>field</code> as doubles and returns an array
   * of size <code>reader.maxDoc()</code> of the value each document
   * has in the given field.
   *
   * @param reader Used to get field values.
   * @param field  Which field contains the doubles.
   * @return The values in the given field for each document.
   * @throws IOException If any error occurs.
   */
  public double[] getDoubles(IndexReader reader, String field)
          throws IOException;

  /**
   * Checks the internal cache for an appropriate entry, and if none is found,
   * reads the terms in <code>field</code> as doubles and returns an array of
   * size <code>reader.maxDoc()</code> of the value each document has in the
   * given field.
   *
   * @param reader Used to get field values.
   * @param field  Which field contains the doubles.
   * @param parser Computes double for string values.
   * @return The values in the given field for each document.
   * @throws IOException If any error occurs.
   */
  public double[] getDoubles(IndexReader reader, String field, DoubleParser parser)
          throws IOException;

  /** Checks the internal cache for an appropriate entry, and if none
   * is found, reads the term values in <code>field</code> and returns an array
   * of size <code>reader.maxDoc()</code> containing the value each document
   * has in the given field.
   * @param reader  Used to get field values.
   * @param field   Which field contains the strings.
   * @return The values in the given field for each document.
   * @throws IOException  If any error occurs.
   */
  public String[] getStrings (IndexReader reader, String field)
  throws IOException;

  /** Checks the internal cache for an appropriate entry, and if none
   * is found reads the term values in <code>field</code> and returns
   * an array of them in natural order, along with an array telling
   * which element in the term array each document uses.
   * @param reader  Used to get field values.
   * @param field   Which field contains the strings.
   * @return Array of terms and index into the array for each document.
   * @throws IOException  If any error occurs.
   */
  public StringIndex getStringIndex (IndexReader reader, String field)
  throws IOException;

  /**
   * EXPERT: A unique Identifier/Description for each item in the FieldCache.
   * Can be useful for logging/debugging.
   * <p>
   * <b>EXPERIMENTAL API:</b> This API is considered extremely advanced
   * and experimental.  It may be removed or altered w/o warning in future
   * releases
   * of Lucene.
   * </p>
   */
  public static abstract class CacheEntry {
    public abstract Object getReaderKey();
    public abstract String getFieldName();
    public abstract Class getCacheType();
    public abstract Object getCustom();
    public abstract Object getValue();
    // Human-readable RAM estimate, lazily computed by estimateSize().
    private String size = null;
    protected final void setEstimatedSize(String size) {
      this.size = size;
    }
    /**
     * @see #estimateSize(RamUsageEstimator)
     */
    public void estimateSize() {
      estimateSize(new RamUsageEstimator(false)); // doesn't check for interned
    }
    /**
     * Computes (and stores) the estimated size of the cache Value
     * @see #getEstimatedSize
     */
    public void estimateSize(RamUsageEstimator ramCalc) {
      long size = ramCalc.estimateRamUsage(getValue());
      setEstimatedSize(RamUsageEstimator.humanReadableUnits
                       (size, new DecimalFormat("0.#")));
    }
    /**
     * The most recently estimated size of the value, null unless
     * estimateSize has been called.
     */
    public final String getEstimatedSize() {
      return size;
    }

    @Override
    public String toString() {
      StringBuilder b = new StringBuilder();
      b.append("'").append(getReaderKey()).append("'=>");
      b.append("'").append(getFieldName()).append("',");
      b.append(getCacheType()).append(",").append(getCustom());
      b.append("=>").append(getValue().getClass().getName()).append("#");
      b.append(System.identityHashCode(getValue()));

      String s = getEstimatedSize();
      if(null != s) {
        b.append(" (size =~ ").append(s).append(')');
      }

      return b.toString();
    }
  }

  /**
   * EXPERT: Generates an array of CacheEntry objects representing all items
   * currently in the FieldCache.
   * <p>
   * NOTE: These CacheEntry objects maintain a strong reference to the
   * Cached Values.  Maintaining references to a CacheEntry after the
   * IndexReader associated with it has been garbage collected will prevent
   * the Value itself from being garbage collected when the Cache drops the
   * WeakReference.
   * </p>
   * <p>
   * <b>EXPERIMENTAL API:</b> This API is considered extremely advanced
   * and experimental.  It may be removed or altered w/o warning in future
   * releases
   * of Lucene.
   * </p>
   */
  public abstract CacheEntry[] getCacheEntries();

  /**
   * <p>
   * EXPERT: Instructs the FieldCache to forcibly expunge all entries
   * from the underlying caches.  This is intended only to be used for
   * test methods as a way to ensure a known base state of the Cache
   * (with out needing to rely on GC to free WeakReferences).
   * It should not be relied on for "Cache maintenance" in general
   * application code.
   * </p>
   * <p>
   * <b>EXPERIMENTAL API:</b> This API is considered extremely advanced
   * and experimental.  It may be removed or altered w/o warning in future
   * releases
   * of Lucene.
   * </p>
   */
  public abstract void purgeAllCaches();

  /**
   * Expert: drops all cache entries associated with this
   * reader.  NOTE: this reader must precisely match the
   * reader that the cache entry is keyed on. If you pass a
   * top-level reader, it usually will have no effect as
   * Lucene now caches at the segment reader level.
   */
  public abstract void purge(IndexReader r);

  /**
   * If non-null, FieldCacheImpl will warn whenever
   * entries are created that are not sane according to
   * {@link org.apache.lucene.util.FieldCacheSanityChecker}.
   */
  public void setInfoStream(PrintStream stream);

  /** counterpart of {@link #setInfoStream(PrintStream)} */
  public PrintStream getInfoStream();
}
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/search/FieldCache.java
Java
art
22,907
package org.apache.lucene.search.function;

/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.FieldCache;
import org.apache.lucene.search.function.DocValues;

import java.io.IOException;

/**
 * Expert: obtains single byte field values from the
 * {@link org.apache.lucene.search.FieldCache FieldCache}
 * using <code>getBytes()</code> and makes those values
 * available as other numeric types, casting as needed.
 *
 * <p><font color="#FF0000">
 * WARNING: The status of the <b>search.function</b> package is experimental.
 * The APIs introduced here might change in the future and will not be
 * supported anymore in such a case.</font>
 *
 * @see org.apache.lucene.search.function.FieldCacheSource for requirements
 * on the field.
 *
 * <p><b>NOTE</b>: with the switch in 2.9 to segment-based
 * searching, if {@link #getValues} is invoked with a
 * composite (multi-segment) reader, this can easily cause
 * double RAM usage for the values in the FieldCache. It's
 * best to switch your application to pass only atomic
 * (single segment) readers to this API. Alternatively, for
 * a short-term fix, you could wrap your ValueSource using
 * {@link MultiValueSource}, which costs more CPU per lookup
 * but will not consume double the FieldCache RAM.</p>
 */
public class ByteFieldSource extends FieldCacheSource {
  // Optional custom string-to-byte parser; null selects the FieldCache default.
  private FieldCache.ByteParser parser;

  /**
   * Create a cached byte field source with default string-to-byte parser.
   */
  public ByteFieldSource(String field) {
    this(field, null);
  }

  /**
   * Create a cached byte field source with a specific string-to-byte parser.
   */
  public ByteFieldSource(String field, FieldCache.ByteParser parser) {
    super(field);
    this.parser = parser;
  }

  /*(non-Javadoc) @see org.apache.lucene.search.function.ValueSource#description() */
  @Override
  public String description() {
    return "byte(" + super.description() + ')';
  }

  /*(non-Javadoc) @see org.apache.lucene.search.function.FieldCacheSource#getCachedValues(org.apache.lucene.search.FieldCache, java.lang.String, org.apache.lucene.index.IndexReader) */
  @Override
  public DocValues getCachedFieldValues (FieldCache cache, String field, IndexReader reader) throws IOException {
    // One byte per document, indexed by docId; backed by the shared FieldCache.
    final byte[] arr = cache.getBytes(reader, field, parser);
    return new DocValues() {
      /*(non-Javadoc) @see org.apache.lucene.search.function.DocValues#floatVal(int) */
      @Override
      public float floatVal(int doc) {
        return (float) arr[doc];
      }
      /*(non-Javadoc) @see org.apache.lucene.search.function.DocValues#intVal(int) */
      @Override
      public int intVal(int doc) {
        return arr[doc];
      }
      /*(non-Javadoc) @see org.apache.lucene.search.function.DocValues#toString(int) */
      @Override
      public String toString(int doc) {
        return description() + '=' + intVal(doc);
      }
      /*(non-Javadoc) @see org.apache.lucene.search.function.DocValues#getInnerArray() */
      @Override
      Object getInnerArray() {
        return arr;
      }
    };
  }

  /*(non-Javadoc) @see org.apache.lucene.search.function.FieldCacheSource#cachedFieldSourceEquals(org.apache.lucene.search.function.FieldCacheSource) */
  @Override
  public boolean cachedFieldSourceEquals(FieldCacheSource o) {
    if (o.getClass() != ByteFieldSource.class) {
      return false;
    }
    ByteFieldSource other = (ByteFieldSource)o;
    // Parsers are compared by class: two sources with parsers of the same
    // class are assumed to produce identical cached values.
    return this.parser==null ?
           other.parser==null :
           this.parser.getClass() == other.parser.getClass();
  }

  /*(non-Javadoc) @see org.apache.lucene.search.function.FieldCacheSource#cachedFieldSourceHashCode() */
  @Override
  public int cachedFieldSourceHashCode() {
    return parser==null ?
           Byte.class.hashCode() : parser.getClass().hashCode();
  }
}
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/search/function/ByteFieldSource.java
Java
art
4,623
/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.lucene.search.function;

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.FieldCache;

import java.io.IOException;

/**
 * Expert: obtains the ordinal of the field value from the default Lucene
 * {@link org.apache.lucene.search.FieldCache FieldCache} using getStringIndex().
 * <p>
 * The native lucene index order is used to assign an ordinal value for each field value.
 * <p>
 * Field values (terms) are lexicographically ordered by unicode value, and numbered starting at 1.
 * <p>
 * Example:
 * <br>If there were only three field values: "apple","banana","pear"
 * <br>then ord("apple")=1, ord("banana")=2, ord("pear")=3
 * <p>
 * WARNING:
 * ord() depends on the position in an index and can thus change
 * when other documents are inserted or deleted,
 * or if a MultiSearcher is used.
 *
 * <p><font color="#FF0000">
 * WARNING: The status of the <b>search.function</b> package is experimental.
 * The APIs introduced here might change in the future and will not be
 * supported anymore in such a case.</font>
 *
 * <p><b>NOTE</b>: with the switch in 2.9 to segment-based
 * searching, if {@link #getValues} is invoked with a
 * composite (multi-segment) reader, this can easily cause
 * double RAM usage for the values in the FieldCache. It's
 * best to switch your application to pass only atomic
 * (single segment) readers to this API.</p>
 */
public class OrdFieldSource extends ValueSource {
  // Name of the field whose term ordinals supply the values.
  protected String field;

  /**
   * Constructor for a certain field.
   * @param field field whose values order is used.
   */
  public OrdFieldSource(String field) {
    this.field = field;
  }

  /*(non-Javadoc) @see org.apache.lucene.search.function.ValueSource#description() */
  @Override
  public String description() {
    return "ord(" + field + ')';
  }

  /*(non-Javadoc) @see org.apache.lucene.search.function.ValueSource#getValues(org.apache.lucene.index.IndexReader) */
  @Override
  public DocValues getValues(IndexReader reader) throws IOException {
    // order[doc] is the ordinal (1-based; 0 means "no value") of doc's term.
    final int[] arr = FieldCache.DEFAULT.getStringIndex(reader, field).order;
    return new DocValues() {
      /*(non-Javadoc) @see org.apache.lucene.search.function.DocValues#floatVal(int) */
      @Override
      public float floatVal(int doc) {
        return (float)arr[doc];
      }
      /*(non-Javadoc) @see org.apache.lucene.search.function.DocValues#strVal(int) */
      @Override
      public String strVal(int doc) {
        // the string value of the ordinal, not the string itself
        return Integer.toString(arr[doc]);
      }
      /*(non-Javadoc) @see org.apache.lucene.search.function.DocValues#toString(int) */
      @Override
      public String toString(int doc) {
        return description() + '=' + intVal(doc);
      }
      /*(non-Javadoc) @see org.apache.lucene.search.function.DocValues#getInnerArray() */
      @Override
      Object getInnerArray() {
        return arr;
      }
    };
  }

  /*(non-Javadoc) @see java.lang.Object#equals(java.lang.Object) */
  @Override
  public boolean equals(Object o) {
    if (o == this) return true;
    // Null check required by the Object.equals contract: x.equals(null)
    // must return false; the previous code threw NullPointerException here.
    if (o == null || o.getClass() != OrdFieldSource.class) return false;
    OrdFieldSource other = (OrdFieldSource)o;
    return this.field.equals(other.field);
  }

  private static final int hcode = OrdFieldSource.class.hashCode();

  /*(non-Javadoc) @see java.lang.Object#hashCode() */
  @Override
  public int hashCode() {
    return hcode + field.hashCode();
  }
}
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/search/function/OrdFieldSource.java
Java
art
4,238
package org.apache.lucene.search.function; /** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.Term; import org.apache.lucene.index.TermDocs; import org.apache.lucene.search.*; import org.apache.lucene.util.ToStringUtils; import java.io.IOException; import java.util.Set; /** * Expert: A Query that sets the scores of document to the * values obtained from a {@link org.apache.lucene.search.function.ValueSource ValueSource}. * <p> * This query provides a score for <em>each and every</em> undeleted document in the index. * <p> * The value source can be based on a (cached) value of an indexed field, but it * can also be based on an external source, e.g. values read from an external database. * <p> * Score is set as: Score(doc,query) = query.getBoost()<sup>2</sup> * valueSource(doc). * * <p><font color="#FF0000"> * WARNING: The status of the <b>search.function</b> package is experimental. 
* The APIs introduced here might change in the future and will not be * supported anymore in such a case.</font> */ public class ValueSourceQuery extends Query { ValueSource valSrc; /** * Create a value source query * @param valSrc provides the values defines the function to be used for scoring */ public ValueSourceQuery(ValueSource valSrc) { this.valSrc=valSrc; } /*(non-Javadoc) @see org.apache.lucene.search.Query#rewrite(org.apache.lucene.index.IndexReader) */ @Override public Query rewrite(IndexReader reader) throws IOException { return this; } /*(non-Javadoc) @see org.apache.lucene.search.Query#extractTerms(java.util.Set) */ @Override public void extractTerms(Set<Term> terms) { // no terms involved here } class ValueSourceWeight extends Weight { Similarity similarity; float queryNorm; float queryWeight; public ValueSourceWeight(Searcher searcher) { this.similarity = getSimilarity(searcher); } /*(non-Javadoc) @see org.apache.lucene.search.Weight#getQuery() */ @Override public Query getQuery() { return ValueSourceQuery.this; } /*(non-Javadoc) @see org.apache.lucene.search.Weight#getValue() */ @Override public float getValue() { return queryWeight; } /*(non-Javadoc) @see org.apache.lucene.search.Weight#sumOfSquaredWeights() */ @Override public float sumOfSquaredWeights() throws IOException { queryWeight = getBoost(); return queryWeight * queryWeight; } /*(non-Javadoc) @see org.apache.lucene.search.Weight#normalize(float) */ @Override public void normalize(float norm) { this.queryNorm = norm; queryWeight *= this.queryNorm; } @Override public Scorer scorer(IndexReader reader, boolean scoreDocsInOrder, boolean topScorer) throws IOException { return new ValueSourceScorer(similarity, reader, this); } /*(non-Javadoc) @see org.apache.lucene.search.Weight#explain(org.apache.lucene.index.IndexReader, int) */ @Override public Explanation explain(IndexReader reader, int doc) throws IOException { DocValues vals = valSrc.getValues(reader); float sc = queryWeight * 
vals.floatVal(doc); Explanation result = new ComplexExplanation( true, sc, ValueSourceQuery.this.toString() + ", product of:"); result.addDetail(vals.explain(doc)); result.addDetail(new Explanation(getBoost(), "boost")); result.addDetail(new Explanation(queryNorm,"queryNorm")); return result; } } /** * A scorer that (simply) matches all documents, and scores each document with * the value of the value source in effect. As an example, if the value source * is a (cached) field source, then value of that field in that document will * be used. (assuming field is indexed for this doc, with a single token.) */ private class ValueSourceScorer extends Scorer { private final ValueSourceWeight weight; private final float qWeight; private final DocValues vals; private final TermDocs termDocs; private int doc = -1; // constructor private ValueSourceScorer(Similarity similarity, IndexReader reader, ValueSourceWeight w) throws IOException { super(similarity); this.weight = w; this.qWeight = w.getValue(); // this is when/where the values are first created. vals = valSrc.getValues(reader); termDocs = reader.termDocs(null); } @Override public int nextDoc() throws IOException { return doc = termDocs.next() ? termDocs.doc() : NO_MORE_DOCS; } @Override public int docID() { return doc; } @Override public int advance(int target) throws IOException { return doc = termDocs.skipTo(target) ? termDocs.doc() : NO_MORE_DOCS; } /*(non-Javadoc) @see org.apache.lucene.search.Scorer#score() */ @Override public float score() throws IOException { return qWeight * vals.floatVal(termDocs.doc()); } } @Override public Weight createWeight(Searcher searcher) { return new ValueSourceQuery.ValueSourceWeight(searcher); } @Override public String toString(String field) { return valSrc.toString() + ToStringUtils.boost(getBoost()); } /** Returns true if <code>o</code> is equal to this. 
*/ @Override public boolean equals(Object o) { if (getClass() != o.getClass()) { return false; } ValueSourceQuery other = (ValueSourceQuery)o; return this.getBoost() == other.getBoost() && this.valSrc.equals(other.valSrc); } /** Returns a hash code value for this object. */ @Override public int hashCode() { return (getClass().hashCode() + valSrc.hashCode()) ^ Float.floatToIntBits(getBoost()); } }
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/search/function/ValueSourceQuery.java
Java
art
6,547
<HTML> <!-- Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> <HEAD> <TITLE>org.apache.lucene.search.function</TITLE> </HEAD> <BODY> <DIV> Programmatic control over documents scores. </DIV> <DIV> The <code>function</code> package provides tight control over documents scores. </DIV> <DIV> <font color="#FF0000"> WARNING: The status of the <b>search.function</b> package is experimental. The APIs introduced here might change in the future and will not be supported anymore in such a case. </font> </DIV> <DIV> Two types of queries are available in this package: </DIV> <DIV> <ol> <li> <b>Custom Score queries</b> - allowing to set the score of a matching document as a mathematical expression over scores of that document by contained (sub) queries. </li> <li> <b>Field score queries</b> - allowing to base the score of a document on <b>numeric values</b> of <b>indexed fields</b>. </li> </ol> </DIV> <DIV>&nbsp;</DIV> <DIV> <b>Some possible uses of these queries:</b> </DIV> <DIV> <ol> <li> Normalizing the document scores by values indexed in a special field - for instance, experimenting with a different doc length normalization. </li> <li> Introducing some static scoring element, to the score of a document, - for instance using some topological attribute of the links to/from a document. 
</li> <li> Computing the score of a matching document as an arbitrary odd function of its score by a certain query. </li> </ol> </DIV> <DIV> <b>Performance and Quality Considerations:</b> </DIV> <DIV> <ol> <li> When scoring by values of indexed fields, these values are loaded into memory. Unlike the regular scoring, where the required information is read from disk as necessary, here field values are loaded once and cached by Lucene in memory for further use, anticipating reuse by further queries. While all this is carefully cached with performance in mind, it is recommended to use these features only when the default Lucene scoring does not match your "special" application needs. </li> <li> Use only with carefully selected fields, because in most cases, search quality with regular Lucene scoring would outperform that of scoring by field values. </li> <li> Values of fields used for scoring should match. Do not apply on a field containing arbitrary (long) text. Do not mix values in the same field if that field is used for scoring. </li> <li> Smaller (shorter) field tokens means less RAM (something always desired). When using <a href=FieldScoreQuery.html>FieldScoreQuery</a>, select the shortest <a href=FieldScoreQuery.html#Type>FieldScoreQuery.Type</a> that is sufficient for the used field values. </li> <li> Reusing IndexReaders/IndexSearchers is essential, because the caching of field tokens is based on an IndexReader. Whenever a new IndexReader is used, values currently in the cache cannot be used and new values must be loaded from disk. So replace/refresh readers/searchers in a controlled manner. </li> </ol> </DIV> <DIV> <b>History and Credits:</b> <ul> <li> A large part of the code of this package was originated from Yonik's FunctionQuery code that was imported from <a href="http://lucene.apache.org/solr">Solr</a> (see <a href="http://issues.apache.org/jira/browse/LUCENE-446">LUCENE-446</a>). 
</li> <li> The idea behind CustomScoreQuery is borrowed from the "Easily create queries that transform sub-query scores arbitrarily" contribution by Mike Klaas (see <a href="http://issues.apache.org/jira/browse/LUCENE-850">LUCENE-850</a>) though the implementation and API here are different. </li> </ul> </DIV> <DIV> <b>Code sample:</b> <P> Note: code snippets here should work, but they were never really compiled... so, test sources under TestCustomScoreQuery, TestFieldScoreQuery and TestOrdValues may also be useful. <ol> <li> Using field (byte) values as scores: <p> Indexing: <pre> f = new Field("score", "7", Field.Store.NO, Field.Index.UN_TOKENIZED); f.setOmitNorms(true); d1.add(f); </pre> <p> Search: <pre> Query q = new FieldScoreQuery("score", FieldScoreQuery.Type.BYTE); </pre> Document d1 above would get a score of 7. </li> <p> <li> Manipulating scores <p> Dividing the original score of each document by a square root of its docid (just to demonstrate what it takes to manipulate scores this way) <pre> Query q = queryParser.parse("my query text"); CustomScoreQuery customQ = new CustomScoreQuery(q) { public float customScore(int doc, float subQueryScore, float valSrcScore) { return subQueryScore / Math.sqrt(docid); } }; </pre> <p> For more informative debug info on the custom query, also override the name() method: <pre> CustomScoreQuery customQ = new CustomScoreQuery(q) { public float customScore(int doc, float subQueryScore, float valSrcScore) { return subQueryScore / Math.sqrt(docid); } public String name() { return "1/sqrt(docid)"; } }; </pre> <p> Taking the square root of the original score and multiplying it by a "short field driven score", i.e., the short value that was indexed for the scored doc in a certain field: <pre> Query q = queryParser.parse("my query text"); FieldScoreQuery qf = new FieldScoreQuery("shortScore", FieldScoreQuery.Type.SHORT); CustomScoreQuery customQ = new CustomScoreQuery(q,qf) { public float customScore(int doc, float
subQueryScore, float valSrcScore) { return Math.sqrt(subQueryScore) * valSrcScore; } public String name() { return "shortVal*sqrt(score)"; } }; </pre> </li> </ol> </DIV> </BODY> </HTML>
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/search/function/package.html
HTML
art
6,921
package org.apache.lucene.search.function; /** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import org.apache.lucene.search.Explanation; /** * Expert: represents field values as different types. * Normally created via a * {@link org.apache.lucene.search.function.ValueSource ValueSuorce} * for a particular field and reader. * * <p><font color="#FF0000"> * WARNING: The status of the <b>search.function</b> package is experimental. * The APIs introduced here might change in the future and will not be * supported anymore in such a case.</font> * * */ public abstract class DocValues { /* * DocValues is distinct from ValueSource because * there needs to be an object created at query evaluation time that * is not referenced by the query itself because: * - Query objects should be MT safe * - For caching, Query objects are often used as keys... you don't * want the Query carrying around big objects */ /** * Return doc value as a float. * <P>Mandatory: every DocValues implementation must implement at least this method. * @param doc document whose float value is requested. */ public abstract float floatVal(int doc); /** * Return doc value as an int. * <P>Optional: DocValues implementation can (but don't have to) override this method. 
* @param doc document whose int value is requested. */ public int intVal(int doc) { return (int) floatVal(doc); } /** * Return doc value as a long. * <P>Optional: DocValues implementation can (but don't have to) override this method. * @param doc document whose long value is requested. */ public long longVal(int doc) { return (long) floatVal(doc); } /** * Return doc value as a double. * <P>Optional: DocValues implementation can (but don't have to) override this method. * @param doc document whose double value is requested. */ public double doubleVal(int doc) { return (double) floatVal(doc); } /** * Return doc value as a string. * <P>Optional: DocValues implementation can (but don't have to) override this method. * @param doc document whose string value is requested. */ public String strVal(int doc) { return Float.toString(floatVal(doc)); } /** * Return a string representation of a doc value, as required for Explanations. */ public abstract String toString(int doc); /** * Explain the scoring value for the input doc. */ public Explanation explain(int doc) { return new Explanation(floatVal(doc), toString(doc)); } /** * Expert: for test purposes only, return the inner array of values, or null if not applicable. * <p> * Allows tests to verify that loaded values are: * <ol> * <li>indeed cached/reused.</li> * <li>stored in the expected size/type (byte/short/int/float).</li> * </ol> * Note: implementations of DocValues must override this method for * these test elements to be tested, Otherwise the test would not fail, just * print a warning. 
*/ Object getInnerArray() { throw new UnsupportedOperationException("this optional method is for test purposes only"); } // --- some simple statistics on values private float minVal = Float.NaN; private float maxVal = Float.NaN; private float avgVal = Float.NaN; private boolean computed=false; // compute optional values private void compute() { if (computed) { return; } float sum = 0; int n = 0; while (true) { float val; try { val = floatVal(n); } catch (ArrayIndexOutOfBoundsException e) { break; } sum += val; minVal = Float.isNaN(minVal) ? val : Math.min(minVal, val); maxVal = Float.isNaN(maxVal) ? val : Math.max(maxVal, val); ++n; } avgVal = n == 0 ? Float.NaN : sum / n; computed = true; } /** * Returns the minimum of all values or <code>Float.NaN</code> if this * DocValues instance does not contain any value. * <p> * This operation is optional * </p> * * @return the minimum of all values or <code>Float.NaN</code> if this * DocValues instance does not contain any value. */ public float getMinValue() { compute(); return minVal; } /** * Returns the maximum of all values or <code>Float.NaN</code> if this * DocValues instance does not contain any value. * <p> * This operation is optional * </p> * * @return the maximum of all values or <code>Float.NaN</code> if this * DocValues instance does not contain any value. */ public float getMaxValue() { compute(); return maxVal; } /** * Returns the average of all values or <code>Float.NaN</code> if this * DocValues instance does not contain any value. * * <p> * This operation is optional * </p> * * @return the average of all values or <code>Float.NaN</code> if this * DocValues instance does not contain any value */ public float getAverageValue() { compute(); return avgVal; } }
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/search/function/DocValues.java
Java
art
5,833
package org.apache.lucene.search.function; /** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import java.io.IOException; import org.apache.lucene.index.IndexReader; import org.apache.lucene.search.FieldCache; /** * Expert: A base class for ValueSource implementations that retrieve values for * a single field from the {@link org.apache.lucene.search.FieldCache FieldCache}. * <p> * Fields used herein must be indexed (doesn't matter if these fields are stored or not). * <p> * It is assumed that each such indexed field is untokenized, or at least has a single token in a document. * For documents with multiple tokens of the same field, behavior is undefined (It is likely that current * code would use the value of one of these tokens, but this is not guaranteed). * <p> * Document with no tokens in this field are assigned the <code>Zero</code> value. * * <p><font color="#FF0000"> * WARNING: The status of the <b>search.function</b> package is experimental. 
* The APIs introduced here might change in the future and will not be * supported anymore in such a case.</font> * * <p><b>NOTE</b>: with the switch in 2.9 to segment-based * searching, if {@link #getValues} is invoked with a * composite (multi-segment) reader, this can easily cause * double RAM usage for the values in the FieldCache. It's * best to switch your application to pass only atomic * (single segment) readers to this API.</p> */ public abstract class FieldCacheSource extends ValueSource { private String field; /** * Create a cached field source for the input field. */ public FieldCacheSource(String field) { this.field=field; } /* (non-Javadoc) @see org.apache.lucene.search.function.ValueSource#getValues(org.apache.lucene.index.IndexReader) */ @Override public final DocValues getValues(IndexReader reader) throws IOException { return getCachedFieldValues(FieldCache.DEFAULT, field, reader); } /* (non-Javadoc) @see org.apache.lucene.search.function.ValueSource#description() */ @Override public String description() { return field; } /** * Return cached DocValues for input field and reader. * @param cache FieldCache so that values of a field are loaded once per reader (RAM allowing) * @param field Field for which values are required. * @see ValueSource */ public abstract DocValues getCachedFieldValues(FieldCache cache, String field, IndexReader reader) throws IOException; /*(non-Javadoc) @see java.lang.Object#equals(java.lang.Object) */ @Override public final boolean equals(Object o) { if (!(o instanceof FieldCacheSource)) { return false; } FieldCacheSource other = (FieldCacheSource) o; return this.field.equals(other.field) && cachedFieldSourceEquals(other); } /*(non-Javadoc) @see java.lang.Object#hashCode() */ @Override public final int hashCode() { return field.hashCode() + cachedFieldSourceHashCode(); } /** * Check if equals to another {@link FieldCacheSource}, already knowing that cache and field are equal. 
* @see Object#equals(java.lang.Object) */ public abstract boolean cachedFieldSourceEquals(FieldCacheSource other); /** * Return a hash code of a {@link FieldCacheSource}, without the hash-codes of the field * and the cache (those are taken care of elsewhere). * @see Object#hashCode() */ public abstract int cachedFieldSourceHashCode(); }
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/search/function/FieldCacheSource.java
Java
art
4,215
package org.apache.lucene.search.function; /** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import org.apache.lucene.index.IndexReader; import org.apache.lucene.search.FieldCache; import org.apache.lucene.search.function.DocValues; import java.io.IOException; /** * Expert: obtains short field values from the * {@link org.apache.lucene.search.FieldCache FieldCache} * using <code>getShorts()</code> and makes those values * available as other numeric types, casting as needed. * * <p><font color="#FF0000"> * WARNING: The status of the <b>search.function</b> package is experimental. * The APIs introduced here might change in the future and will not be * supported anymore in such a case.</font> * * @see org.apache.lucene.search.function.FieldCacheSource for requirements * on the field. * * <p><b>NOTE</b>: with the switch in 2.9 to segment-based * searching, if {@link #getValues} is invoked with a * composite (multi-segment) reader, this can easily cause * double RAM usage for the values in the FieldCache. It's * best to switch your application to pass only atomic * (single segment) readers to this API. 
Alternatively, for * a short-term fix, you could wrap your ValueSource using * {@link MultiValueSource}, which costs more CPU per lookup * but will not consume double the FieldCache RAM.</p> */ public class ShortFieldSource extends FieldCacheSource { private FieldCache.ShortParser parser; /** * Create a cached short field source with default string-to-short parser. */ public ShortFieldSource(String field) { this(field, null); } /** * Create a cached short field source with a specific string-to-short parser. */ public ShortFieldSource(String field, FieldCache.ShortParser parser) { super(field); this.parser = parser; } /*(non-Javadoc) @see org.apache.lucene.search.function.ValueSource#description() */ @Override public String description() { return "short(" + super.description() + ')'; } /*(non-Javadoc) @see org.apache.lucene.search.function.FieldCacheSource#getCachedValues(org.apache.lucene.search.FieldCache, java.lang.String, org.apache.lucene.index.IndexReader) */ @Override public DocValues getCachedFieldValues (FieldCache cache, String field, IndexReader reader) throws IOException { final short[] arr = cache.getShorts(reader, field, parser); return new DocValues() { /*(non-Javadoc) @see org.apache.lucene.search.function.DocValues#floatVal(int) */ @Override public float floatVal(int doc) { return (float) arr[doc]; } /*(non-Javadoc) @see org.apache.lucene.search.function.DocValues#intVal(int) */ @Override public int intVal(int doc) { return arr[doc]; } /*(non-Javadoc) @see org.apache.lucene.search.function.DocValues#toString(int) */ @Override public String toString(int doc) { return description() + '=' + intVal(doc); } /*(non-Javadoc) @see org.apache.lucene.search.function.DocValues#getInnerArray() */ @Override Object getInnerArray() { return arr; } }; } /*(non-Javadoc) @see org.apache.lucene.search.function.FieldCacheSource#cachedFieldSourceEquals(org.apache.lucene.search.function.FieldCacheSource) */ @Override public boolean 
cachedFieldSourceEquals(FieldCacheSource o) { if (o.getClass() != ShortFieldSource.class) { return false; } ShortFieldSource other = (ShortFieldSource)o; return this.parser==null ? other.parser==null : this.parser.getClass() == other.parser.getClass(); } /*(non-Javadoc) @see org.apache.lucene.search.function.FieldCacheSource#cachedFieldSourceHashCode() */ @Override public int cachedFieldSourceHashCode() { return parser==null ? Short.class.hashCode() : parser.getClass().hashCode(); } }
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/search/function/ShortFieldSource.java
Java
art
4,632
package org.apache.lucene.search.function; /** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import org.apache.lucene.index.IndexReader; import org.apache.lucene.search.FieldCache; import org.apache.lucene.search.function.DocValues; import java.io.IOException; /** * Expert: obtains int field values from the * {@link org.apache.lucene.search.FieldCache FieldCache} * using <code>getInts()</code> and makes those values * available as other numeric types, casting as needed. * * <p><font color="#FF0000"> * WARNING: The status of the <b>search.function</b> package is experimental. * The APIs introduced here might change in the future and will not be * supported anymore in such a case.</font> * * @see org.apache.lucene.search.function.FieldCacheSource for requirements * on the field. * * <p><b>NOTE</b>: with the switch in 2.9 to segment-based * searching, if {@link #getValues} is invoked with a * composite (multi-segment) reader, this can easily cause * double RAM usage for the values in the FieldCache. It's * best to switch your application to pass only atomic * (single segment) readers to this API. 
Alternatively, for * a short-term fix, you could wrap your ValueSource using * {@link MultiValueSource}, which costs more CPU per lookup * but will not consume double the FieldCache RAM.</p> */ public class IntFieldSource extends FieldCacheSource { private FieldCache.IntParser parser; /** * Create a cached int field source with default string-to-int parser. */ public IntFieldSource(String field) { this(field, null); } /** * Create a cached int field source with a specific string-to-int parser. */ public IntFieldSource(String field, FieldCache.IntParser parser) { super(field); this.parser = parser; } /*(non-Javadoc) @see org.apache.lucene.search.function.ValueSource#description() */ @Override public String description() { return "int(" + super.description() + ')'; } /*(non-Javadoc) @see org.apache.lucene.search.function.FieldCacheSource#getCachedValues(org.apache.lucene.search.FieldCache, java.lang.String, org.apache.lucene.index.IndexReader) */ @Override public DocValues getCachedFieldValues (FieldCache cache, String field, IndexReader reader) throws IOException { final int[] arr = cache.getInts(reader, field, parser); return new DocValues() { /*(non-Javadoc) @see org.apache.lucene.search.function.DocValues#floatVal(int) */ @Override public float floatVal(int doc) { return (float) arr[doc]; } /*(non-Javadoc) @see org.apache.lucene.search.function.DocValues#intVal(int) */ @Override public int intVal(int doc) { return arr[doc]; } /*(non-Javadoc) @see org.apache.lucene.search.function.DocValues#toString(int) */ @Override public String toString(int doc) { return description() + '=' + intVal(doc); } /*(non-Javadoc) @see org.apache.lucene.search.function.DocValues#getInnerArray() */ @Override Object getInnerArray() { return arr; } }; } /*(non-Javadoc) @see org.apache.lucene.search.function.FieldCacheSource#cachedFieldSourceEquals(org.apache.lucene.search.function.FieldCacheSource) */ @Override public boolean cachedFieldSourceEquals(FieldCacheSource o) { if (o.getClass() 
!= IntFieldSource.class) { return false; } IntFieldSource other = (IntFieldSource)o; return this.parser==null ? other.parser==null : this.parser.getClass() == other.parser.getClass(); } /*(non-Javadoc) @see org.apache.lucene.search.function.FieldCacheSource#cachedFieldSourceHashCode() */ @Override public int cachedFieldSourceHashCode() { return parser==null ? Integer.class.hashCode() : parser.getClass().hashCode(); } }
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/search/function/IntFieldSource.java
Java
art
4,601
package org.apache.lucene.search.function; /** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import org.apache.lucene.index.IndexReader; import org.apache.lucene.search.FieldCache; import org.apache.lucene.search.function.DocValues; import java.io.IOException; /** * Expert: obtains float field values from the * {@link org.apache.lucene.search.FieldCache FieldCache} * using <code>getFloats()</code> and makes those values * available as other numeric types, casting as needed. * * <p><font color="#FF0000"> * WARNING: The status of the <b>search.function</b> package is experimental. * The APIs introduced here might change in the future and will not be * supported anymore in such a case.</font> * * @see org.apache.lucene.search.function.FieldCacheSource for requirements * on the field. * * <p><b>NOTE</b>: with the switch in 2.9 to segment-based * searching, if {@link #getValues} is invoked with a * composite (multi-segment) reader, this can easily cause * double RAM usage for the values in the FieldCache. It's * best to switch your application to pass only atomic * (single segment) readers to this API. 
Alternatively, for * a short-term fix, you could wrap your ValueSource using * {@link MultiValueSource}, which costs more CPU per lookup * but will not consume double the FieldCache RAM.</p> */ public class FloatFieldSource extends FieldCacheSource { private FieldCache.FloatParser parser; /** * Create a cached float field source with default string-to-float parser. */ public FloatFieldSource(String field) { this(field, null); } /** * Create a cached float field source with a specific string-to-float parser. */ public FloatFieldSource(String field, FieldCache.FloatParser parser) { super(field); this.parser = parser; } /*(non-Javadoc) @see org.apache.lucene.search.function.ValueSource#description() */ @Override public String description() { return "float(" + super.description() + ')'; } /*(non-Javadoc) @see org.apache.lucene.search.function.FieldCacheSource#getCachedValues(org.apache.lucene.search.FieldCache, java.lang.String, org.apache.lucene.index.IndexReader) */ @Override public DocValues getCachedFieldValues (FieldCache cache, String field, IndexReader reader) throws IOException { final float[] arr = cache.getFloats(reader, field, parser); return new DocValues() { /*(non-Javadoc) @see org.apache.lucene.search.function.DocValues#floatVal(int) */ @Override public float floatVal(int doc) { return arr[doc]; } /*(non-Javadoc) @see org.apache.lucene.search.function.DocValues#toString(int) */ @Override public String toString(int doc) { return description() + '=' + arr[doc]; } /*(non-Javadoc) @see org.apache.lucene.search.function.DocValues#getInnerArray() */ @Override Object getInnerArray() { return arr; } }; } /*(non-Javadoc) @see org.apache.lucene.search.function.FieldCacheSource#cachedFieldSourceEquals(org.apache.lucene.search.function.FieldCacheSource) */ @Override public boolean cachedFieldSourceEquals(FieldCacheSource o) { if (o.getClass() != FloatFieldSource.class) { return false; } FloatFieldSource other = (FloatFieldSource)o; return this.parser==null ? 
other.parser==null : this.parser.getClass() == other.parser.getClass(); } /*(non-Javadoc) @see org.apache.lucene.search.function.FieldCacheSource#cachedFieldSourceHashCode() */ @Override public int cachedFieldSourceHashCode() { return parser==null ? Float.class.hashCode() : parser.getClass().hashCode(); } }
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/search/function/FloatFieldSource.java
Java
art
4,451
package org.apache.lucene.search.function; /** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import java.io.IOException; import java.util.Set; import java.util.Arrays; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.Term; import org.apache.lucene.search.ComplexExplanation; import org.apache.lucene.search.Explanation; import org.apache.lucene.search.Query; import org.apache.lucene.search.Weight; import org.apache.lucene.search.Scorer; import org.apache.lucene.search.Searcher; import org.apache.lucene.search.Similarity; import org.apache.lucene.util.ToStringUtils; /** * Query that sets document score as a programmatic function of several (sub) scores: * <ol> * <li>the score of its subQuery (any query)</li> * <li>(optional) the score of its ValueSourceQuery (or queries). * For most simple/convenient use cases this query is likely to be a * {@link org.apache.lucene.search.function.FieldScoreQuery FieldScoreQuery}</li> * </ol> * Subclasses can modify the computation by overriding {@link #getCustomScoreProvider}. * * <p><font color="#FF0000"> * WARNING: The status of the <b>search.function</b> package is experimental. 
* The APIs introduced here might change in the future and will not be * supported anymore in such a case.</font> */ public class CustomScoreQuery extends Query { private Query subQuery; private ValueSourceQuery[] valSrcQueries; // never null (empty array if there are no valSrcQueries). private boolean strict = false; // if true, valueSource part of query does not take part in weights normalization. /** * Create a CustomScoreQuery over input subQuery. * @param subQuery the sub query whose score is being customized. Must not be null. */ public CustomScoreQuery(Query subQuery) { this(subQuery, new ValueSourceQuery[0]); } /** * Create a CustomScoreQuery over input subQuery and a {@link ValueSourceQuery}. * @param subQuery the sub query whose score is being customized. Must not be null. * @param valSrcQuery a value source query whose scores are used in the custom score * computation. For most simple/convenient use case this would be a * {@link org.apache.lucene.search.function.FieldScoreQuery FieldScoreQuery}. * This parameter is optional - it can be null. */ public CustomScoreQuery(Query subQuery, ValueSourceQuery valSrcQuery) { this(subQuery, valSrcQuery!=null ? // don't want an array that contains a single null.. new ValueSourceQuery[] {valSrcQuery} : new ValueSourceQuery[0]); } /** * Create a CustomScoreQuery over input subQuery and a {@link ValueSourceQuery}. * @param subQuery the sub query whose score is being customized. Must not be null. * @param valSrcQueries value source queries whose scores are used in the custom score * computation. For most simple/convenient use case these would be * {@link org.apache.lucene.search.function.FieldScoreQuery FieldScoreQueries}. * This parameter is optional - it can be null or even an empty array. */ public CustomScoreQuery(Query subQuery, ValueSourceQuery... valSrcQueries) { this.subQuery = subQuery; this.valSrcQueries = valSrcQueries!=null?
valSrcQueries : new ValueSourceQuery[0]; if (subQuery == null) throw new IllegalArgumentException("<subquery> must not be null!"); } /*(non-Javadoc) @see org.apache.lucene.search.Query#rewrite(org.apache.lucene.index.IndexReader) */ @Override public Query rewrite(IndexReader reader) throws IOException { CustomScoreQuery clone = null; final Query sq = subQuery.rewrite(reader); if (sq != subQuery) { clone = (CustomScoreQuery) clone(); clone.subQuery = sq; } for(int i = 0; i < valSrcQueries.length; i++) { final ValueSourceQuery v = (ValueSourceQuery) valSrcQueries[i].rewrite(reader); if (v != valSrcQueries[i]) { if (clone == null) clone = (CustomScoreQuery) clone(); clone.valSrcQueries[i] = v; } } return (clone == null) ? this : clone; } /*(non-Javadoc) @see org.apache.lucene.search.Query#extractTerms(java.util.Set) */ @Override public void extractTerms(Set<Term> terms) { subQuery.extractTerms(terms); for(int i = 0; i < valSrcQueries.length; i++) { valSrcQueries[i].extractTerms(terms); } } /*(non-Javadoc) @see org.apache.lucene.search.Query#clone() */ @Override public Object clone() { CustomScoreQuery clone = (CustomScoreQuery)super.clone(); clone.subQuery = (Query) subQuery.clone(); clone.valSrcQueries = new ValueSourceQuery[valSrcQueries.length]; for(int i = 0; i < valSrcQueries.length; i++) { clone.valSrcQueries[i] = (ValueSourceQuery) valSrcQueries[i].clone(); } return clone; } /* (non-Javadoc) @see org.apache.lucene.search.Query#toString(java.lang.String) */ @Override public String toString(String field) { StringBuilder sb = new StringBuilder(name()).append("("); sb.append(subQuery.toString(field)); for(int i = 0; i < valSrcQueries.length; i++) { sb.append(", ").append(valSrcQueries[i].toString(field)); } sb.append(")"); sb.append(strict?" STRICT" : ""); return sb.toString() + ToStringUtils.boost(getBoost()); } /** Returns true if <code>o</code> is equal to this. 
*/ @Override public boolean equals(Object o) { if (getClass() != o.getClass()) { return false; } CustomScoreQuery other = (CustomScoreQuery)o; if (this.getBoost() != other.getBoost() || !this.subQuery.equals(other.subQuery) || this.strict != other.strict || this.valSrcQueries.length != other.valSrcQueries.length) { return false; } return Arrays.equals(valSrcQueries, other.valSrcQueries); } /** Returns a hash code value for this object. */ @Override public int hashCode() { return (getClass().hashCode() + subQuery.hashCode() + Arrays.hashCode(valSrcQueries)) ^ Float.floatToIntBits(getBoost()) ^ (strict ? 1234 : 4321); } /** * Returns a {@link CustomScoreProvider} that calculates the custom scores * for the given {@link IndexReader}. The default implementation returns a default * implementation as specified in the docs of {@link CustomScoreProvider}. * @since 2.9.2 */ protected CustomScoreProvider getCustomScoreProvider(IndexReader reader) throws IOException { // when deprecated methods are removed, do not extend class here, just return new default CustomScoreProvider return new CustomScoreProvider(reader) { @Override public float customScore(int doc, float subQueryScore, float valSrcScores[]) throws IOException { return CustomScoreQuery.this.customScore(doc, subQueryScore, valSrcScores); } @Override public float customScore(int doc, float subQueryScore, float valSrcScore) throws IOException { return CustomScoreQuery.this.customScore(doc, subQueryScore, valSrcScore); } @Override public Explanation customExplain(int doc, Explanation subQueryExpl, Explanation valSrcExpls[]) throws IOException { return CustomScoreQuery.this.customExplain(doc, subQueryExpl, valSrcExpls); } @Override public Explanation customExplain(int doc, Explanation subQueryExpl, Explanation valSrcExpl) throws IOException { return CustomScoreQuery.this.customExplain(doc, subQueryExpl, valSrcExpl); } }; } /** * Compute a custom score by the subQuery score and a number of * ValueSourceQuery scores. 
* @deprecated Will be removed in Lucene 3.1. * The doc is relative to the current reader, which is * unknown to CustomScoreQuery when using per-segment search (since Lucene 2.9). * Please override {@link #getCustomScoreProvider} and return a subclass * of {@link CustomScoreProvider} for the given {@link IndexReader}. * @see CustomScoreProvider#customScore(int,float,float[]) */ @Deprecated public float customScore(int doc, float subQueryScore, float valSrcScores[]) { if (valSrcScores.length == 1) { return customScore(doc, subQueryScore, valSrcScores[0]); } if (valSrcScores.length == 0) { return customScore(doc, subQueryScore, 1); } float score = subQueryScore; for(int i = 0; i < valSrcScores.length; i++) { score *= valSrcScores[i]; } return score; } /** * Compute a custom score by the subQuery score and the ValueSourceQuery score. * @deprecated Will be removed in Lucene 3.1. * The doc is relative to the current reader, which is * unknown to CustomScoreQuery when using per-segment search (since Lucene 2.9). * Please override {@link #getCustomScoreProvider} and return a subclass * of {@link CustomScoreProvider} for the given {@link IndexReader}. * @see CustomScoreProvider#customScore(int,float,float) */ @Deprecated public float customScore(int doc, float subQueryScore, float valSrcScore) { return subQueryScore * valSrcScore; } /** * Explain the custom score. * @deprecated Will be removed in Lucene 3.1. * The doc is relative to the current reader, which is * unknown to CustomScoreQuery when using per-segment search (since Lucene 2.9). * Please override {@link #getCustomScoreProvider} and return a subclass * of {@link CustomScoreProvider} for the given {@link IndexReader}. 
* @see CustomScoreProvider#customExplain(int,Explanation,Explanation[]) */ @Deprecated public Explanation customExplain(int doc, Explanation subQueryExpl, Explanation valSrcExpls[]) { if (valSrcExpls.length == 1) { return customExplain(doc, subQueryExpl, valSrcExpls[0]); } if (valSrcExpls.length == 0) { return subQueryExpl; } float valSrcScore = 1; for (int i = 0; i < valSrcExpls.length; i++) { valSrcScore *= valSrcExpls[i].getValue(); } Explanation exp = new Explanation( valSrcScore * subQueryExpl.getValue(), "custom score: product of:"); exp.addDetail(subQueryExpl); for (int i = 0; i < valSrcExpls.length; i++) { exp.addDetail(valSrcExpls[i]); } return exp; } /** * Explain the custom score. * @deprecated Will be removed in Lucene 3.1. * The doc is relative to the current reader, which is * unknown to CustomScoreQuery when using per-segment search (since Lucene 2.9). * Please override {@link #getCustomScoreProvider} and return a subclass * of {@link CustomScoreProvider} for the given {@link IndexReader}. 
* @see CustomScoreProvider#customExplain(int,Explanation,Explanation[]) */ @Deprecated public Explanation customExplain(int doc, Explanation subQueryExpl, Explanation valSrcExpl) { float valSrcScore = 1; if (valSrcExpl != null) { valSrcScore *= valSrcExpl.getValue(); } Explanation exp = new Explanation( valSrcScore * subQueryExpl.getValue(), "custom score: product of:"); exp.addDetail(subQueryExpl); exp.addDetail(valSrcExpl); return exp; } //=========================== W E I G H T ============================ private class CustomWeight extends Weight { Similarity similarity; Weight subQueryWeight; Weight[] valSrcWeights; boolean qStrict; public CustomWeight(Searcher searcher) throws IOException { this.similarity = getSimilarity(searcher); this.subQueryWeight = subQuery.weight(searcher); this.valSrcWeights = new Weight[valSrcQueries.length]; for(int i = 0; i < valSrcQueries.length; i++) { this.valSrcWeights[i] = valSrcQueries[i].createWeight(searcher); } this.qStrict = strict; } /*(non-Javadoc) @see org.apache.lucene.search.Weight#getQuery() */ @Override public Query getQuery() { return CustomScoreQuery.this; } /*(non-Javadoc) @see org.apache.lucene.search.Weight#getValue() */ @Override public float getValue() { return getBoost(); } /*(non-Javadoc) @see org.apache.lucene.search.Weight#sumOfSquaredWeights() */ @Override public float sumOfSquaredWeights() throws IOException { float sum = subQueryWeight.sumOfSquaredWeights(); for(int i = 0; i < valSrcWeights.length; i++) { if (qStrict) { valSrcWeights[i].sumOfSquaredWeights(); // do not include ValueSource part in the query normalization } else { sum += valSrcWeights[i].sumOfSquaredWeights(); } } sum *= getBoost() * getBoost(); // boost each sub-weight return sum ; } /*(non-Javadoc) @see org.apache.lucene.search.Weight#normalize(float) */ @Override public void normalize(float norm) { norm *= getBoost(); // incorporate boost subQueryWeight.normalize(norm); for(int i = 0; i < valSrcWeights.length; i++) { if (qStrict) { 
valSrcWeights[i].normalize(1); // do not normalize the ValueSource part } else { valSrcWeights[i].normalize(norm); } } } @Override public Scorer scorer(IndexReader reader, boolean scoreDocsInOrder, boolean topScorer) throws IOException { // Pass true for "scoresDocsInOrder", because we // require in-order scoring, even if caller does not, // since we call advance on the valSrcScorers. Pass // false for "topScorer" because we will not invoke // score(Collector) on these scorers: Scorer subQueryScorer = subQueryWeight.scorer(reader, true, false); if (subQueryScorer == null) { return null; } Scorer[] valSrcScorers = new Scorer[valSrcWeights.length]; for(int i = 0; i < valSrcScorers.length; i++) { valSrcScorers[i] = valSrcWeights[i].scorer(reader, true, topScorer); } return new CustomScorer(similarity, reader, this, subQueryScorer, valSrcScorers); } @Override public Explanation explain(IndexReader reader, int doc) throws IOException { Explanation explain = doExplain(reader, doc); return explain == null ? 
new Explanation(0.0f, "no matching docs") : explain; } private Explanation doExplain(IndexReader reader, int doc) throws IOException { Explanation subQueryExpl = subQueryWeight.explain(reader, doc); if (!subQueryExpl.isMatch()) { return subQueryExpl; } // match Explanation[] valSrcExpls = new Explanation[valSrcWeights.length]; for(int i = 0; i < valSrcWeights.length; i++) { valSrcExpls[i] = valSrcWeights[i].explain(reader, doc); } Explanation customExp = CustomScoreQuery.this.getCustomScoreProvider(reader).customExplain(doc,subQueryExpl,valSrcExpls); float sc = getValue() * customExp.getValue(); Explanation res = new ComplexExplanation( true, sc, CustomScoreQuery.this.toString() + ", product of:"); res.addDetail(customExp); res.addDetail(new Explanation(getValue(), "queryBoost")); // actually using the q boost as q weight (== weight value) return res; } @Override public boolean scoresDocsOutOfOrder() { return false; } } //=========================== S C O R E R ============================ /** * A scorer that applies a (callback) function on scores of the subQuery. 
*/ private class CustomScorer extends Scorer { private final float qWeight; private Scorer subQueryScorer; private Scorer[] valSrcScorers; private IndexReader reader; private final CustomScoreProvider provider; private float vScores[]; // reused in score() to avoid allocating this array for each doc // constructor private CustomScorer(Similarity similarity, IndexReader reader, CustomWeight w, Scorer subQueryScorer, Scorer[] valSrcScorers) throws IOException { super(similarity); this.qWeight = w.getValue(); this.subQueryScorer = subQueryScorer; this.valSrcScorers = valSrcScorers; this.reader = reader; this.vScores = new float[valSrcScorers.length]; this.provider = CustomScoreQuery.this.getCustomScoreProvider(reader); } @Override public int nextDoc() throws IOException { int doc = subQueryScorer.nextDoc(); if (doc != NO_MORE_DOCS) { for (int i = 0; i < valSrcScorers.length; i++) { valSrcScorers[i].advance(doc); } } return doc; } @Override public int docID() { return subQueryScorer.docID(); } /*(non-Javadoc) @see org.apache.lucene.search.Scorer#score() */ @Override public float score() throws IOException { for (int i = 0; i < valSrcScorers.length; i++) { vScores[i] = valSrcScorers[i].score(); } return qWeight * provider.customScore(subQueryScorer.docID(), subQueryScorer.score(), vScores); } @Override public int advance(int target) throws IOException { int doc = subQueryScorer.advance(target); if (doc != NO_MORE_DOCS) { for (int i = 0; i < valSrcScorers.length; i++) { valSrcScorers[i].advance(doc); } } return doc; } } @Override public Weight createWeight(Searcher searcher) throws IOException { return new CustomWeight(searcher); } /** * Checks if this is strict custom scoring. * In strict custom scoring, the ValueSource part does not participate in weight normalization. * This may be useful when one wants full control over how scores are modified, and does * not care about normalizing by the ValueSource part. 
* One particular case where this is useful is for testing this query. * <P> * Note: only has effect when the ValueSource part is not null. */ public boolean isStrict() { return strict; } /** * Set the strict mode of this query. * @param strict The strict mode to set. * @see #isStrict() */ public void setStrict(boolean strict) { this.strict = strict; } /** * A short name of this query, used in {@link #toString(String)}. */ public String name() { return "custom"; } }
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/search/function/CustomScoreQuery.java
Java
art
18,821
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.lucene.search.function; import org.apache.lucene.index.IndexReader; import org.apache.lucene.search.FieldCache; import java.io.IOException; /** * Expert: obtains the ordinal of the field value from the default Lucene * {@link org.apache.lucene.search.FieldCache FieldCache} using getStringIndex() * and reverses the order. * <p> * The native lucene index order is used to assign an ordinal value for each field value. * <p> * Field values (terms) are lexicographically ordered by unicode value, and numbered starting at 1. * <br> * Example of reverse ordinal (rord): * <br>If there were only three field values: "apple","banana","pear" * <br>then rord("apple")=3, rord("banana")=2, rord("pear")=1 * <p> * WARNING: * rord() depends on the position in an index and can thus change * when other documents are inserted or deleted, * or if a MultiSearcher is used. * * <p><font color="#FF0000"> * WARNING: The status of the <b>search.function</b> package is experimental.
* The APIs introduced here might change in the future and will not be * supported anymore in such a case.</font> * * <p><b>NOTE</b>: with the switch in 2.9 to segment-based * searching, if {@link #getValues} is invoked with a * composite (multi-segment) reader, this can easily cause * double RAM usage for the values in the FieldCache. It's * best to switch your application to pass only atomic * (single segment) readers to this API.</p> */ public class ReverseOrdFieldSource extends ValueSource { public String field; /** * Contructor for a certain field. * @param field field whose values reverse order is used. */ public ReverseOrdFieldSource(String field) { this.field = field; } /*(non-Javadoc) @see org.apache.lucene.search.function.ValueSource#description() */ @Override public String description() { return "rord("+field+')'; } /*(non-Javadoc) @see org.apache.lucene.search.function.ValueSource#getValues(org.apache.lucene.index.IndexReader) */ @Override public DocValues getValues(IndexReader reader) throws IOException { final FieldCache.StringIndex sindex = FieldCache.DEFAULT.getStringIndex(reader, field); final int arr[] = sindex.order; final int end = sindex.lookup.length; return new DocValues() { /*(non-Javadoc) @see org.apache.lucene.search.function.DocValues#floatVal(int) */ @Override public float floatVal(int doc) { return (float)(end - arr[doc]); } /* (non-Javadoc) @see org.apache.lucene.search.function.DocValues#intVal(int) */ @Override public int intVal(int doc) { return end - arr[doc]; } /* (non-Javadoc) @see org.apache.lucene.search.function.DocValues#strVal(int) */ @Override public String strVal(int doc) { // the string value of the ordinal, not the string itself return Integer.toString(intVal(doc)); } /*(non-Javadoc) @see org.apache.lucene.search.function.DocValues#toString(int) */ @Override public String toString(int doc) { return description() + '=' + strVal(doc); } /*(non-Javadoc) @see org.apache.lucene.search.function.DocValues#getInnerArray() */ 
@Override Object getInnerArray() { return arr; } }; } /*(non-Javadoc) @see java.lang.Object#equals(java.lang.Object) */ @Override public boolean equals(Object o) { if (o.getClass() != ReverseOrdFieldSource.class) return false; ReverseOrdFieldSource other = (ReverseOrdFieldSource)o; return this.field.equals(other.field); } private static final int hcode = ReverseOrdFieldSource.class.hashCode(); /*(non-Javadoc) @see java.lang.Object#hashCode() */ @Override public int hashCode() { return hcode + field.hashCode(); } }
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/search/function/ReverseOrdFieldSource.java
Java
art
4,623
package org.apache.lucene.search.function; /** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * A query that scores each document as the value of the numeric input field. * <p> * The query matches all documents, and scores each document according to the numeric * value of that field. * <p> * It is assumed, and expected, that: * <ul> * <li>The field used here is indexed, and has exactly * one token in every scored document.</li> * <li>Best if this field is un_tokenized.</li> * <li>That token is parseable to the selected type.</li> * </ul> * <p> * Combining this query in a FunctionQuery allows much freedom in affecting document scores. * Note, that with this freedom comes responsibility: it is more than likely that the * default Lucene scoring is superior in quality to scoring modified as explained here. * However, in some cases, and certainly for research experiments, this capability may turn useful. * <p> * When constructing this query, select the appropriate type. That type should match the data stored in the * field. So in fact the "right" type should be selected before indexing. 
Type selection * has effect on the RAM usage: * <ul> * <li>{@link Type#BYTE} consumes 1 * maxDocs bytes.</li> * <li>{@link Type#SHORT} consumes 2 * maxDocs bytes.</li> * <li>{@link Type#INT} consumes 4 * maxDocs bytes.</li> * <li>{@link Type#FLOAT} consumes 4 * maxDocs bytes.</li> * </ul> * <p> * <b>Caching:</b> * Values for the numeric field are loaded once and cached in memory for further use with the same IndexReader. * To take advantage of this, it is extremely important to reuse index-readers or index-searchers, * otherwise, for instance if for each query a new index reader is opened, large penalties would be * paid for loading the field values into memory over and over again! * * <p><font color="#FF0000"> * WARNING: The status of the <b>search.function</b> package is experimental. * The APIs introduced here might change in the future and will not be * supported anymore in such a case.</font> */ public class FieldScoreQuery extends ValueSourceQuery { /** * Type of score field, indicating how field values are interpreted/parsed. * <p> * The type selected at search time should match the data stored in the field. * Different types have different RAM requirements: * <ul> * <li>{@link #BYTE} consumes 1 * maxDocs bytes.</li> * <li>{@link #SHORT} consumes 2 * maxDocs bytes.</li> * <li>{@link #INT} consumes 4 * maxDocs bytes.</li> * <li>{@link #FLOAT} consumes 4 * maxDocs bytes.</li> * </ul> */ public static class Type { /** field values are interpreted as numeric byte values. */ public static final Type BYTE = new Type("byte"); /** field values are interpreted as numeric short values. */ public static final Type SHORT = new Type("short"); /** field values are interpreted as numeric int values. */ public static final Type INT = new Type("int"); /** field values are interpreted as numeric float values.
*/ public static final Type FLOAT = new Type("float"); private String typeName; private Type (String name) { this.typeName = name; } /*(non-Javadoc) @see java.lang.Object#toString() */ @Override public String toString() { return getClass().getName()+"::"+typeName; } } /** * Create a FieldScoreQuery - a query that scores each document as the value of the numeric input field. * <p> * The <code>type</code> param tells how to parse the field string values into a numeric score value. * @param field the numeric field to be used. * @param type the type of the field: either * {@link Type#BYTE}, {@link Type#SHORT}, {@link Type#INT}, or {@link Type#FLOAT}. */ public FieldScoreQuery(String field, Type type) { super(getValueSource(field,type)); } // create the appropriate (cached) field value source. private static ValueSource getValueSource(String field, Type type) { if (type == Type.BYTE) { return new ByteFieldSource(field); } if (type == Type.SHORT) { return new ShortFieldSource(field); } if (type == Type.INT) { return new IntFieldSource(field); } if (type == Type.FLOAT) { return new FloatFieldSource(field); } throw new IllegalArgumentException(type+" is not a known Field Score Query Type!"); } }
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/search/function/FieldScoreQuery.java
Java
art
5,212
package org.apache.lucene.search.function;

/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.function.DocValues;

import java.io.IOException;
import java.io.Serializable;

/**
 * Expert: source of values for basic function queries.
 * <P>At its default/simplest form, values - one per doc - are used as the score of that doc.
 * <P>Values are instantiated as
 * {@link org.apache.lucene.search.function.DocValues DocValues} for a particular reader.
 * <P>ValueSource implementations differ in RAM requirements: it would always be a factor
 * of the number of documents, but for each document the number of bytes can be 1, 2, 4, or 8.
 *
 * <p><font color="#FF0000">
 * WARNING: The status of the <b>search.function</b> package is experimental.
 * The APIs introduced here might change in the future and will not be
 * supported anymore in such a case.</font>
 */
public abstract class ValueSource implements Serializable {

  /**
   * A short, human-readable description of this source's field,
   * used when building {@code explain()} output.
   */
  public abstract String description();

  /**
   * Return the DocValues used by the function query.
   * @param reader the IndexReader used to read these values.
   *        If any caching is involved, that caching would also be IndexReader based.
   * @throws IOException for any error.
   */
  public abstract DocValues getValues(IndexReader reader) throws IOException;

  /* (non-Javadoc) @see java.lang.Object#toString() */
  @Override
  public String toString() {
    // Delegate to the subclass-provided description.
    return this.description();
  }

  /**
   * Needed for possible caching of query results - used by {@link ValueSourceQuery#equals(Object)}.
   * @see Object#equals(Object)
   */
  @Override
  public abstract boolean equals(Object o);

  /**
   * Needed for possible caching of query results - used by {@link ValueSourceQuery#hashCode()}.
   * @see Object#hashCode()
   */
  @Override
  public abstract int hashCode();
}
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/search/function/ValueSource.java
Java
art
2,710
package org.apache.lucene.search.function;

/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import java.io.IOException;

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.ComplexExplanation;
import org.apache.lucene.search.Explanation;
import org.apache.lucene.search.FieldCache; // for javadocs

/**
 * An instance of this subclass should be returned by
 * {@link CustomScoreQuery#getCustomScoreProvider}, if you want
 * to modify the custom score calculation of a {@link CustomScoreQuery}.
 * <p>Since Lucene 2.9, queries operate on each segment of an Index separately,
 * so overriding the similar (now deprecated) methods in {@link CustomScoreQuery}
 * is no longer suitable, as the supplied <code>doc</code> ID is per-segment
 * and without knowledge of the IndexReader you cannot access the
 * document or {@link FieldCache}.
 *
 * @lucene.experimental
 * @since 2.9.2
 */
public class CustomScoreProvider {

  /** The (per-segment) reader this provider was created for. */
  protected final IndexReader reader;

  /**
   * Creates a new instance of the provider class for the given {@link IndexReader}.
   */
  public CustomScoreProvider(IndexReader reader) {
    this.reader = reader;
  }

  /**
   * Compute a custom score by the subQuery score and a number of
   * {@link ValueSourceQuery} scores.
   * <p>
   * Subclasses can override this method to modify the custom score.
   * <p>
   * If your custom scoring is different than the default herein you
   * should override at least one of the two customScore() methods.
   * If the number of ValueSourceQueries is always &lt; 2 it is
   * sufficient to override the other
   * {@link #customScore(int, float, float) customScore()}
   * method, which is simpler.
   * <p>
   * The default computation herein is a multiplication of given scores:
   * <pre>
   *     ModifiedScore = valSrcScore * valSrcScores[0] * valSrcScores[1] * ...
   * </pre>
   *
   * @param doc id of scored doc.
   * @param subQueryScore score of that doc by the subQuery.
   * @param valSrcScores scores of that doc by the ValueSourceQuery.
   * @return custom score.
   */
  public float customScore(int doc, float subQueryScore, float valSrcScores[]) throws IOException {
    // Degenerate cases delegate to the simpler single-value overload so that
    // subclasses overriding only that method still take effect.
    if (valSrcScores.length == 1) {
      return customScore(doc, subQueryScore, valSrcScores[0]);
    }
    if (valSrcScores.length == 0) {
      return customScore(doc, subQueryScore, 1);
    }
    float score = subQueryScore;
    for (float valSrcScore : valSrcScores) {
      score *= valSrcScore;
    }
    return score;
  }

  /**
   * Compute a custom score by the subQuery score and the ValueSourceQuery score.
   * <p>
   * Subclasses can override this method to modify the custom score.
   * <p>
   * If your custom scoring is different than the default herein you
   * should override at least one of the two customScore() methods.
   * If the number of ValueSourceQueries is always &lt; 2 it is
   * sufficient to override this customScore() method, which is simpler.
   * <p>
   * The default computation herein is a multiplication of the two scores:
   * <pre>
   *     ModifiedScore = subQueryScore * valSrcScore
   * </pre>
   *
   * @param doc id of scored doc.
   * @param subQueryScore score of that doc by the subQuery.
   * @param valSrcScore score of that doc by the ValueSourceQuery.
   * @return custom score.
   */
  public float customScore(int doc, float subQueryScore, float valSrcScore) throws IOException {
    return subQueryScore * valSrcScore;
  }

  /**
   * Explain the custom score.
   * Whenever overriding {@link #customScore(int, float, float[])},
   * this method should also be overridden to provide the correct explanation
   * for the part of the custom scoring.
   *
   * @param doc doc being explained.
   * @param subQueryExpl explanation for the sub-query part.
   * @param valSrcExpls explanation for the value source part.
   * @return an explanation for the custom score
   */
  public Explanation customExplain(int doc, Explanation subQueryExpl, Explanation valSrcExpls[]) throws IOException {
    // Mirror the delegation structure of customScore(int, float, float[]).
    if (valSrcExpls.length == 1) {
      return customExplain(doc, subQueryExpl, valSrcExpls[0]);
    }
    if (valSrcExpls.length == 0) {
      return subQueryExpl;
    }
    float valSrcScore = 1;
    for (Explanation valSrcExpl : valSrcExpls) {
      valSrcScore *= valSrcExpl.getValue();
    }
    Explanation exp = new Explanation( valSrcScore * subQueryExpl.getValue(), "custom score: product of:");
    exp.addDetail(subQueryExpl);
    for (Explanation valSrcExpl : valSrcExpls) {
      exp.addDetail(valSrcExpl);
    }
    return exp;
  }

  /**
   * Explain the custom score.
   * Whenever overriding {@link #customScore(int, float, float)},
   * this method should also be overridden to provide the correct explanation
   * for the part of the custom scoring.
   *
   * @param doc doc being explained.
   * @param subQueryExpl explanation for the sub-query part.
   * @param valSrcExpl explanation for the value source part.
   * @return an explanation for the custom score
   */
  public Explanation customExplain(int doc, Explanation subQueryExpl, Explanation valSrcExpl) throws IOException {
    float valSrcScore = 1;
    if (valSrcExpl != null) {
      valSrcScore *= valSrcExpl.getValue();
    }
    Explanation exp = new Explanation( valSrcScore * subQueryExpl.getValue(), "custom score: product of:");
    exp.addDetail(subQueryExpl);
    // NOTE: valSrcExpl may be null here; addDetail is called unconditionally
    // to preserve the original behavior.
    exp.addDetail(valSrcExpl);
    return exp;
  }
}
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/search/function/CustomScoreProvider.java
Java
art
6,226
package org.apache.lucene.search;

/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.util.ToStringUtils;
import org.apache.lucene.search.BooleanClause.Occur;

import java.io.IOException;
import java.util.*;

/** A Query that matches documents matching boolean combinations of other
  * queries, e.g. {@link TermQuery}s, {@link PhraseQuery}s or other
  * BooleanQuerys.
  */
public class BooleanQuery extends Query implements Iterable<BooleanClause> {

  // Global (static) cap shared by ALL BooleanQuery instances in the JVM.
  private static int maxClauseCount = 1024;

  /** Thrown when an attempt is made to add more than {@link
   * #getMaxClauseCount()} clauses. This typically happens if
   * a PrefixQuery, FuzzyQuery, WildcardQuery, or TermRangeQuery
   * is expanded to many terms during search.
   */
  public static class TooManyClauses extends RuntimeException {
    public TooManyClauses() {}
    @Override
    public String getMessage() {
      // Reads the static field at call time, so the message reflects the
      // limit in force when the exception is inspected.
      return "maxClauseCount is set to " + maxClauseCount;
    }
  }

  /** Return the maximum number of clauses permitted, 1024 by default.
   * Attempts to add more than the permitted number of clauses cause {@link
   * TooManyClauses} to be thrown.
   * @see #setMaxClauseCount(int)
   */
  public static int getMaxClauseCount() { return maxClauseCount; }

  /**
   * Set the maximum number of clauses permitted per BooleanQuery.
   * Default value is 1024.
   */
  public static void setMaxClauseCount(int maxClauseCount) {
    if (maxClauseCount < 1)
      throw new IllegalArgumentException("maxClauseCount must be >= 1");
    BooleanQuery.maxClauseCount = maxClauseCount;
  }

  // Declared as ArrayList (not List) because clone() relies on ArrayList.clone().
  private ArrayList<BooleanClause> clauses = new ArrayList<BooleanClause>();
  private boolean disableCoord;

  /** Constructs an empty boolean query. */
  public BooleanQuery() {}

  /** Constructs an empty boolean query.
   *
   * {@link Similarity#coord(int,int)} may be disabled in scoring, as
   * appropriate. For example, this score factor does not make sense for most
   * automatically generated queries, like {@link WildcardQuery} and {@link
   * FuzzyQuery}.
   *
   * @param disableCoord disables {@link Similarity#coord(int,int)} in scoring.
   */
  public BooleanQuery(boolean disableCoord) {
    this.disableCoord = disableCoord;
  }

  /** Returns true iff {@link Similarity#coord(int,int)} is disabled in
   * scoring for this query instance.
   * @see #BooleanQuery(boolean)
   */
  public boolean isCoordDisabled() { return disableCoord; }

  // Implement coord disabling.
  // Inherit javadoc.
  @Override
  public Similarity getSimilarity(Searcher searcher) {
    Similarity result = super.getSimilarity(searcher);
    if (disableCoord) {                           // disable coord as requested
      // Wrap the searcher's Similarity so coord() is a constant 1.0f no-op.
      result = new SimilarityDelegator(result) {
          @Override
          public float coord(int overlap, int maxOverlap) {
            return 1.0f;
          }
        };
    }
    return result;
  }

  /**
   * Specifies a minimum number of the optional BooleanClauses
   * which must be satisfied.
   *
   * <p>
   * By default no optional clauses are necessary for a match
   * (unless there are no required clauses).  If this method is used,
   * then the specified number of clauses is required.
   * </p>
   * <p>
   * Use of this method is totally independent of specifying that
   * any specific clauses are required (or prohibited).  This number will
   * only be compared against the number of matching optional clauses.
   * </p>
   *
   * @param min the number of optional clauses that must match
   */
  public void setMinimumNumberShouldMatch(int min) {
    this.minNrShouldMatch = min;
  }
  protected int minNrShouldMatch = 0;

  /**
   * Gets the minimum number of the optional BooleanClauses
   * which must be satisfied.
   */
  public int getMinimumNumberShouldMatch() {
    return minNrShouldMatch;
  }

  /** Adds a clause to a boolean query.
   *
   * @throws TooManyClauses if the new number of clauses exceeds the maximum clause number
   * @see #getMaxClauseCount()
   */
  public void add(Query query, BooleanClause.Occur occur) {
    add(new BooleanClause(query, occur));
  }

  /** Adds a clause to a boolean query.
   * @throws TooManyClauses if the new number of clauses exceeds the maximum clause number
   * @see #getMaxClauseCount()
   */
  public void add(BooleanClause clause) {
    if (clauses.size() >= maxClauseCount)
      throw new TooManyClauses();
    clauses.add(clause);
  }

  /** Returns the set of clauses in this query. */
  public BooleanClause[] getClauses() {
    return clauses.toArray(new BooleanClause[clauses.size()]);
  }

  /** Returns the list of clauses in this query.
   * NOTE: this returns the internal (mutable) list, not a copy. */
  public List<BooleanClause> clauses() { return clauses; }

  /** Returns an iterator on the clauses in this query. It implements the {@link Iterable} interface to
   * make it possible to do:
   * <pre>for (BooleanClause clause : booleanQuery) {}</pre>
   */
  public final Iterator<BooleanClause> iterator() { return clauses().iterator(); }

  /**
   * Expert: the Weight for BooleanQuery, used to
   * normalize, score and explain these queries.
   *
   * <p>NOTE: this API and implementation is subject to
   * change suddenly in the next release.</p>
   */
  protected class BooleanWeight extends Weight {
    /** The Similarity implementation. */
    protected Similarity similarity;
    // One Weight per clause, in the same order as clauses.
    protected ArrayList<Weight> weights;

    public BooleanWeight(Searcher searcher)
      throws IOException {
      this.similarity = getSimilarity(searcher);
      weights = new ArrayList<Weight>(clauses.size());
      for (int i = 0 ; i < clauses.size(); i++) {
        weights.add(clauses.get(i).getQuery().createWeight(searcher));
      }
    }

    @Override
    public Query getQuery() { return BooleanQuery.this; }

    @Override
    public float getValue() { return getBoost(); }

    @Override
    public float sumOfSquaredWeights() throws IOException {
      float sum = 0.0f;
      for (int i = 0 ; i < weights.size(); i++) {
        // call sumOfSquaredWeights for all clauses in case of side effects
        float s = weights.get(i).sumOfSquaredWeights();         // sum sub weights
        if (!clauses.get(i).isProhibited())
          // only add to sum for non-prohibited clauses
          sum += s;
      }

      sum *= getBoost() * getBoost();             // boost each sub-weight

      return sum ;
    }


    @Override
    public void normalize(float norm) {
      norm *= getBoost();                         // incorporate boost
      for (Weight w : weights) {
        // normalize all clauses, (even if prohibited in case of side affects)
        w.normalize(norm);
      }
    }

    @Override
    public Explanation explain(IndexReader reader, int doc)
      throws IOException {
      final int minShouldMatch =
        BooleanQuery.this.getMinimumNumberShouldMatch();
      ComplexExplanation sumExpl = new ComplexExplanation();
      sumExpl.setDescription("sum of:");
      int coord = 0;
      int maxCoord = 0;
      float sum = 0.0f;
      boolean fail = false;
      int shouldMatchCount = 0;
      // Walk clauses and weights in lockstep; they are index-aligned.
      Iterator<BooleanClause> cIter = clauses.iterator();
      for (Iterator<Weight> wIter = weights.iterator(); wIter.hasNext();) {
        Weight w = wIter.next();
        BooleanClause c = cIter.next();
        // Skip clauses whose sub-query matches nothing in this reader.
        if (w.scorer(reader, true, true) == null) {
          continue;
        }
        Explanation e = w.explain(reader, doc);
        if (!c.isProhibited()) maxCoord++;
        if (e.isMatch()) {
          if (!c.isProhibited()) {
            sumExpl.addDetail(e);
            sum += e.getValue();
            coord++;
          } else {
            // Matching a prohibited clause means the overall query fails.
            Explanation r =
              new Explanation(0.0f, "match on prohibited clause (" + c.getQuery().toString() + ")");
            r.addDetail(e);
            sumExpl.addDetail(r);
            fail = true;
          }
          if (c.getOccur() == Occur.SHOULD)
            shouldMatchCount++;
        } else if (c.isRequired()) {
          Explanation r = new Explanation(0.0f, "no match on required clause (" + c.getQuery().toString() + ")");
          r.addDetail(e);
          sumExpl.addDetail(r);
          fail = true;
        }
      }
      if (fail) {
        sumExpl.setMatch(Boolean.FALSE);
        sumExpl.setValue(0.0f);
        sumExpl.setDescription
          ("Failure to meet condition(s) of required/prohibited clause(s)");
        return sumExpl;
      } else if (shouldMatchCount < minShouldMatch) {
        sumExpl.setMatch(Boolean.FALSE);
        sumExpl.setValue(0.0f);
        sumExpl.setDescription("Failure to match minimum number "+
                               "of optional clauses: " + minShouldMatch);
        return sumExpl;
      }

      sumExpl.setMatch(0 < coord ? Boolean.TRUE : Boolean.FALSE);
      sumExpl.setValue(sum);

      float coordFactor = similarity.coord(coord, maxCoord);
      if (coordFactor == 1.0f)                      // coord is no-op
        return sumExpl;                             // eliminate wrapper
      else {
        ComplexExplanation result = new ComplexExplanation(sumExpl.isMatch(),
                                                           sum*coordFactor,
                                                           "product of:");
        result.addDetail(sumExpl);
        result.addDetail(new Explanation(coordFactor,
                                         "coord("+coord+"/"+maxCoord+")"));
        return result;
      }
    }

    @Override
    public Scorer scorer(IndexReader reader, boolean scoreDocsInOrder, boolean topScorer)
        throws IOException {
      // Partition sub-scorers by clause occurrence type.
      List<Scorer> required = new ArrayList<Scorer>();
      List<Scorer> prohibited = new ArrayList<Scorer>();
      List<Scorer> optional = new ArrayList<Scorer>();
      Iterator<BooleanClause> cIter = clauses.iterator();
      for (Weight w  : weights) {
        BooleanClause c =  cIter.next();
        Scorer subScorer = w.scorer(reader, true, false);
        if (subScorer == null) {
          if (c.isRequired()) {
            // A required clause with no matches kills the whole query.
            return null;
          }
        } else if (c.isRequired()) {
          required.add(subScorer);
        } else if (c.isProhibited()) {
          prohibited.add(subScorer);
        } else {
          optional.add(subScorer);
        }
      }

      // Check if we can return a BooleanScorer
      // (out-of-order scorer; limited to < 32 prohibited clauses).
      if (!scoreDocsInOrder && topScorer && required.size() == 0 && prohibited.size() < 32) {
        return new BooleanScorer(similarity, minNrShouldMatch, optional, prohibited);
      }

      if (required.size() == 0 && optional.size() == 0) {
        // no required and optional clauses.
        return null;
      } else if (optional.size() < minNrShouldMatch) {
        // either >1 req scorer, or there are 0 req scorers and at least 1
        // optional scorer. Therefore if there are not enough optional scorers
        // no documents will be matched by the query
        return null;
      }

      // Return a BooleanScorer2
      return new BooleanScorer2(similarity, minNrShouldMatch, required, prohibited, optional);
    }

    @Override
    public boolean scoresDocsOutOfOrder() {
      int numProhibited = 0;
      for (BooleanClause c : clauses) {
        if (c.isRequired()) {
          return false; // BS2 (in-order) will be used by scorer()
        } else if (c.isProhibited()) {
          ++numProhibited;
        }
      }

      if (numProhibited > 32) { // cannot use BS
        return false;
      }

      // scorer() will return an out-of-order scorer if requested.
      return true;
    }

  }

  @Override
  public Weight createWeight(Searcher searcher) throws IOException {
    return new BooleanWeight(searcher);
  }

  @Override
  public Query rewrite(IndexReader reader) throws IOException {
    if (minNrShouldMatch == 0 && clauses.size() == 1) {                    // optimize 1-clause queries
      BooleanClause c = clauses.get(0);
      if (!c.isProhibited()) {                     // just return clause

        Query query = c.getQuery().rewrite(reader);    // rewrite first

        if (getBoost() != 1.0f) {                 // incorporate boost
          if (query == c.getQuery())                   // if rewrite was no-op
            query = (Query)query.clone();         // then clone before boost
          query.setBoost(getBoost() * query.getBoost());
        }

        return query;
      }
    }

    BooleanQuery clone = null;                    // recursively rewrite
    for (int i = 0 ; i < clauses.size(); i++) {
      BooleanClause c = clauses.get(i);
      Query query = c.getQuery().rewrite(reader);
      if (query != c.getQuery()) {                     // clause rewrote: must clone
        if (clone == null)
          clone = (BooleanQuery)this.clone();
        clone.clauses.set(i, new BooleanClause(query, c.getOccur()));
      }
    }
    if (clone != null) {
      return clone;                               // some clauses rewrote
    } else
      return this;                                // no clauses rewrote
  }

  // inherit javadoc
  @Override
  public void extractTerms(Set<Term> terms) {
      for (BooleanClause clause : clauses) {
          clause.getQuery().extractTerms(terms);
        }
  }

  @Override @SuppressWarnings("unchecked")
  public Object clone() {
    BooleanQuery clone = (BooleanQuery)super.clone();
    // Shallow-copy the clause list so the clone's clauses can be mutated
    // independently (rewrite() relies on this).
    clone.clauses = (ArrayList<BooleanClause>) this.clauses.clone();
    return clone;
  }

  /** Prints a user-readable version of this query. */
  @Override
  public String toString(String field) {
    StringBuilder buffer = new StringBuilder();
    boolean needParens=(getBoost() != 1.0) || (getMinimumNumberShouldMatch()>0) ;
    if (needParens) {
      buffer.append("(");
    }

    for (int i = 0 ; i < clauses.size(); i++) {
      BooleanClause c = clauses.get(i);
      if (c.isProhibited())
        buffer.append("-");
      else if (c.isRequired())
        buffer.append("+");

      Query subQuery = c.getQuery();
      if (subQuery != null) {
        if (subQuery instanceof BooleanQuery) {   // wrap sub-bools in parens
          buffer.append("(");
          buffer.append(subQuery.toString(field));
          buffer.append(")");
        } else {
          buffer.append(subQuery.toString(field));
        }
      } else {
        buffer.append("null");
      }

      if (i != clauses.size()-1)
        buffer.append(" ");
    }

    if (needParens) {
      buffer.append(")");
    }

    if (getMinimumNumberShouldMatch()>0) {
      buffer.append('~');
      buffer.append(getMinimumNumberShouldMatch());
    }

    if (getBoost() != 1.0f) {
      buffer.append(ToStringUtils.boost(getBoost()));
    }

    return buffer.toString();
  }

  /** Returns true iff <code>o</code> is equal to this. */
  @Override
  public boolean equals(Object o) {
    if (!(o instanceof BooleanQuery))
      return false;
    BooleanQuery other = (BooleanQuery)o;
    return (this.getBoost() == other.getBoost())
        && this.clauses.equals(other.clauses)
        && this.getMinimumNumberShouldMatch() == other.getMinimumNumberShouldMatch()
        && this.disableCoord == other.disableCoord;
  }

  /** Returns a hash code value for this object.
   * NOTE(review): '+' binds tighter than '^' here, so the additions are
   * folded before the xor — presumably intentional, but worth confirming. */
  @Override
  public int hashCode() {
    return Float.floatToIntBits(getBoost()) ^ clauses.hashCode()
           + getMinimumNumberShouldMatch()
           + (disableCoord ? 17:0);
  }

}
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/search/BooleanQuery.java
Java
art
16,201
package org.apache.lucene.search;

/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.util.ToStringUtils;

import java.io.IOException;

/** Implements the wildcard search query. Supported wildcards are <code>*</code>, which
 * matches any character sequence (including the empty one), and <code>?</code>,
 * which matches any single character. Note this query can be slow, as it
 * needs to iterate over many terms. In order to prevent extremely slow WildcardQueries,
 * a Wildcard term should not start with one of the wildcards <code>*</code> or
 * <code>?</code>.
 *
 * <p>This query uses the {@link
 * MultiTermQuery#CONSTANT_SCORE_AUTO_REWRITE_DEFAULT}
 * rewrite method.
 *
 * @see WildcardTermEnum
 */
public class WildcardQuery extends MultiTermQuery {
  // True if the pattern contains at least one '*' or '?'.
  private final boolean termContainsWildcard;
  // True if the pattern is "prefix*": no '?', and a single '*' at the very end.
  // Such queries are rewritten to the cheaper PrefixQuery in rewrite().
  private final boolean termIsPrefix;
  protected Term term;

  /**
   * Constructs a query for terms matching the wildcard pattern in
   * <code>term</code>'s text.
   */
  public WildcardQuery(Term term) {
    this.term = term;
    String text = term.text();
    this.termContainsWildcard = (text.indexOf('*') != -1)
        || (text.indexOf('?') != -1);
    this.termIsPrefix = termContainsWildcard
        && (text.indexOf('?') == -1)
        && (text.indexOf('*') == text.length() - 1);
  }

  @Override
  protected FilteredTermEnum getEnum(IndexReader reader) throws IOException {
    if (termContainsWildcard)
      return new WildcardTermEnum(reader, getTerm());
    else
      // No wildcard characters: enumerate the single exact term.
      return new SingleTermEnum(reader, getTerm());
  }

  /**
   * Returns the pattern term.
   */
  public Term getTerm() {
    return term;
  }

  @Override
  public Query rewrite(IndexReader reader) throws IOException {
    if (termIsPrefix) {
      // "abc*" is equivalent to a PrefixQuery on "abc", which is faster.
      MultiTermQuery rewritten = new PrefixQuery(term.createTerm(term.text()
          .substring(0, term.text().indexOf('*'))));
      rewritten.setBoost(getBoost());
      rewritten.setRewriteMethod(getRewriteMethod());
      return rewritten;
    } else {
      return super.rewrite(reader);
    }
  }

  /** Prints a user-readable version of this query. */
  @Override
  public String toString(String field) {
    StringBuilder buffer = new StringBuilder();
    if (!term.field().equals(field)) {
      buffer.append(term.field());
      buffer.append(":");
    }
    buffer.append(term.text());
    buffer.append(ToStringUtils.boost(getBoost()));
    return buffer.toString();
  }

  @Override
  public int hashCode() {
    final int prime = 31;
    int result = super.hashCode();
    result = prime * result + ((term == null) ? 0 : term.hashCode());
    return result;
  }

  @Override
  public boolean equals(Object obj) {
    if (this == obj)
      return true;
    if (!super.equals(obj))
      return false;
    if (getClass() != obj.getClass())
      return false;
    WildcardQuery other = (WildcardQuery) obj;
    if (term == null) {
      if (other.term != null)
        return false;
    } else if (!term.equals(other.term))
      return false;
    return true;
  }
}
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/search/WildcardQuery.java
Java
art
3,766
package org.apache.lucene.search; /** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import java.io.IOException; import java.io.Serializable; import java.util.Locale; import org.apache.lucene.document.NumericField; // javadocs import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.Term; import org.apache.lucene.index.TermEnum; import org.apache.lucene.util.StringHelper; /** * Stores information about how to sort documents by terms in an individual * field. Fields must be indexed in order to sort by them. * * <p>Created: Feb 11, 2004 1:25:29 PM * * @since lucene 1.4 * @see Sort */ public class SortField implements Serializable { /** Sort by document score (relevancy). Sort values are Float and higher * values are at the front. */ public static final int SCORE = 0; /** Sort by document number (index order). Sort values are Integer and lower * values are at the front. */ public static final int DOC = 1; // reserved, in Lucene 2.9, there was a constant: AUTO = 2; /** Sort using term values as Strings. Sort values are String and lower * values are at the front. */ public static final int STRING = 3; /** Sort using term values as encoded Integers. Sort values are Integer and * lower values are at the front. 
*/ public static final int INT = 4; /** Sort using term values as encoded Floats. Sort values are Float and * lower values are at the front. */ public static final int FLOAT = 5; /** Sort using term values as encoded Longs. Sort values are Long and * lower values are at the front. */ public static final int LONG = 6; /** Sort using term values as encoded Doubles. Sort values are Double and * lower values are at the front. */ public static final int DOUBLE = 7; /** Sort using term values as encoded Shorts. Sort values are Short and * lower values are at the front. */ public static final int SHORT = 8; /** Sort using a custom Comparator. Sort values are any Comparable and * sorting is done according to natural order. */ public static final int CUSTOM = 9; /** Sort using term values as encoded Bytes. Sort values are Byte and * lower values are at the front. */ public static final int BYTE = 10; /** Sort using term values as Strings, but comparing by * value (using String.compareTo) for all comparisons. * This is typically slower than {@link #STRING}, which * uses ordinals to do the sorting. */ public static final int STRING_VAL = 11; // IMPLEMENTATION NOTE: the FieldCache.STRING_INDEX is in the same "namespace" // as the above static int values. Any new values must not have the same value // as FieldCache.STRING_INDEX. /** Represents sorting by document score (relevancy). */ public static final SortField FIELD_SCORE = new SortField (null, SCORE); /** Represents sorting by document number (index order). 
*/ public static final SortField FIELD_DOC = new SortField (null, DOC); private String field; private int type; // defaults to determining type dynamically private Locale locale; // defaults to "natural order" (no Locale) boolean reverse = false; // defaults to natural order private FieldCache.Parser parser; // Used for CUSTOM sort private FieldComparatorSource comparatorSource; /** Creates a sort by terms in the given field with the type of term * values explicitly given. * @param field Name of field to sort by. Can be <code>null</code> if * <code>type</code> is SCORE or DOC. * @param type Type of values in the terms. */ public SortField (String field, int type) { initFieldType(field, type); } /** Creates a sort, possibly in reverse, by terms in the given field with the * type of term values explicitly given. * @param field Name of field to sort by. Can be <code>null</code> if * <code>type</code> is SCORE or DOC. * @param type Type of values in the terms. * @param reverse True if natural order should be reversed. */ public SortField (String field, int type, boolean reverse) { initFieldType(field, type); this.reverse = reverse; } /** Creates a sort by terms in the given field, parsed * to numeric values using a custom {@link FieldCache.Parser}. * @param field Name of field to sort by. Must not be null. * @param parser Instance of a {@link FieldCache.Parser}, * which must subclass one of the existing numeric * parsers from {@link FieldCache}. Sort type is inferred * by testing which numeric parser the parser subclasses. * @throws IllegalArgumentException if the parser fails to * subclass an existing numeric parser, or field is null */ public SortField (String field, FieldCache.Parser parser) { this(field, parser, false); } /** Creates a sort, possibly in reverse, by terms in the given field, parsed * to numeric values using a custom {@link FieldCache.Parser}. * @param field Name of field to sort by. Must not be null. 
* @param parser Instance of a {@link FieldCache.Parser}, * which must subclass one of the existing numeric * parsers from {@link FieldCache}. Sort type is inferred * by testing which numeric parser the parser subclasses. * @param reverse True if natural order should be reversed. * @throws IllegalArgumentException if the parser fails to * subclass an existing numeric parser, or field is null */ public SortField (String field, FieldCache.Parser parser, boolean reverse) { if (parser instanceof FieldCache.IntParser) initFieldType(field, INT); else if (parser instanceof FieldCache.FloatParser) initFieldType(field, FLOAT); else if (parser instanceof FieldCache.ShortParser) initFieldType(field, SHORT); else if (parser instanceof FieldCache.ByteParser) initFieldType(field, BYTE); else if (parser instanceof FieldCache.LongParser) initFieldType(field, LONG); else if (parser instanceof FieldCache.DoubleParser) initFieldType(field, DOUBLE); else throw new IllegalArgumentException("Parser instance does not subclass existing numeric parser from FieldCache (got " + parser + ")"); this.reverse = reverse; this.parser = parser; } /** Creates a sort by terms in the given field sorted * according to the given locale. * @param field Name of field to sort by, cannot be <code>null</code>. * @param locale Locale of values in the field. */ public SortField (String field, Locale locale) { initFieldType(field, STRING); this.locale = locale; } /** Creates a sort, possibly in reverse, by terms in the given field sorted * according to the given locale. * @param field Name of field to sort by, cannot be <code>null</code>. * @param locale Locale of values in the field. */ public SortField (String field, Locale locale, boolean reverse) { initFieldType(field, STRING); this.locale = locale; this.reverse = reverse; } /** Creates a sort with a custom comparison function. * @param field Name of field to sort by; cannot be <code>null</code>. * @param comparator Returns a comparator for sorting hits. 
*/ public SortField (String field, FieldComparatorSource comparator) { initFieldType(field, CUSTOM); this.comparatorSource = comparator; } /** Creates a sort, possibly in reverse, with a custom comparison function. * @param field Name of field to sort by; cannot be <code>null</code>. * @param comparator Returns a comparator for sorting hits. * @param reverse True if natural order should be reversed. */ public SortField (String field, FieldComparatorSource comparator, boolean reverse) { initFieldType(field, CUSTOM); this.reverse = reverse; this.comparatorSource = comparator; } // Sets field & type, and ensures field is not NULL unless // type is SCORE or DOC private void initFieldType(String field, int type) { this.type = type; if (field == null) { if (type != SCORE && type != DOC) throw new IllegalArgumentException("field can only be null when type is SCORE or DOC"); } else { this.field = StringHelper.intern(field); } } /** Returns the name of the field. Could return <code>null</code> * if the sort is by SCORE or DOC. * @return Name of field, possibly <code>null</code>. */ public String getField() { return field; } /** Returns the type of contents in the field. * @return One of the constants SCORE, DOC, STRING, INT or FLOAT. */ public int getType() { return type; } /** Returns the Locale by which term values are interpreted. * May return <code>null</code> if no Locale was specified. * @return Locale, or <code>null</code>. */ public Locale getLocale() { return locale; } /** Returns the instance of a {@link FieldCache} parser that fits to the given sort type. * May return <code>null</code> if no parser was specified. Sorting is using the default parser then. * @return An instance of a {@link FieldCache} parser, or <code>null</code>. */ public FieldCache.Parser getParser() { return parser; } /** Returns whether the sort should be reversed. * @return True if natural order should be reversed. 
*/ public boolean getReverse() { return reverse; } /** Returns the {@link FieldComparatorSource} used for * custom sorting */ public FieldComparatorSource getComparatorSource() { return comparatorSource; } @Override public String toString() { StringBuilder buffer = new StringBuilder(); switch (type) { case SCORE: buffer.append("<score>"); break; case DOC: buffer.append("<doc>"); break; case STRING: buffer.append("<string: \"").append(field).append("\">"); break; case STRING_VAL: buffer.append("<string_val: \"").append(field).append("\">"); break; case BYTE: buffer.append("<byte: \"").append(field).append("\">"); break; case SHORT: buffer.append("<short: \"").append(field).append("\">"); break; case INT: buffer.append("<int: \"").append(field).append("\">"); break; case LONG: buffer.append("<long: \"").append(field).append("\">"); break; case FLOAT: buffer.append("<float: \"").append(field).append("\">"); break; case DOUBLE: buffer.append("<double: \"").append(field).append("\">"); break; case CUSTOM: buffer.append("<custom:\"").append(field).append("\": ").append(comparatorSource).append('>'); break; default: buffer.append("<???: \"").append(field).append("\">"); break; } if (locale != null) buffer.append('(').append(locale).append(')'); if (parser != null) buffer.append('(').append(parser).append(')'); if (reverse) buffer.append('!'); return buffer.toString(); } /** Returns true if <code>o</code> is equal to this. If a * {@link FieldComparatorSource} or {@link * FieldCache.Parser} was provided, it must properly * implement equals (unless a singleton is always used). */ @Override public boolean equals(Object o) { if (this == o) return true; if (!(o instanceof SortField)) return false; final SortField other = (SortField)o; return ( other.field == this.field // field is always interned && other.type == this.type && other.reverse == this.reverse && (other.locale == null ? this.locale == null : other.locale.equals(this.locale)) && (other.comparatorSource == null ? 
this.comparatorSource == null : other.comparatorSource.equals(this.comparatorSource)) && (other.parser == null ? this.parser == null : other.parser.equals(this.parser)) ); } /** Returns true if <code>o</code> is equal to this. If a * {@link FieldComparatorSource} or {@link * FieldCache.Parser} was provided, it must properly * implement hashCode (unless a singleton is always * used). */ @Override public int hashCode() { int hash=type^0x346565dd + Boolean.valueOf(reverse).hashCode()^0xaf5998bb; if (field != null) hash += field.hashCode()^0xff5685dd; if (locale != null) hash += locale.hashCode()^0x08150815; if (comparatorSource != null) hash += comparatorSource.hashCode(); if (parser != null) hash += parser.hashCode()^0x3aaf56ff; return hash; } // field must be interned after reading from stream private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { in.defaultReadObject(); if (field != null) field = StringHelper.intern(field); } /** Returns the {@link FieldComparator} to use for * sorting. * * <b>NOTE:</b> This API is experimental and might change in * incompatible ways in the next release. * * @param numHits number of top hits the queue will store * @param sortPos position of this SortField within {@link * Sort}. The comparator is primary if sortPos==0, * secondary if sortPos==1, etc. Some comparators can * optimize themselves when they are the primary sort. 
* @return {@link FieldComparator} to use when sorting */ public FieldComparator getComparator(final int numHits, final int sortPos) throws IOException { if (locale != null) { // TODO: it'd be nice to allow FieldCache.getStringIndex // to optionally accept a Locale so sorting could then use // the faster StringComparator impls return new FieldComparator.StringComparatorLocale(numHits, field, locale); } switch (type) { case SortField.SCORE: return new FieldComparator.RelevanceComparator(numHits); case SortField.DOC: return new FieldComparator.DocComparator(numHits); case SortField.INT: return new FieldComparator.IntComparator(numHits, field, parser); case SortField.FLOAT: return new FieldComparator.FloatComparator(numHits, field, parser); case SortField.LONG: return new FieldComparator.LongComparator(numHits, field, parser); case SortField.DOUBLE: return new FieldComparator.DoubleComparator(numHits, field, parser); case SortField.BYTE: return new FieldComparator.ByteComparator(numHits, field, parser); case SortField.SHORT: return new FieldComparator.ShortComparator(numHits, field, parser); case SortField.CUSTOM: assert comparatorSource != null; return comparatorSource.newComparator(field, numHits, sortPos, reverse); case SortField.STRING: return new FieldComparator.StringOrdValComparator(numHits, field, sortPos, reverse); case SortField.STRING_VAL: return new FieldComparator.StringValComparator(numHits, field); default: throw new IllegalStateException("Illegal sort type: " + type); } } }
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/search/SortField.java
Java
art
15,684
package org.apache.lucene.search; /** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import java.io.IOException; import org.apache.lucene.index.Term; import org.apache.lucene.index.TermEnum; /** Abstract class for enumerating a subset of all terms. <p>Term enumerations are always ordered by Term.compareTo(). Each term in the enumeration is greater than all that precede it. */ public abstract class FilteredTermEnum extends TermEnum { /** the current term */ protected Term currentTerm = null; /** the delegate enum - to set this member use {@link #setEnum} */ protected TermEnum actualEnum = null; public FilteredTermEnum() {} /** Equality compare on the term */ protected abstract boolean termCompare(Term term); /** Equality measure on the term */ public abstract float difference(); /** Indicates the end of the enumeration has been reached */ protected abstract boolean endEnum(); /** * use this method to set the actual TermEnum (e.g. in ctor), * it will be automatically positioned on the first matching term. 
*/ protected void setEnum(TermEnum actualEnum) throws IOException { this.actualEnum = actualEnum; // Find the first term that matches Term term = actualEnum.term(); if (term != null && termCompare(term)) currentTerm = term; else next(); } /** * Returns the docFreq of the current Term in the enumeration. * Returns -1 if no Term matches or all terms have been enumerated. */ @Override public int docFreq() { if (currentTerm == null) return -1; assert actualEnum != null; return actualEnum.docFreq(); } /** Increments the enumeration to the next element. True if one exists. */ @Override public boolean next() throws IOException { if (actualEnum == null) return false; // the actual enumerator is not initialized! currentTerm = null; while (currentTerm == null) { if (endEnum()) return false; if (actualEnum.next()) { Term term = actualEnum.term(); if (termCompare(term)) { currentTerm = term; return true; } } else return false; } currentTerm = null; return false; } /** Returns the current Term in the enumeration. * Returns null if no Term matches or all terms have been enumerated. */ @Override public Term term() { return currentTerm; } /** Closes the enumeration to further activity, freeing resources. */ @Override public void close() throws IOException { if (actualEnum != null) actualEnum.close(); currentTerm = null; actualEnum = null; } }
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/search/FilteredTermEnum.java
Java
art
3,624
package org.apache.lucene.search; /** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import java.text.Collator; /** * A Filter that restricts search results to a range of term * values in a given field. * * <p>This filter matches the documents looking for terms that fall into the * supplied range according to {@link * String#compareTo(String)}, unless a <code>Collator</code> is provided. It is not intended * for numerical ranges; use {@link NumericRangeFilter} instead. * * <p>If you construct a large number of range filters with different ranges but on the * same field, {@link FieldCacheRangeFilter} may have significantly better performance. * @since 2.9 */ public class TermRangeFilter extends MultiTermQueryWrapperFilter<TermRangeQuery> { /** * @param fieldName The field this range applies to * @param lowerTerm The lower bound on this range * @param upperTerm The upper bound on this range * @param includeLower Does this range include the lower bound? * @param includeUpper Does this range include the upper bound? 
* @throws IllegalArgumentException if both terms are null or if * lowerTerm is null and includeLower is true (similar for upperTerm * and includeUpper) */ public TermRangeFilter(String fieldName, String lowerTerm, String upperTerm, boolean includeLower, boolean includeUpper) { super(new TermRangeQuery(fieldName, lowerTerm, upperTerm, includeLower, includeUpper)); } /** * <strong>WARNING:</strong> Using this constructor and supplying a non-null * value in the <code>collator</code> parameter will cause every single * index Term in the Field referenced by lowerTerm and/or upperTerm to be * examined. Depending on the number of index Terms in this Field, the * operation could be very slow. * * @param lowerTerm The lower bound on this range * @param upperTerm The upper bound on this range * @param includeLower Does this range include the lower bound? * @param includeUpper Does this range include the upper bound? * @param collator The collator to use when determining range inclusion; set * to null to use Unicode code point ordering instead of collation. * @throws IllegalArgumentException if both terms are null or if * lowerTerm is null and includeLower is true (similar for upperTerm * and includeUpper) */ public TermRangeFilter(String fieldName, String lowerTerm, String upperTerm, boolean includeLower, boolean includeUpper, Collator collator) { super(new TermRangeQuery(fieldName, lowerTerm, upperTerm, includeLower, includeUpper, collator)); } /** * Constructs a filter for field <code>fieldName</code> matching * less than or equal to <code>upperTerm</code>. */ public static TermRangeFilter Less(String fieldName, String upperTerm) { return new TermRangeFilter(fieldName, null, upperTerm, false, true); } /** * Constructs a filter for field <code>fieldName</code> matching * greater than or equal to <code>lowerTerm</code>. 
*/ public static TermRangeFilter More(String fieldName, String lowerTerm) { return new TermRangeFilter(fieldName, lowerTerm, null, true, false); } /** Returns the field name for this filter */ public String getField() { return query.getField(); } /** Returns the lower value of this range filter */ public String getLowerTerm() { return query.getLowerTerm(); } /** Returns the upper value of this range filter */ public String getUpperTerm() { return query.getUpperTerm(); } /** Returns <code>true</code> if the lower endpoint is inclusive */ public boolean includesLower() { return query.includesLower(); } /** Returns <code>true</code> if the upper endpoint is inclusive */ public boolean includesUpper() { return query.includesUpper(); } /** Returns the collator used to determine range inclusion, if any. */ public Collator getCollator() { return query.getCollator(); } }
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/search/TermRangeFilter.java
Java
art
4,771
package org.apache.lucene.search; /** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** Represents hits returned by {@link * Searcher#search(Query,Filter,int)} and {@link * Searcher#search(Query,int)}. */ public class TopDocs implements java.io.Serializable { /** The total number of hits for the query. */ public int totalHits; /** The top hits for the query. */ public ScoreDoc[] scoreDocs; /** Stores the maximum score value encountered, needed for normalizing. */ private float maxScore; /** * Returns the maximum score value encountered. Note that in case * scores are not tracked, this returns {@link Float#NaN}. */ public float getMaxScore() { return maxScore; } /** Sets the maximum score value encountered. */ public void setMaxScore(float maxScore) { this.maxScore=maxScore; } /** Constructs a TopDocs with a default maxScore=Float.NaN. */ TopDocs(int totalHits, ScoreDoc[] scoreDocs) { this(totalHits, scoreDocs, Float.NaN); } public TopDocs(int totalHits, ScoreDoc[] scoreDocs, float maxScore) { this.totalHits = totalHits; this.scoreDocs = scoreDocs; this.maxScore = maxScore; } }
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/search/TopDocs.java
Java
art
1,932
package org.apache.lucene.search; /** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import java.io.IOException; import java.text.Collator; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.Term; import org.apache.lucene.util.StringHelper; /** * Subclass of FilteredTermEnum for enumerating all terms that match the * specified range parameters. * <p> * Term enumerations are always ordered by Term.compareTo(). Each term in * the enumeration is greater than all that precede it. * @since 2.9 */ public class TermRangeTermEnum extends FilteredTermEnum { private Collator collator = null; private boolean endEnum = false; private String field; private String upperTermText; private String lowerTermText; private boolean includeLower; private boolean includeUpper; /** * Enumerates all terms greater/equal than <code>lowerTerm</code> * but less/equal than <code>upperTerm</code>. * * If an endpoint is null, it is said to be "open". Either or both * endpoints may be open. Open endpoints may not be exclusive * (you can't select all but the first or last term without * explicitly specifying the term to exclude.) * * @param reader * @param field * An interned field that holds both lower and upper terms. 
* @param lowerTermText * The term text at the lower end of the range * @param upperTermText * The term text at the upper end of the range * @param includeLower * If true, the <code>lowerTerm</code> is included in the range. * @param includeUpper * If true, the <code>upperTerm</code> is included in the range. * @param collator * The collator to use to collate index Terms, to determine their * membership in the range bounded by <code>lowerTerm</code> and * <code>upperTerm</code>. * * @throws IOException */ public TermRangeTermEnum(IndexReader reader, String field, String lowerTermText, String upperTermText, boolean includeLower, boolean includeUpper, Collator collator) throws IOException { this.collator = collator; this.upperTermText = upperTermText; this.lowerTermText = lowerTermText; this.includeLower = includeLower; this.includeUpper = includeUpper; this.field = StringHelper.intern(field); // do a little bit of normalization... // open ended range queries should always be inclusive. if (this.lowerTermText == null) { this.lowerTermText = ""; this.includeLower = true; } if (this.upperTermText == null) { this.includeUpper = true; } String startTermText = collator == null ? 
this.lowerTermText : ""; setEnum(reader.terms(new Term(this.field, startTermText))); } @Override public float difference() { return 1.0f; } @Override protected boolean endEnum() { return endEnum; } @Override protected boolean termCompare(Term term) { if (collator == null) { // Use Unicode code point ordering boolean checkLower = false; if (!includeLower) // make adjustments to set to exclusive checkLower = true; if (term != null && term.field() == field) { // interned comparison if (!checkLower || null==lowerTermText || term.text().compareTo(lowerTermText) > 0) { checkLower = false; if (upperTermText != null) { int compare = upperTermText.compareTo(term.text()); /* * if beyond the upper term, or is exclusive and this is equal to * the upper term, break out */ if ((compare < 0) || (!includeUpper && compare==0)) { endEnum = true; return false; } } return true; } } else { // break endEnum = true; return false; } return false; } else { if (term != null && term.field() == field) { // interned comparison if ((lowerTermText == null || (includeLower ? collator.compare(term.text(), lowerTermText) >= 0 : collator.compare(term.text(), lowerTermText) > 0)) && (upperTermText == null || (includeUpper ? collator.compare(term.text(), upperTermText) <= 0 : collator.compare(term.text(), upperTermText) < 0))) { return true; } return false; } endEnum = true; return false; } } }
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/search/TermRangeTermEnum.java
Java
art
5,302
package org.apache.lucene.search; /** * Copyright 2007 The Apache Software Foundation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import org.apache.lucene.index.IndexReader; import org.apache.lucene.search.spans.SpanQuery; import org.apache.lucene.search.spans.Spans; import org.apache.lucene.util.OpenBitSet; import java.io.IOException; import java.util.ArrayList; import java.util.List; /** * Constrains search results to only match those which also match a provided * query. Also provides position information about where each document matches * at the cost of extra space compared with the QueryWrapperFilter. * There is an added cost to this above what is stored in a {@link QueryWrapperFilter}. Namely, * the position information for each matching document is stored. * <p/> * This filter does not cache. See the {@link org.apache.lucene.search.CachingSpanFilter} for a wrapper that * caches. * * * @version $Id:$ */ public class SpanQueryFilter extends SpanFilter { protected SpanQuery query; protected SpanQueryFilter() { } /** Constructs a filter which only matches documents matching * <code>query</code>. * @param query The {@link org.apache.lucene.search.spans.SpanQuery} to use as the basis for the Filter. 
*/ public SpanQueryFilter(SpanQuery query) { this.query = query; } @Override public DocIdSet getDocIdSet(IndexReader reader) throws IOException { SpanFilterResult result = bitSpans(reader); return result.getDocIdSet(); } @Override public SpanFilterResult bitSpans(IndexReader reader) throws IOException { final OpenBitSet bits = new OpenBitSet(reader.maxDoc()); Spans spans = query.getSpans(reader); List<SpanFilterResult.PositionInfo> tmp = new ArrayList<SpanFilterResult.PositionInfo>(20); int currentDoc = -1; SpanFilterResult.PositionInfo currentInfo = null; while (spans.next()) { int doc = spans.doc(); bits.set(doc); if (currentDoc != doc) { currentInfo = new SpanFilterResult.PositionInfo(doc); tmp.add(currentInfo); currentDoc = doc; } currentInfo.addPosition(spans.start(), spans.end()); } return new SpanFilterResult(bits, tmp); } public SpanQuery getQuery() { return query; } @Override public String toString() { return "SpanQueryFilter(" + query + ")"; } @Override public boolean equals(Object o) { return o instanceof SpanQueryFilter && this.query.equals(((SpanQueryFilter) o).query); } @Override public int hashCode() { return query.hashCode() ^ 0x923F64B9; } }
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/search/SpanQueryFilter.java
Java
art
3,130
package org.apache.lucene.search;

/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import java.io.IOException;

/**
 * Expert: Common scoring functionality for different types of queries.
 *
 * <p>
 * A <code>Scorer</code> iterates over documents matching a
 * query in increasing order of doc Id.
 * </p>
 * <p>
 * Document scores are computed using a given <code>Similarity</code>
 * implementation.
 * </p>
 *
 * <p><b>NOTE</b>: The values Float.Nan,
 * Float.NEGATIVE_INFINITY and Float.POSITIVE_INFINITY are
 * not valid scores.  Certain collectors (eg {@link
 * TopScoreDocCollector}) will not properly collect hits
 * with these scores.
 */
public abstract class Scorer extends DocIdSetIterator {

  /** The Similarity used to compute scores; fixed at construction time. */
  private Similarity similarity;

  /**
   * Constructs a Scorer.
   *
   * @param similarity The <code>Similarity</code> implementation used by this
   *                   scorer.
   */
  protected Scorer(Similarity similarity) {
    this.similarity = similarity;
  }

  /** Returns the Similarity implementation used by this scorer. */
  public Similarity getSimilarity() {
    return this.similarity;
  }

  /**
   * Scores and collects all matching documents.
   *
   * @param collector The collector to which all matching documents are passed.
   */
  public void score(Collector collector) throws IOException {
    collector.setScorer(this);
    // Drain the iterator, handing every matching doc to the collector.
    for (int docId = nextDoc(); docId != NO_MORE_DOCS; docId = nextDoc()) {
      collector.collect(docId);
    }
  }

  /**
   * Expert: Collects matching documents in a range.  Hook for optimization.
   * Note, <code>firstDocID</code> is added to ensure that {@link #nextDoc()}
   * was called before this method.
   *
   * @param collector
   *          The collector to which all matching documents are passed.
   * @param max
   *          Do not score documents past this.
   * @param firstDocID
   *          The first document ID (ensures {@link #nextDoc()} is called
   *          before this method).
   * @return true if more matching documents may remain.
   */
  protected boolean score(Collector collector, int max, int firstDocID)
      throws IOException {
    collector.setScorer(this);
    // Collect from firstDocID up to (but excluding) max; leave the iterator
    // positioned on the first doc at or beyond max.
    int current = firstDocID;
    for (; current < max; current = nextDoc()) {
      collector.collect(current);
    }
    return current != NO_MORE_DOCS;
  }

  /**
   * Returns the score of the current document matching the query.
   * Initially invalid, until {@link #nextDoc()} or {@link #advance(int)}
   * is called the first time, or when called from within
   * {@link Collector#collect}.
   */
  public abstract float score() throws IOException;
}
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/search/Scorer.java
Java
art
3,286
package org.apache.lucene.search.spans;

/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import java.io.IOException;
import java.util.Collection;

/**
 * Expert: an enumeration of span matches.  Used to implement span searching.
 * Each span represents a range of term positions within a document.  Matches
 * are enumerated in order: by increasing document number, within that by
 * increasing start position, and finally by increasing end position.
 */
public abstract class Spans {

  /** Move to the next match, returning true iff any such exists. */
  public abstract boolean next() throws IOException;

  /**
   * Skips to the first match beyond the current, whose document number is
   * greater than or equal to <i>target</i>.
   * <p>Returns true iff there is such a match.
   * <p>Behaves as if written:
   * <pre>
   *   boolean skipTo(int target) {
   *     do {
   *       if (!next())
   *         return false;
   *     } while (target &gt; doc());
   *     return true;
   *   }
   * </pre>
   * Most implementations are considerably more efficient than that.
   */
  public abstract boolean skipTo(int target) throws IOException;

  /** Returns the document number of the current match.  Initially invalid. */
  public abstract int doc();

  /** Returns the start position of the current match.  Initially invalid. */
  public abstract int start();

  /** Returns the end position of the current match.  Initially invalid. */
  public abstract int end();

  /**
   * Returns the payload data for the current span.
   * This is invalid until {@link #next()} is called for
   * the first time.
   * This method must not be called more than once after each call
   * of {@link #next()}.  However, most payloads are loaded lazily,
   * so if the payload data for the current position is not needed,
   * this method may not be called at all for performance reasons.  An ordered
   * SpanQuery does not lazy load, so if you have payloads in your index and
   * you do not want ordered SpanNearQuerys to collect payloads, you can
   * disable collection with a constructor option.<br>
   * <br>
   * Note that the return type is a collection, thus the ordering should not
   * be relied upon.
   * <br/>
   * <p><font color="#FF0000">
   * WARNING: The status of the <b>Payloads</b> feature is experimental.
   * The APIs introduced here might change in the future and will not be
   * supported anymore in such a case.</font>
   *
   * @return a List of byte arrays containing the data of this payload,
   *         otherwise null if isPayloadAvailable is false
   * @throws java.io.IOException
   */
  // TODO: Remove warning after API has been finalized
  public abstract Collection<byte[]> getPayload() throws IOException;

  /**
   * Checks if a payload can be loaded at this position.
   * <p/>
   * Payloads can only be loaded once per call to
   * {@link #next()}.
   *
   * @return true if there is a payload available at this position that can
   *         be loaded
   */
  public abstract boolean isPayloadAvailable();
}
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/search/spans/Spans.java
Java
art
3,748
package org.apache.lucene.search.spans;

/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import java.io.IOException;

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Searcher;
import org.apache.lucene.search.Weight;

/** Base class for span-based queries. */
public abstract class SpanQuery extends Query {

  /**
   * Expert: Returns the matches for this query in an index.  Used internally
   * to search for spans.
   */
  public abstract Spans getSpans(IndexReader reader) throws IOException;

  /** Returns the name of the field matched by this query. */
  public abstract String getField();

  // All span queries share a single Weight implementation; subclasses only
  // need to supply getSpans() and getField().
  @Override
  public Weight createWeight(Searcher searcher) throws IOException {
    return new SpanWeight(this, searcher);
  }
}
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/search/spans/SpanQuery.java
Java
art
1,542
package org.apache.lucene.search.spans;

/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.util.PriorityQueue;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Set;
import java.util.HashSet;

/**
 * Similar to {@link NearSpansOrdered}, but for the unordered case.
 *
 * Expert:
 * Only public for subclassing.  Most implementations should not need this
 * class.
 *
 * Implementation note: the sub-spans are tracked in two interchangeable
 * structures — a doc-sorted priority queue ({@link #queue}) and a singly
 * linked list ({@link #first}/{@link #last}) — converted back and forth by
 * listToQueue()/queueToList() as the algorithm alternates between
 * "advance the minimum" and "skip everything to the same doc" phases.
 */
public class NearSpansUnordered extends Spans {
  private SpanNearQuery query;

  private List<SpansCell> ordered = new ArrayList<SpansCell>(); // spans in query order
  private Spans[] subSpans;
  private int slop;                    // from query

  private SpansCell first;             // linked list of spans
  private SpansCell last;              // sorted by doc only

  private int totalLength;             // sum of current lengths

  private CellQueue queue;             // sorted queue of spans
  private SpansCell max;               // max element in queue

  private boolean more = true;         // true iff not done
  private boolean firstTime = true;    // true before first next()

  /** Priority queue ordering cells by doc, then by span order within a doc. */
  private class CellQueue extends PriorityQueue<SpansCell> {
    public CellQueue(int size) {
      initialize(size);
    }

    @Override
    protected final boolean lessThan(SpansCell spans1, SpansCell spans2) {
      if (spans1.doc() == spans2.doc()) {
        // Same doc: fall back to the ordered-span comparison (start, then end).
        return NearSpansOrdered.docSpansOrdered(spans1, spans2);
      } else {
        return spans1.doc() < spans2.doc();
      }
    }
  }

  /**
   * Wraps a Spans, and can be used to form a linked list.
   * Every advance goes through adjust(), which maintains the enclosing
   * class's totalLength, max and more fields as side effects.
   */
  private class SpansCell extends Spans {
    private Spans spans;
    private SpansCell next;          // next cell in the linked list, or null
    private int length = -1;         // current span length; -1 until positioned
    private int index;               // clause position, used only by toString()

    public SpansCell(Spans spans, int index) {
      this.spans = spans;
      this.index = index;
    }

    @Override
    public boolean next() throws IOException {
      return adjust(spans.next());
    }

    @Override
    public boolean skipTo(int target) throws IOException {
      return adjust(spans.skipTo(target));
    }

    /**
     * Updates totalLength for this cell's new position, and promotes this
     * cell to {@code max} if it now sorts last (by doc, then end).
     */
    private boolean adjust(boolean condition) {
      if (length != -1) {
        totalLength -= length;  // subtract old length
      }
      if (condition) {
        length = end() - start();
        totalLength += length;  // add new length

        if (max == null || doc() > max.doc()
            || (doc() == max.doc()) && (end() > max.end())) {
          max = this;
        }
      }
      more = condition;
      return condition;
    }

    @Override
    public int doc() { return spans.doc(); }

    @Override
    public int start() { return spans.start(); }

    @Override
    public int end() { return spans.end(); }

    // TODO: Remove warning after API has been finalized
    @Override
    public Collection<byte[]> getPayload() throws IOException {
      // Defensive copy: callers may hold on to the collection.
      return new ArrayList<byte[]>(spans.getPayload());
    }

    // TODO: Remove warning after API has been finalized
    @Override
    public boolean isPayloadAvailable() {
      return spans.isPayloadAvailable();
    }

    @Override
    public String toString() { return spans.toString() + "#" + index; }
  }

  public NearSpansUnordered(SpanNearQuery query, IndexReader reader)
      throws IOException {
    this.query = query;
    this.slop = query.getSlop();

    SpanQuery[] clauses = query.getClauses();
    queue = new CellQueue(clauses.length);
    subSpans = new Spans[clauses.length];
    for (int i = 0; i < clauses.length; i++) {
      SpansCell cell = new SpansCell(clauses[i].getSpans(reader), i);
      ordered.add(cell);
      subSpans[i] = cell.spans;
    }
  }

  /** Returns the (unwrapped) spans of the clauses, in query order. */
  public Spans[] getSubSpans() {
    return subSpans;
  }

  @Override
  public boolean next() throws IOException {
    if (firstTime) {
      initList(true);
      listToQueue();            // initialize queue
      firstTime = false;
    } else if (more) {
      if (min().next()) {       // trigger further scanning
        queue.updateTop();      // maintain queue
      } else {
        more = false;
      }
    }

    while (more) {
      boolean queueStale = false;
      if (min().doc() != max.doc()) {   // maintain list
        queueToList();
        queueStale = true;
      }
      // skip to doc w/ all clauses
      while (more && first.doc() < last.doc()) {
        more = first.skipTo(last.doc());  // skip first upto last
        firstToLast();                    // and move it to the end
        queueStale = true;
      }
      if (!more) return false;

      // found doc w/ all clauses
      if (queueStale) {         // maintain the queue
        listToQueue();
        queueStale = false;
      }
      if (atMatch()) {
        return true;
      }
      // All clauses on the same doc but outside slop: advance the minimum
      // span and keep scanning.
      more = min().next();
      if (more) {
        queue.updateTop();      // maintain queue
      }
    }
    return false;               // no more matches
  }

  @Override
  public boolean skipTo(int target) throws IOException {
    if (firstTime) {                        // initialize
      initList(false);
      for (SpansCell cell = first; more && cell != null; cell = cell.next) {
        more = cell.skipTo(target);         // skip all
      }
      if (more) {
        listToQueue();
      }
      firstTime = false;
    } else {                                // normal case
      while (more && min().doc() < target) { // skip as needed
        if (min().skipTo(target)) {
          queue.updateTop();
        } else {
          more = false;
        }
      }
    }
    // Either we are already at a match, or fall through to next().
    return more && (atMatch() || next());
  }

  /** The cell with the smallest (doc, start, end), i.e. the queue head. */
  private SpansCell min() { return queue.top(); }

  @Override
  public int doc() { return min().doc(); }

  @Override
  public int start() { return min().start(); }

  @Override
  public int end() { return max.end(); }

  // TODO: Remove warning after API has been finalized
  /**
   * WARNING: The List is not necessarily in order of the positions.
   * @return Collection of <code>byte[]</code> payloads
   * @throws IOException
   */
  @Override
  public Collection<byte[]> getPayload() throws IOException {
    Set<byte[]> matchPayload = new HashSet<byte[]>();
    for (SpansCell cell = first; cell != null; cell = cell.next) {
      if (cell.isPayloadAvailable()) {
        matchPayload.addAll(cell.getPayload());
      }
    }
    return matchPayload;
  }

  // TODO: Remove warning after API has been finalized
  @Override
  public boolean isPayloadAvailable() {
    // Walk the list starting from the minimum cell; any cell with a payload
    // makes one available for the whole match.
    SpansCell pointer = min();
    while (pointer != null) {
      if (pointer.isPayloadAvailable()) {
        return true;
      }
      pointer = pointer.next;
    }
    return false;
  }

  @Override
  public String toString() {
    return getClass().getName() + "(" + query.toString() + ")@" +
      (firstTime ? "START" : (more ? (doc() + ":" + start() + "-" + end()) : "END"));
  }

  /** Fills the linked list from {@code ordered}, optionally advancing each cell. */
  private void initList(boolean next) throws IOException {
    for (int i = 0; more && i < ordered.size(); i++) {
      SpansCell cell = ordered.get(i);
      if (next)
        more = cell.next();     // move to first entry
      if (more) {
        addToList(cell);        // add to list
      }
    }
  }

  private void addToList(SpansCell cell) throws IOException {
    if (last != null) {         // add next to end of list
      last.next = cell;
    } else
      first = cell;
    last = cell;
    cell.next = null;
  }

  /** Rotates the list head to the tail (used after skipping the head forward). */
  private void firstToLast() {
    last.next = first;          // move first to end of list
    last = first;
    first = first.next;
    last.next = null;
  }

  /** Drains the queue into the linked list (in queue order). */
  private void queueToList() throws IOException {
    last = first = null;
    while (queue.top() != null) {
      addToList(queue.pop());
    }
  }

  /** Rebuilds the queue from the linked list. */
  private void listToQueue() {
    queue.clear();              // rebuild queue
    for (SpansCell cell = first; cell != null; cell = cell.next) {
      queue.add(cell);          // add to queue from list
    }
  }

  /**
   * True iff all cells are on the same doc and the gap between the outermost
   * span boundaries, minus the text covered by the spans themselves, is
   * within the allowed slop.
   */
  private boolean atMatch() {
    return (min().doc() == max.doc())
        && ((max.end() - min().start() - totalLength) <= slop);
  }
}
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/search/spans/NearSpansUnordered.java
Java
art
9,047
<!doctype html public "-//w3c//dtd html 4.0 transitional//en"> <!-- Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> <html> <head></head> <body> The calculus of spans. <p>A span is a <code>&lt;doc,startPosition,endPosition&gt;</code> tuple.</p> <p>The following span query operators are implemented: <ul> <li>A <a href="SpanTermQuery.html">SpanTermQuery</a> matches all spans containing a particular <a href="../../index/Term.html">Term</a>.</li> <li> A <a href="SpanNearQuery.html">SpanNearQuery</a> matches spans which occur near one another, and can be used to implement things like phrase search (when constructed from <a href="SpanTermQuery.html">SpanTermQueries</a>) and inter-phrase proximity (when constructed from other <a href="SpanNearQuery.html">SpanNearQueries</a>).</li> <li>A <a href="SpanOrQuery.html">SpanOrQuery</a> merges spans from a number of other <a href="SpanQuery.html">SpanQueries</a>.</li> <li>A <a href="SpanNotQuery.html">SpanNotQuery</a> removes spans matching one <a href="SpanQuery.html">SpanQuery</a> which overlap another. This can be used, e.g., to implement within-paragraph search.</li> <li>A <a href="SpanFirstQuery.html">SpanFirstQuery</a> matches spans matching <code>q</code> whose end position is less than <code>n</code>. 
This can be used to constrain matches to the first part of the document.</li>

</ul>

In all cases, output spans are minimally inclusive.  In other words, a
span formed by matching a span in x and y starts at the lesser of the
two starts and ends at the greater of the two ends.
</p>

<p>For example, a span query which matches "John Kerry" within ten
words of "George Bush" within the first 100 words of the document
could be constructed with:
<pre>
SpanQuery john   = new SpanTermQuery(new Term("content", "john"));
SpanQuery kerry  = new SpanTermQuery(new Term("content", "kerry"));
SpanQuery george = new SpanTermQuery(new Term("content", "george"));
SpanQuery bush   = new SpanTermQuery(new Term("content", "bush"));

SpanQuery johnKerry =
   new SpanNearQuery(new SpanQuery[] {john, kerry}, 0, true);

SpanQuery georgeBush =
   new SpanNearQuery(new SpanQuery[] {george, bush}, 0, true);

SpanQuery johnKerryNearGeorgeBush =
   new SpanNearQuery(new SpanQuery[] {johnKerry, georgeBush}, 10, false);

SpanQuery johnKerryNearGeorgeBushAtStart =
   new SpanFirstQuery(johnKerryNearGeorgeBush, 100);
</pre>

<p>Span queries may be freely intermixed with other Lucene queries.
So, for example, the above query can be restricted to documents which
also use the word "iraq" with:
<pre>
Query query = new BooleanQuery();
query.add(johnKerryNearGeorgeBushAtStart, true, false);
query.add(new TermQuery(new Term("content", "iraq")), true, false);
</pre>

</body>
</html>
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/search/spans/package.html
HTML
art
3,455
package org.apache.lucene.search.spans;

/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import java.io.IOException;

import org.apache.lucene.search.Explanation;
import org.apache.lucene.search.Weight;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Similarity;

/**
 * Public for extension only.
 *
 * A Scorer over a Spans enumeration.  Invariant: after construction and
 * after every setFreqCurrentDoc() call, {@code spans} is positioned one
 * match AHEAD of the doc this scorer reports (or exhausted, with
 * {@code more == false}).
 */
public class SpanScorer extends Scorer {
  protected Spans spans;
  protected Weight weight;
  protected byte[] norms;          // field norms, or null if norms are omitted
  protected float value;           // the weight's query-normalized value
  protected boolean more = true;   // true while spans has more matches
  protected int doc;               // current doc id reported by this scorer
  protected float freq;            // accumulated sloppy frequency for doc

  protected SpanScorer(Spans spans, Weight weight, Similarity similarity,
                       byte[] norms) throws IOException {
    super(similarity);
    this.spans = spans;
    this.norms = norms;
    this.weight = weight;
    this.value = weight.getValue();
    if (this.spans.next()) {
      // spans is now on its first match; doc stays -1 until nextDoc().
      doc = -1;
    } else {
      doc = NO_MORE_DOCS;
      more = false;
    }
  }

  @Override
  public int nextDoc() throws IOException {
    if (!setFreqCurrentDoc()) {
      doc = NO_MORE_DOCS;
    }
    return doc;
  }

  @Override
  public int advance(int target) throws IOException {
    if (!more) {
      return doc = NO_MORE_DOCS;
    }
    if (spans.doc() < target) { // setFreqCurrentDoc() leaves spans.doc() ahead
      more = spans.skipTo(target);
    }
    if (!setFreqCurrentDoc()) {
      doc = NO_MORE_DOCS;
    }
    return doc;
  }

  /**
   * Consumes every span on the doc that spans is currently positioned on,
   * accumulating the sloppy frequency into {@code freq}, and leaves spans
   * on the following doc (or exhausted).
   *
   * @return false iff spans was already exhausted
   */
  protected boolean setFreqCurrentDoc() throws IOException {
    if (!more) {
      return false;
    }
    doc = spans.doc();
    freq = 0.0f;
    do {
      int matchLength = spans.end() - spans.start();
      freq += getSimilarity().sloppyFreq(matchLength);
      more = spans.next();
    } while (more && (doc == spans.doc()));
    return true;
  }

  @Override
  public int docID() { return doc; }

  @Override
  public float score() throws IOException {
    float raw = getSimilarity().tf(freq) * value; // raw score
    return norms == null ? raw
      : raw * Similarity.decodeNorm(norms[doc]); // normalize
  }

  /**
   * This method is no longer an official member of {@link Scorer},
   * but it is needed by SpanWeight to build an explanation.
   */
  protected Explanation explain(final int doc) throws IOException {
    Explanation tfExplanation = new Explanation();

    // Advance to the requested doc; freq is 0 if this scorer skips past it.
    int expDoc = advance(doc);

    float phraseFreq = (expDoc == doc) ? freq : 0.0f;
    tfExplanation.setValue(getSimilarity().tf(phraseFreq));
    tfExplanation.setDescription("tf(phraseFreq=" + phraseFreq + ")");

    return tfExplanation;
  }
}
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/search/spans/SpanScorer.java
Java
art
3,255
package org.apache.lucene.search.spans;

/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import java.io.IOException;
import java.util.Set;

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Weight;
import org.apache.lucene.search.Searcher;
import org.apache.lucene.search.Similarity;
import org.apache.lucene.util.ToStringUtils;

/**
 * <p>Wrapper to allow {@link SpanQuery} objects participate in composite
 * single-field SpanQueries by 'lying' about their search field.  That is,
 * the masked SpanQuery will function as normal,
 * but {@link SpanQuery#getField()} simply hands back the value supplied
 * in this class's constructor.</p>
 *
 * <p>This can be used to support Queries like {@link SpanNearQuery} or
 * {@link SpanOrQuery} across different fields, which is not ordinarily
 * permitted.</p>
 *
 * <p>This can be useful for denormalized relational data: for example, when
 * indexing a document with conceptually many 'children': </p>
 *
 * <pre>
 *  teacherid: 1
 *  studentfirstname: james
 *  studentsurname: jones
 *
 *  teacherid: 2
 *  studentfirstname: james
 *  studentsurname: smith
 *  studentfirstname: sally
 *  studentsurname: jones
 * </pre>
 *
 * <p>a SpanNearQuery with a slop of -1 can be applied across two
 * {@link SpanTermQuery} objects as follows:
 * <pre>
 *    SpanQuery q1  = new SpanTermQuery(new Term("studentfirstname", "james"));
 *    SpanQuery q2  = new SpanTermQuery(new Term("studentsurname", "jones"));
 *    SpanQuery q2m = new FieldMaskingSpanQuery(q2, "studentfirstname");
 *    Query q = new SpanNearQuery(new SpanQuery[]{q1, q2m}, -1, false);
 * </pre>
 * to search for 'studentfirstname:james studentsurname:jones' and find
 * teacherid 1 without matching teacherid 2 (which has a 'james' in position 0
 * and 'jones' in position 1). </p>
 *
 * <p>Note: as {@link #getField()} returns the masked field, scoring will be
 * done using the norms of the field name supplied.  This may lead to
 * unexpected scoring behaviour.</p>
 */
public class FieldMaskingSpanQuery extends SpanQuery {
  private SpanQuery maskedQuery;  // the real query doing the matching
  private String field;           // the field name reported to callers

  public FieldMaskingSpanQuery(SpanQuery maskedQuery, String maskedField) {
    this.maskedQuery = maskedQuery;
    this.field = maskedField;
  }

  /** Returns the masking field name, not the wrapped query's real field. */
  @Override
  public String getField() {
    return field;
  }

  /** Returns the wrapped query whose field is being masked. */
  public SpanQuery getMaskedQuery() {
    return maskedQuery;
  }

  // :NOTE: getBoost and setBoost are not proxied to the maskedQuery
  // ...this is done to be more consistent with things like SpanFirstQuery

  @Override
  public Spans getSpans(IndexReader reader) throws IOException {
    return maskedQuery.getSpans(reader);
  }

  @Override
  public void extractTerms(Set<Term> terms) {
    maskedQuery.extractTerms(terms);
  }

  @Override
  public Weight createWeight(Searcher searcher) throws IOException {
    return maskedQuery.createWeight(searcher);
  }

  @Override
  public Similarity getSimilarity(Searcher searcher) {
    return maskedQuery.getSimilarity(searcher);
  }

  @Override
  public Query rewrite(IndexReader reader) throws IOException {
    // Only clone (copy-on-write) if the inner query actually rewrote.
    FieldMaskingSpanQuery clone = null;

    SpanQuery rewritten = (SpanQuery) maskedQuery.rewrite(reader);
    if (rewritten != maskedQuery) {
      clone = (FieldMaskingSpanQuery) this.clone();
      clone.maskedQuery = rewritten;
    }

    if (clone != null) {
      return clone;
    } else {
      return this;
    }
  }

  @Override
  public String toString(String field) {
    StringBuilder buffer = new StringBuilder();
    buffer.append("mask(");
    buffer.append(maskedQuery.toString(field));
    buffer.append(")");
    buffer.append(ToStringUtils.boost(getBoost()));
    buffer.append(" as ");
    buffer.append(this.field);
    return buffer.toString();
  }

  @Override
  public boolean equals(Object o) {
    if (!(o instanceof FieldMaskingSpanQuery))
      return false;
    FieldMaskingSpanQuery other = (FieldMaskingSpanQuery) o;
    return (this.getField().equals(other.getField())
            && (this.getBoost() == other.getBoost())
            && this.getMaskedQuery().equals(other.getMaskedQuery()));
  }

  @Override
  public int hashCode() {
    return getMaskedQuery().hashCode()
      ^ getField().hashCode()
      ^ Float.floatToRawIntBits(getBoost());
  }
}
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/search/spans/FieldMaskingSpanQuery.java
Java
art
5,144
package org.apache.lucene.search.spans;

/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.Query;
import org.apache.lucene.util.ToStringUtils;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Set;

/** Removes matches which overlap with another SpanQuery. */
public class SpanNotQuery extends SpanQuery implements Cloneable {
  // Spans from `include` are candidates; any candidate that position-overlaps
  // a span from `exclude` in the same document is filtered out.
  private SpanQuery include;
  private SpanQuery exclude;

  /** Construct a SpanNotQuery matching spans from <code>include</code> which
   * have no overlap with spans from <code>exclude</code>.
   * @throws IllegalArgumentException if the two clauses are on different fields */
  public SpanNotQuery(SpanQuery include, SpanQuery exclude) {
    this.include = include;
    this.exclude = exclude;
    if (!include.getField().equals(exclude.getField()))
      throw new IllegalArgumentException("Clauses must have same field.");
  }

  /** Return the SpanQuery whose matches are filtered. */
  public SpanQuery getInclude() { return include; }

  /** Return the SpanQuery whose matches must not overlap those returned. */
  public SpanQuery getExclude() { return exclude; }

  @Override
  public String getField() { return include.getField(); }

  // Only terms from `include` contribute to matches, so only those are extracted.
  @Override
  public void extractTerms(Set<Term> terms) { include.extractTerms(terms); }

  @Override
  public String toString(String field) {
    StringBuilder buffer = new StringBuilder();
    buffer.append("spanNot(");
    buffer.append(include.toString(field));
    buffer.append(", ");
    buffer.append(exclude.toString(field));
    buffer.append(")");
    buffer.append(ToStringUtils.boost(getBoost()));
    return buffer.toString();
  }

  /** Deep-clones both clauses and copies the boost. */
  @Override
  public Object clone() {
    SpanNotQuery spanNotQuery = new SpanNotQuery((SpanQuery)include.clone(),(SpanQuery) exclude.clone());
    spanNotQuery.setBoost(getBoost());
    return spanNotQuery;
  }

  @Override
  public Spans getSpans(final IndexReader reader) throws IOException {
    // Two-pointer walk: advance include spans; for each candidate, advance the
    // exclude spans until they are not strictly before it, then test overlap.
    return new Spans() {
      private Spans includeSpans = include.getSpans(reader);
      private boolean moreInclude = true;

      private Spans excludeSpans = exclude.getSpans(reader);
      // Exclude iterator is primed eagerly so doc()/start() comparisons are valid.
      private boolean moreExclude = excludeSpans.next();

      @Override
      public boolean next() throws IOException {
        if (moreInclude)                          // move to next include
          moreInclude = includeSpans.next();

        while (moreInclude && moreExclude) {

          if (includeSpans.doc() > excludeSpans.doc())  // skip exclude
            moreExclude = excludeSpans.skipTo(includeSpans.doc());

          // Discard exclude spans that end before the candidate starts.
          while (moreExclude                      // while exclude is before
                 && includeSpans.doc() == excludeSpans.doc()
                 && excludeSpans.end() <= includeSpans.start()) {
            moreExclude = excludeSpans.next();    // increment exclude
          }

          // No exclude span overlaps the candidate -> candidate is a match.
          if (!moreExclude                        // if no intersection
              || includeSpans.doc() != excludeSpans.doc()
              || includeSpans.end() <= excludeSpans.start())
            break;                                // we found a match

          moreInclude = includeSpans.next();      // intersected: keep scanning
        }
        return moreInclude;
      }

      @Override
      public boolean skipTo(int target) throws IOException {
        if (moreInclude)                          // skip include
          moreInclude = includeSpans.skipTo(target);

        if (!moreInclude)
          return false;

        if (moreExclude                           // skip exclude
            && includeSpans.doc() > excludeSpans.doc())
          moreExclude = excludeSpans.skipTo(includeSpans.doc());

        while (moreExclude                        // while exclude is before
               && includeSpans.doc() == excludeSpans.doc()
               && excludeSpans.end() <= includeSpans.start()) {
          moreExclude = excludeSpans.next();      // increment exclude
        }

        if (!moreExclude                          // if no intersection
            || includeSpans.doc() != excludeSpans.doc()
            || includeSpans.end() <= excludeSpans.start())
          return true;                            // we found a match

        return next();                            // scan to next match
      }

      @Override
      public int doc() { return includeSpans.doc(); }

      @Override
      public int start() { return includeSpans.start(); }

      @Override
      public int end() { return includeSpans.end(); }

      // TODO: Remove warning after API has been finalized
      // Payloads come from the include spans only; returns null when absent.
      @Override
      public Collection<byte[]> getPayload() throws IOException {
        ArrayList<byte[]> result = null;
        if (includeSpans.isPayloadAvailable()) {
          result = new ArrayList<byte[]>(includeSpans.getPayload());
        }
        return result;
      }

      // TODO: Remove warning after API has been finalized
      @Override
      public boolean isPayloadAvailable() {
        return includeSpans.isPayloadAvailable();
      }

      @Override
      public String toString() {
        return "spans(" + SpanNotQuery.this.toString() + ")";
      }
    };
  }

  /** Rewrites both clauses; clones this query only if either changed. */
  @Override
  public Query rewrite(IndexReader reader) throws IOException {
    SpanNotQuery clone = null;

    SpanQuery rewrittenInclude = (SpanQuery) include.rewrite(reader);
    if (rewrittenInclude != include) {
      clone = (SpanNotQuery) this.clone();
      clone.include = rewrittenInclude;
    }
    SpanQuery rewrittenExclude = (SpanQuery) exclude.rewrite(reader);
    if (rewrittenExclude != exclude) {
      if (clone == null) clone = (SpanNotQuery) this.clone();
      clone.exclude = rewrittenExclude;
    }

    if (clone != null) {
      return clone;                               // some clauses rewrote
    } else {
      return this;                                // no clauses rewrote
    }
  }

  /** Returns true iff <code>o</code> is equal to this. */
  @Override
  public boolean equals(Object o) {
    if (this == o) return true;
    if (!(o instanceof SpanNotQuery)) return false;

    SpanNotQuery other = (SpanNotQuery)o;
    return this.include.equals(other.include)
            && this.exclude.equals(other.exclude)
            && this.getBoost() == other.getBoost();
  }

  @Override
  public int hashCode() {
    // Rotate between contributions so include/exclude are order-sensitive.
    int h = include.hashCode();
    h = (h<<1) | (h >>> 31);  // rotate left
    h ^= exclude.hashCode();
    h = (h<<1) | (h >>> 31);  // rotate left
    h ^= Float.floatToRawIntBits(getBoost());
    return h;
  }
}
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/search/spans/SpanNotQuery.java
Java
art
7,406
package org.apache.lucene.search.spans;

/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import java.io.IOException;
import java.util.Collection;
import java.util.Set;
import java.util.ArrayList;

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.Query;
import org.apache.lucene.util.ToStringUtils;

/** Matches spans near the beginning of a field. */
public class SpanFirstQuery extends SpanQuery implements Cloneable {
  // The wrapped query; only its spans ending at or before `end` survive.
  private SpanQuery match;
  private int end;

  /** Construct a SpanFirstQuery matching spans in <code>match</code> whose end
   * position is less than or equal to <code>end</code>. */
  public SpanFirstQuery(SpanQuery match, int end) {
    this.match = match;
    this.end = end;
  }

  /** Return the SpanQuery whose matches are filtered. */
  public SpanQuery getMatch() {
    return match;
  }

  /** Return the maximum end position permitted in a match. */
  public int getEnd() {
    return end;
  }

  @Override
  public String getField() {
    return match.getField();
  }

  @Override
  public String toString(String field) {
    return "spanFirst(" + match.toString(field) + ", " + end + ")"
        + ToStringUtils.boost(getBoost());
  }

  /** Deep-clones the wrapped query and carries the boost over. */
  @Override
  public Object clone() {
    SpanFirstQuery copy = new SpanFirstQuery((SpanQuery) match.clone(), end);
    copy.setBoost(getBoost());
    return copy;
  }

  @Override
  public void extractTerms(Set<Term> terms) {
    match.extractTerms(terms);
  }

  @Override
  public Spans getSpans(final IndexReader reader) throws IOException {
    return new Spans() {
      private Spans spans = match.getSpans(reader);

      /** Advances until a span ends within the permitted prefix, if any. */
      @Override
      public boolean next() throws IOException {
        boolean more = spans.next();
        while (more) {
          if (spans.end() <= end) {
            return true;
          }
          more = spans.next();
        }
        return false;
      }

      @Override
      public boolean skipTo(int target) throws IOException {
        if (!spans.skipTo(target)) {
          return false;
        }
        if (spans.end() <= end) {
          return true;  // landed directly on an acceptable span
        }
        return next();  // otherwise keep scanning forward
      }

      @Override
      public int doc() {
        return spans.doc();
      }

      @Override
      public int start() {
        return spans.start();
      }

      @Override
      public int end() {
        return spans.end();
      }

      // TODO: Remove warning after API has been finalized
      // Returns a copy of the underlying payloads, or null when unavailable.
      @Override
      public Collection<byte[]> getPayload() throws IOException {
        if (!spans.isPayloadAvailable()) {
          return null;
        }
        return new ArrayList<byte[]>(spans.getPayload()); // TODO: any way to avoid the new construction?
      }

      // TODO: Remove warning after API has been finalized
      @Override
      public boolean isPayloadAvailable() {
        return spans.isPayloadAvailable();
      }

      @Override
      public String toString() {
        return "spans(" + SpanFirstQuery.this.toString() + ")";
      }
    };
  }

  /** Rewrites the wrapped query; clones this wrapper only when it changed. */
  @Override
  public Query rewrite(IndexReader reader) throws IOException {
    SpanQuery rewritten = (SpanQuery) match.rewrite(reader);
    if (rewritten == match) {
      return this;  // no clauses rewrote
    }
    SpanFirstQuery copy = (SpanFirstQuery) this.clone();
    copy.match = rewritten;
    return copy;    // some clauses rewrote
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) {
      return true;
    }
    if (!(o instanceof SpanFirstQuery)) {
      return false;
    }
    SpanFirstQuery that = (SpanFirstQuery) o;
    return this.end == that.end
        && this.match.equals(that.match)
        && this.getBoost() == that.getBoost();
  }

  @Override
  public int hashCode() {
    int h = match.hashCode();
    h ^= (h << 8) | (h >>> 25);  // reversible bit mix
    h ^= Float.floatToRawIntBits(getBoost()) ^ end;
    return h;
  }
}
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/search/spans/SpanFirstQuery.java
Java
art
4,909
package org.apache.lucene.search.spans;

/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.util.ToStringUtils;

import java.io.IOException;
import java.util.Set;

/** Matches spans containing a term. */
public class SpanTermQuery extends SpanQuery {
  // Protected so subclasses (e.g. payload-aware variants) can access it.
  protected Term term;

  /** Construct a SpanTermQuery matching the named term's spans. */
  public SpanTermQuery(Term term) {
    this.term = term;
  }

  /** Return the term whose spans are matched. */
  public Term getTerm() {
    return term;
  }

  @Override
  public String getField() {
    return term.field();
  }

  @Override
  public void extractTerms(Set<Term> terms) {
    terms.add(term);
  }

  @Override
  public String toString(String field) {
    // Print only the term text when rendering in the context of its own field.
    String body = term.field().equals(field) ? term.text() : term.toString();
    return body + ToStringUtils.boost(getBoost());
  }

  @Override
  public int hashCode() {
    return 31 * super.hashCode() + ((term == null) ? 0 : term.hashCode());
  }

  @Override
  public boolean equals(Object obj) {
    if (this == obj) {
      return true;
    }
    if (!super.equals(obj) || getClass() != obj.getClass()) {
      return false;
    }
    SpanTermQuery that = (SpanTermQuery) obj;
    if (term == null) {
      return that.term == null;
    }
    return term.equals(that.term);
  }

  @Override
  public Spans getSpans(final IndexReader reader) throws IOException {
    return new TermSpans(reader.termPositions(term), term);
  }
}
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/search/spans/SpanTermQuery.java
Java
art
2,551
package org.apache.lucene.search.spans;

/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import java.io.IOException;
import java.util.List;
import java.util.Collection;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.Set;

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.util.PriorityQueue;
import org.apache.lucene.util.ToStringUtils;
import org.apache.lucene.search.Query;

/** Matches the union of its clauses.*/
public class SpanOrQuery extends SpanQuery implements Cloneable {
  private List<SpanQuery> clauses;
  // The single field shared by all clauses (set from the first clause).
  private String field;

  /** Construct a SpanOrQuery merging the provided clauses.
   * @throws IllegalArgumentException if clauses are on different fields */
  public SpanOrQuery(SpanQuery... clauses) {

    // copy clauses array into an ArrayList
    this.clauses = new ArrayList<SpanQuery>(clauses.length);
    for (int i = 0; i < clauses.length; i++) {
      SpanQuery clause = clauses[i];
      if (i == 0) {                               // check field
        field = clause.getField();
      } else if (!clause.getField().equals(field)) {
        throw new IllegalArgumentException("Clauses must have same field.");
      }
      this.clauses.add(clause);
    }
  }

  /** Return the clauses whose spans are matched. */
  public SpanQuery[] getClauses() {
    return clauses.toArray(new SpanQuery[clauses.size()]);
  }

  @Override
  public String getField() { return field; }

  @Override
  public void extractTerms(Set<Term> terms) {
    for(final SpanQuery clause: clauses) {
      clause.extractTerms(terms);
    }
  }

  /** Deep-clones every clause and copies the boost. */
  @Override
  public Object clone() {
    int sz = clauses.size();
    SpanQuery[] newClauses = new SpanQuery[sz];

    for (int i = 0; i < sz; i++) {
      newClauses[i] = (SpanQuery) clauses.get(i).clone();
    }
    SpanOrQuery soq = new SpanOrQuery(newClauses);
    soq.setBoost(getBoost());
    return soq;
  }

  /** Rewrites each clause; clones this query only if at least one changed. */
  @Override
  public Query rewrite(IndexReader reader) throws IOException {
    SpanOrQuery clone = null;
    for (int i = 0 ; i < clauses.size(); i++) {
      SpanQuery c = clauses.get(i);
      SpanQuery query = (SpanQuery) c.rewrite(reader);
      if (query != c) {                     // clause rewrote: must clone
        if (clone == null)
          clone = (SpanOrQuery) this.clone();
        clone.clauses.set(i,query);
      }
    }
    if (clone != null) {
      return clone;                        // some clauses rewrote
    } else {
      return this;                         // no clauses rewrote
    }
  }

  @Override
  public String toString(String field) {
    StringBuilder buffer = new StringBuilder();
    buffer.append("spanOr([");
    Iterator<SpanQuery> i = clauses.iterator();
    while (i.hasNext()) {
      SpanQuery clause = i.next();
      buffer.append(clause.toString(field));
      if (i.hasNext()) {
        buffer.append(", ");
      }
    }
    buffer.append("])");
    buffer.append(ToStringUtils.boost(getBoost()));
    return buffer.toString();
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) return true;
    if (o == null || getClass() != o.getClass()) return false;

    final SpanOrQuery that = (SpanOrQuery) o;

    if (!clauses.equals(that.clauses)) return false;
    // Field only matters when there is at least one clause to define it.
    if (!clauses.isEmpty() && !field.equals(that.field)) return false;

    return getBoost() == that.getBoost();
  }

  @Override
  public int hashCode() {
    int h = clauses.hashCode();
    h ^= (h << 10) | (h >>> 23);  // mix bits before folding in the boost
    h ^= Float.floatToRawIntBits(getBoost());
    return h;
  }

  /** Min-heap ordering sub-spans by (doc, start, end), smallest first. */
  private class SpanQueue extends PriorityQueue<Spans> {
    public SpanQueue(int size) {
      initialize(size);
    }

    @Override
    protected final boolean lessThan(Spans spans1, Spans spans2) {
      if (spans1.doc() == spans2.doc()) {
        if (spans1.start() == spans2.start()) {
          return spans1.end() < spans2.end();
        } else {
          return spans1.start() < spans2.start();
        }
      } else {
        return spans1.doc() < spans2.doc();
      }
    }
  }

  @Override
  public Spans getSpans(final IndexReader reader) throws IOException {
    if (clauses.size() == 1)                      // optimize 1-clause case
      return (clauses.get(0)).getSpans(reader);

    // Merge the clause spans with a priority queue; the queue top is always
    // the current (smallest) span, and exhausted clauses are popped off.
    return new Spans() {
      // Lazily initialized on the first next()/skipTo() call.
      private SpanQueue queue = null;

      /** Primes the queue; target == -1 means "start from the beginning". */
      private boolean initSpanQueue(int target) throws IOException {
        queue = new SpanQueue(clauses.size());
        Iterator<SpanQuery> i = clauses.iterator();
        while (i.hasNext()) {
          Spans spans = i.next().getSpans(reader);
          if (   ((target == -1) && spans.next())
              || ((target != -1) && spans.skipTo(target))) {
            queue.add(spans);
          }
        }
        return queue.size() != 0;
      }

      @Override
      public boolean next() throws IOException {
        if (queue == null) {
          return initSpanQueue(-1);
        }

        if (queue.size() == 0) { // all done
          return false;
        }

        if (top().next()) { // move to next
          queue.updateTop();
          return true;
        }

        queue.pop();  // exhausted a clause
        return queue.size() != 0;
      }

      private Spans top() { return queue.top(); }

      @Override
      public boolean skipTo(int target) throws IOException {
        if (queue == null) {
          return initSpanQueue(target);
        }

        // Advance every clause that is still before the target doc.
        boolean skipCalled = false;
        while (queue.size() != 0 && top().doc() < target) {
          if (top().skipTo(target)) {
            queue.updateTop();
          } else {
            queue.pop();
          }
          skipCalled = true;
        }

        if (skipCalled) {
          return queue.size() != 0;
        }
        // Already at/past target: behave like next() per the skipTo contract.
        return next();
      }

      @Override
      public int doc() { return top().doc(); }

      @Override
      public int start() { return top().start(); }

      @Override
      public int end() { return top().end(); }

      // Payloads come from whichever clause currently tops the queue.
      @Override
      public Collection<byte[]> getPayload() throws IOException {
        ArrayList<byte[]> result = null;
        Spans theTop = top();
        if (theTop != null && theTop.isPayloadAvailable()) {
          result = new ArrayList<byte[]>(theTop.getPayload());
        }
        return result;
      }

      @Override
      public boolean isPayloadAvailable() {
        Spans top = top();
        return top != null && top.isPayloadAvailable();
      }

      @Override
      public String toString() {
        return "spans("+SpanOrQuery.this+")@"+
          ((queue == null)?"START"
           :(queue.size()>0?(doc()+":"+start()+"-"+end()):"END"));
      }
    };
  }
}
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/search/spans/SpanOrQuery.java
Java
art
7,473
package org.apache.lucene.search.spans;

/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.*;
import org.apache.lucene.search.Explanation.IDFExplanation;

import java.io.IOException;
import java.util.HashSet;
import java.util.Set;

/**
 * Expert-only.  Public for use by other weight implementations
 */
public class SpanWeight extends Weight {
  protected Similarity similarity;
  protected float value;        // final per-document multiplier (queryWeight * idf)
  protected float idf;          // combined idf over all terms of the query
  protected float queryNorm;    // normalization factor supplied by the searcher
  protected float queryWeight;  // idf * boost, then normalized

  protected Set<Term> terms;    // all terms extracted from the span query
  protected SpanQuery query;
  private IDFExplanation idfExp;

  public SpanWeight(SpanQuery query, Searcher searcher)
    throws IOException {
    this.similarity = query.getSimilarity(searcher);
    this.query = query;

    terms=new HashSet<Term>();
    query.extractTerms(terms);

    idfExp = similarity.idfExplain(terms, searcher);
    idf = idfExp.getIdf();
  }

  @Override
  public Query getQuery() { return query; }

  @Override
  public float getValue() { return value; }

  /** First phase of weight normalization: returns (idf * boost)^2. */
  @Override
  public float sumOfSquaredWeights() throws IOException {
    queryWeight = idf * query.getBoost();         // compute query weight
    return queryWeight * queryWeight;             // square it
  }

  /** Second phase: folds the searcher-wide norm into the cached weight. */
  @Override
  public void normalize(float queryNorm) {
    this.queryNorm = queryNorm;
    queryWeight *= queryNorm;                     // normalize query weight
    value = queryWeight * idf;                    // idf for document
  }

  @Override
  public Scorer scorer(IndexReader reader, boolean scoreDocsInOrder,
      boolean topScorer) throws IOException {
    return new SpanScorer(query.getSpans(reader), this, similarity, reader
        .norms(query.getField()));
  }

  /**
   * Builds the standard explanation tree: queryWeight (boost * idf *
   * queryNorm) times fieldWeight (tf * idf * fieldNorm).
   */
  @Override
  public Explanation explain(IndexReader reader, int doc)
    throws IOException {

    ComplexExplanation result = new ComplexExplanation();
    result.setDescription("weight("+getQuery()+" in "+doc+"), product of:");
    String field = ((SpanQuery)getQuery()).getField();

    Explanation idfExpl =
      new Explanation(idf, "idf(" + field + ": " + idfExp.explain() + ")");

    // explain query weight
    Explanation queryExpl = new Explanation();
    queryExpl.setDescription("queryWeight(" + getQuery() + "), product of:");

    Explanation boostExpl = new Explanation(getQuery().getBoost(), "boost");
    // Boost of 1.0 is the default and is omitted from the breakdown.
    if (getQuery().getBoost() != 1.0f)
      queryExpl.addDetail(boostExpl);
    queryExpl.addDetail(idfExpl);

    Explanation queryNormExpl = new Explanation(queryNorm,"queryNorm");
    queryExpl.addDetail(queryNormExpl);

    queryExpl.setValue(boostExpl.getValue() *
                       idfExpl.getValue() *
                       queryNormExpl.getValue());

    result.addDetail(queryExpl);

    // explain field weight
    ComplexExplanation fieldExpl = new ComplexExplanation();
    fieldExpl.setDescription("fieldWeight("+field+":"+query.toString(field)+
                             " in "+doc+"), product of:");

    // A fresh scorer is created just to explain the tf component for this doc.
    Explanation tfExpl = ((SpanScorer)scorer(reader, true, false)).explain(doc);
    fieldExpl.addDetail(tfExpl);
    fieldExpl.addDetail(idfExpl);

    Explanation fieldNormExpl = new Explanation();
    byte[] fieldNorms = reader.norms(field);
    // Missing norms (e.g. omitNorms) are treated as a neutral 1.0 factor.
    float fieldNorm =
      fieldNorms!=null ? Similarity.decodeNorm(fieldNorms[doc]) : 1.0f;
    fieldNormExpl.setValue(fieldNorm);
    fieldNormExpl.setDescription("fieldNorm(field="+field+", doc="+doc+")");
    fieldExpl.addDetail(fieldNormExpl);

    fieldExpl.setMatch(Boolean.valueOf(tfExpl.isMatch()));
    fieldExpl.setValue(tfExpl.getValue() *
                       idfExpl.getValue() *
                       fieldNormExpl.getValue());

    result.addDetail(fieldExpl);
    result.setMatch(fieldExpl.getMatch());

    // combine them
    result.setValue(queryExpl.getValue() * fieldExpl.getValue());

    // When the query part is exactly 1.0 the field explanation alone suffices.
    if (queryExpl.getValue() == 1.0f)
      return fieldExpl;

    return result;
  }
}
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/search/spans/SpanWeight.java
Java
art
4,755
package org.apache.lucene.search.spans;

/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import java.io.IOException;
import java.util.List;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.Set;

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.Query;
import org.apache.lucene.util.ToStringUtils;

/** Matches spans which are near one another.  One can specify <i>slop</i>, the
 * maximum number of intervening unmatched positions, as well as whether
 * matches are required to be in-order. */
public class SpanNearQuery extends SpanQuery implements Cloneable {
  protected List<SpanQuery> clauses;
  protected int slop;
  protected boolean inOrder;

  // The single field shared by all clauses (set from the first clause).
  protected String field;
  private boolean collectPayloads;

  /** Construct a SpanNearQuery.  Matches spans matching a span from each
   * clause, with up to <code>slop</code> total unmatched positions between
   * them.  * When <code>inOrder</code> is true, the spans from each clause
   * must be * ordered as in <code>clauses</code>. */
  public SpanNearQuery(SpanQuery[] clauses, int slop, boolean inOrder) {
    this(clauses, slop, inOrder, true);
  }

  /**
   * As above, additionally controlling whether payloads are gathered while
   * matching.
   * @throws IllegalArgumentException if clauses are on different fields
   */
  public SpanNearQuery(SpanQuery[] clauses, int slop, boolean inOrder, boolean collectPayloads) {

    // copy clauses array into an ArrayList
    this.clauses = new ArrayList<SpanQuery>(clauses.length);
    for (int i = 0; i < clauses.length; i++) {
      SpanQuery clause = clauses[i];
      if (i == 0) {                               // check field
        field = clause.getField();
      } else if (!clause.getField().equals(field)) {
        throw new IllegalArgumentException("Clauses must have same field.");
      }
      this.clauses.add(clause);
    }
    this.collectPayloads = collectPayloads;
    this.slop = slop;
    this.inOrder = inOrder;
  }

  /** Return the clauses whose spans are matched. */
  public SpanQuery[] getClauses() {
    return clauses.toArray(new SpanQuery[clauses.size()]);
  }

  /** Return the maximum number of intervening unmatched positions permitted.*/
  public int getSlop() { return slop; }

  /** Return true if matches are required to be in-order.*/
  public boolean isInOrder() { return inOrder; }

  @Override
  public String getField() { return field; }

  @Override
  public void extractTerms(Set<Term> terms) {
    for (final SpanQuery clause : clauses) {
      clause.extractTerms(terms);
    }
  }

  @Override
  public String toString(String field) {
    StringBuilder buffer = new StringBuilder();
    buffer.append("spanNear([");
    Iterator<SpanQuery> i = clauses.iterator();
    while (i.hasNext()) {
      SpanQuery clause = i.next();
      buffer.append(clause.toString(field));
      if (i.hasNext()) {
        buffer.append(", ");
      }
    }
    buffer.append("], ");
    buffer.append(slop);
    buffer.append(", ");
    buffer.append(inOrder);
    buffer.append(")");
    buffer.append(ToStringUtils.boost(getBoost()));
    return buffer.toString();
  }

  @Override
  public Spans getSpans(final IndexReader reader) throws IOException {
    if (clauses.size() == 0)                      // optimize 0-clause case
      return new SpanOrQuery(getClauses()).getSpans(reader);

    if (clauses.size() == 1)                      // optimize 1-clause case
      return clauses.get(0).getSpans(reader);

    return inOrder
            ? (Spans) new NearSpansOrdered(this, reader, collectPayloads)
            : (Spans) new NearSpansUnordered(this, reader);
  }

  /** Rewrites each clause; clones this query only if at least one changed. */
  @Override
  public Query rewrite(IndexReader reader) throws IOException {
    SpanNearQuery clone = null;
    for (int i = 0 ; i < clauses.size(); i++) {
      SpanQuery c = clauses.get(i);
      SpanQuery query = (SpanQuery) c.rewrite(reader);
      if (query != c) {                     // clause rewrote: must clone
        if (clone == null)
          clone = (SpanNearQuery) this.clone();
        clone.clauses.set(i,query);
      }
    }
    if (clone != null) {
      return clone;                        // some clauses rewrote
    } else {
      return this;                         // no clauses rewrote
    }
  }

  /** Deep-clones every clause, preserving slop, ordering, payload collection
   * and boost. */
  @Override
  public Object clone() {
    int sz = clauses.size();
    SpanQuery[] newClauses = new SpanQuery[sz];

    for (int i = 0; i < sz; i++) {
      newClauses[i] = (SpanQuery) clauses.get(i).clone();
    }
    // FIX: pass collectPayloads through; the 3-arg constructor would silently
    // reset it to true, so cloning (e.g. during rewrite) of a query built with
    // collectPayloads=false used to lose that setting.
    SpanNearQuery spanNearQuery = new SpanNearQuery(newClauses, slop, inOrder,
                                                    collectPayloads);
    spanNearQuery.setBoost(getBoost());
    return spanNearQuery;
  }

  /** Returns true iff <code>o</code> is equal to this. */
  @Override
  public boolean equals(Object o) {
    if (this == o) return true;
    if (!(o instanceof SpanNearQuery)) return false;

    final SpanNearQuery spanNearQuery = (SpanNearQuery) o;

    // NOTE(review): collectPayloads is deliberately not part of equality;
    // changing that would alter caching behavior for existing callers.
    if (inOrder != spanNearQuery.inOrder) return false;
    if (slop != spanNearQuery.slop) return false;
    if (!clauses.equals(spanNearQuery.clauses)) return false;

    return getBoost() == spanNearQuery.getBoost();
  }

  @Override
  public int hashCode() {
    int result;
    result = clauses.hashCode();
    // Mix bits before folding in things like boost, since it could cancel the
    // last element of clauses.  This particular mix also serves to
    // differentiate SpanNearQuery hashcodes from others.
    result ^= (result << 14) | (result >>> 19);  // reversible
    result += Float.floatToRawIntBits(getBoost());
    result += slop;
    result ^= (inOrder ? 0x99AFD3BD : 0);
    return result;
  }
}
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/search/spans/SpanNearQuery.java
Java
art
6,265
package org.apache.lucene.search.spans;

/**
 * Copyright 2005 The Apache Software Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermPositions;

import java.io.IOException;
import java.util.Collections;
import java.util.Collection;

/**
 * Expert:
 * Public for extension only
 */
public class TermSpans extends Spans {
  protected TermPositions positions;
  protected Term term;
  protected int doc;       // current doc, -1 before iteration, MAX_VALUE when done
  protected int freq;      // number of positions in the current doc
  protected int count;     // positions consumed so far in the current doc
  protected int position;  // current position within the doc

  public TermSpans(TermPositions positions, Term term) throws IOException {
    this.positions = positions;
    this.term = term;
    doc = -1;
  }

  @Override
  public boolean next() throws IOException {
    // All positions of the current doc consumed: advance to the next doc.
    if (count == freq) {
      boolean moreDocs = positions.next();
      if (!moreDocs) {
        doc = Integer.MAX_VALUE;  // sentinel: iteration exhausted
        return false;
      }
      doc = positions.doc();
      freq = positions.freq();
      count = 0;
    }
    position = positions.nextPosition();
    count++;
    return true;
  }

  @Override
  public boolean skipTo(int target) throws IOException {
    boolean found = positions.skipTo(target);
    if (!found) {
      doc = Integer.MAX_VALUE;  // sentinel: iteration exhausted
      return false;
    }
    doc = positions.doc();
    freq = positions.freq();
    count = 0;
    // Position on the first occurrence within the landed-on doc.
    position = positions.nextPosition();
    count++;
    return true;
  }

  @Override
  public int doc() {
    return doc;
  }

  @Override
  public int start() {
    return position;
  }

  @Override
  public int end() {
    return position + 1;  // a single term occupies exactly one position
  }

  // TODO: Remove warning after API has been finalized
  @Override
  public Collection<byte[]> getPayload() throws IOException {
    byte[] buffer = new byte[positions.getPayloadLength()];
    byte[] payload = positions.getPayload(buffer, 0);
    return Collections.singletonList(payload);
  }

  // TODO: Remove warning after API has been finalized
  @Override
  public boolean isPayloadAvailable() {
    return positions.isPayloadAvailable();
  }

  @Override
  public String toString() {
    return "spans(" + term.toString() + ")@" +
            (doc == -1 ? "START" : (doc == Integer.MAX_VALUE) ? "END" : doc + "-" + position);
  }

  public TermPositions getPositions() {
    return positions;
  }
}
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/search/spans/TermSpans.java
Java
art
2,742
package org.apache.lucene.search.spans;

/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import org.apache.lucene.index.IndexReader;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Comparator;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Collection;
import java.util.Set;

/** A Spans that is formed from the ordered subspans of a SpanNearQuery
 * where the subspans do not overlap and have a maximum slop between them.
 * <p>
 * The formed spans only contains minimum slop matches.<br>
 * The matching slop is computed from the distance(s) between
 * the non overlapping matching Spans.<br>
 * Successive matches are always formed from the successive Spans
 * of the SpanNearQuery.
 * <p>
 * The formed spans may contain overlaps when the slop is at least 1.
 * For example, when querying using
 * <pre>t1 t2 t3</pre>
 * with slop at least 1, the fragment:
 * <pre>t1 t2 t1 t3 t2 t3</pre>
 * matches twice:
 * <pre>t1 t2 .. t3 </pre>
 * <pre>      t1 .. t2 t3</pre>
 *
 * Expert:
 * Only public for subclassing.  Most implementations should not need this class
 */
public class NearSpansOrdered extends Spans {
  private final int allowedSlop;     // maximum total slop permitted between consecutive subspans
  private boolean firstTime = true;  // true until next()/skipTo() is called the first time
  private boolean more = false;      // false once any subspan is exhausted

  /** The spans in the same order as the SpanNearQuery */
  private final Spans[] subSpans;

  /** Indicates that all subSpans have same doc() */
  private boolean inSameDoc = false;

  // Current match state; -1 means "no match yet".
  private int matchDoc = -1;
  private int matchStart = -1;
  private int matchEnd = -1;
  private List<byte[]> matchPayload; // payloads collected for the current match

  // Same Spans objects as subSpans, but sorted by doc() in toSameDoc().
  private final Spans[] subSpansByDoc;
  private final Comparator<Spans> spanDocComparator = new Comparator<Spans>() {
    public int compare(Spans o1, Spans o2) {
      // Subtraction is safe here: doc ids are non-negative ints, so the
      // difference cannot overflow.
      return o1.doc() - o2.doc();
    }
  };

  private SpanNearQuery query;                // kept for toString() only
  private boolean collectPayloads = true;     // whether getPayload() data is gathered during matching

  public NearSpansOrdered(SpanNearQuery spanNearQuery, IndexReader reader) throws IOException {
    this(spanNearQuery, reader, true);
  }

  /**
   * @param spanNearQuery the ordered near query; must have at least 2 clauses
   * @param reader the reader to obtain the clause Spans from
   * @param collectPayloads if false, payloads are never gathered (cheaper matching)
   * @throws IllegalArgumentException if the query has fewer than 2 clauses
   */
  public NearSpansOrdered(SpanNearQuery spanNearQuery, IndexReader reader, boolean collectPayloads)
  throws IOException {
    if (spanNearQuery.getClauses().length < 2) {
      throw new IllegalArgumentException("Less than 2 clauses: " + spanNearQuery);
    }
    this.collectPayloads = collectPayloads;
    allowedSlop = spanNearQuery.getSlop();
    SpanQuery[] clauses = spanNearQuery.getClauses();
    subSpans = new Spans[clauses.length];
    matchPayload = new LinkedList<byte[]>();
    subSpansByDoc = new Spans[clauses.length];
    for (int i = 0; i < clauses.length; i++) {
      subSpans[i] = clauses[i].getSpans(reader);
      subSpansByDoc[i] = subSpans[i]; // used in toSameDoc()
    }
    query = spanNearQuery; // kept for toString() only.
  }

  // inherit javadocs
  @Override
  public int doc() { return matchDoc; }

  // inherit javadocs
  @Override
  public int start() { return matchStart; }

  // inherit javadocs
  @Override
  public int end() { return matchEnd; }

  /** Returns the clause Spans, in SpanNearQuery clause order. */
  public Spans[] getSubSpans() {
    return subSpans;
  }

  // TODO: Remove warning after API has been finalized
  // TODO: Would be nice to be able to lazy load payloads
  @Override
  public Collection<byte[]> getPayload() throws IOException {
    return matchPayload;
  }

  // TODO: Remove warning after API has been finalized
  @Override
  public boolean isPayloadAvailable() {
    return matchPayload.isEmpty() == false;
  }

  // inherit javadocs
  @Override
  public boolean next() throws IOException {
    if (firstTime) {
      firstTime = false;
      // Prime every subspan on its first match; if any clause has no
      // matches at all, the whole ordered-near query has none.
      for (int i = 0; i < subSpans.length; i++) {
        if (! subSpans[i].next()) {
          more = false;
          return false;
        }
      }
      more = true;
    }
    if(collectPayloads) {
      matchPayload.clear(); // payloads belong to the previous match
    }
    return advanceAfterOrdered();
  }

  // inherit javadocs
  @Override
  public boolean skipTo(int target) throws IOException {
    if (firstTime) {
      firstTime = false;
      // First positioning: skip every subspan directly to target.
      for (int i = 0; i < subSpans.length; i++) {
        if (! subSpans[i].skipTo(target)) {
          more = false;
          return false;
        }
      }
      more = true;
    } else if (more && (subSpans[0].doc() < target)) {
      // Only the first subspan is skipped here; toSameDoc() (via
      // advanceAfterOrdered) brings the others along.
      if (subSpans[0].skipTo(target)) {
        inSameDoc = false;
      } else {
        more = false;
        return false;
      }
    }
    if(collectPayloads) {
      matchPayload.clear();
    }
    return advanceAfterOrdered();
  }

  /** Advances the subSpans to just after an ordered match with a minimum slop
   * that is smaller than the slop allowed by the SpanNearQuery.
   * @return true iff there is such a match.
   */
  private boolean advanceAfterOrdered() throws IOException {
    while (more && (inSameDoc || toSameDoc())) {
      if (stretchToOrder() && shrinkToAfterShortestMatch()) {
        return true;
      }
    }
    return false; // no more matches
  }

  /** Advance the subSpans to the same document */
  private boolean toSameDoc() throws IOException {
    // Sort by doc so the laggards can be skipped to the current maximum doc;
    // the loop ends when all subspans agree on maxDoc.
    Arrays.sort(subSpansByDoc, spanDocComparator);
    int firstIndex = 0;
    int maxDoc = subSpansByDoc[subSpansByDoc.length - 1].doc();
    while (subSpansByDoc[firstIndex].doc() != maxDoc) {
      if (! subSpansByDoc[firstIndex].skipTo(maxDoc)) {
        more = false;
        inSameDoc = false;
        return false;
      }
      // The skip may have overshot maxDoc; the overshoot becomes the new target.
      maxDoc = subSpansByDoc[firstIndex].doc();
      if (++firstIndex == subSpansByDoc.length) {
        firstIndex = 0;
      }
    }
    for (int i = 0; i < subSpansByDoc.length; i++) {
      assert (subSpansByDoc[i].doc() == maxDoc)
             : " NearSpansOrdered.toSameDoc() spans " + subSpansByDoc[0]
                                 + "\n at doc " + subSpansByDoc[i].doc()
                                 + ", but should be at " + maxDoc;
    }
    inSameDoc = true;
    return true;
  }

  /** Check whether two Spans in the same document are ordered.
   * @param spans1
   * @param spans2
   * @return true iff spans1 starts before spans2
   *  or the spans start at the same position,
   *  and spans1 ends before spans2.
   */
  static final boolean docSpansOrdered(Spans spans1, Spans spans2) {
    assert spans1.doc() == spans2.doc() : "doc1 " + spans1.doc() + " != doc2 " + spans2.doc();
    int start1 = spans1.start();
    int start2 = spans2.start();
    /* Do not call docSpansOrdered(int,int,int,int) to avoid invoking .end() : */
    return (start1 == start2) ? (spans1.end() < spans2.end()) : (start1 < start2);
  }

  /** Like {@link #docSpansOrdered(Spans,Spans)}, but use the spans
   * starts and ends as parameters.
   */
  private static final boolean docSpansOrdered(int start1, int end1, int start2, int end2) {
    return (start1 == start2) ? (end1 < end2) : (start1 < start2);
  }

  /** Order the subSpans within the same document by advancing all later spans
   * after the previous one.
   */
  private boolean stretchToOrder() throws IOException {
    matchDoc = subSpans[0].doc();
    for (int i = 1; inSameDoc && (i < subSpans.length); i++) {
      // Advance subSpans[i] until it starts after subSpans[i-1]; if it falls
      // off the document (or the index) the match attempt in this doc fails.
      while (! docSpansOrdered(subSpans[i-1], subSpans[i])) {
        if (! subSpans[i].next()) {
          inSameDoc = false;
          more = false;
          break;
        } else if (matchDoc != subSpans[i].doc()) {
          inSameDoc = false;
          break;
        }
      }
    }
    return inSameDoc;
  }

  /** The subSpans are ordered in the same doc, so there is a possible match.
   * Compute the slop while making the match as short as possible by advancing
   * all subSpans except the last one in reverse order.
   */
  private boolean shrinkToAfterShortestMatch() throws IOException {
    matchStart = subSpans[subSpans.length - 1].start();
    matchEnd = subSpans[subSpans.length - 1].end();
    Set<byte[]> possibleMatchPayloads = new HashSet<byte[]>();
    if (subSpans[subSpans.length - 1].isPayloadAvailable()) {
      possibleMatchPayloads.addAll(subSpans[subSpans.length - 1].getPayload());
    }

    Collection<byte[]> possiblePayload = null;

    int matchSlop = 0;
    int lastStart = matchStart;
    int lastEnd = matchEnd;
    // Walk the subspans from last-but-one down to first, pushing each as far
    // right as possible while it still precedes its successor; this yields
    // the minimum-slop (shortest) match ending at the last subspan.
    for (int i = subSpans.length - 2; i >= 0; i--) {
      Spans prevSpans = subSpans[i];
      if (collectPayloads && prevSpans.isPayloadAvailable()) {
        Collection<byte[]> payload = prevSpans.getPayload();
        possiblePayload = new ArrayList<byte[]>(payload.size());
        possiblePayload.addAll(payload);
      }

      int prevStart = prevSpans.start();
      int prevEnd = prevSpans.end();
      while (true) { // Advance prevSpans until after (lastStart, lastEnd)
        if (! prevSpans.next()) {
          inSameDoc = false;
          more = false;
          break; // Check remaining subSpans for final match.
        } else if (matchDoc != prevSpans.doc()) {
          inSameDoc = false; // The last subSpans is not advanced here.
          break; // Check remaining subSpans for last match in this document.
        } else {
          int ppStart = prevSpans.start();
          int ppEnd = prevSpans.end(); // Cannot avoid invoking .end()
          if (! docSpansOrdered(ppStart, ppEnd, lastStart, lastEnd)) {
            break; // Check remaining subSpans.
          } else { // prevSpans still before (lastStart, lastEnd)
            prevStart = ppStart;
            prevEnd = ppEnd;
            if (collectPayloads && prevSpans.isPayloadAvailable()) {
              Collection<byte[]> payload = prevSpans.getPayload();
              possiblePayload = new ArrayList<byte[]>(payload.size());
              possiblePayload.addAll(payload);
            }
          }
        }
      }

      if (collectPayloads && possiblePayload != null) {
        possibleMatchPayloads.addAll(possiblePayload);
      }

      assert prevStart <= matchStart;
      if (matchStart > prevEnd) { // Only non overlapping spans add to slop.
        matchSlop += (matchStart - prevEnd);
      }

      /* Do not break on (matchSlop > allowedSlop) here to make sure
       * that subSpans[0] is advanced after the match, if any.
       */
      matchStart = prevStart;
      lastStart = prevStart;
      lastEnd = prevEnd;
    }

    boolean match = matchSlop <= allowedSlop;

    if(collectPayloads && match && possibleMatchPayloads.size() > 0) {
      matchPayload.addAll(possibleMatchPayloads);
    }

    return match; // ordered and allowed slop
  }

  @Override
  public String toString() {
    return getClass().getName() + "("+query.toString()+")@"+
      (firstTime?"START":(more?(doc()+":"+start()+"-"+end()):"END"));
  }
}
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/search/spans/NearSpansOrdered.java
Java
art
11,399
package org.apache.lucene.search;

/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import java.io.IOException;
import java.util.List;

import org.apache.lucene.index.IndexReader;

/* Description from Doug Cutting (excerpted from
 * LUCENE-1483):
 *
 * BooleanScorer uses a ~16k array to score windows of
 * docs. So it scores docs 0-16k first, then docs 16-32k,
 * etc. For each window it iterates through all query terms
 * and accumulates a score in table[doc%16k]. It also stores
 * in the table a bitmask representing which terms
 * contributed to the score. Non-zero scores are chained in
 * a linked list. At the end of scoring each window it then
 * iterates through the linked list and, if the bitmask
 * matches the boolean constraints, collects a hit. For
 * boolean queries with lots of frequent terms this can be
 * much faster, since it does not need to update a priority
 * queue for each posting, instead performing constant-time
 * operations per posting. The only downside is that it
 * results in hits being delivered out-of-order within the
 * window, which means it cannot be nested within other
 * scorers. But it works well as a top-level scorer.
 *
 * NOTE(review): the narrative above says "~16k" but BucketTable.SIZE below
 * is 1 << 11 = 2048; presumably the window size was shrunk after the
 * description was written — confirm against project history.
 *
 * The new BooleanScorer2 implementation instead works by
 * merging priority queues of postings, albeit with some
 * clever tricks. For example, a pure conjunction (all terms
 * required) does not require a priority queue. Instead it
 * sorts the posting streams at the start, then repeatedly
 * skips the first to to the last. If the first ever equals
 * the last, then there's a hit. When some terms are
 * required and some terms are optional, the conjunction can
 * be evaluated first, then the optional terms can all skip
 * to the match and be added to the score. Thus the
 * conjunction can reduce the number of priority queue
 * updates for the optional terms. */
final class BooleanScorer extends Scorer {

  /** Collector fed by each sub-scorer; accumulates score, term bitmask and
   *  coordination count into the shared {@link BucketTable}. */
  private static final class BooleanScorerCollector extends Collector {
    private BucketTable bucketTable; // shared hash table for the current window
    private int mask;                // bit identifying this sub-scorer (0 for optional clauses)
    private Scorer scorer;           // the sub-scorer currently being collected

    public BooleanScorerCollector(int mask, BucketTable bucketTable) {
      this.mask = mask;
      this.bucketTable = bucketTable;
    }

    @Override
    public final void collect(final int doc) throws IOException {
      final BucketTable table = bucketTable;
      final int i = doc & BucketTable.MASK; // slot within the window
      Bucket bucket = table.buckets[i];
      if (bucket == null)
        table.buckets[i] = bucket = new Bucket();

      if (bucket.doc != doc) {                    // invalid bucket (stale doc from a previous window)
        bucket.doc = doc;                         // set doc
        bucket.score = scorer.score();            // initialize score
        bucket.bits = mask;                       // initialize mask
        bucket.coord = 1;                         // initialize coord
        bucket.next = table.first;                // push onto valid list
        table.first = bucket;
      } else {                                    // valid bucket
        bucket.score += scorer.score();           // increment score
        bucket.bits |= mask;                      // add bits in mask
        bucket.coord++;                           // increment coord
      }
    }

    @Override
    public void setNextReader(IndexReader reader, int docBase) {
      // not needed by this implementation
    }

    @Override
    public void setScorer(Scorer scorer) throws IOException {
      this.scorer = scorer;
    }

    @Override
    public boolean acceptsDocsOutOfOrder() {
      return true;
    }

  }

  // An internal class which is used in score(Collector, int) for setting the
  // current score. This is required since Collector exposes a setScorer method
  // and implementations that need the score will call scorer.score().
  // Therefore the only methods that are implemented are score() and doc().
  private static final class BucketScorer extends Scorer {

    float score;            // set externally before each collect() call
    int doc = NO_MORE_DOCS; // set externally before each collect() call

    public BucketScorer() { super(null); }

    @Override
    public int advance(int target) throws IOException { return NO_MORE_DOCS; }

    @Override
    public int docID() { return doc; }

    @Override
    public int nextDoc() throws IOException { return NO_MORE_DOCS; }

    @Override
    public float score() throws IOException { return score; }

  }

  /** One slot of the window hash table; also a node of the valid-bucket list. */
  static final class Bucket {
    int doc = -1;             // tells if bucket is valid
    float score;              // incremental score
    int bits;                 // used for bool constraints
    int coord;                // count of terms in score
    Bucket next;              // next valid bucket
  }

  /** A simple hash table of document scores within a range. */
  static final class BucketTable {
    // Window size; doc & MASK maps a doc id into its slot.
    public static final int SIZE = 1 << 11;
    public static final int MASK = SIZE - 1;

    final Bucket[] buckets = new Bucket[SIZE];
    Bucket first = null;                          // head of valid list

    public BucketTable() {}

    public Collector newCollector(int mask) {
      return new BooleanScorerCollector(mask, this);
    }

    public final int size() { return SIZE; }
  }

  /** Linked-list node pairing a sub-scorer with its collector and flags. */
  static final class SubScorer {
    public Scorer scorer;
    public boolean required = false;
    public boolean prohibited = false;
    public Collector collector;
    public SubScorer next;

    public SubScorer(Scorer scorer, boolean required, boolean prohibited,
        Collector collector, SubScorer next)
      throws IOException {
      this.scorer = scorer;
      this.required = required;
      this.prohibited = prohibited;
      this.collector = collector;
      this.next = next;
    }
  }

  private SubScorer scorers = null;                     // head of the sub-scorer list
  private BucketTable bucketTable = new BucketTable();  // scoring window
  private int maxCoord = 1;                             // 1 + number of optional scorers
  private final float[] coordFactors;                   // coord(i, maxCoord-1) cache
  private int requiredMask = 0;                         // never set in this implementation
  private int prohibitedMask = 0;                       // OR of the masks of prohibited scorers
  private int nextMask = 1;                             // next free mask bit
  private final int minNrShouldMatch;                   // minimum optional clauses per hit
  private int end;                                      // exclusive upper doc bound of the current window
  private Bucket current;                               // bucket being iterated/scored
  private int doc = -1;                                 // current doc id for docID()

  /**
   * @param similarity used for the coord() factors
   * @param minNrShouldMatch minimum number of optional clauses that must match
   * @param optionalScorers SHOULD clauses; may be null or empty
   * @param prohibitedScorers MUST_NOT clauses; may be null or empty
   */
  BooleanScorer(Similarity similarity, int minNrShouldMatch,
      List<Scorer> optionalScorers, List<Scorer> prohibitedScorers) throws IOException {
    super(similarity);
    this.minNrShouldMatch = minNrShouldMatch;

    if (optionalScorers != null && optionalScorers.size() > 0) {
      for (Scorer scorer : optionalScorers) {
        maxCoord++;
        // Only scorers with at least one match are kept in the list.
        if (scorer.nextDoc() != NO_MORE_DOCS) {
          scorers = new SubScorer(scorer, false, false,
              bucketTable.newCollector(0), scorers);
        }
      }
    }

    if (prohibitedScorers != null && prohibitedScorers.size() > 0) {
      for (Scorer scorer : prohibitedScorers) {
        // Each prohibited scorer gets its own mask bit so its contribution
        // can be recognized (and the doc rejected) at collection time.
        int mask = nextMask;
        nextMask = nextMask << 1;
        prohibitedMask |= mask;                     // update prohibited mask
        if (scorer.nextDoc() != NO_MORE_DOCS) {
          scorers = new SubScorer(scorer, false, true,
              bucketTable.newCollector(mask), scorers);
        }
      }
    }

    coordFactors = new float[maxCoord];
    Similarity sim = getSimilarity();
    for (int i = 0; i < maxCoord; i++) {
      coordFactors[i] = sim.coord(i, maxCoord - 1);
    }
  }

  // firstDocID is ignored since nextDoc() initializes 'current'
  @Override
  protected boolean score(Collector collector, int max, int firstDocID) throws IOException {
    boolean more;
    Bucket tmp;
    BucketScorer bs = new BucketScorer();
    // The internal loop will set the score and doc before calling collect.
    collector.setScorer(bs);
    do {
      bucketTable.first = null;

      while (current != null) {         // more queued

        // check prohibited & required
        if ((current.bits & prohibitedMask) == 0 &&
            (current.bits & requiredMask) == requiredMask) {

          if (current.doc >= max){
            // Beyond this call's limit: push back onto the valid list so a
            // later score() call can deliver it.
            tmp = current;
            current = current.next;
            tmp.next = bucketTable.first;
            bucketTable.first = tmp;
            continue;
          }

          if (current.coord >= minNrShouldMatch) {
            bs.score = current.score * coordFactors[current.coord];
            bs.doc = current.doc;
            collector.collect(current.doc);
          }
        }

        current = current.next;         // pop the queue
      }

      if (bucketTable.first != null){
        // Undelivered buckets remain (docs >= max): resume from them next call.
        current = bucketTable.first;
        bucketTable.first = current.next;
        return true;
      }

      // refill the queue
      more = false;
      end += BucketTable.SIZE;
      for (SubScorer sub = scorers; sub != null; sub = sub.next) {
        int subScorerDocID = sub.scorer.docID();
        if (subScorerDocID != NO_MORE_DOCS) {
          more |= sub.scorer.score(sub.collector, end, subScorerDocID);
        }
      }
      current = bucketTable.first;

    } while (current != null || more);

    return false;
  }

  @Override
  public int advance(int target) throws IOException {
    // Out-of-order scorer: skipping is not supported.
    throw new UnsupportedOperationException();
  }

  @Override
  public int docID() {
    return doc;
  }

  @Override
  public int nextDoc() throws IOException {
    boolean more;
    do {
      while (bucketTable.first != null) {         // more queued
        current = bucketTable.first;
        bucketTable.first = current.next;         // pop the queue

        // check prohibited & required, and minNrShouldMatch
        if ((current.bits & prohibitedMask) == 0 &&
            (current.bits & requiredMask) == requiredMask &&
            current.coord >= minNrShouldMatch) {
          return doc = current.doc;
        }
      }

      // refill the queue
      more = false;
      end += BucketTable.SIZE;
      for (SubScorer sub = scorers; sub != null; sub = sub.next) {
        Scorer scorer = sub.scorer;
        sub.collector.setScorer(scorer);
        int doc = scorer.docID();
        while (doc < end) {
          sub.collector.collect(doc);
          doc = scorer.nextDoc();
        }
        more |= (doc != NO_MORE_DOCS);
      }
    } while (bucketTable.first != null || more);

    return doc = NO_MORE_DOCS;
  }

  @Override
  public float score() {
    return current.score * coordFactors[current.coord];
  }

  @Override
  public void score(Collector collector) throws IOException {
    score(collector, Integer.MAX_VALUE, nextDoc());
  }

  @Override
  public String toString() {
    StringBuilder buffer = new StringBuilder();
    buffer.append("boolean(");
    for (SubScorer sub = scorers; sub != null; sub = sub.next) {
      buffer.append(sub.scorer.toString());
      buffer.append(" ");
    }
    buffer.append(")");
    return buffer.toString();
  }
}
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/search/BooleanScorer.java
Java
art
11,394
package org.apache.lucene.search; /** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import java.io.IOException; import org.apache.lucene.index.*; final class ExactPhraseScorer extends PhraseScorer { ExactPhraseScorer(Weight weight, TermPositions[] tps, int[] offsets, Similarity similarity, byte[] norms) { super(weight, tps, offsets, similarity, norms); } @Override protected final float phraseFreq() throws IOException { // sort list with pq pq.clear(); for (PhrasePositions pp = first; pp != null; pp = pp.next) { pp.firstPosition(); pq.add(pp); // build pq from list } pqToList(); // rebuild list from pq // for counting how many times the exact phrase is found in current document, // just count how many times all PhrasePosition's have exactly the same position. int freq = 0; do { // find position w/ all terms while (first.position < last.position) { // scan forward in first do { if (!first.nextPosition()) return freq; } while (first.position < last.position); firstToLast(); } freq++; // all equal: a match } while (last.nextPosition()); return freq; } }
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/search/ExactPhraseScorer.java
Java
art
1,983
package org.apache.lucene.search;

/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import java.io.IOException;

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.util.OpenBitSet;
import org.apache.lucene.index.TermDocs;  // for javadocs

/**
 * A {@link Filter} that only accepts documents whose single
 * term value in the specified field is contained in the
 * provided set of allowed terms.
 *
 * <p/>
 *
 * This is the same functionality as TermsFilter (from
 * contrib/queries), except this filter requires that the
 * field contains only a single term for all documents.
 * Because of drastically different implementations, they
 * also have different performance characteristics, as
 * described below.
 *
 * <p/>
 *
 * The first invocation of this filter on a given field will
 * be slower, since a {@link FieldCache.StringIndex} must be
 * created.  Subsequent invocations using the same field
 * will re-use this cache.  However, as with all
 * functionality based on {@link FieldCache}, persistent RAM
 * is consumed to hold the cache, and is not freed until the
 * {@link IndexReader} is closed.  In contrast, TermsFilter
 * has no persistent RAM consumption.
 *
 * <p/>
 *
 * With each search, this filter translates the specified
 * set of Terms into a private {@link OpenBitSet} keyed by
 * term number per unique {@link IndexReader} (normally one
 * reader per segment).  Then, during matching, the term
 * number for each docID is retrieved from the cache and
 * then checked for inclusion using the {@link OpenBitSet}.
 * Since all testing is done using RAM resident data
 * structures, performance should be very fast, most likely
 * fast enough to not require further caching of the
 * DocIdSet for each possible combination of terms.
 * However, because docIDs are simply scanned linearly, an
 * index with a great many small documents may find this
 * linear scan too costly.
 *
 * <p/>
 *
 * In contrast, TermsFilter builds up an {@link OpenBitSet},
 * keyed by docID, every time it's created, by enumerating
 * through all matching docs using {@link TermDocs} to seek
 * and scan through each term's docID list.  While there is
 * no linear scan of all docIDs, besides the allocation of
 * the underlying array in the {@link OpenBitSet}, this
 * approach requires a number of "disk seeks" in proportion
 * to the number of terms, which can be exceptionally costly
 * when there are cache misses in the OS's IO cache.
 *
 * <p/>
 *
 * Generally, this filter will be slower on the first
 * invocation for a given field, but subsequent invocations,
 * even if you change the allowed set of Terms, should be
 * faster than TermsFilter, especially as the number of
 * Terms being matched increases.  If you are matching only
 * a very small number of terms, and those terms in turn
 * match a very small number of documents, TermsFilter may
 * perform faster.
 *
 * <p/>
 *
 * Which filter is best is very application dependent.
 */
public class FieldCacheTermsFilter extends Filter {
  private String field;    // the single-valued field to test
  private String[] terms;  // the allowed term values

  public FieldCacheTermsFilter(String field, String... terms) {
    this.field = field;
    this.terms = terms;
  }

  public FieldCache getFieldCache() {
    return FieldCache.DEFAULT;
  }

  @Override
  public DocIdSet getDocIdSet(IndexReader reader) throws IOException {
    return new FieldCacheTermsFilterDocIdSet(getFieldCache().getStringIndex(reader, field));
  }

  /** DocIdSet backed by a per-term-number bit set over the field cache. */
  protected class FieldCacheTermsFilterDocIdSet extends DocIdSet {
    private FieldCache.StringIndex fcsi; // per-doc term ordinals + sorted term lookup
    private OpenBitSet openBitSet;       // bit i set iff term ordinal i is allowed

    public FieldCacheTermsFilterDocIdSet(FieldCache.StringIndex fcsi) {
      this.fcsi = fcsi;
      openBitSet = new OpenBitSet(this.fcsi.lookup.length);
      for (int i=0;i<terms.length;i++) {
        int termNumber = this.fcsi.binarySearchLookup(terms[i]);
        // Ordinal 0 and negative results are skipped: presumably 0 is the
        // "no term" slot and negatives mean "term not in this segment" —
        // confirm against FieldCache.StringIndex.binarySearchLookup.
        if (termNumber > 0) {
          openBitSet.fastSet(termNumber);
        }
      }
    }

    @Override
    public DocIdSetIterator iterator() {
      return new FieldCacheTermsFilterDocIdSetIterator();
    }

    /** This DocIdSet implementation is cacheable. */
    @Override
    public boolean isCacheable() {
      return true;
    }

    /**
     * Linear scan over fcsi.order. End-of-docs is detected by the
     * ArrayIndexOutOfBoundsException thrown when the scan walks past the
     * last document — a deliberate choice that keeps the hot loop free of
     * a bounds check.
     */
    protected class FieldCacheTermsFilterDocIdSetIterator extends DocIdSetIterator {
      private int doc = -1;

      @Override
      public int docID() {
        return doc;
      }

      @Override
      public int nextDoc() {
        try {
          // Scan forward until a doc whose term ordinal is in the allowed set.
          while (!openBitSet.fastGet(fcsi.order[++doc])) {}
        } catch (ArrayIndexOutOfBoundsException e) {
          doc = NO_MORE_DOCS; // walked past the last document
        }
        return doc;
      }

      @Override
      public int advance(int target) {
        try {
          doc = target;
          while (!openBitSet.fastGet(fcsi.order[doc])) {
            doc++;
          }
        } catch (ArrayIndexOutOfBoundsException e) {
          doc = NO_MORE_DOCS; // target or scan beyond the last document
        }
        return doc;
      }
    }
  }
}
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/search/FieldCacheTermsFilter.java
Java
art
5,711
package org.apache.lucene.search;

/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import java.io.IOException;

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.util.DocIdBitSet;

/**
 *  Abstract base class for restricting which documents may
 *  be returned during searching.
 */
public abstract class Filter implements java.io.Serializable {
  /**
   * Creates a {@link DocIdSet} enumerating the documents that should be
   * permitted in search results. <b>NOTE:</b> null can be
   * returned if no documents are accepted by this Filter.
   * <p>
   * Note: This method will be called once per segment in
   * the index during searching.  The returned {@link DocIdSet}
   * must refer to document IDs for that segment, not for
   * the top-level reader.
   *
   * @param reader a {@link IndexReader} instance opened on the index currently
   *         searched on. Note, it is likely that the provided reader does not
   *         represent the whole underlying index i.e. if the index has more than
   *         one segment the given reader only represents a single segment.
   *
   * @return a DocIdSet that provides the documents which should be permitted or
   *         prohibited in search results. <b>NOTE:</b> null can be returned if
   *         no documents will be accepted by this Filter.
   *
   * @see DocIdBitSet
   */
  public abstract DocIdSet getDocIdSet(IndexReader reader) throws IOException;
}
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/search/Filter.java
Java
art
2,211
package org.apache.lucene.search;

/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import java.io.IOException;

import org.apache.lucene.index.TermDocs;

/** Expert: A <code>Scorer</code> for documents matching a <code>Term</code>.
 *  Iteration is buffered: doc numbers and term frequencies are read from the
 *  underlying {@link TermDocs} 32 at a time into parallel arrays. */
final class TermScorer extends Scorer {

  // Decoder table shared by all instances; translates an encoded norm byte
  // into a float multiplier.
  private static final float[] SIM_NORM_DECODER = Similarity.getNormDecoder();

  private Weight weight;
  private TermDocs termDocs;
  private byte[] norms;
  private float weightValue;
  // Current doc id; -1 before the first call to nextDoc()/advance().
  private int doc = -1;

  private final int[] docs = new int[32];         // buffered doc numbers
  private final int[] freqs = new int[32];        // buffered term freqs
  // pointer indexes the current slot in docs/freqs; pointerMax is the number
  // of valid entries currently buffered.
  private int pointer;
  private int pointerMax;

  private static final int SCORE_CACHE_SIZE = 32;
  // Precomputed tf(f) * weightValue for small term frequencies (0..31).
  private float[] scoreCache = new float[SCORE_CACHE_SIZE];

  /**
   * Construct a <code>TermScorer</code>.
   *
   * @param weight
   *          The weight of the <code>Term</code> in the query.
   * @param td
   *          An iterator over the documents matching the <code>Term</code>.
   * @param similarity
   *          The <code>Similarity</code> implementation to be used for score
   *          computations.
   * @param norms
   *          The field norms of the document fields for the <code>Term</code>,
   *          or null if norms are omitted (score() then skips normalization).
   */
  TermScorer(Weight weight, TermDocs td, Similarity similarity, byte[] norms) {
    super(similarity);
    this.weight = weight;
    this.termDocs = td;
    this.norms = norms;
    this.weightValue = weight.getValue();

    // Fill the small-frequency score cache up front so score() can avoid
    // recomputing tf() for common low frequencies.
    for (int i = 0; i < SCORE_CACHE_SIZE; i++)
      scoreCache[i] = getSimilarity().tf(i) * weightValue;
  }

  @Override
  public void score(Collector c) throws IOException {
    // Collect every remaining match; the firstDocID argument is unused below.
    score(c, Integer.MAX_VALUE, nextDoc());
  }

  // firstDocID is ignored since nextDoc() sets 'doc'
  @Override
  protected boolean score(Collector c, int end, int firstDocID) throws IOException {
    c.setScorer(this);
    while (doc < end) {                           // for docs in window
      c.collect(doc);                             // collect score

      if (++pointer >= pointerMax) {
        pointerMax = termDocs.read(docs, freqs);  // refill buffers
        if (pointerMax != 0) {
          pointer = 0;
        } else {
          termDocs.close();                       // close stream
          doc = Integer.MAX_VALUE;                // set to sentinel value
          return false;
        }
      }
      doc = docs[pointer];
    }
    return true;
  }

  @Override
  public int docID() {
    return doc;
  }

  /**
   * Advances to the next document matching the query. <br>
   * The iterator over the matching documents is buffered using
   * {@link TermDocs#read(int[],int[])}.
   *
   * @return the document matching the query or {@link #NO_MORE_DOCS} if there
   *         are no more documents.
   */
  @Override
  public int nextDoc() throws IOException {
    pointer++;
    if (pointer >= pointerMax) {
      pointerMax = termDocs.read(docs, freqs);    // refill buffer
      if (pointerMax != 0) {
        pointer = 0;
      } else {
        termDocs.close();                         // close stream
        return doc = NO_MORE_DOCS;
      }
    }
    doc = docs[pointer];
    return doc;
  }

  @Override
  public float score() {
    assert doc != -1;
    int f = freqs[pointer];
    float raw =                                   // compute tf(f)*weight
      f < SCORE_CACHE_SIZE                        // check cache
      ? scoreCache[f]                             // cache hit
      : getSimilarity().tf(f)*weightValue;        // cache miss

    return norms == null ? raw : raw * SIM_NORM_DECODER[norms[doc] & 0xFF]; // normalize for field
  }

  /**
   * Advances to the first match beyond the current whose document number is
   * greater than or equal to a given target. <br>
   * The implementation first scans the in-memory buffer and only then falls
   * back to {@link TermDocs#skipTo(int)} on the underlying stream.
   *
   * @param target
   *          The target document number.
   * @return the matching document or {@link #NO_MORE_DOCS} if none exist.
   */
  @Override
  public int advance(int target) throws IOException {
    // first scan in cache
    for (pointer++; pointer < pointerMax; pointer++) {
      if (docs[pointer] >= target) {
        return doc = docs[pointer];
      }
    }

    // not found in cache, seek underlying stream
    boolean result = termDocs.skipTo(target);
    if (result) {
      // Restart the buffer with just the doc skipTo() landed on.
      pointerMax = 1;
      pointer = 0;
      docs[pointer] = doc = termDocs.doc();
      freqs[pointer] = termDocs.freq();
    } else {
      doc = NO_MORE_DOCS;
    }
    return doc;
  }

  /** Returns a string representation of this <code>TermScorer</code>. */
  @Override
  public String toString() { return "scorer(" + weight + ")"; }
}
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/search/TermScorer.java
Java
art
5,387
package org.apache.lucene.search;

/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import java.io.IOException;

import org.apache.lucene.util.PriorityQueue;

/**
 * Expert: A hit queue for sorting by hits by terms in more than one field.
 * Uses <code>FieldCache.DEFAULT</code> for maintaining
 * internal term lookup tables.
 *
 * <b>NOTE:</b> This API is experimental and might change in
 * incompatible ways in the next release.
 *
 * @since 2.9
 * @version $Id:
 * @see Searcher#search(Query,Filter,int,Sort)
 * @see FieldCache
 */
public abstract class FieldValueHitQueue extends PriorityQueue<FieldValueHitQueue.Entry> {

  // A ScoreDoc extended with the comparator slot the hit occupies.
  final static class Entry extends ScoreDoc {
    int slot;

    Entry(int slot, int doc, float score) {
      super(doc, score);
      this.slot = slot;
    }

    @Override
    public String toString() {
      return "slot:" + slot + " " + super.toString();
    }
  }

  /**
   * An implementation of {@link FieldValueHitQueue} which is optimized in case
   * there is just one comparator: the single comparator and its reverse
   * multiplier are cached in dedicated fields to avoid array indirection in
   * {@link #lessThan}.
   */
  private static final class OneComparatorFieldValueHitQueue extends FieldValueHitQueue {

    private final FieldComparator comparator;
    private final int oneReverseMul;

    public OneComparatorFieldValueHitQueue(SortField[] fields, int size)
        throws IOException {
      super(fields);
      if (fields.length == 0) {
        throw new IllegalArgumentException("Sort must contain at least one field");
      }

      SortField field = fields[0];
      comparator = field.getComparator(size, 0);
      oneReverseMul = field.reverse ? -1 : 1;

      // Also publish into the arrays the base class exposes via
      // getComparators()/getReverseMul().
      comparators[0] = comparator;
      reverseMul[0] = oneReverseMul;

      initialize(size);
    }

    /**
     * Returns whether <code>hitA</code> is less relevant than <code>hitB</code>.
     * @param hitA Entry
     * @param hitB Entry
     * @return <code>true</code> if document <code>hitA</code> should be sorted after document <code>hitB</code>.
     */
    @Override
    protected boolean lessThan(final Entry hitA, final Entry hitB) {

      assert hitA != hitB;
      assert hitA.slot != hitB.slot;

      final int c = oneReverseMul * comparator.compare(hitA.slot, hitB.slot);
      if (c != 0) {
        return c > 0;
      }

      // avoid random sort order that could lead to duplicates (bug #31241):
      return hitA.doc > hitB.doc;
    }

  }

  /**
   * An implementation of {@link FieldValueHitQueue} which is optimized in case
   * there is more than one comparator: comparators are consulted in priority
   * order with short-circuit on the first non-zero comparison.
   */
  private static final class MultiComparatorsFieldValueHitQueue extends FieldValueHitQueue {

    public MultiComparatorsFieldValueHitQueue(SortField[] fields, int size)
        throws IOException {
      super(fields);

      int numComparators = comparators.length;
      for (int i = 0; i < numComparators; ++i) {
        SortField field = fields[i];

        reverseMul[i] = field.reverse ? -1 : 1;
        comparators[i] = field.getComparator(size, i);
      }

      initialize(size);
    }

    @Override
    protected boolean lessThan(final Entry hitA, final Entry hitB) {

      assert hitA != hitB;
      assert hitA.slot != hitB.slot;

      int numComparators = comparators.length;
      for (int i = 0; i < numComparators; ++i) {
        final int c = reverseMul[i] * comparators[i].compare(hitA.slot, hitB.slot);
        if (c != 0) {
          // Short circuit
          return c > 0;
        }
      }

      // avoid random sort order that could lead to duplicates (bug #31241):
      return hitA.doc > hitB.doc;
    }

  }

  // prevent instantiation and extension.
  private FieldValueHitQueue(SortField[] fields) {
    // When we get here, fields.length is guaranteed to be > 0, therefore no
    // need to check it again.

    // All these are required by this class's API - need to return arrays.
    // Therefore even in the case of a single comparator, create an array
    // anyway.
    this.fields = fields;
    int numComparators = fields.length;
    comparators = new FieldComparator[numComparators];
    reverseMul = new int[numComparators];
  }

  /**
   * Creates a hit queue sorted by the given list of fields.
   *
   * <p><b>NOTE</b>: The instances returned by this method
   * pre-allocate a full array of length <code>numHits</code>.
   *
   * @param fields
   *          SortField array we are sorting by in priority order (highest
   *          priority first); cannot be <code>null</code> or empty
   * @param size
   *          The number of hits to retain. Must be greater than zero.
   * @throws IOException
   */
  public static FieldValueHitQueue create(SortField[] fields, int size) throws IOException {

    if (fields.length == 0) {
      throw new IllegalArgumentException("Sort must contain at least one field");
    }

    if (fields.length == 1) {
      return new OneComparatorFieldValueHitQueue(fields, size);
    } else {
      return new MultiComparatorsFieldValueHitQueue(fields, size);
    }
  }

  FieldComparator[] getComparators() { return comparators; }

  int[] getReverseMul() { return reverseMul; }

  /** Stores the sort criteria being used. */
  protected final SortField[] fields;
  // Parallel arrays: comparators[i] belongs to fields[i]; reverseMul[i] is
  // -1 for a reversed sort on that field, +1 otherwise.
  protected final FieldComparator[] comparators;
  protected final int[] reverseMul;

  @Override
  protected abstract boolean lessThan (final Entry a, final Entry b);

  /**
   * Given a queue Entry, creates a corresponding FieldDoc
   * that contains the values used to sort the given document.
   * These values are not the raw values out of the index, but the internal
   * representation of them. This is so the given search hit can be collated by
   * a MultiSearcher with other search hits.
   *
   * @param entry The Entry used to create a FieldDoc
   * @return The newly created FieldDoc
   * @see Searchable#search(Weight,Filter,int,Sort)
   */
  FieldDoc fillFields(final Entry entry) {
    final int n = comparators.length;
    final Comparable[] fields = new Comparable[n];
    for (int i = 0; i < n; ++i) {
      fields[i] = comparators[i].value(entry.slot);
    }
    //if (maxscore > 1.0f) doc.score /= maxscore; // normalize scores
    return new FieldDoc(entry.doc, entry.score, fields);
  }

  /** Returns the SortFields being used by this hit queue. */
  SortField[] getFields() {
    return fields;
  }
}
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/search/FieldValueHitQueue.java
Java
art
7,033
package org.apache.lucene.search; /** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import java.io.Serializable; import java.util.Arrays; /** * Encapsulates sort criteria for returned hits. * * <p>The fields used to determine sort order must be carefully chosen. * Documents must contain a single term in such a field, * and the value of the term should indicate the document's relative position in * a given sort order. The field must be indexed, but should not be tokenized, * and does not need to be stored (unless you happen to want it back with the * rest of your document data). In other words: * * <p><code>document.add (new Field ("byNumber", Integer.toString(x), Field.Store.NO, Field.Index.NOT_ANALYZED));</code></p> * * * <p><h3>Valid Types of Values</h3> * * <p>There are four possible kinds of term values which may be put into * sorting fields: Integers, Longs, Floats, or Strings. Unless * {@link SortField SortField} objects are specified, the type of value * in the field is determined by parsing the first term in the field. * * <p>Integer term values should contain only digits and an optional * preceding negative sign. Values must be base 10 and in the range * <code>Integer.MIN_VALUE</code> and <code>Integer.MAX_VALUE</code> inclusive. 
* Documents which should appear first in the sort * should have low value integers, later documents high values * (i.e. the documents should be numbered <code>1..n</code> where * <code>1</code> is the first and <code>n</code> the last). * * <p>Long term values should contain only digits and an optional * preceding negative sign. Values must be base 10 and in the range * <code>Long.MIN_VALUE</code> and <code>Long.MAX_VALUE</code> inclusive. * Documents which should appear first in the sort * should have low value integers, later documents high values. * * <p>Float term values should conform to values accepted by * {@link Float Float.valueOf(String)} (except that <code>NaN</code> * and <code>Infinity</code> are not supported). * Documents which should appear first in the sort * should have low values, later documents high values. * * <p>String term values can contain any valid String, but should * not be tokenized. The values are sorted according to their * {@link Comparable natural order}. Note that using this type * of term value has higher memory requirements than the other * two types. * * <p><h3>Object Reuse</h3> * * <p>One of these objects can be * used multiple times and the sort order changed between usages. * * <p>This class is thread safe. * * <p><h3>Memory Usage</h3> * * <p>Sorting uses of caches of term values maintained by the * internal HitQueue(s). The cache is static and contains an integer * or float array of length <code>IndexReader.maxDoc()</code> for each field * name for which a sort is performed. In other words, the size of the * cache in bytes is: * * <p><code>4 * IndexReader.maxDoc() * (# of different fields actually used to sort)</code> * * <p>For String fields, the cache is larger: in addition to the * above array, the value of every term in the field is kept in memory. * If there are many unique terms in the field, this could * be quite large. 
* * <p>Note that the size of the cache is not affected by how many * fields are in the index and <i>might</i> be used to sort - only by * the ones actually used to sort a result set. * * <p>Created: Feb 12, 2004 10:53:57 AM * * @since lucene 1.4 */ public class Sort implements Serializable { /** * Represents sorting by computed relevance. Using this sort criteria returns * the same results as calling * {@link Searcher#search(Query,int) Searcher#search()}without a sort criteria, * only with slightly more overhead. */ public static final Sort RELEVANCE = new Sort(); /** Represents sorting by index order. */ public static final Sort INDEXORDER = new Sort(SortField.FIELD_DOC); // internal representation of the sort criteria SortField[] fields; /** * Sorts by computed relevance. This is the same sort criteria as calling * {@link Searcher#search(Query,int) Searcher#search()}without a sort criteria, * only with slightly more overhead. */ public Sort() { this(SortField.FIELD_SCORE); } /** Sorts by the criteria in the given SortField. */ public Sort(SortField field) { setSort(field); } /** Sorts in succession by the criteria in each SortField. */ public Sort(SortField... fields) { setSort(fields); } /** Sets the sort to the given criteria. */ public void setSort(SortField field) { this.fields = new SortField[] { field }; } /** Sets the sort to the given criteria in succession. */ public void setSort(SortField... fields) { this.fields = fields; } /** * Representation of the sort criteria. * @return Array of SortField objects used in this sort criteria */ public SortField[] getSort() { return fields; } @Override public String toString() { StringBuilder buffer = new StringBuilder(); for (int i = 0; i < fields.length; i++) { buffer.append(fields[i].toString()); if ((i+1) < fields.length) buffer.append(','); } return buffer.toString(); } /** Returns true if <code>o</code> is equal to this. 
*/ @Override public boolean equals(Object o) { if (this == o) return true; if (!(o instanceof Sort)) return false; final Sort other = (Sort)o; return Arrays.equals(this.fields, other.fields); } /** Returns a hash code value for this object. */ @Override public int hashCode() { return 0x45aaf665 + Arrays.hashCode(fields); } }
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/search/Sort.java
Java
art
6,443
package org.apache.lucene.search; /** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import java.io.IOException; /** * Abstract decorator class of a DocIdSetIterator * implementation that provides on-demand filter/validation * mechanism on an underlying DocIdSetIterator. See {@link * FilteredDocIdSet}. */ public abstract class FilteredDocIdSetIterator extends DocIdSetIterator { protected DocIdSetIterator _innerIter; private int doc; /** * Constructor. * @param innerIter Underlying DocIdSetIterator. */ public FilteredDocIdSetIterator(DocIdSetIterator innerIter) { if (innerIter == null) { throw new IllegalArgumentException("null iterator"); } _innerIter = innerIter; doc = -1; } /** * Validation method to determine whether a docid should be in the result set. * @param doc docid to be tested * @return true if input docid should be in the result set, false otherwise. * @see #FilteredDocIdSetIterator(DocIdSetIterator). 
*/ abstract protected boolean match(int doc) throws IOException; @Override public int docID() { return doc; } @Override public int nextDoc() throws IOException { while ((doc = _innerIter.nextDoc()) != NO_MORE_DOCS) { if (match(doc)) { return doc; } } return doc; } @Override public int advance(int target) throws IOException { doc = _innerIter.advance(target); if (doc != NO_MORE_DOCS) { if (match(doc)) { return doc; } else { while ((doc = _innerIter.nextDoc()) != NO_MORE_DOCS) { if (match(doc)) { return doc; } } return doc; } } return doc; } }
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/search/FilteredDocIdSetIterator.java
Java
art
2,463
package org.apache.lucene.search;

/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/** A clause in a BooleanQuery. */
public class BooleanClause implements java.io.Serializable {

  /** Specifies how clauses are to occur in matching documents. */
  public static enum Occur {

    /** Use this operator for clauses that <i>must</i> appear in the matching documents. */
    MUST {
      @Override
      public String toString() {
        return "+";
      }
    },

    /** Use this operator for clauses that <i>should</i> appear in the
     * matching documents. For a BooleanQuery with no <code>MUST</code>
     * clauses one or more <code>SHOULD</code> clauses must match a document
     * for the BooleanQuery to match.
     * @see BooleanQuery#setMinimumNumberShouldMatch
     */
    SHOULD {
      @Override
      public String toString() {
        return "";
      }
    },

    /** Use this operator for clauses that <i>must not</i> appear in the matching documents.
     * Note that it is not possible to search for queries that only consist
     * of a <code>MUST_NOT</code> clause. */
    MUST_NOT {
      @Override
      public String toString() {
        return "-";
      }
    };

  }

  /** The query whose matching documents are combined by the boolean query. */
  private Query query;

  private Occur occur;

  /** Constructs a BooleanClause. */
  public BooleanClause(Query query, Occur occur) {
    this.query = query;
    this.occur = occur;
  }

  public Occur getOccur() {
    return occur;
  }

  public void setOccur(Occur occur) {
    this.occur = occur;
  }

  public Query getQuery() {
    return query;
  }

  public void setQuery(Query query) {
    this.query = query;
  }

  public boolean isProhibited() {
    return occur == Occur.MUST_NOT;
  }

  public boolean isRequired() {
    return occur == Occur.MUST;
  }

  /** Returns true if <code>o</code> is equal to this. */
  @Override
  public boolean equals(Object o) {
    if (!(o instanceof BooleanClause)) {  // instanceof is false for null
      return false;
    }
    BooleanClause that = (BooleanClause) o;
    return query.equals(that.query) && occur == that.occur;
  }

  /** Returns a hash code value for this object. */
  @Override
  public int hashCode() {
    // XOR the query hash with a small tag identifying the occur mode
    // (MUST -> 1, MUST_NOT -> 2, SHOULD -> 0).
    int h = query.hashCode();
    if (occur == Occur.MUST) {
      h ^= 1;
    }
    if (occur == Occur.MUST_NOT) {
      h ^= 2;
    }
    return h;
  }

  @Override
  public String toString() {
    return occur.toString() + query.toString();
  }
}
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/search/BooleanClause.java
Java
art
3,127
package org.apache.lucene.search; /** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import java.io.IOException; /** * Abstract decorator class for a DocIdSet implementation * that provides on-demand filtering/validation * mechanism on a given DocIdSet. * * <p/> * * Technically, this same functionality could be achieved * with ChainedFilter (under contrib/misc), however the * benefit of this class is it never materializes the full * bitset for the filter. Instead, the {@link #match} * method is invoked on-demand, per docID visited during * searching. If you know few docIDs will be visited, and * the logic behind {@link #match} is relatively costly, * this may be a better way to filter than ChainedFilter. * * @see DocIdSet */ public abstract class FilteredDocIdSet extends DocIdSet { private final DocIdSet _innerSet; /** * Constructor. * @param innerSet Underlying DocIdSet */ public FilteredDocIdSet(DocIdSet innerSet) { _innerSet = innerSet; } /** This DocIdSet implementation is cacheable if the inner set is cacheable. */ @Override public boolean isCacheable() { return _innerSet.isCacheable(); } /** * Validation method to determine whether a docid should be in the result set. 
* @param docid docid to be tested * @return true if input docid should be in the result set, false otherwise. */ protected abstract boolean match(int docid) throws IOException; /** * Implementation of the contract to build a DocIdSetIterator. * @see DocIdSetIterator * @see FilteredDocIdSetIterator */ @Override public DocIdSetIterator iterator() throws IOException { return new FilteredDocIdSetIterator(_innerSet.iterator()) { @Override protected boolean match(int docid) throws IOException { return FilteredDocIdSet.this.match(docid); } }; } }
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/search/FilteredDocIdSet.java
Java
art
2,626
package org.apache.lucene.search; /** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import java.io.IOException; import org.apache.lucene.index.IndexReader; /** * A {@link Collector} implementation which wraps another * {@link Collector} and makes sure only documents with * scores &gt; 0 are collected. */ public class PositiveScoresOnlyCollector extends Collector { final private Collector c; private Scorer scorer; public PositiveScoresOnlyCollector(Collector c) { this.c = c; } @Override public void collect(int doc) throws IOException { if (scorer.score() > 0) { c.collect(doc); } } @Override public void setNextReader(IndexReader reader, int docBase) throws IOException { c.setNextReader(reader, docBase); } @Override public void setScorer(Scorer scorer) throws IOException { // Set a ScoreCachingWrappingScorer in case the wrapped Collector will call // score() also. this.scorer = new ScoreCachingWrappingScorer(scorer); c.setScorer(this.scorer); } @Override public boolean acceptsDocsOutOfOrder() { return c.acceptsDocsOutOfOrder(); } }
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/search/PositiveScoresOnlyCollector.java
Java
art
1,891
package org.apache.lucene.search; /** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import org.apache.lucene.index.Term; /** * A Filter that restricts search results to values that have a matching prefix in a given * field. */ public class PrefixFilter extends MultiTermQueryWrapperFilter<PrefixQuery> { public PrefixFilter(Term prefix) { super(new PrefixQuery(prefix)); } public Term getPrefix() { return query.getPrefix(); } /** Prints a user-readable version of this query. */ @Override public String toString () { StringBuilder buffer = new StringBuilder(); buffer.append("PrefixFilter("); buffer.append(getPrefix().toString()); buffer.append(")"); return buffer.toString(); } }
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/search/PrefixFilter.java
Java
art
1,487
package org.apache.lucene.search; /** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import java.io.IOException; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.Term; import org.apache.lucene.util.ToStringUtils; /** A Query that matches documents containing terms with a specified prefix. A PrefixQuery * is built by QueryParser for input like <code>app*</code>. * * <p>This query uses the {@link * MultiTermQuery#CONSTANT_SCORE_AUTO_REWRITE_DEFAULT} * rewrite method. */ public class PrefixQuery extends MultiTermQuery { private Term prefix; /** Constructs a query for terms starting with <code>prefix</code>. */ public PrefixQuery(Term prefix) { this.prefix = prefix; } /** Returns the prefix of this query. */ public Term getPrefix() { return prefix; } @Override protected FilteredTermEnum getEnum(IndexReader reader) throws IOException { return new PrefixTermEnum(reader, prefix); } /** Prints a user-readable version of this query. 
*/ @Override public String toString(String field) { StringBuilder buffer = new StringBuilder(); if (!prefix.field().equals(field)) { buffer.append(prefix.field()); buffer.append(":"); } buffer.append(prefix.text()); buffer.append('*'); buffer.append(ToStringUtils.boost(getBoost())); return buffer.toString(); } @Override public int hashCode() { final int prime = 31; int result = super.hashCode(); result = prime * result + ((prefix == null) ? 0 : prefix.hashCode()); return result; } @Override public boolean equals(Object obj) { if (this == obj) return true; if (!super.equals(obj)) return false; if (getClass() != obj.getClass()) return false; PrefixQuery other = (PrefixQuery) obj; if (prefix == null) { if (other.prefix != null) return false; } else if (!prefix.equals(other.prefix)) return false; return true; } }
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/search/PrefixQuery.java
Java
art
2,723
package org.apache.lucene.search;

/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import java.io.IOException;

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.util.ThreadInterruptedException;

/**
 * The {@link TimeLimitingCollector} is used to timeout search requests that
 * take longer than the maximum allowed search time limit. After this time is
 * exceeded, the search thread is stopped by throwing a
 * {@link TimeExceededException}.
 */
public class TimeLimitingCollector extends Collector {

  /**
   * Default timer resolution.
   * @see #setResolution(long)
   */
  public static final int DEFAULT_RESOLUTION = 20;

  /**
   * Default for {@link #isGreedy()}.
   * <p>NOTE(review): this is named and documented like a constant but is
   * declared as a public mutable <em>instance</em> field; it is kept as-is
   * here for backward compatibility with callers that may assign to it.
   * @see #isGreedy()
   */
  public boolean DEFAULT_GREEDY = false;

  // Tick interval (ms) of the single shared timer thread; adjustable via setResolution().
  private static long resolution = DEFAULT_RESOLUTION;

  // If true, the wrapped collector is allowed to collect the doc that triggered the timeout.
  private boolean greedy = DEFAULT_GREEDY;

  private static final class TimerThread extends Thread  {

    // NOTE: we can avoid explicit synchronization here for several reasons:
    // * updates to volatile long variables are atomic
    // * only single thread modifies this value
    // * use of volatile keyword ensures that it does not reside in
    //   a register, but in main memory (so that changes are visible to
    //   other threads).
    // * visibility of changes does not need to be instantaneous, we can
    //   afford losing a tick or two.
    //
    // See section 17 of the Java Language Specification for details.
    private volatile long time = 0;

    /**
     * TimerThread provides a pseudo-clock service to all searching
     * threads, so that they can count elapsed time with less overhead
     * than repeatedly calling System.currentTimeMillis.  A single
     * thread should be created to be used for all searches.
     */
    private TimerThread() {
      super("TimeLimitedCollector timer thread");
      // daemon so the timer never prevents JVM shutdown
      this.setDaemon( true );
    }

    @Override
    public void run() {
      while (true) {
        // TODO: Use System.nanoTime() when Lucene moves to Java SE 5.
        time += resolution;
        try {
          Thread.sleep( resolution );
        } catch (InterruptedException ie) {
          // preserve interrupt status by rethrowing as Lucene's unchecked wrapper
          throw new ThreadInterruptedException(ie);
        }
      }
    }

    /**
     * Get the timer value in milliseconds.
     */
    public long getMilliseconds() {
      return time;
    }
  }

  /** Thrown when elapsed search time exceeds allowed search time. */
  public static class TimeExceededException extends RuntimeException {
    private static final long serialVersionUID = 1L;

    private long timeAllowed;
    private long timeElapsed;
    private int lastDocCollected;

    private TimeExceededException(long timeAllowed, long timeElapsed, int lastDocCollected) {
      // Fixed message: the original concatenation was missing the separator and
      // unit after timeElapsed ("...900Exceeded allowed search time: 1000 ms.").
      super("Elapsed time: " + timeElapsed + " ms. Exceeded allowed search time: " + timeAllowed + " ms.");
      this.timeAllowed = timeAllowed;
      this.timeElapsed = timeElapsed;
      this.lastDocCollected = lastDocCollected;
    }

    /** Returns allowed time (milliseconds). */
    public long getTimeAllowed() {
      return timeAllowed;
    }

    /** Returns elapsed time (milliseconds). */
    public long getTimeElapsed() {
      return timeElapsed;
    }

    /** Returns last doc that was collected when the search time exceeded. */
    public int getLastDocCollected() {
      return lastDocCollected;
    }
  }

  // Declare and initialize a single static timer thread to be used by
  // all TimeLimitedCollector instances.  The JVM assures that
  // this only happens once.
  private final static TimerThread TIMER_THREAD = new TimerThread();

  static  {
    TIMER_THREAD.start();
  }

  private final long t0;        // timer reading at construction time
  private final long timeout;   // absolute timer value after which collect() throws
  private final Collector collector;

  /**
   * Create a TimeLimitedCollector wrapper over another {@link Collector} with a specified timeout.
   * @param collector the wrapped {@link Collector}
   * @param timeAllowed max time allowed for collecting hits after which {@link TimeExceededException} is thrown
   */
  public TimeLimitingCollector(final Collector collector, final long timeAllowed ) {
    this.collector = collector;
    t0 = TIMER_THREAD.getMilliseconds();
    this.timeout = t0 + timeAllowed;
  }

  /**
   * Return the timer resolution.
   * @see #setResolution(long)
   */
  public static long getResolution() {
    return resolution;
  }

  /**
   * Set the timer resolution.
   * The default timer resolution is 20 milliseconds.
   * This means that a search required to take no longer than
   * 800 milliseconds may be stopped after 780 to 820 milliseconds.
   * <br>Note that:
   * <ul>
   * <li>Finer (smaller) resolution is more accurate but less efficient.</li>
   * <li>Setting resolution to less than 5 milliseconds will be silently modified to 5 milliseconds.</li>
   * <li>Setting resolution smaller than current resolution might take effect only after current
   * resolution. (Assume current resolution of 20 milliseconds is modified to 5 milliseconds,
   * then it can take up to 20 milliseconds for the change to have effect.</li>
   * </ul>
   */
  public static void setResolution(long newResolution) {
    resolution = Math.max(newResolution,5); // 5 milliseconds is about the minimum reasonable time for a Object.wait(long) call.
  }

  /**
   * Checks if this time limited collector is greedy in collecting the last hit.
   * A non greedy collector, upon a timeout, would throw a {@link TimeExceededException}
   * without allowing the wrapped collector to collect current doc. A greedy one would
   * first allow the wrapped hit collector to collect current doc and only then
   * throw a {@link TimeExceededException}.
   * @see #setGreedy(boolean)
   */
  public boolean isGreedy() {
    return greedy;
  }

  /**
   * Sets whether this time limited collector is greedy.
   * @param greedy true to make this time limited greedy
   * @see #isGreedy()
   */
  public void setGreedy(boolean greedy) {
    this.greedy = greedy;
  }

  /**
   * Calls {@link Collector#collect(int)} on the decorated {@link Collector}
   * unless the allowed time has passed, in which case it throws an exception.
   *
   * @throws TimeExceededException
   *           if the time allowed has exceeded.
   */
  @Override
  public void collect(final int doc) throws IOException {
    long time = TIMER_THREAD.getMilliseconds();
    if (timeout < time) {
      if (greedy) {
        // greedy mode: let the wrapped collector see this doc before failing
        collector.collect(doc);
      }
      throw new TimeExceededException( timeout-t0, time-t0, doc );
    }
    collector.collect(doc);
  }

  @Override
  public void setNextReader(IndexReader reader, int base) throws IOException {
    collector.setNextReader(reader, base);
  }

  @Override
  public void setScorer(Scorer scorer) throws IOException {
    collector.setScorer(scorer);
  }

  @Override
  public boolean acceptsDocsOutOfOrder() {
    // purely delegating wrapper: order tolerance is that of the wrapped collector
    return collector.acceptsDocsOutOfOrder();
  }
}
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/search/TimeLimitingCollector.java
Java
art
7,851
package org.apache.lucene.search;

/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import java.io.IOException;
import java.util.Collection;
import java.util.Arrays;
import java.util.Comparator;

/**
 * Scorer for conjunctions, sets of queries, all of which are required.
 * <p>
 * Invariant maintained by the constructor and {@link #doNext()}: outside of a
 * call to doNext(), every sub-scorer is positioned on the same doc ID
 * (or the scorer as a whole is exhausted and {@code lastDoc == NO_MORE_DOCS}).
 */
class ConjunctionScorer extends Scorer {

  private final Scorer[] scorers;
  // coordination factor: all of scorers.length clauses always match
  private final float coord;
  // doc this scorer is currently on; -1 before first nextDoc()/advance()
  private int lastDoc = -1;

  /** Convenience constructor taking a collection of required sub-scorers. */
  public ConjunctionScorer(Similarity similarity, Collection<Scorer> scorers) throws IOException {
    this(similarity, scorers.toArray(new Scorer[scorers.size()]));
  }

  /**
   * Creates the conjunction and positions all sub-scorers on their first
   * common doc (if any). If any sub-scorer is empty, or the sub-scorers never
   * agree on a doc, this scorer is immediately marked exhausted.
   */
  public ConjunctionScorer(Similarity similarity, Scorer... scorers) throws IOException {
    super(similarity);
    this.scorers = scorers;
    coord = similarity.coord(scorers.length, scorers.length);

    for (int i = 0; i < scorers.length; i++) {
      if (scorers[i].nextDoc() == NO_MORE_DOCS) {
        // If even one of the sub-scorers does not have any documents, this
        // scorer should not attempt to do any more work.
        lastDoc = NO_MORE_DOCS;
        return;
      }
    }

    // Sort the array the first time...
    // We don't need to sort the array in any future calls because we know
    // it will already start off sorted (all scorers on same doc).

    // note that this comparator is not consistent with equals!
    // (subtraction is safe here only because doc IDs are non-negative ints)
    Arrays.sort(scorers, new Comparator<Scorer>() {         // sort the array
      public int compare(Scorer o1, Scorer o2) {
        return o1.docID() - o2.docID();
      }
    });

    // NOTE: doNext() must be called before the re-sorting of the array later on.
    // The reason is this: assume there are 5 scorers, whose first docs are 1,
    // 2, 3, 5, 5 respectively. Sorting (above) leaves the array as is. Calling
    // doNext() here advances all the first scorers to 5 (or a larger doc ID
    // they all agree on).
    // However, if we re-sort before doNext() is called, the order will be 5, 3,
    // 2, 1, 5 and then doNext() will stop immediately, since the first scorer's
    // docs equals the last one. So the invariant that after calling doNext()
    // all scorers are on the same doc ID is broken.
    if (doNext() == NO_MORE_DOCS) {
      // The scorers did not agree on any document.
      lastDoc = NO_MORE_DOCS;
      return;
    }

    // If first-time skip distance is any predictor of
    // scorer sparseness, then we should always try to skip first on
    // those scorers.
    // Keep last scorer in it's last place (it will be the first
    // to be skipped on), but reverse all of the others so that
    // they will be skipped on in order of original high skip.
    int end = scorers.length - 1;
    int max = end >> 1;
    for (int i = 0; i < max; i++) {
      Scorer tmp = scorers[i];
      int idx = end - i - 1;
      scorers[i] = scorers[idx];
      scorers[idx] = tmp;
    }
  }

  /**
   * Round-robin advance: repeatedly advances the lagging scorer to the current
   * candidate doc until all sub-scorers sit on the same doc. The candidate is
   * seeded from the last scorer in the array; cycling {@code first} through
   * the array visits every scorer until a full pass makes no progress.
   * Returns the agreed doc, or NO_MORE_DOCS if any scorer exhausts.
   */
  private int doNext() throws IOException {
    int first = 0;
    int doc = scorers[scorers.length - 1].docID();
    Scorer firstScorer;
    while ((firstScorer = scorers[first]).docID() < doc) {
      doc = firstScorer.advance(doc);
      first = first == scorers.length - 1 ? 0 : first + 1;
    }
    return doc;
  }

  @Override
  public int advance(int target) throws IOException {
    if (lastDoc == NO_MORE_DOCS) {
      return lastDoc;
    } else if (scorers[(scorers.length - 1)].docID() < target) {
      // only the last scorer needs an explicit skip; doNext() drags the rest
      scorers[(scorers.length - 1)].advance(target);
    }
    return lastDoc = doNext();
  }

  @Override
  public int docID() {
    return lastDoc;
  }

  @Override
  public int nextDoc() throws IOException {
    if (lastDoc == NO_MORE_DOCS) {
      return lastDoc;
    } else if (lastDoc == -1) {
      // first call: the constructor already aligned all scorers on a common doc
      return lastDoc = scorers[scorers.length - 1].docID();
    }
    // move one scorer off the current doc, then re-align the rest
    scorers[(scorers.length - 1)].nextDoc();
    return lastDoc = doNext();
  }

  /** Score is the sum of the sub-scores, scaled by the coordination factor. */
  @Override
  public float score() throws IOException {
    float sum = 0.0f;
    for (int i = 0; i < scorers.length; i++) {
      sum += scorers[i].score();
    }
    return sum * coord;
  }
}
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/search/ConjunctionScorer.java
Java
art
4,768
package org.apache.lucene.search;

/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import java.io.IOException;

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;

/**
 * Subclass of FilteredTermEnum for enumerating a single term.
 * <p>
 * This can be used by {@link MultiTermQuery}s that need only visit one term,
 * but want to preserve MultiTermQuery semantics such as
 * {@link MultiTermQuery#rewriteMethod}.
 */
public class SingleTermEnum extends FilteredTermEnum {
  /** The one term this enumeration yields. */
  private final Term target;
  /** Flips to true as soon as a term other than {@link #target} is seen. */
  private boolean exhausted = false;

  /**
   * Creates a new <code>SingleTermEnum</code>.
   * <p>
   * After calling the constructor the enumeration is already pointing to the term,
   * if it exists.
   */
  public SingleTermEnum(IndexReader reader, Term singleTerm) throws IOException {
    super();
    target = singleTerm;
    setEnum(reader.terms(singleTerm));
  }

  /** All accepted terms are exact matches, so the similarity is always 1. */
  @Override
  public float difference() {
    return 1.0F;
  }

  @Override
  protected boolean endEnum() {
    return exhausted;
  }

  /**
   * Accepts exactly the target term; the first non-matching term ends the
   * enumeration (terms are sorted, so nothing after it can match).
   */
  @Override
  protected boolean termCompare(Term term) {
    if (!term.equals(target)) {
      exhausted = true;
      return false;
    }
    return true;
  }
}
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/search/SingleTermEnum.java
Java
art
1,980
package org.apache.lucene.search;

/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import java.io.IOException;

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.TermDocs;
import org.apache.lucene.util.NumericUtils;
import org.apache.lucene.document.NumericField; // for javadocs

/**
 * A range filter built on top of a cached single term field (in {@link FieldCache}).
 *
 * <p>{@code FieldCacheRangeFilter} builds a single cache for the field the first time it is used.
 * Each subsequent {@code FieldCacheRangeFilter} on the same field then reuses this cache,
 * even if the range itself changes.
 *
 * <p>This means that {@code FieldCacheRangeFilter} is much faster (sometimes more than 100x as fast)
 * as building a {@link TermRangeFilter}, if using a {@link #newStringRange}.
 * However, if the range never changes it is slower (around 2x as slow) than building
 * a CachingWrapperFilter on top of a single {@link TermRangeFilter}.
 *
 * For numeric data types, this filter may be significantly faster than {@link NumericRangeFilter}.
 * Furthermore, it does not need the numeric values encoded by {@link NumericField}. But
 * it has the problem that it only works with exactly one value/document (see below).
 *
 * <p>As with all {@link FieldCache} based functionality, {@code FieldCacheRangeFilter} is only valid for
 * fields which contain exactly one term per document (except for {@link #newStringRange}
 * where 0 terms are also allowed). Due to a restriction of {@link FieldCache}, for numeric ranges
 * all terms that do not have a numeric value, 0 is assumed.
 *
 * <p>Thus it works on dates, prices and other single value fields but will not work on
 * regular text fields. It is preferable to use a <code>NOT_ANALYZED</code> field to ensure that
 * there is only a single term.
 *
 * <p>This class does not have a public constructor, use one of the static factory methods available,
 * that create a correct instance for different data types supported by {@link FieldCache}.
 */
public abstract class FieldCacheRangeFilter<T> extends Filter {
  final String field;
  final FieldCache.Parser parser;
  final T lowerVal;
  final T upperVal;
  final boolean includeLower;
  final boolean includeUpper;

  private FieldCacheRangeFilter(String field, FieldCache.Parser parser, T lowerVal, T upperVal, boolean includeLower, boolean includeUpper) {
    this.field = field;
    this.parser = parser;
    this.lowerVal = lowerVal;
    this.upperVal = upperVal;
    this.includeLower = includeLower;
    this.includeUpper = includeUpper;
  }

  /** This method is implemented for each data type */
  @Override
  public abstract DocIdSet getDocIdSet(IndexReader reader) throws IOException;

  /**
   * Creates a string range filter using {@link FieldCache#getStringIndex}. This works with all
   * fields containing zero or one term in the field. The range can be half-open by setting one
   * of the values to <code>null</code>.
   */
  public static FieldCacheRangeFilter<String> newStringRange(String field, String lowerVal, String upperVal, boolean includeLower, boolean includeUpper) {
    return new FieldCacheRangeFilter<String>(field, null, lowerVal, upperVal, includeLower, includeUpper) {
      @Override
      public DocIdSet getDocIdSet(IndexReader reader) throws IOException {
        final FieldCache.StringIndex fcsi = FieldCache.DEFAULT.getStringIndex(reader, field);
        final int lowerPoint = fcsi.binarySearchLookup(lowerVal);
        final int upperPoint = fcsi.binarySearchLookup(upperVal);

        final int inclusiveLowerPoint, inclusiveUpperPoint;

        // Hints:
        // * binarySearchLookup returns 0, if value was null.
        // * the value is <0 if no exact hit was found, the returned value
        //   is (-(insertion point) - 1)
        if (lowerPoint == 0) {
          assert lowerVal == null;
          inclusiveLowerPoint = 1;
        } else if (includeLower && lowerPoint > 0) {
          inclusiveLowerPoint = lowerPoint;
        } else if (lowerPoint > 0) {
          inclusiveLowerPoint = lowerPoint + 1;
        } else {
          // no exact match: the insertion point is already the first term >= lowerVal
          inclusiveLowerPoint = Math.max(1, -lowerPoint - 1);
        }

        if (upperPoint == 0) {
          assert upperVal == null;
          inclusiveUpperPoint = Integer.MAX_VALUE;
        } else if (includeUpper && upperPoint > 0) {
          inclusiveUpperPoint = upperPoint;
        } else if (upperPoint > 0) {
          inclusiveUpperPoint = upperPoint - 1;
        } else {
          // no exact match: last term strictly below upperVal
          inclusiveUpperPoint = -upperPoint - 2;
        }

        if (inclusiveUpperPoint <= 0 || inclusiveLowerPoint > inclusiveUpperPoint)
          return DocIdSet.EMPTY_DOCIDSET;

        assert inclusiveLowerPoint > 0 && inclusiveUpperPoint > 0;

        // for this DocIdSet, we never need to use TermDocs,
        // because deleted docs have an order of 0 (null entry in StringIndex)
        return new FieldCacheDocIdSet(reader, false) {
          @Override
          final boolean matchDoc(int doc) {
            return fcsi.order[doc] >= inclusiveLowerPoint && fcsi.order[doc] <= inclusiveUpperPoint;
          }
        };
      }
    };
  }

  /**
   * Creates a numeric range filter using {@link FieldCache#getBytes(IndexReader,String)}. This works with all
   * byte fields containing exactly one numeric term in the field. The range can be half-open by setting one
   * of the values to <code>null</code>.
   */
  public static FieldCacheRangeFilter<Byte> newByteRange(String field, Byte lowerVal, Byte upperVal, boolean includeLower, boolean includeUpper) {
    return newByteRange(field, null, lowerVal, upperVal, includeLower, includeUpper);
  }

  /**
   * Creates a numeric range filter using {@link FieldCache#getBytes(IndexReader,String,FieldCache.ByteParser)}. This works with all
   * byte fields containing exactly one numeric term in the field. The range can be half-open by setting one
   * of the values to <code>null</code>.
   */
  public static FieldCacheRangeFilter<Byte> newByteRange(String field, FieldCache.ByteParser parser, Byte lowerVal, Byte upperVal, boolean includeLower, boolean includeUpper) {
    return new FieldCacheRangeFilter<Byte>(field, parser, lowerVal, upperVal, includeLower, includeUpper) {
      @Override
      public DocIdSet getDocIdSet(IndexReader reader) throws IOException {
        final byte inclusiveLowerPoint, inclusiveUpperPoint;
        if (lowerVal != null) {
          final byte i = lowerVal.byteValue();
          // exclusive lower bound of MAX_VALUE can never match anything
          if (!includeLower && i == Byte.MAX_VALUE)
            return DocIdSet.EMPTY_DOCIDSET;
          inclusiveLowerPoint = (byte) (includeLower ?  i : (i + 1));
        } else {
          inclusiveLowerPoint = Byte.MIN_VALUE;
        }
        if (upperVal != null) {
          final byte i = upperVal.byteValue();
          if (!includeUpper && i == Byte.MIN_VALUE)
            return DocIdSet.EMPTY_DOCIDSET;
          inclusiveUpperPoint = (byte) (includeUpper ? i : (i - 1));
        } else {
          inclusiveUpperPoint = Byte.MAX_VALUE;
        }

        if (inclusiveLowerPoint > inclusiveUpperPoint)
          return DocIdSet.EMPTY_DOCIDSET;

        final byte[] values = FieldCache.DEFAULT.getBytes(reader, field, (FieldCache.ByteParser) parser);
        // we only request the usage of termDocs, if the range contains 0
        return new FieldCacheDocIdSet(reader, (inclusiveLowerPoint <= 0 && inclusiveUpperPoint >= 0)) {
          @Override
          boolean matchDoc(int doc) {
            return values[doc] >= inclusiveLowerPoint && values[doc] <= inclusiveUpperPoint;
          }
        };
      }
    };
  }

  /**
   * Creates a numeric range filter using {@link FieldCache#getShorts(IndexReader,String)}. This works with all
   * short fields containing exactly one numeric term in the field. The range can be half-open by setting one
   * of the values to <code>null</code>.
   */
  public static FieldCacheRangeFilter<Short> newShortRange(String field, Short lowerVal, Short upperVal, boolean includeLower, boolean includeUpper) {
    return newShortRange(field, null, lowerVal, upperVal, includeLower, includeUpper);
  }

  /**
   * Creates a numeric range filter using {@link FieldCache#getShorts(IndexReader,String,FieldCache.ShortParser)}. This works with all
   * short fields containing exactly one numeric term in the field. The range can be half-open by setting one
   * of the values to <code>null</code>.
   */
  public static FieldCacheRangeFilter<Short> newShortRange(String field, FieldCache.ShortParser parser, Short lowerVal, Short upperVal, boolean includeLower, boolean includeUpper) {
    return new FieldCacheRangeFilter<Short>(field, parser, lowerVal, upperVal, includeLower, includeUpper) {
      @Override
      public DocIdSet getDocIdSet(IndexReader reader) throws IOException {
        final short inclusiveLowerPoint, inclusiveUpperPoint;
        if (lowerVal != null) {
          short i = lowerVal.shortValue();
          if (!includeLower && i == Short.MAX_VALUE)
            return DocIdSet.EMPTY_DOCIDSET;
          inclusiveLowerPoint = (short) (includeLower ? i : (i + 1));
        } else {
          inclusiveLowerPoint = Short.MIN_VALUE;
        }
        if (upperVal != null) {
          short i = upperVal.shortValue();
          if (!includeUpper && i == Short.MIN_VALUE)
            return DocIdSet.EMPTY_DOCIDSET;
          inclusiveUpperPoint = (short) (includeUpper ? i : (i - 1));
        } else {
          inclusiveUpperPoint = Short.MAX_VALUE;
        }

        if (inclusiveLowerPoint > inclusiveUpperPoint)
          return DocIdSet.EMPTY_DOCIDSET;

        final short[] values = FieldCache.DEFAULT.getShorts(reader, field, (FieldCache.ShortParser) parser);
        // we only request the usage of termDocs, if the range contains 0
        return new FieldCacheDocIdSet(reader, (inclusiveLowerPoint <= 0 && inclusiveUpperPoint >= 0)) {
          @Override
          boolean matchDoc(int doc) {
            return values[doc] >= inclusiveLowerPoint && values[doc] <= inclusiveUpperPoint;
          }
        };
      }
    };
  }

  /**
   * Creates a numeric range filter using {@link FieldCache#getInts(IndexReader,String)}. This works with all
   * int fields containing exactly one numeric term in the field. The range can be half-open by setting one
   * of the values to <code>null</code>.
   */
  public static FieldCacheRangeFilter<Integer> newIntRange(String field, Integer lowerVal, Integer upperVal, boolean includeLower, boolean includeUpper) {
    return newIntRange(field, null, lowerVal, upperVal, includeLower, includeUpper);
  }

  /**
   * Creates a numeric range filter using {@link FieldCache#getInts(IndexReader,String,FieldCache.IntParser)}. This works with all
   * int fields containing exactly one numeric term in the field. The range can be half-open by setting one
   * of the values to <code>null</code>.
   */
  public static FieldCacheRangeFilter<Integer> newIntRange(String field, FieldCache.IntParser parser, Integer lowerVal, Integer upperVal, boolean includeLower, boolean includeUpper) {
    return new FieldCacheRangeFilter<Integer>(field, parser, lowerVal, upperVal, includeLower, includeUpper) {
      @Override
      public DocIdSet getDocIdSet(IndexReader reader) throws IOException {
        final int inclusiveLowerPoint, inclusiveUpperPoint;
        if (lowerVal != null) {
          int i = lowerVal.intValue();
          if (!includeLower && i == Integer.MAX_VALUE)
            return DocIdSet.EMPTY_DOCIDSET;
          inclusiveLowerPoint = includeLower ? i : (i + 1);
        } else {
          inclusiveLowerPoint = Integer.MIN_VALUE;
        }
        if (upperVal != null) {
          int i = upperVal.intValue();
          if (!includeUpper && i == Integer.MIN_VALUE)
            return DocIdSet.EMPTY_DOCIDSET;
          inclusiveUpperPoint = includeUpper ? i : (i - 1);
        } else {
          inclusiveUpperPoint = Integer.MAX_VALUE;
        }

        if (inclusiveLowerPoint > inclusiveUpperPoint)
          return DocIdSet.EMPTY_DOCIDSET;

        final int[] values = FieldCache.DEFAULT.getInts(reader, field, (FieldCache.IntParser) parser);
        // we only request the usage of termDocs, if the range contains 0
        return new FieldCacheDocIdSet(reader, (inclusiveLowerPoint <= 0 && inclusiveUpperPoint >= 0)) {
          @Override
          boolean matchDoc(int doc) {
            return values[doc] >= inclusiveLowerPoint && values[doc] <= inclusiveUpperPoint;
          }
        };
      }
    };
  }

  /**
   * Creates a numeric range filter using {@link FieldCache#getLongs(IndexReader,String)}. This works with all
   * long fields containing exactly one numeric term in the field. The range can be half-open by setting one
   * of the values to <code>null</code>.
   */
  public static FieldCacheRangeFilter<Long> newLongRange(String field, Long lowerVal, Long upperVal, boolean includeLower, boolean includeUpper) {
    return newLongRange(field, null, lowerVal, upperVal, includeLower, includeUpper);
  }

  /**
   * Creates a numeric range filter using {@link FieldCache#getLongs(IndexReader,String,FieldCache.LongParser)}. This works with all
   * long fields containing exactly one numeric term in the field. The range can be half-open by setting one
   * of the values to <code>null</code>.
   */
  public static FieldCacheRangeFilter<Long> newLongRange(String field, FieldCache.LongParser parser, Long lowerVal, Long upperVal, boolean includeLower, boolean includeUpper) {
    return new FieldCacheRangeFilter<Long>(field, parser, lowerVal, upperVal, includeLower, includeUpper) {
      @Override
      public DocIdSet getDocIdSet(IndexReader reader) throws IOException {
        final long inclusiveLowerPoint, inclusiveUpperPoint;
        if (lowerVal != null) {
          long i = lowerVal.longValue();
          if (!includeLower && i == Long.MAX_VALUE)
            return DocIdSet.EMPTY_DOCIDSET;
          inclusiveLowerPoint = includeLower ? i : (i + 1L);
        } else {
          inclusiveLowerPoint = Long.MIN_VALUE;
        }
        if (upperVal != null) {
          long i = upperVal.longValue();
          if (!includeUpper && i == Long.MIN_VALUE)
            return DocIdSet.EMPTY_DOCIDSET;
          inclusiveUpperPoint = includeUpper ? i : (i - 1L);
        } else {
          inclusiveUpperPoint = Long.MAX_VALUE;
        }

        if (inclusiveLowerPoint > inclusiveUpperPoint)
          return DocIdSet.EMPTY_DOCIDSET;

        final long[] values = FieldCache.DEFAULT.getLongs(reader, field, (FieldCache.LongParser) parser);
        // we only request the usage of termDocs, if the range contains 0
        return new FieldCacheDocIdSet(reader, (inclusiveLowerPoint <= 0L && inclusiveUpperPoint >= 0L)) {
          @Override
          boolean matchDoc(int doc) {
            return values[doc] >= inclusiveLowerPoint && values[doc] <= inclusiveUpperPoint;
          }
        };
      }
    };
  }

  /**
   * Creates a numeric range filter using {@link FieldCache#getFloats(IndexReader,String)}. This works with all
   * float fields containing exactly one numeric term in the field. The range can be half-open by setting one
   * of the values to <code>null</code>.
   */
  public static FieldCacheRangeFilter<Float> newFloatRange(String field, Float lowerVal, Float upperVal, boolean includeLower, boolean includeUpper) {
    return newFloatRange(field, null, lowerVal, upperVal, includeLower, includeUpper);
  }

  /**
   * Creates a numeric range filter using {@link FieldCache#getFloats(IndexReader,String,FieldCache.FloatParser)}. This works with all
   * float fields containing exactly one numeric term in the field. The range can be half-open by setting one
   * of the values to <code>null</code>.
   */
  public static FieldCacheRangeFilter<Float> newFloatRange(String field, FieldCache.FloatParser parser, Float lowerVal, Float upperVal, boolean includeLower, boolean includeUpper) {
    return new FieldCacheRangeFilter<Float>(field, parser, lowerVal, upperVal, includeLower, includeUpper) {
      @Override
      public DocIdSet getDocIdSet(IndexReader reader) throws IOException {
        // we transform the floating point numbers to sortable integers
        // using NumericUtils to easier find the next bigger/lower value
        final float inclusiveLowerPoint, inclusiveUpperPoint;
        if (lowerVal != null) {
          float f = lowerVal.floatValue();
          // BUGFIX: the lower-bound check must consult includeLower (was
          // copy-pasted as includeUpper): an exclusive lower bound of
          // +Infinity can never match, and i + 1 would overflow the
          // sortable-int encoding.
          if (!includeLower && f > 0.0f && Float.isInfinite(f))
            return DocIdSet.EMPTY_DOCIDSET;
          int i = NumericUtils.floatToSortableInt(f);
          inclusiveLowerPoint = NumericUtils.sortableIntToFloat( includeLower ?  i : (i + 1) );
        } else {
          inclusiveLowerPoint = Float.NEGATIVE_INFINITY;
        }
        if (upperVal != null) {
          float f = upperVal.floatValue();
          if (!includeUpper && f < 0.0f && Float.isInfinite(f))
            return DocIdSet.EMPTY_DOCIDSET;
          int i = NumericUtils.floatToSortableInt(f);
          inclusiveUpperPoint = NumericUtils.sortableIntToFloat( includeUpper ? i : (i - 1) );
        } else {
          inclusiveUpperPoint = Float.POSITIVE_INFINITY;
        }

        if (inclusiveLowerPoint > inclusiveUpperPoint)
          return DocIdSet.EMPTY_DOCIDSET;

        final float[] values = FieldCache.DEFAULT.getFloats(reader, field, (FieldCache.FloatParser) parser);
        // we only request the usage of termDocs, if the range contains 0
        return new FieldCacheDocIdSet(reader, (inclusiveLowerPoint <= 0.0f && inclusiveUpperPoint >= 0.0f)) {
          @Override
          boolean matchDoc(int doc) {
            return values[doc] >= inclusiveLowerPoint && values[doc] <= inclusiveUpperPoint;
          }
        };
      }
    };
  }

  /**
   * Creates a numeric range filter using {@link FieldCache#getDoubles(IndexReader,String)}. This works with all
   * double fields containing exactly one numeric term in the field. The range can be half-open by setting one
   * of the values to <code>null</code>.
   */
  public static FieldCacheRangeFilter<Double> newDoubleRange(String field, Double lowerVal, Double upperVal, boolean includeLower, boolean includeUpper) {
    return newDoubleRange(field, null, lowerVal, upperVal, includeLower, includeUpper);
  }

  /**
   * Creates a numeric range filter using {@link FieldCache#getDoubles(IndexReader,String,FieldCache.DoubleParser)}. This works with all
   * double fields containing exactly one numeric term in the field. The range can be half-open by setting one
   * of the values to <code>null</code>.
   */
  public static FieldCacheRangeFilter<Double> newDoubleRange(String field, FieldCache.DoubleParser parser, Double lowerVal, Double upperVal, boolean includeLower, boolean includeUpper) {
    return new FieldCacheRangeFilter<Double>(field, parser, lowerVal, upperVal, includeLower, includeUpper) {
      @Override
      public DocIdSet getDocIdSet(IndexReader reader) throws IOException {
        // we transform the floating point numbers to sortable integers
        // using NumericUtils to easier find the next bigger/lower value
        final double inclusiveLowerPoint, inclusiveUpperPoint;
        if (lowerVal != null) {
          double f = lowerVal.doubleValue();
          // BUGFIX: see newFloatRange — must test includeLower here, not includeUpper.
          if (!includeLower && f > 0.0 && Double.isInfinite(f))
            return DocIdSet.EMPTY_DOCIDSET;
          long i = NumericUtils.doubleToSortableLong(f);
          inclusiveLowerPoint = NumericUtils.sortableLongToDouble( includeLower ? i : (i + 1L) );
        } else {
          inclusiveLowerPoint = Double.NEGATIVE_INFINITY;
        }
        if (upperVal != null) {
          double f = upperVal.doubleValue();
          if (!includeUpper && f < 0.0 && Double.isInfinite(f))
            return DocIdSet.EMPTY_DOCIDSET;
          long i = NumericUtils.doubleToSortableLong(f);
          inclusiveUpperPoint = NumericUtils.sortableLongToDouble( includeUpper ? i : (i - 1L) );
        } else {
          inclusiveUpperPoint = Double.POSITIVE_INFINITY;
        }

        if (inclusiveLowerPoint > inclusiveUpperPoint)
          return DocIdSet.EMPTY_DOCIDSET;

        final double[] values = FieldCache.DEFAULT.getDoubles(reader, field, (FieldCache.DoubleParser) parser);
        // we only request the usage of termDocs, if the range contains 0
        return new FieldCacheDocIdSet(reader, (inclusiveLowerPoint <= 0.0 && inclusiveUpperPoint >= 0.0)) {
          @Override
          boolean matchDoc(int doc) {
            return values[doc] >= inclusiveLowerPoint && values[doc] <= inclusiveUpperPoint;
          }
        };
      }
    };
  }

  @Override
  public final String toString() {
    final StringBuilder sb = new StringBuilder(field).append(":");
    return sb.append(includeLower ? '[' : '{')
      .append((lowerVal == null) ? "*" : lowerVal.toString())
      .append(" TO ")
      .append((upperVal == null) ? "*" : upperVal.toString())
      .append(includeUpper ? ']' : '}')
      .toString();
  }

  @Override
  public final boolean equals(Object o) {
    if (this == o) return true;
    if (!(o instanceof FieldCacheRangeFilter)) return false;
    FieldCacheRangeFilter other = (FieldCacheRangeFilter) o;

    if (!this.field.equals(other.field)
        || this.includeLower != other.includeLower
        || this.includeUpper != other.includeUpper
    ) { return false; }
    if (this.lowerVal != null ? !this.lowerVal.equals(other.lowerVal) : other.lowerVal != null) return false;
    if (this.upperVal != null ? !this.upperVal.equals(other.upperVal) : other.upperVal != null) return false;
    if (this.parser != null ? !this.parser.equals(other.parser) : other.parser != null) return false;
    return true;
  }

  @Override
  public final int hashCode() {
    int h = field.hashCode();
    h ^= (lowerVal != null) ? lowerVal.hashCode() : 550356204;
    h = (h << 1) | (h >>> 31);  // rotate to distinguish lower from upper
    h ^= (upperVal != null) ? upperVal.hashCode() : -1674416163;
    h ^= (parser != null) ? parser.hashCode() : -1572457324;
    h ^= (includeLower ? 1549299360 : -365038026) ^ (includeUpper ? 1721088258 : 1948649653);
    return h;
  }

  /** Returns the field name for this filter */
  public String getField() { return field; }

  /** Returns <code>true</code> if the lower endpoint is inclusive */
  public boolean includesLower() { return includeLower; }

  /** Returns <code>true</code> if the upper endpoint is inclusive */
  public boolean includesUpper() { return includeUpper; }

  /** Returns the lower value of this range filter */
  public T getLowerVal() { return lowerVal; }

  /** Returns the upper value of this range filter */
  public T getUpperVal() { return upperVal; }

  /** Returns the current numeric parser ({@code null} for {@code T} is {@code String}} */
  public FieldCache.Parser getParser() { return parser; }

  /**
   * DocIdSet over a FieldCache array. Subclasses implement {@link #matchDoc}
   * against the cached per-document values.
   */
  static abstract class FieldCacheDocIdSet extends DocIdSet {
    private final IndexReader reader;
    private final boolean mayUseTermDocs;

    FieldCacheDocIdSet(IndexReader reader, boolean mayUseTermDocs) {
      this.reader = reader;
      this.mayUseTermDocs = mayUseTermDocs;
    }

    /** this method checks, if a doc is a hit, should throw AIOBE, when position invalid */
    abstract boolean matchDoc(int doc) throws ArrayIndexOutOfBoundsException;

    /** this DocIdSet is cacheable, if it works solely with FieldCache and no TermDocs */
    @Override
    public boolean isCacheable() {
      return !(mayUseTermDocs && reader.hasDeletions());
    }

    @Override
    public DocIdSetIterator iterator() throws IOException {
      // Synchronization needed because deleted docs BitVector
      // can change after call to hasDeletions until TermDocs creation.
      // We only use an iterator with termDocs, when this was requested (e.g. range contains 0)
      // and the index has deletions
      final TermDocs termDocs;
      synchronized(reader) {
        termDocs = isCacheable() ? null : reader.termDocs(null);
      }
      if (termDocs != null) {
        // a DocIdSetIterator using TermDocs to iterate valid docIds
        return new DocIdSetIterator() {
          private int doc = -1;

          @Override
          public int docID() {
            return doc;
          }

          @Override
          public int nextDoc() throws IOException {
            do {
              if (!termDocs.next())
                return doc = NO_MORE_DOCS;
            } while (!matchDoc(doc = termDocs.doc()));
            return doc;
          }

          @Override
          public int advance(int target) throws IOException {
            if (!termDocs.skipTo(target))
              return doc = NO_MORE_DOCS;
            while (!matchDoc(doc = termDocs.doc())) {
              if (!termDocs.next())
                return doc = NO_MORE_DOCS;
            }
            return doc;
          }
        };
      } else {
        // a DocIdSetIterator generating docIds by incrementing a variable -
        // this one can be used if there are no deletions are on the index
        return new DocIdSetIterator() {
          private int doc = -1;

          @Override
          public int docID() {
            return doc;
          }

          @Override
          public int nextDoc() {
            try {
              do {
                doc++;
              } while (!matchDoc(doc));
              return doc;
            } catch (ArrayIndexOutOfBoundsException e) {
              // running off the end of the cache array marks exhaustion
              return doc = NO_MORE_DOCS;
            }
          }

          @Override
          public int advance(int target) {
            try {
              doc = target;
              while (!matchDoc(doc)) {
                doc++;
              }
              return doc;
            } catch (ArrayIndexOutOfBoundsException e) {
              return doc = NO_MORE_DOCS;
            }
          }
        };
      }
    }
  }
}
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/search/FieldCacheRangeFilter.java
Java
art
26,899
package org.apache.lucene.search;

/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import java.io.IOException;

import org.apache.lucene.index.IndexReader;

/**
 * A {@link Collector} implementation that collects the top-scoring hits,
 * returning them as a {@link TopDocs}. This is used by {@link IndexSearcher} to
 * implement {@link TopDocs}-based search. Hits are sorted by score descending
 * and then (when the scores are tied) docID ascending. When you create an
 * instance of this collector you should know in advance whether documents are
 * going to be collected in doc Id order or not.
 *
 * <p><b>NOTE</b>: The values {@link Float#NaN} and
 * {@link Float#NEGATIVE_INFINITY} are not valid scores. This
 * collector will not properly collect hits with such
 * scores.
 */
public abstract class TopScoreDocCollector extends TopDocsCollector<ScoreDoc> {

  // Assumes docs are scored in order.
  private static class InOrderTopScoreDocCollector extends TopScoreDocCollector {
    private InOrderTopScoreDocCollector(int numHits) {
      super(numHits);
    }

    @Override
    public void collect(int doc) throws IOException {
      float score = scorer.score();

      // This collector cannot handle these scores:
      // NOTE(review): NEGATIVE_INFINITY presumably collides with the HitQueue
      // sentinel score (see create()'s javadoc about sentinel objects) — confirm.
      assert score != Float.NEGATIVE_INFINITY;
      assert !Float.isNaN(score);

      totalHits++;
      if (score <= pqTop.score) {
        // Since docs are returned in-order (i.e., increasing doc Id), a document
        // with equal score to pqTop.score cannot compete since HitQueue favors
        // documents with lower doc Ids. Therefore reject those docs too.
        return;
      }
      // pqTop is the queue's least-competitive entry; overwrite it in place,
      // then re-heapify and cache the new top.
      pqTop.doc = doc + docBase;
      pqTop.score = score;
      pqTop = pq.updateTop();
    }

    @Override
    public boolean acceptsDocsOutOfOrder() {
      return false;
    }
  }

  // Assumes docs are scored out of order.
  private static class OutOfOrderTopScoreDocCollector extends TopScoreDocCollector {
    private OutOfOrderTopScoreDocCollector(int numHits) {
      super(numHits);
    }

    @Override
    public void collect(int doc) throws IOException {
      float score = scorer.score();

      // This collector cannot handle NaN
      assert !Float.isNaN(score);

      totalHits++;
      // Rebase to the global docID space before comparing against pqTop.doc.
      doc += docBase;
      // Docs may arrive in any order, so the score tie must be broken here
      // explicitly: on equal score, the lower docID wins.
      if (score < pqTop.score || (score == pqTop.score && doc > pqTop.doc)) {
        return;
      }
      pqTop.doc = doc;
      pqTop.score = score;
      pqTop = pq.updateTop();
    }

    @Override
    public boolean acceptsDocsOutOfOrder() {
      return true;
    }
  }

  /**
   * Creates a new {@link TopScoreDocCollector} given the number of hits to
   * collect and whether documents are scored in order by the input
   * {@link Scorer} to {@link #setScorer(Scorer)}.
   *
   * <p><b>NOTE</b>: The instances returned by this method
   * pre-allocate a full array of length
   * <code>numHits</code>, and fill the array with sentinel
   * objects.
   */
  public static TopScoreDocCollector create(int numHits, boolean docsScoredInOrder) {
    if (docsScoredInOrder) {
      return new InOrderTopScoreDocCollector(numHits);
    } else {
      return new OutOfOrderTopScoreDocCollector(numHits);
    }
  }

  // Cached reference to the queue's current top, i.e. the least competitive
  // entry collected so far (valid from construction on, thanks to sentinels).
  ScoreDoc pqTop;
  // docID base of the current reader; set by setNextReader.
  int docBase = 0;
  Scorer scorer;

  // prevents instantiation
  private TopScoreDocCollector(int numHits) {
    super(new HitQueue(numHits, true));
    // HitQueue implements getSentinelObject to return a ScoreDoc, so we know
    // that at this point top() is already initialized.
    pqTop = pq.top();
  }

  @Override
  protected TopDocs newTopDocs(ScoreDoc[] results, int start) {
    if (results == null) {
      return EMPTY_TOPDOCS;
    }

    // We need to compute maxScore in order to set it in TopDocs. If start == 0,
    // it means the largest element is already in results, use its score as
    // maxScore. Otherwise pop everything else, until the largest element is
    // extracted and use its score as maxScore.
    float maxScore = Float.NaN;
    if (start == 0) {
      maxScore = results[0].score;
    } else {
      for (int i = pq.size(); i > 1; i--) {
        pq.pop();
      }
      maxScore = pq.pop().score;
    }

    return new TopDocs(totalHits, results, maxScore);
  }

  @Override
  public void setNextReader(IndexReader reader, int base) {
    docBase = base;
  }

  @Override
  public void setScorer(Scorer scorer) throws IOException {
    this.scorer = scorer;
  }
}
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/search/TopScoreDocCollector.java
Java
art
5,166
package org.apache.lucene.search; /** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import org.apache.lucene.analysis.NumericTokenStream; // for javadocs import org.apache.lucene.document.NumericField; // for javadocs import org.apache.lucene.util.NumericUtils; // for javadocs /** * A {@link Filter} that only accepts numeric values within * a specified range. To use this, you must first index the * numeric values using {@link NumericField} (expert: {@link * NumericTokenStream}). * * <p>You create a new NumericRangeFilter with the static * factory methods, eg: * * <pre> * Filter f = NumericRangeFilter.newFloatRange("weight", 0.3f, 0.10f, true, true); * </pre> * * accepts all documents whose float valued "weight" field * ranges from 0.3 to 0.10, inclusive. * See {@link NumericRangeQuery} for details on how Lucene * indexes and searches numeric valued fields. 
* * <p><font color="red"><b>NOTE:</b> This API is experimental and * might change in incompatible ways in the next * release.</font> * * @since 2.9 **/ public final class NumericRangeFilter<T extends Number> extends MultiTermQueryWrapperFilter<NumericRangeQuery<T>> { private NumericRangeFilter(final NumericRangeQuery<T> query) { super(query); } /** * Factory that creates a <code>NumericRangeFilter</code>, that filters a <code>long</code> * range using the given <a href="NumericRangeQuery.html#precisionStepDesc"><code>precisionStep</code></a>. * You can have half-open ranges (which are in fact &lt;/&le; or &gt;/&ge; queries) * by setting the min or max value to <code>null</code>. By setting inclusive to false, it will * match all documents excluding the bounds, with inclusive on, the boundaries are hits, too. */ public static NumericRangeFilter<Long> newLongRange(final String field, final int precisionStep, Long min, Long max, final boolean minInclusive, final boolean maxInclusive ) { return new NumericRangeFilter<Long>( NumericRangeQuery.newLongRange(field, precisionStep, min, max, minInclusive, maxInclusive) ); } /** * Factory that creates a <code>NumericRangeFilter</code>, that queries a <code>long</code> * range using the default <code>precisionStep</code> {@link NumericUtils#PRECISION_STEP_DEFAULT} (4). * You can have half-open ranges (which are in fact &lt;/&le; or &gt;/&ge; queries) * by setting the min or max value to <code>null</code>. By setting inclusive to false, it will * match all documents excluding the bounds, with inclusive on, the boundaries are hits, too. 
*/ public static NumericRangeFilter<Long> newLongRange(final String field, Long min, Long max, final boolean minInclusive, final boolean maxInclusive ) { return new NumericRangeFilter<Long>( NumericRangeQuery.newLongRange(field, min, max, minInclusive, maxInclusive) ); } /** * Factory that creates a <code>NumericRangeFilter</code>, that filters a <code>int</code> * range using the given <a href="NumericRangeQuery.html#precisionStepDesc"><code>precisionStep</code></a>. * You can have half-open ranges (which are in fact &lt;/&le; or &gt;/&ge; queries) * by setting the min or max value to <code>null</code>. By setting inclusive to false, it will * match all documents excluding the bounds, with inclusive on, the boundaries are hits, too. */ public static NumericRangeFilter<Integer> newIntRange(final String field, final int precisionStep, Integer min, Integer max, final boolean minInclusive, final boolean maxInclusive ) { return new NumericRangeFilter<Integer>( NumericRangeQuery.newIntRange(field, precisionStep, min, max, minInclusive, maxInclusive) ); } /** * Factory that creates a <code>NumericRangeFilter</code>, that queries a <code>int</code> * range using the default <code>precisionStep</code> {@link NumericUtils#PRECISION_STEP_DEFAULT} (4). * You can have half-open ranges (which are in fact &lt;/&le; or &gt;/&ge; queries) * by setting the min or max value to <code>null</code>. By setting inclusive to false, it will * match all documents excluding the bounds, with inclusive on, the boundaries are hits, too. 
*/ public static NumericRangeFilter<Integer> newIntRange(final String field, Integer min, Integer max, final boolean minInclusive, final boolean maxInclusive ) { return new NumericRangeFilter<Integer>( NumericRangeQuery.newIntRange(field, min, max, minInclusive, maxInclusive) ); } /** * Factory that creates a <code>NumericRangeFilter</code>, that filters a <code>double</code> * range using the given <a href="NumericRangeQuery.html#precisionStepDesc"><code>precisionStep</code></a>. * You can have half-open ranges (which are in fact &lt;/&le; or &gt;/&ge; queries) * by setting the min or max value to <code>null</code>. By setting inclusive to false, it will * match all documents excluding the bounds, with inclusive on, the boundaries are hits, too. */ public static NumericRangeFilter<Double> newDoubleRange(final String field, final int precisionStep, Double min, Double max, final boolean minInclusive, final boolean maxInclusive ) { return new NumericRangeFilter<Double>( NumericRangeQuery.newDoubleRange(field, precisionStep, min, max, minInclusive, maxInclusive) ); } /** * Factory that creates a <code>NumericRangeFilter</code>, that queries a <code>double</code> * range using the default <code>precisionStep</code> {@link NumericUtils#PRECISION_STEP_DEFAULT} (4). * You can have half-open ranges (which are in fact &lt;/&le; or &gt;/&ge; queries) * by setting the min or max value to <code>null</code>. By setting inclusive to false, it will * match all documents excluding the bounds, with inclusive on, the boundaries are hits, too. 
*/ public static NumericRangeFilter<Double> newDoubleRange(final String field, Double min, Double max, final boolean minInclusive, final boolean maxInclusive ) { return new NumericRangeFilter<Double>( NumericRangeQuery.newDoubleRange(field, min, max, minInclusive, maxInclusive) ); } /** * Factory that creates a <code>NumericRangeFilter</code>, that filters a <code>float</code> * range using the given <a href="NumericRangeQuery.html#precisionStepDesc"><code>precisionStep</code></a>. * You can have half-open ranges (which are in fact &lt;/&le; or &gt;/&ge; queries) * by setting the min or max value to <code>null</code>. By setting inclusive to false, it will * match all documents excluding the bounds, with inclusive on, the boundaries are hits, too. */ public static NumericRangeFilter<Float> newFloatRange(final String field, final int precisionStep, Float min, Float max, final boolean minInclusive, final boolean maxInclusive ) { return new NumericRangeFilter<Float>( NumericRangeQuery.newFloatRange(field, precisionStep, min, max, minInclusive, maxInclusive) ); } /** * Factory that creates a <code>NumericRangeFilter</code>, that queries a <code>float</code> * range using the default <code>precisionStep</code> {@link NumericUtils#PRECISION_STEP_DEFAULT} (4). * You can have half-open ranges (which are in fact &lt;/&le; or &gt;/&ge; queries) * by setting the min or max value to <code>null</code>. By setting inclusive to false, it will * match all documents excluding the bounds, with inclusive on, the boundaries are hits, too. 
*/ public static NumericRangeFilter<Float> newFloatRange(final String field, Float min, Float max, final boolean minInclusive, final boolean maxInclusive ) { return new NumericRangeFilter<Float>( NumericRangeQuery.newFloatRange(field, min, max, minInclusive, maxInclusive) ); } /** Returns the field name for this filter */ public String getField() { return query.getField(); } /** Returns <code>true</code> if the lower endpoint is inclusive */ public boolean includesMin() { return query.includesMin(); } /** Returns <code>true</code> if the upper endpoint is inclusive */ public boolean includesMax() { return query.includesMax(); } /** Returns the lower value of this range filter */ public T getMin() { return query.getMin(); } /** Returns the upper value of this range filter */ public T getMax() { return query.getMax(); } }
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/search/NumericRangeFilter.java
Java
art
9,124
package org.apache.lucene.search; /** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * Expert: A ScoreDoc which also contains information about * how to sort the referenced document. In addition to the * document number and score, this object contains an array * of values for the document from the field(s) used to sort. * For example, if the sort criteria was to sort by fields * "a", "b" then "c", the <code>fields</code> object array * will have three elements, corresponding respectively to * the term values for the document in fields "a", "b" and "c". * The class of each element in the array will be either * Integer, Float or String depending on the type of values * in the terms of each field. * * <p>Created: Feb 11, 2004 1:23:38 PM * * @since lucene 1.4 * @see ScoreDoc * @see TopFieldDocs */ public class FieldDoc extends ScoreDoc { /** Expert: The values which are used to sort the referenced document. * The order of these will match the original sort criteria given by a * Sort object. Each Object will be either an Integer, Float or String, * depending on the type of values in the terms of the original field. 
* @see Sort * @see Searcher#search(Query,Filter,int,Sort) */ public Comparable[] fields; /** Expert: Creates one of these objects with empty sort information. */ public FieldDoc (int doc, float score) { super (doc, score); } /** Expert: Creates one of these objects with the given sort information. */ public FieldDoc (int doc, float score, Comparable[] fields) { super (doc, score); this.fields = fields; } // A convenience method for debugging. @Override public String toString() { // super.toString returns the doc and score information, so just add the // fields information StringBuilder sb = new StringBuilder(super.toString()); sb.append("["); for (int i = 0; i < fields.length; i++) { sb.append(fields[i]).append(", "); } sb.setLength(sb.length() - 2); // discard last ", " sb.append("]"); return sb.toString(); } }
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/search/FieldDoc.java
Java
art
2,852
package org.apache.lucene.search;

/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import java.io.IOException;

import org.apache.lucene.index.IndexReader;

/**
 * <p>Expert: Collectors are primarily meant to be used to
 * gather raw results from a search, and implement sorting
 * or custom result filtering, collation, etc. </p>
 *
 * <p>Lucene's core collectors are derived from Collector.
 * Likely your application can use one of these classes, or
 * subclass {@link TopDocsCollector}, instead of
 * implementing Collector directly:
 *
 * <ul>
 *
 *   <li>{@link TopDocsCollector} is an abstract base class
 *   that assumes you will retrieve the top N docs,
 *   according to some criteria, after collection is
 *   done.</li>
 *
 *   <li>{@link TopScoreDocCollector} is a concrete subclass
 *   {@link TopDocsCollector} and sorts according to score +
 *   docID. This is used internally by the {@link
 *   IndexSearcher} search methods that do not take an
 *   explicit {@link Sort}. It is likely the most frequently
 *   used collector.</li>
 *
 *   <li>{@link TopFieldCollector} subclasses {@link
 *   TopDocsCollector} and sorts according to a specified
 *   {@link Sort} object (sort by field). This is used
 *   internally by the {@link IndexSearcher} search methods
 *   that take an explicit {@link Sort}.</li>
 *
 *   <li>{@link TimeLimitingCollector}, which wraps any other
 *   Collector and aborts the search if it's taken too much
 *   time.</li>
 *
 *   <li>{@link PositiveScoresOnlyCollector} wraps any other
 *   Collector and prevents collection of hits whose score
 *   is &lt;= 0.0</li>
 *
 * </ul>
 *
 * <p>Collector decouples the score from the collected doc:
 * the score computation is skipped entirely if it's not
 * needed. Collectors that do need the score should
 * implement the {@link #setScorer} method, to hold onto the
 * passed {@link Scorer} instance, and call {@link
 * Scorer#score()} within the collect method to compute the
 * current hit's score. If your collector may request the
 * score for a single hit multiple times, you should use
 * {@link ScoreCachingWrappingScorer}. </p>
 *
 * <p><b>NOTE:</b> The doc that is passed to the collect
 * method is relative to the current reader. If your
 * collector needs to resolve this to the docID space of the
 * Multi*Reader, you must re-base it by recording the
 * docBase from the most recent setNextReader call. Here's
 * a simple example showing how to collect docIDs into a
 * BitSet:</p>
 *
 * <pre>
 * Searcher searcher = new IndexSearcher(indexReader);
 * final BitSet bits = new BitSet(indexReader.maxDoc());
 * searcher.search(query, new Collector() {
 *   private int docBase;
 *
 *   <em>// ignore scorer</em>
 *   public void setScorer(Scorer scorer) {
 *   }
 *
 *   <em>// accept docs out of order (for a BitSet it doesn't matter)</em>
 *   public boolean acceptsDocsOutOfOrder() {
 *     return true;
 *   }
 *
 *   public void collect(int doc) {
 *     bits.set(doc + docBase);
 *   }
 *
 *   public void setNextReader(IndexReader reader, int docBase) {
 *     this.docBase = docBase;
 *   }
 * });
 * </pre>
 *
 * <p>Not all collectors will need to rebase the docID. For
 * example, a collector that simply counts the total number
 * of hits would skip it.</p>
 *
 * <p><b>NOTE:</b> Prior to 2.9, Lucene silently filtered
 * out hits with score &lt;= 0. As of 2.9, the core Collectors
 * no longer do that. It's very unusual to have such hits
 * (a negative query boost, or function query returning
 * negative custom scores, could cause it to happen). If
 * you need that behavior, use {@link
 * PositiveScoresOnlyCollector}.</p>
 *
 * <p><b>NOTE:</b> This API is experimental and might change
 * in incompatible ways in the next release.</p>
 *
 * @since 2.9
 */
public abstract class Collector {

  /**
   * Called before successive calls to {@link #collect(int)}. Implementations
   * that need the score of the current document (passed-in to
   * {@link #collect(int)}), should save the passed-in Scorer and call
   * scorer.score() when needed.
   *
   * @param scorer the scorer producing the documents this collector will see
   */
  public abstract void setScorer(Scorer scorer) throws IOException;

  /**
   * Called once for every document matching a query, with the unbased document
   * number.
   *
   * <p>
   * Note: This is called in an inner search loop. For good search performance,
   * implementations of this method should not call {@link Searcher#doc(int)} or
   * {@link org.apache.lucene.index.IndexReader#document(int)} on every hit.
   * Doing so can slow searches by an order of magnitude or more.
   *
   * @param doc the matching document's id, relative to the current reader
   */
  public abstract void collect(int doc) throws IOException;

  /**
   * Called before collecting from each IndexReader. All doc ids in
   * {@link #collect(int)} will correspond to reader.
   *
   * Add docBase to the current IndexReaders internal document id to re-base ids
   * in {@link #collect(int)}.
   *
   * @param reader
   *          next IndexReader
   * @param docBase
   *          offset to add to ids passed to {@link #collect(int)} in order to
   *          map them into the docID space of the top-level reader
   */
  public abstract void setNextReader(IndexReader reader, int docBase) throws IOException;

  /**
   * Return <code>true</code> if this collector does not
   * require the matching docIDs to be delivered in int sort
   * order (smallest to largest) to {@link #collect}.
   *
   * <p> Most Lucene Query implementations will visit
   * matching docIDs in order. However, some queries
   * (currently limited to certain cases of {@link
   * BooleanQuery}) can achieve faster searching if the
   * <code>Collector</code> allows them to deliver the
   * docIDs out of order.</p>
   *
   * <p> Many collectors don't mind getting docIDs out of
   * order, so it's important to return <code>true</code>
   * here.
   */
  public abstract boolean acceptsDocsOutOfOrder();
}
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/search/Collector.java
Java
art
6,493
package org.apache.lucene.search; /** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import java.io.IOException; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.Term; /** * Subclass of FilteredTermEnum for enumerating all terms that match the * specified prefix filter term. * <p> * Term enumerations are always ordered by Term.compareTo(). Each term in * the enumeration is greater than all that precede it. * */ public class PrefixTermEnum extends FilteredTermEnum { private final Term prefix; private boolean endEnum = false; public PrefixTermEnum(IndexReader reader, Term prefix) throws IOException { this.prefix = prefix; setEnum(reader.terms(new Term(prefix.field(), prefix.text()))); } @Override public float difference() { return 1.0f; } @Override protected boolean endEnum() { return endEnum; } protected Term getPrefixTerm() { return prefix; } @Override protected boolean termCompare(Term term) { if (term.field() == prefix.field() && term.text().startsWith(prefix.text())) { return true; } endEnum = true; return false; } }
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/search/PrefixTermEnum.java
Java
art
1,987
package org.apache.lucene.search;

/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import java.util.List;
import java.io.IOException;

import org.apache.lucene.util.ScorerDocQueue;

/** A Scorer for OR like queries, counterpart of <code>ConjunctionScorer</code>.
 * This Scorer implements {@link Scorer#advance(int)} and uses advance() on the given Scorers.
 */
class DisjunctionSumScorer extends Scorer {
  /** The number of subscorers. */
  private final int nrScorers;

  /** The subscorers. */
  protected final List<Scorer> subScorers;

  /** The minimum number of scorers that should match. */
  private final int minimumNrMatchers;

  /** The scorerDocQueue contains all subscorers ordered by their current doc(),
   * with the minimum at the top.
   * <br>The scorerDocQueue is initialized the first time nextDoc() or advance() is called.
   * <br>An exhausted scorer is immediately removed from the scorerDocQueue.
   * <br>If less than the minimumNrMatchers scorers
   * remain in the scorerDocQueue nextDoc() and advance() return NO_MORE_DOCS.
   * <p>
   * After each call to nextDoc() or advance()
   * <code>currentScore</code> is the total score of the current matching doc,
   * <code>nrMatchers</code> is the number of matching scorers,
   * and all scorers are after the matching doc, or are exhausted.
   */
  private ScorerDocQueue scorerDocQueue;

  /** The document number of the current match; -1 before the first nextDoc()/advance(). */
  private int currentDoc = -1;

  /** The number of subscorers that provide the current match; -1 until the first match. */
  protected int nrMatchers = -1;

  /** Sum of the subscorer scores on the current match; NaN until the first match. */
  private float currentScore = Float.NaN;

  /** Construct a <code>DisjunctionScorer</code>.
   * @param subScorers A collection of at least two subscorers.
   * @param minimumNrMatchers The positive minimum number of subscorers that should
   * match to match this query.
   * <br>When <code>minimumNrMatchers</code> is bigger than
   * the number of <code>subScorers</code>,
   * no matches will be produced.
   * <br>When minimumNrMatchers equals the number of subScorers,
   * it is more efficient to use <code>ConjunctionScorer</code>.
   * @throws IllegalArgumentException when minimumNrMatchers is not positive
   *         or fewer than two subscorers are given.
   */
  public DisjunctionSumScorer( List<Scorer> subScorers, int minimumNrMatchers) throws IOException {
    super(null); // no Similarity needed: the score is just the sum of subscorer scores
    nrScorers = subScorers.size();
    if (minimumNrMatchers <= 0) {
      throw new IllegalArgumentException("Minimum nr of matchers must be positive");
    }
    if (nrScorers <= 1) {
      throw new IllegalArgumentException("There must be at least 2 subScorers");
    }
    this.minimumNrMatchers = minimumNrMatchers;
    this.subScorers = subScorers;
    initScorerDocQueue();
  }

  /** Construct a <code>DisjunctionScorer</code>, using one as the minimum number
   * of matching subscorers.
   */
  public DisjunctionSumScorer(List<Scorer> subScorers) throws IOException {
    this(subScorers, 1);
  }

  /** Called from the constructor to initialize <code>scorerDocQueue</code>:
   * each subscorer is advanced to its first doc; already-exhausted subscorers
   * are never inserted into the queue.
   */
  private void initScorerDocQueue() throws IOException {
    scorerDocQueue = new ScorerDocQueue(nrScorers);
    for (Scorer se : subScorers) {
      if (se.nextDoc() != NO_MORE_DOCS) {
        scorerDocQueue.insert(se);
      }
    }
  }

  /** Scores and collects all matching documents.
   * @param collector The collector to which all matching documents are passed through.
   */
  @Override
  public void score(Collector collector) throws IOException {
    collector.setScorer(this);
    while (nextDoc() != NO_MORE_DOCS) {
      collector.collect(currentDoc);
    }
  }

  /** Expert: Collects matching documents in a range.  Hook for optimization.
   * Note that {@link #nextDoc()} must be called once before this method is called
   * for the first time (so <code>currentDoc</code> is a real match).
   * @param collector The collector to which all matching documents are passed through.
   * @param max Do not score documents past this.
   * @param firstDocID ignored: nextDoc() already maintains <code>currentDoc</code>.
   * @return true if more matching documents may remain.
   */
  @Override
  protected boolean score(Collector collector, int max, int firstDocID) throws IOException {
    // firstDocID is ignored since nextDoc() sets 'currentDoc'
    collector.setScorer(this);
    while (currentDoc < max) {
      collector.collect(currentDoc);
      if (nextDoc() == NO_MORE_DOCS) {
        return false;
      }
    }
    return true;
  }

  @Override
  public int nextDoc() throws IOException {
    // Fewer than minimumNrMatchers scorers left can never produce a match.
    if (scorerDocQueue.size() < minimumNrMatchers || !advanceAfterCurrent()) {
      currentDoc = NO_MORE_DOCS;
    }
    return currentDoc;
  }

  /** Advance all subscorers after the current document determined by the
   * top of the <code>scorerDocQueue</code>.
   * Repeat until at least the minimum number of subscorers match on the same
   * document and all subscorers are after that document or are exhausted.
   * <br>On entry the <code>scorerDocQueue</code> has at least <code>minimumNrMatchers</code>
   * available. At least the scorer with the minimum document number will be advanced.
   * @return true iff there is a match.
   * <br>In case there is a match, <code>currentDoc</code>, <code>currentScore</code>,
   * and <code>nrMatchers</code> describe the match.
   *
   * TODO: Investigate whether it is possible to use advance() when
   * the minimum number of matchers is bigger than one, ie. try and use the
   * character of ConjunctionScorer for the minimum number of matchers.
   * Also delay calling score() on the sub scorers until the minimum number of
   * matchers is reached.
   * <br>For this, a Scorer array with minimumNrMatchers elements might
   * hold Scorers at currentDoc that are temporarily popped from scorerQueue.
   */
  protected boolean advanceAfterCurrent() throws IOException {
    do { // repeat until minimum nr of matchers
      // Candidate match: the smallest doc currently in the queue.
      currentDoc = scorerDocQueue.topDoc();
      currentScore = scorerDocQueue.topScore();
      nrMatchers = 1;
      do { // Until all subscorers are after currentDoc
        // Advance the top scorer; if it is exhausted it is popped from the queue.
        if (!scorerDocQueue.topNextAndAdjustElsePop()) {
          if (scorerDocQueue.size() == 0) {
            break; // nothing more to advance, check for last match.
          }
        }
        if (scorerDocQueue.topDoc() != currentDoc) {
          break; // All remaining subscorers are after currentDoc.
        }
        // Another subscorer matches currentDoc: accumulate its score.
        currentScore += scorerDocQueue.topScore();
        nrMatchers++;
      } while (true);

      if (nrMatchers >= minimumNrMatchers) {
        return true;
      } else if (scorerDocQueue.size() < minimumNrMatchers) {
        return false;
      }
    } while (true);
  }

  /** Returns the score of the current document matching the query.
   * Initially invalid, until {@link #nextDoc()} is called the first time.
   */
  @Override
  public float score() throws IOException { return currentScore; }

  @Override
  public int docID() {
    return currentDoc;
  }

  /** Returns the number of subscorers matching the current document.
   * Initially invalid, until {@link #nextDoc()} is called the first time.
   */
  public int nrMatchers() {
    return nrMatchers;
  }

  /**
   * Advances to the first match beyond the current whose document number is
   * greater than or equal to a given target. <br>
   * The implementation uses the advance() method on the subscorers.
   *
   * @param target
   *          The target document number.
   * @return the document whose number is greater than or equal to the given
   *         target, or {@link #NO_MORE_DOCS} if none exist.
   */
  @Override
  public int advance(int target) throws IOException {
    if (scorerDocQueue.size() < minimumNrMatchers) {
      return currentDoc = NO_MORE_DOCS;
    }
    if (target <= currentDoc) {
      // Already at or past the target; per the advance() contract stay put.
      return currentDoc;
    }
    do {
      if (scorerDocQueue.topDoc() >= target) {
        // Every queued scorer is at >= target: fall back to the normal
        // match-building loop from here.
        return advanceAfterCurrent() ? currentDoc : (currentDoc = NO_MORE_DOCS);
      } else if (!scorerDocQueue.topSkipToAndAdjustElsePop(target)) {
        // Top scorer exhausted while skipping and was popped.
        if (scorerDocQueue.size() < minimumNrMatchers) {
          return currentDoc = NO_MORE_DOCS;
        }
      }
    } while (true);
  }
}
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/search/DisjunctionSumScorer.java
Java
art
8,705
package org.apache.lucene.search; import org.apache.lucene.index.FieldInvertState; /** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** Expert: Default scoring implementation. */ public class DefaultSimilarity extends Similarity { /** Implemented as * <code>state.getBoost()*lengthNorm(numTerms)</code>, where * <code>numTerms</code> is {@link FieldInvertState#getLength()} if {@link * #setDiscountOverlaps} is false, else it's {@link * FieldInvertState#getLength()} - {@link * FieldInvertState#getNumOverlap()}. * * <p><b>WARNING</b>: This API is new and experimental, and may suddenly * change.</p> */ @Override public float computeNorm(String field, FieldInvertState state) { final int numTerms; if (discountOverlaps) numTerms = state.getLength() - state.getNumOverlap(); else numTerms = state.getLength(); return (state.getBoost() * lengthNorm(field, numTerms)); } /** Implemented as <code>1/sqrt(numTerms)</code>. */ @Override public float lengthNorm(String fieldName, int numTerms) { return (float)(1.0 / Math.sqrt(numTerms)); } /** Implemented as <code>1/sqrt(sumOfSquaredWeights)</code>. */ @Override public float queryNorm(float sumOfSquaredWeights) { return (float)(1.0 / Math.sqrt(sumOfSquaredWeights)); } /** Implemented as <code>sqrt(freq)</code>. 
*/ @Override public float tf(float freq) { return (float)Math.sqrt(freq); } /** Implemented as <code>1 / (distance + 1)</code>. */ @Override public float sloppyFreq(int distance) { return 1.0f / (distance + 1); } /** Implemented as <code>log(numDocs/(docFreq+1)) + 1</code>. */ @Override public float idf(int docFreq, int numDocs) { return (float)(Math.log(numDocs/(double)(docFreq+1)) + 1.0); } /** Implemented as <code>overlap / maxOverlap</code>. */ @Override public float coord(int overlap, int maxOverlap) { return overlap / (float)maxOverlap; } // Default false protected boolean discountOverlaps; /** Determines whether overlap tokens (Tokens with * 0 position increment) are ignored when computing * norm. By default this is false, meaning overlap * tokens are counted just like non-overlap tokens. * * <p><b>WARNING</b>: This API is new and experimental, and may suddenly * change.</p> * * @see #computeNorm */ public void setDiscountOverlaps(boolean v) { discountOverlaps = v; } /** @see #setDiscountOverlaps */ public boolean getDiscountOverlaps() { return discountOverlaps; } }
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/search/DefaultSimilarity.java
Java
art
3,331
package org.apache.lucene.search;

/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import org.apache.lucene.util.PriorityQueue;

import java.text.Collator;
import java.util.Locale;

/**
 * Expert: Collects sorted results from Searchable's and collates them.
 * The elements put into this queue must be of type FieldDoc.
 *
 * <p>Created: Feb 11, 2004 2:04:21 PM
 *
 * @since   lucene 1.4
 */
class FieldDocSortedHitQueue extends PriorityQueue<FieldDoc> {

  // volatile: may be set lazily by setFields() after other threads see the queue
  volatile SortField[] fields = null;

  // used in the case where the fields are sorted by locale
  // based strings; entries are null for non-locale sort fields
  volatile Collator[] collators = null;

  /**
   * Creates a hit queue with capacity for <code>size</code> hits; the sort
   * fields are supplied later via {@link #setFields}.
   * @param size  The number of hits to retain.  Must be greater than zero.
   */
  FieldDocSortedHitQueue (int size) {
    initialize (size);
  }

  /**
   * Allows redefinition of sort fields if they are <code>null</code>.
   * This is to handle the case using ParallelMultiSearcher where the
   * original list contains AUTO and we don't know the actual sort
   * type until the values come back.  The fields can only be set once.
   * This method should be synchronized external like all other PQ methods.
   * @param fields  Sort fields, in priority order (highest priority first).
   */
  void setFields (SortField[] fields) {
    this.fields = fields;
    this.collators = hasCollators (fields);
  }

  /** Returns the fields being used to sort. */
  SortField[] getFields() {
    return fields;
  }

  /** Returns an array of collators, possibly <code>null</code>.  The collators
   * correspond to any SortFields which were given a specific locale.
   * @param fields Array of sort fields.
   * @return Array, possibly <code>null</code>.
   */
  private Collator[] hasCollators (final SortField[] fields) {
    if (fields == null) return null;
    Collator[] ret = new Collator[fields.length];
    for (int i=0; i<fields.length; ++i) {
      Locale locale = fields[i].getLocale();
      if (locale != null)
        ret[i] = Collator.getInstance (locale);
    }
    return ret;
  }

  /**
   * Returns whether <code>docA</code> is less relevant than <code>docB</code>.
   * Compares field by field in priority order; ties fall through to the next
   * field, and a full tie is broken by doc id (larger id sorts after).
   * @param docA ScoreDoc
   * @param docB ScoreDoc
   * @return <code>true</code> if document <code>docA</code> should be sorted after document <code>docB</code>.
   */
  @SuppressWarnings("unchecked") // docA.fields[i] is a raw Comparable
  @Override
  protected final boolean lessThan(final FieldDoc docA, final FieldDoc docB) {
    final int n = fields.length;
    int c = 0;
    // stop at the first field that orders the two docs (c != 0)
    for (int i=0; i<n && c==0; ++i) {
      final int type = fields[i].getType();
      if (type == SortField.STRING) {
        final String s1 = (String) docA.fields[i];
        final String s2 = (String) docB.fields[i];
        // null values need to be sorted first, because of how FieldCache.getStringIndex()
        // works - in that routine, any documents without a value in the given field are
        // put first.  If both are null, the next SortField is used
        if (s1 == null) {
          c = (s2 == null) ? 0 : -1;
        } else if (s2 == null) {
          c = 1;
        } else if (fields[i].getLocale() == null) {
          c = s1.compareTo(s2);
        } else {
          c = collators[i].compare(s1, s2);
        }
      } else {
        c = docA.fields[i].compareTo(docB.fields[i]);
        if (type == SortField.SCORE) {
          c = -c; // higher score is "less" (sorts first)
        }
      }
      // reverse sort
      if (fields[i].getReverse()) {
        c = -c;
      }
    }

    // avoid random sort order that could lead to duplicates (bug #31241):
    if (c == 0)
      return docA.doc > docB.doc;

    return c > 0;
  }
}
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/search/FieldDocSortedHitQueue.java
Java
art
4,385
package org.apache.lucene.search;

/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/** Expert: Returned by low-level search implementations.
 * @see TopDocs */
public class ScoreDoc implements java.io.Serializable {

  /** Expert: The score of this document for the query. */
  public float score;

  /** Expert: A hit document's number.
   * @see Searcher#doc(int) */
  public int doc;

  /** Expert: Constructs a ScoreDoc. */
  public ScoreDoc(int doc, float score) {
    this.score = score;
    this.doc = doc;
  }

  /** A convenience method for debugging. */
  @Override
  public String toString() {
    return new StringBuilder("doc=").append(doc).append(" score=").append(score).toString();
  }
}
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/search/ScoreDoc.java
Java
art
1,415
package org.apache.lucene.search;

/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import java.io.IOException;
import java.util.*;

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.MultipleTermPositions;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermPositions;
import org.apache.lucene.util.ToStringUtils;

/**
 * MultiPhraseQuery is a generalized version of PhraseQuery, with an added
 * method {@link #add(Term[])}.
 * To use this class, to search for the phrase "Microsoft app*" first use
 * add(Term) on the term "Microsoft", then find all terms that have "app" as
 * prefix using IndexReader.terms(Term), and use MultiPhraseQuery.add(Term[]
 * terms) to add them to the query.
 *
 * @version 1.0
 */
public class MultiPhraseQuery extends Query {
  // Field all terms must belong to; set by the first add() call.
  private String field;
  // One Term[] per phrase position: any of the terms may match at that position.
  private ArrayList<Term[]> termArrays = new ArrayList<Term[]>();
  // Relative position of each entry in termArrays.
  private ArrayList<Integer> positions = new ArrayList<Integer>();

  private int slop = 0;

  /** Sets the phrase slop for this query.
   * @see PhraseQuery#setSlop(int)
   */
  public void setSlop(int s) { slop = s; }

  /** Gets the phrase slop for this query.
   * @see PhraseQuery#getSlop()
   */
  public int getSlop() { return slop; }

  /** Add a single term at the next position in the phrase.
   * @see PhraseQuery#add(Term)
   */
  public void add(Term term) { add(new Term[]{term}); }

  /** Add multiple terms at the next position in the phrase.  Any of the terms
   * may match.
   *
   * @see PhraseQuery#add(Term)
   */
  public void add(Term[] terms) {
    int position = 0;
    // Next position defaults to one past the last added position.
    if (positions.size() > 0)
      position = positions.get(positions.size()-1).intValue() + 1;

    add(terms, position);
  }

  /**
   * Allows to specify the relative position of terms within the phrase.
   *
   * @see PhraseQuery#add(Term, int)
   * @param terms the alternatives for this position (all in the same field)
   * @param position the relative position within the phrase
   * @throws IllegalArgumentException if a term is in a different field
   */
  public void add(Term[] terms, int position) {
    if (termArrays.size() == 0)
      field = terms[0].field();

    for (int i = 0; i < terms.length; i++) {
      // NOTE(review): reference comparison relies on Term interning its
      // field string — confirm before changing.
      if (terms[i].field() != field) {
        throw new IllegalArgumentException(
            "All phrase terms must be in the same field (" + field + "): "
                + terms[i]);
      }
    }

    termArrays.add(terms);
    positions.add(Integer.valueOf(position));
  }

  /**
   * Returns a List of the terms in the multiphrase.
   * Do not modify the List or its contents.
   */
  public List<Term[]> getTermArrays() {
    return Collections.unmodifiableList(termArrays);
  }

  /**
   * Returns the relative positions of terms in this phrase.
   */
  public int[] getPositions() {
    int[] result = new int[positions.size()];
    for (int i = 0; i < positions.size(); i++)
      result[i] = positions.get(i).intValue();
    return result;
  }

  // inherit javadoc
  @Override
  public void extractTerms(Set<Term> terms) {
    for (final Term[] arr : termArrays) {
      for (final Term term: arr) {
        terms.add(term);
      }
    }
  }

  /** Weight implementation: sums idf over all alternative terms at all positions. */
  private class MultiPhraseWeight extends Weight {
    private Similarity similarity;
    private float value;
    private float idf;
    private float queryNorm;
    private float queryWeight;

    public MultiPhraseWeight(Searcher searcher)
      throws IOException {
      this.similarity = getSimilarity(searcher);

      // compute idf: accumulated over every alternative term
      final int maxDoc = searcher.maxDoc();
      for(final Term[] terms: termArrays) {
        for (Term term: terms) {
          idf += this.similarity.idf(searcher.docFreq(term), maxDoc);
        }
      }
    }

    @Override
    public Query getQuery() { return MultiPhraseQuery.this; }

    @Override
    public float getValue() { return value; }

    @Override
    public float sumOfSquaredWeights() {
      queryWeight = idf * getBoost();             // compute query weight
      return queryWeight * queryWeight;           // square it
    }

    @Override
    public void normalize(float queryNorm) {
      this.queryNorm = queryNorm;
      queryWeight *= queryNorm;                   // normalize query weight
      value = queryWeight * idf;                  // idf for document
    }

    @Override
    public Scorer scorer(IndexReader reader, boolean scoreDocsInOrder, boolean topScorer) throws IOException {
      if (termArrays.size() == 0)                  // optimize zero-term case
        return null;

      TermPositions[] tps = new TermPositions[termArrays.size()];
      for (int i=0; i<tps.length; i++) {
        Term[] terms = termArrays.get(i);

        TermPositions p;
        // Multiple alternatives at a position are merged into one TermPositions view.
        if (terms.length > 1)
          p = new MultipleTermPositions(reader, terms);
        else
          p = reader.termPositions(terms[0]);

        if (p == null)
          return null;

        tps[i] = p;
      }

      // slop == 0 means exact phrase match; otherwise sloppy (edit-distance) match.
      if (slop == 0)
        return new ExactPhraseScorer(this, tps, getPositions(), similarity,
                                     reader.norms(field));
      else
        return new SloppyPhraseScorer(this, tps, getPositions(), similarity,
                                      slop, reader.norms(field));
    }

    @Override
    public Explanation explain(IndexReader reader, int doc)
      throws IOException {
      ComplexExplanation result = new ComplexExplanation();
      result.setDescription("weight("+getQuery()+" in "+doc+"), product of:");

      Explanation idfExpl = new Explanation(idf, "idf("+getQuery()+")");

      // explain query weight
      Explanation queryExpl = new Explanation();
      queryExpl.setDescription("queryWeight(" + getQuery() + "), product of:");

      Explanation boostExpl = new Explanation(getBoost(), "boost");
      if (getBoost() != 1.0f)
        queryExpl.addDetail(boostExpl);

      queryExpl.addDetail(idfExpl);

      Explanation queryNormExpl = new Explanation(queryNorm,"queryNorm");
      queryExpl.addDetail(queryNormExpl);

      queryExpl.setValue(boostExpl.getValue() *
                         idfExpl.getValue() *
                         queryNormExpl.getValue());

      result.addDetail(queryExpl);

      // explain field weight
      ComplexExplanation fieldExpl = new ComplexExplanation();
      fieldExpl.setDescription("fieldWeight("+getQuery()+" in "+doc+
                               "), product of:");

      PhraseScorer scorer = (PhraseScorer) scorer(reader, true, false);
      if (scorer == null) {
        return new Explanation(0.0f, "no matching docs");
      }
      Explanation tfExplanation = new Explanation();
      int d = scorer.advance(doc);
      // If the scorer does not land exactly on doc, the phrase does not occur there.
      float phraseFreq = (d == doc) ? scorer.currentFreq() : 0.0f;
      tfExplanation.setValue(similarity.tf(phraseFreq));
      tfExplanation.setDescription("tf(phraseFreq=" + phraseFreq + ")");
      fieldExpl.addDetail(tfExplanation);
      fieldExpl.addDetail(idfExpl);

      Explanation fieldNormExpl = new Explanation();
      byte[] fieldNorms = reader.norms(field);
      float fieldNorm =
        fieldNorms!=null ? Similarity.decodeNorm(fieldNorms[doc]) : 1.0f;
      fieldNormExpl.setValue(fieldNorm);
      fieldNormExpl.setDescription("fieldNorm(field="+field+", doc="+doc+")");
      fieldExpl.addDetail(fieldNormExpl);

      fieldExpl.setMatch(Boolean.valueOf(tfExplanation.isMatch()));
      fieldExpl.setValue(tfExplanation.getValue() *
                         idfExpl.getValue() *
                         fieldNormExpl.getValue());

      result.addDetail(fieldExpl);
      result.setMatch(fieldExpl.getMatch());

      // combine them
      result.setValue(queryExpl.getValue() * fieldExpl.getValue());

      // collapse the tree when the query part contributes nothing (weight 1)
      if (queryExpl.getValue() == 1.0f)
        return fieldExpl;

      return result;
    }
  }

  @Override
  public Query rewrite(IndexReader reader) {
    if (termArrays.size() == 1) {                 // optimize one-term case
      Term[] terms = termArrays.get(0);
      BooleanQuery boq = new BooleanQuery(true);
      for (int i=0; i<terms.length; i++) {
        boq.add(new TermQuery(terms[i]), BooleanClause.Occur.SHOULD);
      }
      boq.setBoost(getBoost());
      return boq;
    } else {
      return this;
    }
  }

  @Override
  public Weight createWeight(Searcher searcher) throws IOException {
    return new MultiPhraseWeight(searcher);
  }

  /** Prints a user-readable version of this query. */
  @Override
  public final String toString(String f) {
    StringBuilder buffer = new StringBuilder();
    if (!field.equals(f)) {
      buffer.append(field);
      buffer.append(":");
    }

    buffer.append("\"");
    Iterator<Term[]> i = termArrays.iterator();
    while (i.hasNext()) {
      Term[] terms = i.next();
      if (terms.length > 1) {
        // alternatives at one position are grouped in parentheses
        buffer.append("(");
        for (int j = 0; j < terms.length; j++) {
          buffer.append(terms[j].text());
          if (j < terms.length-1)
            buffer.append(" ");
        }
        buffer.append(")");
      } else {
        buffer.append(terms[0].text());
      }
      if (i.hasNext())
        buffer.append(" ");
    }
    buffer.append("\"");

    if (slop != 0) {
      buffer.append("~");
      buffer.append(slop);
    }

    buffer.append(ToStringUtils.boost(getBoost()));

    return buffer.toString();
  }

  /** Returns true if <code>o</code> is equal to this. */
  @Override
  public boolean equals(Object o) {
    if (!(o instanceof MultiPhraseQuery)) return false;
    MultiPhraseQuery other = (MultiPhraseQuery)o;
    return this.getBoost() == other.getBoost()
      && this.slop == other.slop
      && termArraysEquals(this.termArrays, other.termArrays)
      && this.positions.equals(other.positions);
  }

  /** Returns a hash code value for this object.*/
  @Override
  public int hashCode() {
    return Float.floatToIntBits(getBoost())
      ^ slop
      ^ termArraysHashCode()
      ^ positions.hashCode()
      ^ 0x4AC65113;
  }

  // Breakout calculation of the termArrays hashcode
  private int termArraysHashCode() {
    int hashCode = 1;
    for (final Term[] termArray: termArrays) {
      hashCode = 31 * hashCode
          + (termArray == null ? 0 : Arrays.hashCode(termArray));
    }
    return hashCode;
  }

  // Breakout calculation of the termArrays equals
  private boolean termArraysEquals(List<Term[]> termArrays1, List<Term[]> termArrays2) {
    if (termArrays1.size() != termArrays2.size()) {
      return false;
    }
    ListIterator<Term[]> iterator1 = termArrays1.listIterator();
    ListIterator<Term[]> iterator2 = termArrays2.listIterator();
    while (iterator1.hasNext()) {
      Term[] termArray1 = iterator1.next();
      Term[] termArray2 = iterator2.next();
      if (!(termArray1 == null ? termArray2 == null : Arrays
          .equals(termArray1, termArray2))) {
        return false;
      }
    }
    return true;
  }
}
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/search/MultiPhraseQuery.java
Java
art
11,450
package org.apache.lucene.search;

/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import java.io.IOException;
import java.util.Set;
import java.util.ArrayList;

import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermPositions;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.Explanation.IDFExplanation;
import org.apache.lucene.util.ToStringUtils;

/** A Query that matches documents containing a particular sequence of terms.
 * A PhraseQuery is built by QueryParser for input like <code>"new york"</code>.
 *
 * <p>This query may be combined with other terms or queries with a {@link BooleanQuery}.
 */
public class PhraseQuery extends Query {
  // Field shared by every term of the phrase; set by the first add().
  private String field;
  // Terms of the phrase, parallel to 'positions' (terms.get(i) sits at positions.get(i)).
  private ArrayList<Term> terms = new ArrayList<Term>(4);
  private ArrayList<Integer> positions = new ArrayList<Integer>(4);
  // Highest position added so far; used by toString() to size its output.
  private int maxPosition = 0;
  // Allowed edit distance between phrase terms; 0 means exact phrase.
  private int slop = 0;

  /** Constructs an empty phrase query. */
  public PhraseQuery() {}

  /** Sets the number of other words permitted between words in query phrase.
    If zero, then this is an exact phrase search.  For larger values this works
    like a <code>WITHIN</code> or <code>NEAR</code> operator.

    <p>The slop is in fact an edit-distance, where the units correspond to
    moves of terms in the query phrase out of position.  For example, to switch
    the order of two words requires two moves (the first move places the words
    atop one another), so to permit re-orderings of phrases, the slop must be
    at least two.

    <p>More exact matches are scored higher than sloppier matches, thus search
    results are sorted by exactness.

    <p>The slop is zero by default, requiring exact matches.*/
  public void setSlop(int s) { slop = s; }

  /** Returns the slop.  See setSlop(). */
  public int getSlop() { return slop; }

  /**
   * Adds a term to the end of the query phrase.
   * The relative position of the term is the one immediately after the last term added.
   */
  public void add(Term term) {
    int position = 0;
    // Place one past the last recorded position (positions need not be dense).
    if(positions.size() > 0)
      position = positions.get(positions.size()-1).intValue() + 1;

    add(term, position);
  }

  /**
   * Adds a term to the end of the query phrase.
   * The relative position of the term within the phrase is specified explicitly.
   * This allows e.g. phrases with more than one term at the same position
   * or phrases with gaps (e.g. in connection with stopwords).
   *
   * @param term the term to add
   * @param position the explicit position of the term within the phrase
   * @throws IllegalArgumentException if the term's field differs from the phrase's field
   */
  public void add(Term term, int position) {
    if (terms.size() == 0)
      field = term.field();
    // NOTE: reference (==) comparison — Term fields are interned in this codebase.
    else if (term.field() != field)
      throw new IllegalArgumentException("All phrase terms must be in the same field: " + term);

    terms.add(term);
    positions.add(Integer.valueOf(position));
    if (position > maxPosition) maxPosition = position;
  }

  /** Returns the set of terms in this phrase. */
  public Term[] getTerms() {
    return terms.toArray(new Term[0]);
  }

  /**
   * Returns the relative positions of terms in this phrase.
   */
  public int[] getPositions() {
      int[] result = new int[positions.size()];
      for(int i = 0; i < positions.size(); i++)
          result[i] = positions.get(i).intValue();
      return result;
  }

  /** Weight for this phrase query: caches idf/boost/norm products per searcher. */
  private class PhraseWeight extends Weight {
    private Similarity similarity;
    private float value;           // final query-time weight = queryWeight * idf
    private float idf;
    private float queryNorm;
    private float queryWeight;
    private IDFExplanation idfExp;

    public PhraseWeight(Searcher searcher)
      throws IOException {
      this.similarity = getSimilarity(searcher);

      idfExp = similarity.idfExplain(terms, searcher);
      idf = idfExp.getIdf();
    }

    @Override
    public String toString() { return "weight(" + PhraseQuery.this + ")"; }

    @Override
    public Query getQuery() { return PhraseQuery.this; }

    @Override
    public float getValue() { return value; }

    @Override
    public float sumOfSquaredWeights() {
      queryWeight = idf * getBoost();             // compute query weight
      return queryWeight * queryWeight;           // square it
    }

    @Override
    public void normalize(float queryNorm) {
      this.queryNorm = queryNorm;
      queryWeight *= queryNorm;                   // normalize query weight
      value = queryWeight * idf;                  // idf for document
    }

    @Override
    public Scorer scorer(IndexReader reader, boolean scoreDocsInOrder, boolean topScorer) throws IOException {
      if (terms.size() == 0)			  // optimize zero-term case
        return null;

      TermPositions[] tps = new TermPositions[terms.size()];
      for (int i = 0; i < terms.size(); i++) {
        TermPositions p = reader.termPositions(terms.get(i));
        // A missing term means the phrase cannot match any document.
        if (p == null)
          return null;
        tps[i] = p;
      }

      if (slop == 0)				  // optimize exact case
        return new ExactPhraseScorer(this, tps, getPositions(), similarity,
                                     reader.norms(field));
      else
        return
          new SloppyPhraseScorer(this, tps, getPositions(), similarity, slop,
                                 reader.norms(field));

    }

    @Override
    public Explanation explain(IndexReader reader, int doc)
      throws IOException {

      Explanation result = new Explanation();
      result.setDescription("weight("+getQuery()+" in "+doc+"), product of:");

      StringBuilder docFreqs = new StringBuilder();
      StringBuilder query = new StringBuilder();
      query.append('\"');
      docFreqs.append(idfExp.explain());
      for (int i = 0; i < terms.size(); i++) {
        if (i != 0) {
          query.append(" ");
        }

        Term term = terms.get(i);

        query.append(term.text());
      }
      query.append('\"');

      Explanation idfExpl =
        new Explanation(idf, "idf(" + field + ":" + docFreqs + ")");

      // explain query weight
      Explanation queryExpl = new Explanation();
      queryExpl.setDescription("queryWeight(" + getQuery() + "), product of:");

      Explanation boostExpl = new Explanation(getBoost(), "boost");
      if (getBoost() != 1.0f)
        queryExpl.addDetail(boostExpl);
      queryExpl.addDetail(idfExpl);

      Explanation queryNormExpl = new Explanation(queryNorm,"queryNorm");
      queryExpl.addDetail(queryNormExpl);

      queryExpl.setValue(boostExpl.getValue() *
                         idfExpl.getValue() *
                         queryNormExpl.getValue());

      result.addDetail(queryExpl);

      // explain field weight
      Explanation fieldExpl = new Explanation();
      fieldExpl.setDescription("fieldWeight("+field+":"+query+" in "+doc+
                               "), product of:");

      PhraseScorer scorer = (PhraseScorer) scorer(reader, true, false);
      if (scorer == null) {
        return new Explanation(0.0f, "no matching docs");
      }
      Explanation tfExplanation = new Explanation();
      // Advance the scorer to 'doc'; if it lands elsewhere the phrase freq is 0.
      int d = scorer.advance(doc);
      float phraseFreq = (d == doc) ? scorer.currentFreq() : 0.0f;
      tfExplanation.setValue(similarity.tf(phraseFreq));
      tfExplanation.setDescription("tf(phraseFreq=" + phraseFreq + ")");

      fieldExpl.addDetail(tfExplanation);
      fieldExpl.addDetail(idfExpl);

      Explanation fieldNormExpl = new Explanation();
      byte[] fieldNorms = reader.norms(field);
      float fieldNorm =
        fieldNorms!=null ?
        Similarity.decodeNorm(fieldNorms[doc]) : 1.0f;
      fieldNormExpl.setValue(fieldNorm);
      fieldNormExpl.setDescription("fieldNorm(field="+field+", doc="+doc+")");
      fieldExpl.addDetail(fieldNormExpl);

      fieldExpl.setValue(tfExplanation.getValue() *
                         idfExpl.getValue() *
                         fieldNormExpl.getValue());

      result.addDetail(fieldExpl);

      // combine them
      result.setValue(queryExpl.getValue() * fieldExpl.getValue());

      // If the query weight contributes nothing, the field explanation is the whole story.
      if (queryExpl.getValue() == 1.0f)
        return fieldExpl;

      return result;
    }
  }

  @Override
  public Weight createWeight(Searcher searcher) throws IOException {
    if (terms.size() == 1) {			  // optimize one-term case
      Term term = terms.get(0);
      Query termQuery = new TermQuery(term);
      termQuery.setBoost(getBoost());
      return termQuery.createWeight(searcher);
    }
    return new PhraseWeight(searcher);
  }

  /**
   * @see org.apache.lucene.search.Query#extractTerms(Set)
   */
  @Override
  public void extractTerms(Set<Term> queryTerms) {
    queryTerms.addAll(terms);
  }

  /** Prints a user-readable version of this query. */
  @Override
  public String toString(String f) {
    StringBuilder buffer = new StringBuilder();
    if (field != null && !field.equals(f)) {
      buffer.append(field);
      buffer.append(":");
    }

    buffer.append("\"");
    // Group co-located terms with '|' and mark position gaps with '?'.
    String[] pieces = new String[maxPosition + 1];
    for (int i = 0; i < terms.size(); i++) {
      int pos = positions.get(i).intValue();
      String s = pieces[pos];
      if (s == null) {
        s = (terms.get(i)).text();
      } else {
        s = s + "|" + (terms.get(i)).text();
      }
      pieces[pos] = s;
    }
    for (int i = 0; i < pieces.length; i++) {
      if (i > 0) {
        buffer.append(' ');
      }
      String s = pieces[i];
      if (s == null) {
        buffer.append('?');
      } else {
        buffer.append(s);
      }
    }
    buffer.append("\"");

    if (slop != 0) {
      buffer.append("~");
      buffer.append(slop);
    }

    buffer.append(ToStringUtils.boost(getBoost()));

    return buffer.toString();
  }

  /** Returns true iff <code>o</code> is equal to this. */
  @Override
  public boolean equals(Object o) {
    if (!(o instanceof PhraseQuery))
      return false;
    PhraseQuery other = (PhraseQuery)o;
    return (this.getBoost() == other.getBoost())
      && (this.slop == other.slop)
      &&  this.terms.equals(other.terms)
      && this.positions.equals(other.positions);
  }

  /** Returns a hash code value for this object.*/
  @Override
  public int hashCode() {
    return Float.floatToIntBits(getBoost())
      ^ slop
      ^ terms.hashCode()
      ^ positions.hashCode();
  }

}
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/search/PhraseQuery.java
Java
art
10,904
package org.apache.lucene.search.payloads;

/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import org.apache.lucene.index.Term;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.TermPositions;
import org.apache.lucene.search.Searcher;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight;
import org.apache.lucene.search.Similarity;
import org.apache.lucene.search.Explanation;
import org.apache.lucene.search.ComplexExplanation;
import org.apache.lucene.search.spans.TermSpans;
import org.apache.lucene.search.spans.SpanTermQuery;
import org.apache.lucene.search.spans.SpanWeight;
import org.apache.lucene.search.spans.SpanScorer;

import java.io.IOException;

/**
 * This class is very similar to
 * {@link org.apache.lucene.search.spans.SpanTermQuery} except that it factors
 * in the value of the payload located at each of the positions where the
 * {@link org.apache.lucene.index.Term} occurs.
 * <p>
 * In order to take advantage of this, you must override
 * {@link org.apache.lucene.search.Similarity#scorePayload(int, String, int, int, byte[],int,int)}
 * which returns 1 by default.
 * <p>
 * Payload scores are aggregated using a pluggable {@link PayloadFunction}.
 **/
public class PayloadTermQuery extends SpanTermQuery {
  // Strategy used to fold each position's payload score into a per-doc score.
  protected PayloadFunction function;
  // When false, score() returns the payload score alone (span score ignored).
  private boolean includeSpanScore;

  public PayloadTermQuery(Term term, PayloadFunction function) {
    this(term, function, true);
  }

  public PayloadTermQuery(Term term, PayloadFunction function,
      boolean includeSpanScore) {
    super(term);
    this.function = function;
    this.includeSpanScore = includeSpanScore;
  }

  @Override
  public Weight createWeight(Searcher searcher) throws IOException {
    return new PayloadTermWeight(this, searcher);
  }

  /** Weight that produces a payload-aware scorer over this term's spans. */
  protected class PayloadTermWeight extends SpanWeight {

    public PayloadTermWeight(PayloadTermQuery query, Searcher searcher)
        throws IOException {
      super(query, searcher);
    }

    @Override
    public Scorer scorer(IndexReader reader, boolean scoreDocsInOrder,
        boolean topScorer) throws IOException {
      return new PayloadTermSpanScorer((TermSpans) query.getSpans(reader),
          this, similarity, reader.norms(query.getField()));
    }

    /** SpanScorer that additionally accumulates payload scores per document. */
    protected class PayloadTermSpanScorer extends SpanScorer {
      // TODO: is this the best way to allocate this?
      // Reusable payload buffer; getPayload() may return a larger replacement.
      protected byte[] payload = new byte[256];
      protected TermPositions positions;
      protected float payloadScore;   // running aggregate for the current doc
      protected int payloadsSeen;     // number of payloads folded into payloadScore

      public PayloadTermSpanScorer(TermSpans spans, Weight weight,
          Similarity similarity, byte[] norms) throws IOException {
        super(spans, weight, similarity, norms);
        positions = spans.getPositions();
      }

      @Override
      protected boolean setFreqCurrentDoc() throws IOException {
        if (!more) {
          return false;
        }
        doc = spans.doc();
        freq = 0.0f;
        payloadScore = 0;
        payloadsSeen = 0;
        Similarity similarity1 = getSimilarity();
        // Consume every span match within the current document, folding each
        // match's payload (if any) into the running payload score.
        while (more && doc == spans.doc()) {
          int matchLength = spans.end() - spans.start();

          freq += similarity1.sloppyFreq(matchLength);
          processPayload(similarity1);

          more = spans.next();// this moves positions to the next match in this
                              // document
        }
        return more || (freq != 0);
      }

      protected void processPayload(Similarity similarity) throws IOException {
        if (positions.isPayloadAvailable()) {
          payload = positions.getPayload(payload, 0);
          payloadScore = function.currentScore(doc, term.field(),
              spans.start(), spans.end(), payloadsSeen, payloadScore,
              similarity.scorePayload(doc, term.field(), spans.start(), spans
                  .end(), payload, 0, positions.getPayloadLength()));
          payloadsSeen++;

        } else {
          // zero out the payload?
        }
      }

      /**
       *
       * @return {@link #getSpanScore()} * {@link #getPayloadScore()}
       * @throws IOException
       */
      @Override
      public float score() throws IOException {

        return includeSpanScore ? getSpanScore() * getPayloadScore()
            : getPayloadScore();
      }

      /**
       * Returns the SpanScorer score only.
       * <p/>
       * Should not be overridden without good cause!
       *
       * @return the score for just the Span part w/o the payload
       * @throws IOException
       *
       * @see #score()
       */
      protected float getSpanScore() throws IOException {
        return super.score();
      }

      /**
       * The score for the payload
       *
       * @return The score, as calculated by
       *         {@link PayloadFunction#docScore(int, String, int, float)}
       */
      protected float getPayloadScore() {
        return function.docScore(doc, term.field(), payloadsSeen, payloadScore);
      }

      @Override
      protected Explanation explain(final int doc) throws IOException {
        ComplexExplanation result = new ComplexExplanation();
        Explanation nonPayloadExpl = super.explain(doc);
        result.addDetail(nonPayloadExpl);
        // QUESTION: Is there a way to avoid this skipTo call? We need to know
        // whether to load the payload or not
        Explanation payloadBoost = new Explanation();
        result.addDetail(payloadBoost);

        float payloadScore = getPayloadScore();
        payloadBoost.setValue(payloadScore);
        // GSI: I suppose we could toString the payload, but I don't think that
        // would be a good idea
        payloadBoost.setDescription("scorePayload(...)");
        result.setValue(nonPayloadExpl.getValue() * payloadScore);
        result.setDescription("btq, product of:");
        result.setMatch(nonPayloadExpl.getValue() == 0 ? Boolean.FALSE
            : Boolean.TRUE); // LUCENE-1303
        return result;
      }

    }
  }

  @Override
  public int hashCode() {
    final int prime = 31;
    int result = super.hashCode();
    result = prime * result + ((function == null) ? 0 : function.hashCode());
    result = prime * result + (includeSpanScore ? 1231 : 1237);
    return result;
  }

  @Override
  public boolean equals(Object obj) {
    if (this == obj)
      return true;
    if (!super.equals(obj))
      return false;
    if (getClass() != obj.getClass())
      return false;
    PayloadTermQuery other = (PayloadTermQuery) obj;
    if (function == null) {
      if (other.function != null)
        return false;
    } else if (!function.equals(other.function))
      return false;
    if (includeSpanScore != other.includeSpanScore)
      return false;
    return true;
  }

}
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/search/payloads/PayloadTermQuery.java
Java
art
7,519
<HTML> <!-- Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> <HEAD> <TITLE>org.apache.lucene.search.payloads</TITLE> </HEAD> <BODY> <DIV>The payloads package provides Query mechanisms for finding and using payloads. The following Query implementations are provided: </DIV> <div> <ol> <li><a href="./PayloadTermQuery.html">PayloadTermQuery</a> -- Boost a term's score based on the value of the payload located at that term.</li> <li><a href="./PayloadNearQuery.html">PayloadNearQuery</a> -- A <a href="../spans/SpanNearQuery.html">SpanNearQuery</a> that factors in the value of the payloads located at each of the positions where the spans occur.</li> </ol> </div> <DIV>&nbsp;</DIV> <DIV align="center"> </DIV> </BODY> </HTML>
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/search/payloads/package.html
HTML
art
1,470
package org.apache.lucene.search.payloads;

/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * Calculate the final score as the average score of all payloads seen.
 * <p/>
 * Is thread safe and completely reusable.
 *
 **/
public class AveragePayloadFunction extends PayloadFunction {

  /** Accumulates a running sum of payload scores; the average is taken in docScore(). */
  @Override
  public float currentScore(int docId, String field, int start, int end,
      int numPayloadsSeen, float currentScore, float currentPayloadScore) {
    return currentScore + currentPayloadScore;
  }

  /** Returns the mean payload score for the doc, or 1 when no payloads were seen. */
  @Override
  public float docScore(int docId, String field, int numPayloadsSeen,
      float payloadScore) {
    if (numPayloadsSeen > 0) {
      return payloadScore / numPayloadsSeen;
    }
    return 1;
  }

  @Override
  public int hashCode() {
    // Stateless: all instances of this class hash identically.
    return 31 + this.getClass().hashCode();
  }

  @Override
  public boolean equals(Object obj) {
    // Stateless: any two instances of the exact same class are equal.
    return this == obj || (obj != null && getClass() == obj.getClass());
  }
}
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/search/payloads/AveragePayloadFunction.java
Java
art
1,812
package org.apache.lucene.search.payloads;

/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * Returns the maximum payload score seen, else 1 if there are no payloads on the doc.
 * <p/>
 * Is thread safe and completely reusable.
 *
 **/
public class MaxPayloadFunction extends PayloadFunction {

  /** Keeps the running maximum; the first payload seeds the accumulator. */
  @Override
  public float currentScore(int docId, String field, int start, int end,
      int numPayloadsSeen, float currentScore, float currentPayloadScore) {
    return numPayloadsSeen == 0
        ? currentPayloadScore
        : Math.max(currentPayloadScore, currentScore);
  }

  /** Returns the max payload score for the doc, or 1 when no payloads were seen. */
  @Override
  public float docScore(int docId, String field, int numPayloadsSeen,
      float payloadScore) {
    if (numPayloadsSeen > 0) {
      return payloadScore;
    }
    return 1;
  }

  @Override
  public int hashCode() {
    // Stateless: all instances of this class hash identically.
    return 31 + this.getClass().hashCode();
  }

  @Override
  public boolean equals(Object obj) {
    // Stateless: any two instances of the exact same class are equal.
    return this == obj || (obj != null && getClass() == obj.getClass());
  }
}
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/search/payloads/MaxPayloadFunction.java
Java
art
1,900
package org.apache.lucene.search.payloads; /** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import org.apache.lucene.index.IndexReader; import org.apache.lucene.search.Explanation; import org.apache.lucene.search.Scorer; import org.apache.lucene.search.Searcher; import org.apache.lucene.search.Similarity; import org.apache.lucene.search.Weight; import org.apache.lucene.search.spans.NearSpansOrdered; import org.apache.lucene.search.spans.NearSpansUnordered; import org.apache.lucene.search.spans.SpanNearQuery; import org.apache.lucene.search.spans.SpanQuery; import org.apache.lucene.search.spans.SpanScorer; import org.apache.lucene.search.spans.SpanWeight; import org.apache.lucene.search.spans.Spans; import org.apache.lucene.util.ToStringUtils; import java.io.IOException; import java.util.Collection; import java.util.Iterator; /** * This class is very similar to * {@link org.apache.lucene.search.spans.SpanNearQuery} except that it factors * in the value of the payloads located at each of the positions where the * {@link org.apache.lucene.search.spans.TermSpans} occurs. * <p/> * In order to take advantage of this, you must override * {@link org.apache.lucene.search.Similarity#scorePayload} * which returns 1 by default. 
* <p/> * Payload scores are aggregated using a pluggable {@link PayloadFunction}. * * @see org.apache.lucene.search.Similarity#scorePayload */ public class PayloadNearQuery extends SpanNearQuery { protected String fieldName; protected PayloadFunction function; public PayloadNearQuery(SpanQuery[] clauses, int slop, boolean inOrder) { this(clauses, slop, inOrder, new AveragePayloadFunction()); } public PayloadNearQuery(SpanQuery[] clauses, int slop, boolean inOrder, PayloadFunction function) { super(clauses, slop, inOrder); fieldName = clauses[0].getField(); // all clauses must have same field this.function = function; } @Override public Weight createWeight(Searcher searcher) throws IOException { return new PayloadNearSpanWeight(this, searcher); } @Override public Object clone() { int sz = clauses.size(); SpanQuery[] newClauses = new SpanQuery[sz]; for (int i = 0; i < sz; i++) { newClauses[i] = (SpanQuery) clauses.get(i).clone(); } PayloadNearQuery boostingNearQuery = new PayloadNearQuery(newClauses, slop, inOrder); boostingNearQuery.setBoost(getBoost()); return boostingNearQuery; } @Override public String toString(String field) { StringBuilder buffer = new StringBuilder(); buffer.append("payloadNear(["); Iterator<SpanQuery> i = clauses.iterator(); while (i.hasNext()) { SpanQuery clause = i.next(); buffer.append(clause.toString(field)); if (i.hasNext()) { buffer.append(", "); } } buffer.append("], "); buffer.append(slop); buffer.append(", "); buffer.append(inOrder); buffer.append(")"); buffer.append(ToStringUtils.boost(getBoost())); return buffer.toString(); } @Override public int hashCode() { final int prime = 31; int result = super.hashCode(); result = prime * result + ((fieldName == null) ? 0 : fieldName.hashCode()); result = prime * result + ((function == null) ? 
0 : function.hashCode()); return result; } @Override public boolean equals(Object obj) { if (this == obj) return true; if (!super.equals(obj)) return false; if (getClass() != obj.getClass()) return false; PayloadNearQuery other = (PayloadNearQuery) obj; if (fieldName == null) { if (other.fieldName != null) return false; } else if (!fieldName.equals(other.fieldName)) return false; if (function == null) { if (other.function != null) return false; } else if (!function.equals(other.function)) return false; return true; } public class PayloadNearSpanWeight extends SpanWeight { public PayloadNearSpanWeight(SpanQuery query, Searcher searcher) throws IOException { super(query, searcher); } @Override public Scorer scorer(IndexReader reader, boolean scoreDocsInOrder, boolean topScorer) throws IOException { return new PayloadNearSpanScorer(query.getSpans(reader), this, similarity, reader.norms(query.getField())); } } public class PayloadNearSpanScorer extends SpanScorer { Spans spans; protected float payloadScore; private int payloadsSeen; Similarity similarity = getSimilarity(); protected PayloadNearSpanScorer(Spans spans, Weight weight, Similarity similarity, byte[] norms) throws IOException { super(spans, weight, similarity, norms); this.spans = spans; } // Get the payloads associated with all underlying subspans public void getPayloads(Spans[] subSpans) throws IOException { for (int i = 0; i < subSpans.length; i++) { if (subSpans[i] instanceof NearSpansOrdered) { if (((NearSpansOrdered) subSpans[i]).isPayloadAvailable()) { processPayloads(((NearSpansOrdered) subSpans[i]).getPayload(), subSpans[i].start(), subSpans[i].end()); } getPayloads(((NearSpansOrdered) subSpans[i]).getSubSpans()); } else if (subSpans[i] instanceof NearSpansUnordered) { if (((NearSpansUnordered) subSpans[i]).isPayloadAvailable()) { processPayloads(((NearSpansUnordered) subSpans[i]).getPayload(), subSpans[i].start(), subSpans[i].end()); } getPayloads(((NearSpansUnordered) subSpans[i]).getSubSpans()); 
} } } /** * By default, uses the {@link PayloadFunction} to score the payloads, but * can be overridden to do other things. * * @param payLoads The payloads * @param start The start position of the span being scored * @param end The end position of the span being scored * * @see Spans */ protected void processPayloads(Collection<byte[]> payLoads, int start, int end) { for (final byte[] thePayload : payLoads) { payloadScore = function.currentScore(doc, fieldName, start, end, payloadsSeen, payloadScore, similarity.scorePayload(doc, fieldName, spans.start(), spans.end(), thePayload, 0, thePayload.length)); ++payloadsSeen; } } // @Override protected boolean setFreqCurrentDoc() throws IOException { if (!more) { return false; } Spans[] spansArr = new Spans[1]; spansArr[0] = spans; payloadScore = 0; payloadsSeen = 0; getPayloads(spansArr); return super.setFreqCurrentDoc(); } @Override public float score() throws IOException { return super.score() * function.docScore(doc, fieldName, payloadsSeen, payloadScore); } @Override protected Explanation explain(int doc) throws IOException { Explanation result = new Explanation(); Explanation nonPayloadExpl = super.explain(doc); result.addDetail(nonPayloadExpl); Explanation payloadBoost = new Explanation(); result.addDetail(payloadBoost); float avgPayloadScore = (payloadsSeen > 0 ? (payloadScore / payloadsSeen) : 1); payloadBoost.setValue(avgPayloadScore); payloadBoost.setDescription("scorePayload(...)"); result.setValue(nonPayloadExpl.getValue() * avgPayloadScore); result.setDescription("bnq, product of:"); return result; } } }
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/search/payloads/PayloadNearQuery.java
Java
art
8,292
package org.apache.lucene.search.payloads;

/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * Calculates the minimum payload seen
 *
 **/
public class MinPayloadFunction extends PayloadFunction {

  /** Keeps the running minimum; the first payload seeds the accumulator. */
  @Override
  public float currentScore(int docId, String field, int start, int end,
      int numPayloadsSeen, float currentScore, float currentPayloadScore) {
    return numPayloadsSeen == 0
        ? currentPayloadScore
        : Math.min(currentPayloadScore, currentScore);
  }

  /** Returns the min payload score for the doc, or 1 when no payloads were seen. */
  @Override
  public float docScore(int docId, String field, int numPayloadsSeen,
      float payloadScore) {
    if (numPayloadsSeen > 0) {
      return payloadScore;
    }
    return 1;
  }

  @Override
  public int hashCode() {
    // Stateless: all instances of this class hash identically.
    return 31 + this.getClass().hashCode();
  }

  @Override
  public boolean equals(Object obj) {
    // Stateless: any two instances of the exact same class are equal.
    return this == obj || (obj != null && getClass() == obj.getClass());
  }
}
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/search/payloads/MinPayloadFunction.java
Java
art
1,802
package org.apache.lucene.search.payloads;
/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.DisjunctionMaxQuery;
import org.apache.lucene.search.FilteredQuery;
import org.apache.lucene.search.MultiPhraseQuery;
import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.spans.SpanNearQuery;
import org.apache.lucene.search.spans.SpanOrQuery;
import org.apache.lucene.search.spans.SpanQuery;
import org.apache.lucene.search.spans.SpanTermQuery;
import org.apache.lucene.search.spans.Spans;

/**
 * Experimental class to get set of payloads for most standard Lucene queries.
 * Operates like Highlighter - IndexReader should only contain doc of interest,
 * best to use MemoryIndex.
 *
 * <p/>
 * <font color="#FF0000">
 * WARNING: The status of the <b>Payloads</b> feature is experimental.
 * The APIs introduced here might change in the future and will not be
 * supported anymore in such a case.</font>
 *
 */
public class PayloadSpanUtil {
  // Reader over the (ideally single-document) index the payloads are pulled from.
  private IndexReader reader;

  /**
   * @param reader
   *          that contains doc with payloads to extract
   */
  public PayloadSpanUtil(IndexReader reader) {
    this.reader = reader;
  }

  /**
   * Collects all payloads that match {@code query} against this util's reader.
   * Query should be rewritten for wild/fuzzy support.
   *
   * @param query the (rewritten) query to extract payloads for
   * @return payloads Collection
   * @throws IOException on low-level index error
   */
  public Collection<byte[]> getPayloadsForQuery(Query query) throws IOException {
    Collection<byte[]> payloads = new ArrayList<byte[]>();
    queryToSpanQuery(query, payloads);
    return payloads;
  }

  /**
   * Recursively translates {@code query} into equivalent {@link SpanQuery}s
   * and accumulates their payloads into {@code payloads}. Query types that
   * cannot be converted (and are not containers of convertible queries) are
   * silently ignored.
   */
  private void queryToSpanQuery(Query query, Collection<byte[]> payloads)
      throws IOException {
    if (query instanceof BooleanQuery) {
      // Recurse into all non-prohibited (i.e. non-MUST_NOT) clauses.
      BooleanClause[] queryClauses = ((BooleanQuery) query).getClauses();

      for (int i = 0; i < queryClauses.length; i++) {
        if (!queryClauses[i].isProhibited()) {
          queryToSpanQuery(queryClauses[i].getQuery(), payloads);
        }
      }
    } else if (query instanceof PhraseQuery) {
      // A phrase becomes a SpanNear over its terms; slop==0 forces in-order.
      Term[] phraseQueryTerms = ((PhraseQuery) query).getTerms();
      SpanQuery[] clauses = new SpanQuery[phraseQueryTerms.length];
      for (int i = 0; i < phraseQueryTerms.length; i++) {
        clauses[i] = new SpanTermQuery(phraseQueryTerms[i]);
      }

      int slop = ((PhraseQuery) query).getSlop();
      boolean inorder = false;

      if (slop == 0) {
        inorder = true;
      }

      SpanNearQuery sp = new SpanNearQuery(clauses, slop, inorder);
      sp.setBoost(query.getBoost());
      getPayloads(payloads, sp);
    } else if (query instanceof TermQuery) {
      SpanTermQuery stq = new SpanTermQuery(((TermQuery) query).getTerm());
      stq.setBoost(query.getBoost());
      getPayloads(payloads, stq);
    } else if (query instanceof SpanQuery) {
      // Already a span query: extract payloads directly.
      getPayloads(payloads, (SpanQuery) query);
    } else if (query instanceof FilteredQuery) {
      // The filter cannot carry payloads; only the wrapped query matters.
      queryToSpanQuery(((FilteredQuery) query).getQuery(), payloads);
    } else if (query instanceof DisjunctionMaxQuery) {

      for (Iterator<Query> iterator = ((DisjunctionMaxQuery) query).iterator(); iterator
          .hasNext();) {
        queryToSpanQuery(iterator.next(), payloads);
      }

    } else if (query instanceof MultiPhraseQuery) {
      final MultiPhraseQuery mpq = (MultiPhraseQuery) query;
      final List<Term[]> termArrays = mpq.getTermArrays();
      final int[] positions = mpq.getPositions();
      if (positions.length > 0) {

        // Find the largest position; seeded with the last entry, then the
        // remaining entries are scanned.
        int maxPosition = positions[positions.length - 1];
        for (int i = 0; i < positions.length - 1; ++i) {
          if (positions[i] > maxPosition) {
            maxPosition = positions[i];
          }
        }

        // Group the term alternatives by position; several termArrays may
        // share a position, in which case their disjuncts are merged.
        @SuppressWarnings("unchecked")
        final List<Query>[] disjunctLists = new List[maxPosition + 1];
        int distinctPositions = 0;

        for (int i = 0; i < termArrays.size(); ++i) {
          final Term[] termArray = termArrays.get(i);
          List<Query> disjuncts = disjunctLists[positions[i]];
          if (disjuncts == null) {
            disjuncts = (disjunctLists[positions[i]] = new ArrayList<Query>(
                termArray.length));
            ++distinctPositions;
          }
          for (final Term term : termArray) {
            disjuncts.add(new SpanTermQuery(term));
          }
        }

        // One SpanOr clause per occupied position; empty positions become
        // extra slop (positionGaps) on the surrounding SpanNear.
        int positionGaps = 0;
        int position = 0;
        final SpanQuery[] clauses = new SpanQuery[distinctPositions];
        for (int i = 0; i < disjunctLists.length; ++i) {
          List<Query> disjuncts = disjunctLists[i];
          if (disjuncts != null) {
            clauses[position++] = new SpanOrQuery(disjuncts
                .toArray(new SpanQuery[disjuncts.size()]));
          } else {
            ++positionGaps;
          }
        }

        final int slop = mpq.getSlop();
        final boolean inorder = (slop == 0);

        SpanNearQuery sp = new SpanNearQuery(clauses, slop + positionGaps,
            inorder);
        sp.setBoost(query.getBoost());
        getPayloads(payloads, sp);
      }
    }
  }

  /**
   * Iterates all spans of {@code query} against {@link #reader} and copies
   * every available payload into {@code payloads}.
   */
  private void getPayloads(Collection<byte []> payloads, SpanQuery query)
      throws IOException {
    Spans spans = query.getSpans(reader);

    while (spans.next() == true) {
      if (spans.isPayloadAvailable()) {
        Collection<byte[]> payload = spans.getPayload();
        for (byte [] bytes : payload) {
          payloads.add(bytes);
        }
      }
    }
  }
}
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/search/payloads/PayloadSpanUtil.java
Java
art
6,540
package org.apache.lucene.search.payloads; /** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import java.io.Serializable; /** * An abstract class that defines a way for Payload*Query instances * to transform the cumulative effects of payload scores for a document. * * @see org.apache.lucene.search.payloads.PayloadTermQuery for more information * * <p/> * This class and its derivations are experimental and subject to change * **/ public abstract class PayloadFunction implements Serializable { /** * Calculate the score up to this point for this doc and field * @param docId The current doc * @param field The field * @param start The start position of the matching Span * @param end The end position of the matching Span * @param numPayloadsSeen The number of payloads seen so far * @param currentScore The current score so far * @param currentPayloadScore The score for the current payload * @return The new current Score * * @see org.apache.lucene.search.spans.Spans */ public abstract float currentScore(int docId, String field, int start, int end, int numPayloadsSeen, float currentScore, float currentPayloadScore); /** * Calculate the final score for all the payloads seen so far for this doc/field * @param docId The current doc * @param field The current 
field * @param numPayloadsSeen The total number of payloads seen on this document * @param payloadScore The raw score for those payloads * @return The final score for the payloads */ public abstract float docScore(int docId, String field, int numPayloadsSeen, float payloadScore); @Override public abstract int hashCode(); @Override public abstract boolean equals(Object o); }
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/search/payloads/PayloadFunction.java
Java
art
2,491
package org.apache.lucene.search;

/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import java.io.IOException;
import java.io.Closeable;

import org.apache.lucene.document.Document;
import org.apache.lucene.document.FieldSelector;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.Term;

/**
 * The interface for search implementations.
 *
 * <p>
 * Searchable is the abstract network protocol for searching. Implementations
 * provide search over a single index, over multiple indices, and over indices
 * on remote servers.
 *
 * <p>
 * Queries, filters and sort criteria are designed to be compact so that they
 * may be efficiently passed to a remote index, with only the top-scoring hits
 * being returned, rather than every matching hit.
 *
 * <b>NOTE:</b> this interface is kept public for convenience. Since it is not
 * expected to be implemented directly, it may be changed unexpectedly between
 * releases.
 */
public interface Searchable extends Closeable {

  /**
   * Lower-level search API.
   *
   * <p>
   * {@link Collector#collect(int)} is called for every document. <br>
   * Collector-based access to remote indexes is discouraged.
   *
   * <p>
   * Applications should only use this if they need <i>all</i> of the matching
   * documents. The high-level search API ({@link Searcher#search(Query,int)}) is
   * usually more efficient, as it skips non-high-scoring hits.
   *
   * @param weight
   *          to match documents
   * @param filter
   *          if non-null, used to permit documents to be collected.
   * @param collector
   *          to receive hits
   * @throws BooleanQuery.TooManyClauses
   */
  void search(Weight weight, Filter filter, Collector collector) throws IOException;

  /** Frees resources associated with this Searcher.
   * Be careful not to call this method while you are still using objects
   * that reference this Searchable.
   */
  void close() throws IOException;

  /** Expert: Returns the number of documents containing <code>term</code>.
   *
   * @see org.apache.lucene.index.IndexReader#docFreq(Term)
   */
  int docFreq(Term term) throws IOException;

  /** Expert: For each term in the terms array, calculates the number of
   * documents containing <code>term</code>. Returns an array with these
   * document frequencies. Used to minimize number of remote calls.
   */
  int[] docFreqs(Term[] terms) throws IOException;

  /** Expert: Returns one greater than the largest possible document number.
   *
   * @see org.apache.lucene.index.IndexReader#maxDoc()
   */
  int maxDoc() throws IOException;

  /** Expert: Low-level search implementation.  Finds the top <code>n</code>
   * hits for <code>query</code>, applying <code>filter</code> if non-null.
   *
   * <p>Applications should usually call {@link Searcher#search(Query,int)} or
   * {@link Searcher#search(Query,Filter,int)} instead.
   * @throws BooleanQuery.TooManyClauses
   */
  TopDocs search(Weight weight, Filter filter, int n) throws IOException;

  /**
   * Returns the stored fields of document <code>i</code>.
   *
   * @see org.apache.lucene.index.IndexReader#document(int)
   * @throws CorruptIndexException if the index is corrupt
   * @throws IOException if there is a low-level IO error
   */
  Document doc(int i) throws CorruptIndexException, IOException;

  /**
   * Get the {@link org.apache.lucene.document.Document} at the <code>n</code><sup>th</sup> position. The {@link org.apache.lucene.document.FieldSelector}
   * may be used to determine what {@link org.apache.lucene.document.Field}s to load and how they should be loaded.
   *
   * <b>NOTE:</b> If the underlying Reader (more specifically, the underlying <code>FieldsReader</code>) is closed before the lazy {@link org.apache.lucene.document.Field} is
   * loaded an exception may be thrown. If you want the value of a lazy {@link org.apache.lucene.document.Field} to be available after closing you must
   * explicitly load it or fetch the Document again with a new loader.
   *
   *
   * @param n Get the document at the <code>n</code><sup>th</sup> position
   * @param fieldSelector The {@link org.apache.lucene.document.FieldSelector} to use to determine what Fields should be loaded on the Document. May be null, in which case all Fields will be loaded.
   * @return The stored fields of the {@link org.apache.lucene.document.Document} at the nth position
   * @throws CorruptIndexException if the index is corrupt
   * @throws IOException if there is a low-level IO error
   *
   * @see org.apache.lucene.index.IndexReader#document(int, FieldSelector)
   * @see org.apache.lucene.document.Fieldable
   * @see org.apache.lucene.document.FieldSelector
   * @see org.apache.lucene.document.SetBasedFieldSelector
   * @see org.apache.lucene.document.LoadFirstFieldSelector
   */
  Document doc(int n, FieldSelector fieldSelector) throws CorruptIndexException, IOException;

  /** Expert: called to re-write queries into primitive queries.
   * @throws BooleanQuery.TooManyClauses
   */
  Query rewrite(Query query) throws IOException;

  /** Expert: low-level implementation method
   * Returns an Explanation that describes how <code>doc</code> scored against
   * <code>weight</code>.
   *
   * <p>This is intended to be used in developing Similarity implementations,
   * and, for good performance, should not be displayed with every hit.
   * Computing an explanation is as expensive as executing the query over the
   * entire index.
   * <p>Applications should call {@link Searcher#explain(Query, int)}.
   * @throws BooleanQuery.TooManyClauses
   */
  Explanation explain(Weight weight, int doc) throws IOException;

  /** Expert: Low-level search implementation with arbitrary sorting.  Finds
   * the top <code>n</code> hits for <code>query</code>, applying
   * <code>filter</code> if non-null, and sorting the hits by the criteria in
   * <code>sort</code>.
   *
   * <p>Applications should usually call {@link
   * Searcher#search(Query,Filter,int,Sort)} instead.
   *
   * @throws BooleanQuery.TooManyClauses
   */
  TopFieldDocs search(Weight weight, Filter filter, int n, Sort sort)
      throws IOException;

}
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/search/Searchable.java
Java
art
6,948
package org.apache.lucene.search; /** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.Term; import java.io.IOException; import java.util.Set; /** * A query that wraps a filter and simply returns a constant score equal to the * query boost for every document in the filter. 
*/ public class ConstantScoreQuery extends Query { protected final Filter filter; public ConstantScoreQuery(Filter filter) { this.filter=filter; } /** Returns the encapsulated filter */ public Filter getFilter() { return filter; } @Override public Query rewrite(IndexReader reader) throws IOException { return this; } @Override public void extractTerms(Set<Term> terms) { // OK to not add any terms when used for MultiSearcher, // but may not be OK for highlighting } protected class ConstantWeight extends Weight { private Similarity similarity; private float queryNorm; private float queryWeight; public ConstantWeight(Searcher searcher) { this.similarity = getSimilarity(searcher); } @Override public Query getQuery() { return ConstantScoreQuery.this; } @Override public float getValue() { return queryWeight; } @Override public float sumOfSquaredWeights() throws IOException { queryWeight = getBoost(); return queryWeight * queryWeight; } @Override public void normalize(float norm) { this.queryNorm = norm; queryWeight *= this.queryNorm; } @Override public Scorer scorer(IndexReader reader, boolean scoreDocsInOrder, boolean topScorer) throws IOException { return new ConstantScorer(similarity, reader, this); } @Override public Explanation explain(IndexReader reader, int doc) throws IOException { ConstantScorer cs = new ConstantScorer(similarity, reader, this); boolean exists = cs.docIdSetIterator.advance(doc) == doc; ComplexExplanation result = new ComplexExplanation(); if (exists) { result.setDescription("ConstantScoreQuery(" + filter + "), product of:"); result.setValue(queryWeight); result.setMatch(Boolean.TRUE); result.addDetail(new Explanation(getBoost(), "boost")); result.addDetail(new Explanation(queryNorm,"queryNorm")); } else { result.setDescription("ConstantScoreQuery(" + filter + ") doesn't match id " + doc); result.setValue(0); result.setMatch(Boolean.FALSE); } return result; } } protected class ConstantScorer extends Scorer { final DocIdSetIterator 
docIdSetIterator; final float theScore; int doc = -1; public ConstantScorer(Similarity similarity, IndexReader reader, Weight w) throws IOException { super(similarity); theScore = w.getValue(); DocIdSet docIdSet = filter.getDocIdSet(reader); if (docIdSet == null) { docIdSetIterator = DocIdSet.EMPTY_DOCIDSET.iterator(); } else { DocIdSetIterator iter = docIdSet.iterator(); if (iter == null) { docIdSetIterator = DocIdSet.EMPTY_DOCIDSET.iterator(); } else { docIdSetIterator = iter; } } } @Override public int nextDoc() throws IOException { return docIdSetIterator.nextDoc(); } @Override public int docID() { return docIdSetIterator.docID(); } @Override public float score() throws IOException { return theScore; } @Override public int advance(int target) throws IOException { return docIdSetIterator.advance(target); } } @Override public Weight createWeight(Searcher searcher) { return new ConstantScoreQuery.ConstantWeight(searcher); } /** Prints a user-readable version of this query. */ @Override public String toString(String field) { return "ConstantScore(" + filter.toString() + (getBoost()==1.0 ? ")" : "^" + getBoost()); } /** Returns true if <code>o</code> is equal to this. */ @Override public boolean equals(Object o) { if (this == o) return true; if (!(o instanceof ConstantScoreQuery)) return false; ConstantScoreQuery other = (ConstantScoreQuery)o; return this.getBoost()==other.getBoost() && filter.equals(other.filter); } /** Returns a hash code value for this object. */ @Override public int hashCode() { // Simple add is OK since no existing filter hashcode has a float component. return filter.hashCode() + Float.floatToIntBits(getBoost()); } }
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/search/ConstantScoreQuery.java
Java
art
5,365
package org.apache.lucene.search; /** * Copyright 2005 The Apache Software Foundation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import java.util.ArrayList; import java.util.List; /** * The results of a SpanQueryFilter. Wraps the BitSet and the position information from the SpanQuery * *<p/> * NOTE: This API is still experimental and subject to change. * **/ public class SpanFilterResult { private DocIdSet docIdSet; private List<PositionInfo> positions;//Spans spans; /** * * @param docIdSet The DocIdSet for the Filter * @param positions A List of {@link org.apache.lucene.search.SpanFilterResult.PositionInfo} objects */ public SpanFilterResult(DocIdSet docIdSet, List<PositionInfo> positions) { this.docIdSet = docIdSet; this.positions = positions; } /** * The first entry in the array corresponds to the first "on" bit. 
* Entries are increasing by document order * @return A List of PositionInfo objects */ public List<PositionInfo> getPositions() { return positions; } /** Returns the docIdSet */ public DocIdSet getDocIdSet() { return docIdSet; } public static class PositionInfo { private int doc; private List<StartEnd> positions; public PositionInfo(int doc) { this.doc = doc; positions = new ArrayList<StartEnd>(); } public void addPosition(int start, int end) { positions.add(new StartEnd(start, end)); } public int getDoc() { return doc; } /** * * @return Positions */ public List<StartEnd> getPositions() { return positions; } } public static class StartEnd { private int start; private int end; public StartEnd(int start, int end) { this.start = start; this.end = end; } /** * * @return The end position of this match */ public int getEnd() { return end; } /** * The Start position * @return The start position of this match */ public int getStart() { return start; } } }
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/search/SpanFilterResult.java
Java
art
2,600
package org.apache.lucene.search;

/**
 * Copyright 2004 The Apache Software Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import java.io.IOException;

/**
 * The Scorer for DisjunctionMaxQuery.  The union of all documents generated by the subquery scorers
 * is generated in document number order.  The score for each document is the maximum of the scores computed
 * by the subquery scorers that generate that document, plus tieBreakerMultiplier times the sum of the scores
 * for the other subqueries that generate the document.
 */
class DisjunctionMaxScorer extends Scorer {

  /* The scorers for subqueries that have remaining docs, kept as a min heap by number of next doc. */
  private final Scorer[] subScorers;
  // Live prefix of subScorers: only indices [0, numScorers) are part of the heap.
  private int numScorers;
  /* Multiplier applied to non-maximum-scoring subqueries for a document as they are summed into the result. */
  private final float tieBreakerMultiplier;
  // Current doc id of this scorer; -1 before first nextDoc()/advance() call.
  private int doc = -1;

  /**
   * Creates a new instance of DisjunctionMaxScorer
   *
   * @param tieBreakerMultiplier
   *          Multiplier applied to non-maximum-scoring subqueries for a
   *          document as they are summed into the result.
   * @param similarity
   *          -- not used since our definition involves neither coord nor terms
   *          directly
   * @param subScorers
   *          The sub scorers this Scorer should iterate on
   * @param numScorers
   *          The actual number of scorers to iterate on. Note that the array's
   *          length may be larger than the actual number of scorers.
   */
  public DisjunctionMaxScorer(float tieBreakerMultiplier,
      Similarity similarity, Scorer[] subScorers, int numScorers) throws IOException {
    super(similarity);
    this.tieBreakerMultiplier = tieBreakerMultiplier;
    // The passed subScorers array includes only scorers which have documents
    // (DisjunctionMaxQuery takes care of that), and their nextDoc() was already
    // called.
    this.subScorers = subScorers;
    this.numScorers = numScorers;

    heapify();
  }

  @Override
  public int nextDoc() throws IOException {
    if (numScorers == 0) return doc = NO_MORE_DOCS;
    // Advance every scorer still sitting on the current doc; exhausted scorers
    // are removed from the heap. The heap root then holds the next doc id.
    while (subScorers[0].docID() == doc) {
      if (subScorers[0].nextDoc() != NO_MORE_DOCS) {
        heapAdjust(0);
      } else {
        heapRemoveRoot();
        if (numScorers == 0) {
          return doc = NO_MORE_DOCS;
        }
      }
    }

    return doc = subScorers[0].docID();
  }

  @Override
  public int docID() {
    return doc;
  }

  /** Determine the current document score.  Initially invalid, until {@link #nextDoc()} is called the first time.
   * @return the score of the current generated document
   */
  @Override
  public float score() throws IOException {
    int doc = subScorers[0].docID();
    // Single-element arrays act as mutable accumulators for the recursion.
    float[] sum = { subScorers[0].score() }, max = { sum[0] };
    int size = numScorers;
    // Visit both heap subtrees of the root; all scorers on `doc` form a
    // connected region at the top of the min heap.
    scoreAll(1, size, doc, sum, max);
    scoreAll(2, size, doc, sum, max);
    // Max-scoring subquery counts fully; the rest contribute via the tiebreaker.
    return max[0] + (sum[0] - max[0]) * tieBreakerMultiplier;
  }

  // Recursively iterate all subScorers that generated last doc computing sum and max
  private void scoreAll(int root, int size, int doc, float[] sum, float[] max) throws IOException {
    if (root < size && subScorers[root].docID() == doc) {
      float sub = subScorers[root].score();
      sum[0] += sub;
      max[0] = Math.max(max[0], sub);
      // Children of heap node i live at 2i+1 and 2i+2.
      scoreAll((root<<1)+1, size, doc, sum, max);
      scoreAll((root<<1)+2, size, doc, sum, max);
    }
  }

  @Override
  public int advance(int target) throws IOException {
    if (numScorers == 0) return doc = NO_MORE_DOCS;
    // Keep advancing the heap root until it reaches target (or everything is
    // exhausted); same removal pattern as nextDoc().
    while (subScorers[0].docID() < target) {
      if (subScorers[0].advance(target) != NO_MORE_DOCS) {
        heapAdjust(0);
      } else {
        heapRemoveRoot();
        if (numScorers == 0) {
          return doc = NO_MORE_DOCS;
        }
      }
    }
    return doc = subScorers[0].docID();
  }

  // Organize subScorers into a min heap with scorers generating the earliest document on top.
  private void heapify() {
    // Standard bottom-up heapify over the live prefix.
    for (int i = (numScorers >> 1) - 1; i >= 0; i--) {
      heapAdjust(i);
    }
  }

  /* The subtree of subScorers at root is a min heap except possibly for its root element.
   * Bubble the root down as required to make the subtree a heap.
   */
  private void heapAdjust(int root) {
    Scorer scorer = subScorers[root];
    int doc = scorer.docID();
    int i = root;
    // Loop while node i has at least a left child.
    while (i <= (numScorers >> 1) - 1) {
      int lchild = (i << 1) + 1;
      Scorer lscorer = subScorers[lchild];
      int ldoc = lscorer.docID();
      // Right child may not exist; MAX_VALUE makes it lose every comparison.
      int rdoc = Integer.MAX_VALUE, rchild = (i << 1) + 2;
      Scorer rscorer = null;
      if (rchild < numScorers) {
        rscorer = subScorers[rchild];
        rdoc = rscorer.docID();
      }
      if (ldoc < doc) {
        if (rdoc < ldoc) {
          // Right child is the smallest: swap it up and descend right.
          subScorers[i] = rscorer;
          subScorers[rchild] = scorer;
          i = rchild;
        } else {
          // Left child is the smallest: swap it up and descend left.
          subScorers[i] = lscorer;
          subScorers[lchild] = scorer;
          i = lchild;
        }
      } else if (rdoc < doc) {
        subScorers[i] = rscorer;
        subScorers[rchild] = scorer;
        i = rchild;
      } else {
        // Heap property restored.
        return;
      }
    }
  }

  // Remove the root Scorer from subScorers and re-establish it as a heap
  private void heapRemoveRoot() {
    if (numScorers == 1) {
      subScorers[0] = null;
      numScorers = 0;
    } else {
      // Move the last live scorer to the root and sift it down.
      subScorers[0] = subScorers[numScorers - 1];
      subScorers[numScorers - 1] = null;
      --numScorers;
      heapAdjust(0);
    }
  }
}
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/search/DisjunctionMaxScorer.java
Java
art
6,045
package org.apache.lucene.document;

/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.Calendar;
import java.util.Date;
import java.util.TimeZone;
import java.util.Locale;

import org.apache.lucene.search.NumericRangeQuery; // for javadocs
import org.apache.lucene.util.NumericUtils; // for javadocs

/**
 * Provides support for converting dates to strings and vice-versa.
 * The strings are structured so that lexicographic sorting orders
 * them by date, which makes them suitable for use as field values
 * and search terms.
 *
 * <P>This class also helps you to limit the resolution of your dates. Do not
 * save dates with a finer resolution than you really need, as then
 * RangeQuery and PrefixQuery will require more memory and become slower.
 *
 * <P>Compared to {@link DateField} the strings generated by the methods
 * in this class take slightly more space, unless your selected resolution
 * is set to <code>Resolution.DAY</code> or lower.
 *
 * <P>
 * Another approach is {@link NumericUtils}, which provides
 * a sortable binary representation (prefix encoded) of numeric values, which
 * date/time are.
 * For indexing a {@link Date} or {@link Calendar}, just get the unix timestamp as
 * <code>long</code> using {@link Date#getTime} or {@link Calendar#getTimeInMillis} and
 * index this as a numeric value with {@link NumericField}
 * and use {@link NumericRangeQuery} to query it.
 */
public class DateTools {

  // All formatting/parsing is done in GMT so the indexed terms do not depend
  // on the timezone of the machine that creates or reads the index.
  private final static TimeZone GMT = TimeZone.getTimeZone("GMT");

  // One formatter per supported resolution. The pattern lengths are distinct
  // (4, 6, 8, 10, 12, 14, 17 chars), which is what stringToDate() relies on
  // to pick the right parser. SimpleDateFormat is NOT thread-safe, which is
  // why every public method of this class is synchronized.
  private static final SimpleDateFormat YEAR_FORMAT = new SimpleDateFormat("yyyy", Locale.US);
  private static final SimpleDateFormat MONTH_FORMAT = new SimpleDateFormat("yyyyMM", Locale.US);
  private static final SimpleDateFormat DAY_FORMAT = new SimpleDateFormat("yyyyMMdd", Locale.US);
  private static final SimpleDateFormat HOUR_FORMAT = new SimpleDateFormat("yyyyMMddHH", Locale.US);
  private static final SimpleDateFormat MINUTE_FORMAT = new SimpleDateFormat("yyyyMMddHHmm", Locale.US);
  private static final SimpleDateFormat SECOND_FORMAT = new SimpleDateFormat("yyyyMMddHHmmss", Locale.US);
  private static final SimpleDateFormat MILLISECOND_FORMAT = new SimpleDateFormat("yyyyMMddHHmmssSSS", Locale.US);
  static {
    // times need to be normalized so the value doesn't depend on the
    // location the index is created/used:
    YEAR_FORMAT.setTimeZone(GMT);
    MONTH_FORMAT.setTimeZone(GMT);
    DAY_FORMAT.setTimeZone(GMT);
    HOUR_FORMAT.setTimeZone(GMT);
    MINUTE_FORMAT.setTimeZone(GMT);
    SECOND_FORMAT.setTimeZone(GMT);
    MILLISECOND_FORMAT.setTimeZone(GMT);
  }

  // Shared scratch Calendar; like the formatters above it is not thread-safe,
  // so access is guarded by the synchronized public methods.
  private static final Calendar calInstance = Calendar.getInstance(GMT);

  // cannot create, the class has static methods only
  private DateTools() {}

  /**
   * Converts a Date to a string suitable for indexing.
   *
   * @param date the date to be converted
   * @param resolution the desired resolution, see
   *  {@link #round(Date, DateTools.Resolution)}
   * @return a string in format <code>yyyyMMddHHmmssSSS</code> or shorter,
   *  depending on <code>resolution</code>; using GMT as timezone
   */
  public static synchronized String dateToString(Date date, Resolution resolution) {
    return timeToString(date.getTime(), resolution);
  }

  /**
   * Converts a millisecond time to a string suitable for indexing.
   *
   * @param time the date expressed as milliseconds since January 1, 1970, 00:00:00 GMT
   * @param resolution the desired resolution, see
   *  {@link #round(long, DateTools.Resolution)}
   * @return a string in format <code>yyyyMMddHHmmssSSS</code> or shorter,
   *  depending on <code>resolution</code>; using GMT as timezone
   */
  public static synchronized String timeToString(long time, Resolution resolution) {
    // Round first so fields finer than the resolution are zeroed, then format
    // with the matching-width pattern.
    calInstance.setTimeInMillis(round(time, resolution));
    Date date = calInstance.getTime();

    if (resolution == Resolution.YEAR) {
      return YEAR_FORMAT.format(date);
    } else if (resolution == Resolution.MONTH) {
      return MONTH_FORMAT.format(date);
    } else if (resolution == Resolution.DAY) {
      return DAY_FORMAT.format(date);
    } else if (resolution == Resolution.HOUR) {
      return HOUR_FORMAT.format(date);
    } else if (resolution == Resolution.MINUTE) {
      return MINUTE_FORMAT.format(date);
    } else if (resolution == Resolution.SECOND) {
      return SECOND_FORMAT.format(date);
    } else if (resolution == Resolution.MILLISECOND) {
      return MILLISECOND_FORMAT.format(date);
    }

    throw new IllegalArgumentException("unknown resolution " + resolution);
  }

  /**
   * Converts a string produced by <code>timeToString</code> or
   * <code>dateToString</code> back to a time, represented as the
   * number of milliseconds since January 1, 1970, 00:00:00 GMT.
   *
   * @param dateString the date string to be converted
   * @return the number of milliseconds since January 1, 1970, 00:00:00 GMT
   * @throws ParseException if <code>dateString</code> is not in the
   *  expected format
   */
  public static synchronized long stringToTime(String dateString) throws ParseException {
    return stringToDate(dateString).getTime();
  }

  /**
   * Converts a string produced by <code>timeToString</code> or
   * <code>dateToString</code> back to a time, represented as a
   * Date object.
   *
   * @param dateString the date string to be converted
   * @return the parsed time as a Date object
   * @throws ParseException if <code>dateString</code> is not in the
   *  expected format
   */
  public static synchronized Date stringToDate(String dateString) throws ParseException {
    // The resolution is inferred from the string length, since each format
    // pattern above has a unique width.
    if (dateString.length() == 4) {
      return YEAR_FORMAT.parse(dateString);
    } else if (dateString.length() == 6) {
      return MONTH_FORMAT.parse(dateString);
    } else if (dateString.length() == 8) {
      return DAY_FORMAT.parse(dateString);
    } else if (dateString.length() == 10) {
      return HOUR_FORMAT.parse(dateString);
    } else if (dateString.length() == 12) {
      return MINUTE_FORMAT.parse(dateString);
    } else if (dateString.length() == 14) {
      return SECOND_FORMAT.parse(dateString);
    } else if (dateString.length() == 17) {
      return MILLISECOND_FORMAT.parse(dateString);
    }
    throw new ParseException("Input is not valid date string: " + dateString, 0);
  }

  /**
   * Limit a date's resolution. For example, the date <code>2004-09-21 13:50:11</code>
   * will be changed to <code>2004-09-01 00:00:00</code> when using
   * <code>Resolution.MONTH</code>.
   *
   * @param date the date to be rounded
   * @param resolution The desired resolution of the date to be returned
   * @return the date with all values more precise than <code>resolution</code>
   *  set to 0 or 1
   */
  public static synchronized Date round(Date date, Resolution resolution) {
    return new Date(round(date.getTime(), resolution));
  }

  /**
   * Limit a date's resolution. For example, the date <code>1095767411000</code>
   * (which represents 2004-09-21 13:50:11) will be changed to
   * <code>1093989600000</code> (2004-09-01 00:00:00) when using
   * <code>Resolution.MONTH</code>.
   *
   * @param time the time to be rounded, in milliseconds
   * @param resolution The desired resolution of the date to be returned
   * @return the date with all values more precise than <code>resolution</code>
   *  set to 0 or 1, expressed as milliseconds since January 1, 1970, 00:00:00 GMT
   */
  public static synchronized long round(long time, Resolution resolution) {
    calInstance.setTimeInMillis(time);

    // Each branch clears every calendar field finer than the chosen
    // resolution (day-of-month is set to 1, all others to 0).
    if (resolution == Resolution.YEAR) {
      calInstance.set(Calendar.MONTH, 0);
      calInstance.set(Calendar.DAY_OF_MONTH, 1);
      calInstance.set(Calendar.HOUR_OF_DAY, 0);
      calInstance.set(Calendar.MINUTE, 0);
      calInstance.set(Calendar.SECOND, 0);
      calInstance.set(Calendar.MILLISECOND, 0);
    } else if (resolution == Resolution.MONTH) {
      calInstance.set(Calendar.DAY_OF_MONTH, 1);
      calInstance.set(Calendar.HOUR_OF_DAY, 0);
      calInstance.set(Calendar.MINUTE, 0);
      calInstance.set(Calendar.SECOND, 0);
      calInstance.set(Calendar.MILLISECOND, 0);
    } else if (resolution == Resolution.DAY) {
      calInstance.set(Calendar.HOUR_OF_DAY, 0);
      calInstance.set(Calendar.MINUTE, 0);
      calInstance.set(Calendar.SECOND, 0);
      calInstance.set(Calendar.MILLISECOND, 0);
    } else if (resolution == Resolution.HOUR) {
      calInstance.set(Calendar.MINUTE, 0);
      calInstance.set(Calendar.SECOND, 0);
      calInstance.set(Calendar.MILLISECOND, 0);
    } else if (resolution == Resolution.MINUTE) {
      calInstance.set(Calendar.SECOND, 0);
      calInstance.set(Calendar.MILLISECOND, 0);
    } else if (resolution == Resolution.SECOND) {
      calInstance.set(Calendar.MILLISECOND, 0);
    } else if (resolution == Resolution.MILLISECOND) {
      // don't cut off anything
    } else {
      throw new IllegalArgumentException("unknown resolution " + resolution);
    }
    return calInstance.getTimeInMillis();
  }

  /** Specifies the time granularity. */
  public static class Resolution {

    public static final Resolution YEAR = new Resolution("year");
    public static final Resolution MONTH = new Resolution("month");
    public static final Resolution DAY = new Resolution("day");
    public static final Resolution HOUR = new Resolution("hour");
    public static final Resolution MINUTE = new Resolution("minute");
    public static final Resolution SECOND = new Resolution("second");
    public static final Resolution MILLISECOND = new Resolution("millisecond");

    // Human-readable name, used only by toString().
    private String resolution;

    // cannot be instantiated by applications; use the constants above
    private Resolution() {
    }

    private Resolution(String resolution) {
      this.resolution = resolution;
    }

    @Override
    public String toString() {
      return resolution;
    }

  }

}
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/document/DateTools.java
Java
art
10,584
package org.apache.lucene.document; /** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import org.apache.lucene.document.NumericField; // for javadocs import org.apache.lucene.search.NumericRangeQuery; // for javadocs import org.apache.lucene.util.NumericUtils; // for javadocs // do not remove this class in 3.0, it may be needed to decode old indexes! /** * Provides support for converting longs to Strings, and back again. The strings * are structured so that lexicographic sorting order is preserved. * * <p> * That is, if l1 is less than l2 for any two longs l1 and l2, then * NumberTools.longToString(l1) is lexicographically less than * NumberTools.longToString(l2). (Similarly for "greater than" and "equals".) * * <p> * This class handles <b>all</b> long values (unlike * {@link org.apache.lucene.document.DateField}). * * @deprecated For new indexes use {@link NumericUtils} instead, which * provides a sortable binary representation (prefix encoded) of numeric * values. * To index and efficiently query numeric values use {@link NumericField} * and {@link NumericRangeQuery}. * This class is included for use with existing * indices and will be removed in a future release (possibly Lucene 4.0). 
*/ public class NumberTools { private static final int RADIX = 36; private static final char NEGATIVE_PREFIX = '-'; // NB: NEGATIVE_PREFIX must be < POSITIVE_PREFIX private static final char POSITIVE_PREFIX = '0'; //NB: this must be less than /** * Equivalent to longToString(Long.MIN_VALUE) */ public static final String MIN_STRING_VALUE = NEGATIVE_PREFIX + "0000000000000"; /** * Equivalent to longToString(Long.MAX_VALUE) */ public static final String MAX_STRING_VALUE = POSITIVE_PREFIX + "1y2p0ij32e8e7"; /** * The length of (all) strings returned by {@link #longToString} */ public static final int STR_SIZE = MIN_STRING_VALUE.length(); /** * Converts a long to a String suitable for indexing. */ public static String longToString(long l) { if (l == Long.MIN_VALUE) { // special case, because long is not symmetric around zero return MIN_STRING_VALUE; } StringBuilder buf = new StringBuilder(STR_SIZE); if (l < 0) { buf.append(NEGATIVE_PREFIX); l = Long.MAX_VALUE + l + 1; } else { buf.append(POSITIVE_PREFIX); } String num = Long.toString(l, RADIX); int padLen = STR_SIZE - num.length() - buf.length(); while (padLen-- > 0) { buf.append('0'); } buf.append(num); return buf.toString(); } /** * Converts a String that was returned by {@link #longToString} back to a * long. * * @throws IllegalArgumentException * if the input is null * @throws NumberFormatException * if the input does not parse (it was not a String returned by * longToString()). 
*/ public static long stringToLong(String str) { if (str == null) { throw new NullPointerException("string cannot be null"); } if (str.length() != STR_SIZE) { throw new NumberFormatException("string is the wrong size"); } if (str.equals(MIN_STRING_VALUE)) { return Long.MIN_VALUE; } char prefix = str.charAt(0); long l = Long.parseLong(str.substring(1), RADIX); if (prefix == POSITIVE_PREFIX) { // nop } else if (prefix == NEGATIVE_PREFIX) { l = l - Long.MAX_VALUE - 1; } else { throw new NumberFormatException( "string does not begin with the correct prefix"); } return l; } }
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/document/NumberTools.java
Java
art
4,611
package org.apache.lucene.document;

/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

/**
 * A {@link FieldSelector} based on a Map of field names to
 * {@link FieldSelectorResult}s. Any field not present in the map is
 * treated as {@link FieldSelectorResult#NO_LOAD}.
 */
public class MapFieldSelector implements FieldSelector {
    
    Map<String,FieldSelectorResult> fieldSelections;
    
    /** Create a MapFieldSelector
     * @param fieldSelections maps from field names (String) to {@link FieldSelectorResult}s
     */
    public MapFieldSelector(Map<String,FieldSelectorResult> fieldSelections) {
        this.fieldSelections = fieldSelections;
    }
    
    /** Create a MapFieldSelector
     * @param fields fields to LOAD.  List of Strings.  All other fields are NO_LOAD.
     */
    public MapFieldSelector(List<String> fields) {
        // Presize so the map never rehashes under the default 0.75 load factor.
        final Map<String,FieldSelectorResult> selections =
            new HashMap<String,FieldSelectorResult>(fields.size()*5/3);
        for (final String field : fields) {
            selections.put(field, FieldSelectorResult.LOAD);
        }
        fieldSelections = selections;
    }
    
    /** Create a MapFieldSelector
     * @param fields fields to LOAD.  All other fields are NO_LOAD.
     */
    public MapFieldSelector(String... fields) {
        this(Arrays.asList(fields));
    }
    
    /** Load field according to its associated value in fieldSelections
     * @param field a field name
     * @return the fieldSelections value that field maps to or NO_LOAD if none.
     */
    public FieldSelectorResult accept(String field) {
        final FieldSelectorResult selection = fieldSelections.get(field);
        if (selection == null) {
            // Unmapped fields are skipped entirely.
            return FieldSelectorResult.NO_LOAD;
        }
        return selection;
    }
    
}
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/document/MapFieldSelector.java
Java
art
2,429
<!doctype html public "-//w3c//dtd html 4.0 transitional//en"> <!-- Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> <html> <head> <meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1"> </head> <body> <p>The logical representation of a {@link org.apache.lucene.document.Document} for indexing and searching.</p> <p>The document package provides the user level logical representation of content to be indexed and searched. The package also provides utilities for working with {@link org.apache.lucene.document.Document}s and {@link org.apache.lucene.document.Fieldable}s.</p> <h2>Document and Fieldable</h2> <p>A {@link org.apache.lucene.document.Document} is a collection of {@link org.apache.lucene.document.Fieldable}s. A {@link org.apache.lucene.document.Fieldable} is a logical representation of a user's content that needs to be indexed or stored. {@link org.apache.lucene.document.Fieldable}s have a number of properties that tell Lucene how to treat the content (like indexed, tokenized, stored, etc.) See the {@link org.apache.lucene.document.Field} implementation of {@link org.apache.lucene.document.Fieldable} for specifics on these properties. 
</p>
<p>Note: it is common to refer to {@link org.apache.lucene.document.Document}s having
{@link org.apache.lucene.document.Field}s, even though technically they have
{@link org.apache.lucene.document.Fieldable}s.</p>
<h2>Working with Documents</h2>
<p>First and foremost, a {@link org.apache.lucene.document.Document} is something created by the user application.  It is your job
to create Documents based on the content of the files you are working with in your application (Word, txt, PDF, Excel or any other format.)
How this is done is completely up to you.  That being said, there are many tools available in other projects that can make
the process of taking a file and converting it into a Lucene {@link org.apache.lucene.document.Document} easier.  To see an example of this,
take a look at the Lucene <a href="../../../../../../gettingstarted.html" target="top">demo</a> and the associated source code
for extracting content from HTML.
</p>
<p>The {@link org.apache.lucene.document.DateTools} is a utility class to make dates and times searchable
(remember, Lucene only searches text). {@link org.apache.lucene.document.NumericField} is a special helper class
to simplify indexing of numeric values (and also dates) for fast range queries with {@link org.apache.lucene.search.NumericRangeQuery}
(using a special sortable string representation of numeric values).</p>
<p>The {@link org.apache.lucene.document.FieldSelector} class provides a mechanism to tell Lucene how to load Documents from
storage.  If no FieldSelector is used, all Fieldables on a Document will be loaded.  As an example of the FieldSelector usage, consider
the common use case of
displaying search results on a web page and then having users click through to see the full document.  In this scenario, it is often
the case that there are many small fields and one or two large fields (containing the contents of the original file).
Before the FieldSelector, the full Document had to be loaded, including the large fields, in order to display the results. Now, using the FieldSelector, one can {@link org.apache.lucene.document.FieldSelectorResult#LAZY_LOAD} the large fields, thus only loading the large fields when a user clicks on the actual link to view the original content.</p> </body> </html>
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/document/package.html
HTML
art
4,204
package org.apache.lucene.document; /** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.index.IndexWriter; // for javadoc import org.apache.lucene.util.StringHelper; import java.io.Reader; import java.io.Serializable; /** A field is a section of a Document. Each field has two parts, a name and a value. Values may be free text, provided as a String or as a Reader, or they may be atomic keywords, which are not further processed. Such keywords may be used to represent dates, urls, etc. Fields are optionally stored in the index, so that they may be returned with hits on the document. */ public final class Field extends AbstractField implements Fieldable, Serializable { /** Specifies whether and how a field should be stored. */ public static enum Store { /** Store the original field value in the index. This is useful for short texts * like a document's title which should be displayed with the results. The * value is stored in its original form, i.e. no analyzer is used before it is * stored. */ YES { @Override public boolean isStored() { return true; } }, /** Do not store the field value in the index. 
*/ NO { @Override public boolean isStored() { return false; } }; public abstract boolean isStored(); } /** Specifies whether and how a field should be indexed. */ public static enum Index { /** Do not index the field value. This field can thus not be searched, * but one can still access its contents provided it is * {@link Field.Store stored}. */ NO { @Override public boolean isIndexed() { return false; } @Override public boolean isAnalyzed() { return false; } @Override public boolean omitNorms() { return true; } }, /** Index the tokens produced by running the field's * value through an Analyzer. This is useful for * common text. */ ANALYZED { @Override public boolean isIndexed() { return true; } @Override public boolean isAnalyzed() { return true; } @Override public boolean omitNorms() { return false; } }, /** Index the field's value without using an Analyzer, so it can be searched. * As no analyzer is used the value will be stored as a single term. This is * useful for unique Ids like product numbers. */ NOT_ANALYZED { @Override public boolean isIndexed() { return true; } @Override public boolean isAnalyzed() { return false; } @Override public boolean omitNorms() { return false; } }, /** Expert: Index the field's value without an Analyzer, * and also disable the storing of norms. Note that you * can also separately enable/disable norms by calling * {@link Field#setOmitNorms}. No norms means that * index-time field and document boosting and field * length normalization are disabled. The benefit is * less memory usage as norms take up one byte of RAM * per indexed field for every document in the index, * during searching. Note that once you index a given * field <i>with</i> norms enabled, disabling norms will * have no effect. In other words, for this to have the * above described effect on a field, all instances of * that field must be indexed with NOT_ANALYZED_NO_NORMS * from the beginning. 
*/ NOT_ANALYZED_NO_NORMS { @Override public boolean isIndexed() { return true; } @Override public boolean isAnalyzed() { return false; } @Override public boolean omitNorms() { return true; } }, /** Expert: Index the tokens produced by running the * field's value through an Analyzer, and also * separately disable the storing of norms. See * {@link #NOT_ANALYZED_NO_NORMS} for what norms are * and why you may want to disable them. */ ANALYZED_NO_NORMS { @Override public boolean isIndexed() { return true; } @Override public boolean isAnalyzed() { return true; } @Override public boolean omitNorms() { return true; } }; /** Get the best representation of the index given the flags. */ public static Index toIndex(boolean indexed, boolean analyzed) { return toIndex(indexed, analyzed, false); } /** Expert: Get the best representation of the index given the flags. */ public static Index toIndex(boolean indexed, boolean analyzed, boolean omitNorms) { // If it is not indexed nothing else matters if (!indexed) { return Index.NO; } // typical, non-expert if (!omitNorms) { if (analyzed) { return Index.ANALYZED; } return Index.NOT_ANALYZED; } // Expert: Norms omitted if (analyzed) { return Index.ANALYZED_NO_NORMS; } return Index.NOT_ANALYZED_NO_NORMS; } public abstract boolean isIndexed(); public abstract boolean isAnalyzed(); public abstract boolean omitNorms(); } /** Specifies whether and how a field should have term vectors. */ public static enum TermVector { /** Do not store term vectors. */ NO { @Override public boolean isStored() { return false; } @Override public boolean withPositions() { return false; } @Override public boolean withOffsets() { return false; } }, /** Store the term vectors of each document. A term vector is a list * of the document's terms and their number of occurrences in that document. 
*/ YES { @Override public boolean isStored() { return true; } @Override public boolean withPositions() { return false; } @Override public boolean withOffsets() { return false; } }, /** * Store the term vector + token position information * * @see #YES */ WITH_POSITIONS { @Override public boolean isStored() { return true; } @Override public boolean withPositions() { return true; } @Override public boolean withOffsets() { return false; } }, /** * Store the term vector + Token offset information * * @see #YES */ WITH_OFFSETS { @Override public boolean isStored() { return true; } @Override public boolean withPositions() { return false; } @Override public boolean withOffsets() { return true; } }, /** * Store the term vector + Token position and offset information * * @see #YES * @see #WITH_POSITIONS * @see #WITH_OFFSETS */ WITH_POSITIONS_OFFSETS { @Override public boolean isStored() { return true; } @Override public boolean withPositions() { return true; } @Override public boolean withOffsets() { return true; } }; /** Get the best representation of a TermVector given the flags. */ public static TermVector toTermVector(boolean stored, boolean withOffsets, boolean withPositions) { // If it is not stored, nothing else matters. if (!stored) { return TermVector.NO; } if (withOffsets) { if (withPositions) { return Field.TermVector.WITH_POSITIONS_OFFSETS; } return Field.TermVector.WITH_OFFSETS; } if (withPositions) { return Field.TermVector.WITH_POSITIONS; } return Field.TermVector.YES; } public abstract boolean isStored(); public abstract boolean withPositions(); public abstract boolean withOffsets(); } /** The value of the field as a String, or null. If null, the Reader value or * binary value is used. Exactly one of stringValue(), * readerValue(), and getBinaryValue() must be set. */ public String stringValue() { return fieldsData instanceof String ? (String)fieldsData : null; } /** The value of the field as a Reader, or null. 
If null, the String value or * binary value is used. Exactly one of stringValue(), * readerValue(), and getBinaryValue() must be set. */ public Reader readerValue() { return fieldsData instanceof Reader ? (Reader)fieldsData : null; } /** The TokesStream for this field to be used when indexing, or null. If null, the Reader value * or String value is analyzed to produce the indexed tokens. */ public TokenStream tokenStreamValue() { return tokenStream; } /** <p>Expert: change the value of this field. This can * be used during indexing to re-use a single Field * instance to improve indexing speed by avoiding GC cost * of new'ing and reclaiming Field instances. Typically * a single {@link Document} instance is re-used as * well. This helps most on small documents.</p> * * <p>Each Field instance should only be used once * within a single {@link Document} instance. See <a * href="http://wiki.apache.org/lucene-java/ImproveIndexingSpeed">ImproveIndexingSpeed</a> * for details.</p> */ public void setValue(String value) { if (isBinary) { throw new IllegalArgumentException("cannot set a String value on a binary field"); } fieldsData = value; } /** Expert: change the value of this field. See <a href="#setValue(java.lang.String)">setValue(String)</a>. */ public void setValue(Reader value) { if (isBinary) { throw new IllegalArgumentException("cannot set a Reader value on a binary field"); } if (isStored) { throw new IllegalArgumentException("cannot set a Reader value on a stored field"); } fieldsData = value; } /** Expert: change the value of this field. See <a href="#setValue(java.lang.String)">setValue(String)</a>. */ public void setValue(byte[] value) { if (!isBinary) { throw new IllegalArgumentException("cannot set a byte[] value on a non-binary field"); } fieldsData = value; binaryLength = value.length; binaryOffset = 0; } /** Expert: change the value of this field. See <a href="#setValue(java.lang.String)">setValue(String)</a>. 
*/ public void setValue(byte[] value, int offset, int length) { if (!isBinary) { throw new IllegalArgumentException("cannot set a byte[] value on a non-binary field"); } fieldsData = value; binaryLength = length; binaryOffset = offset; } /** Expert: sets the token stream to be used for indexing and causes isIndexed() and isTokenized() to return true. * May be combined with stored values from stringValue() or getBinaryValue() */ public void setTokenStream(TokenStream tokenStream) { this.isIndexed = true; this.isTokenized = true; this.tokenStream = tokenStream; } /** * Create a field by specifying its name, value and how it will * be saved in the index. Term vectors will not be stored in the index. * * @param name The name of the field * @param value The string to process * @param store Whether <code>value</code> should be stored in the index * @param index Whether the field should be indexed, and if so, if it should * be tokenized before indexing * @throws NullPointerException if name or value is <code>null</code> * @throws IllegalArgumentException if the field is neither stored nor indexed */ public Field(String name, String value, Store store, Index index) { this(name, value, store, index, TermVector.NO); } /** * Create a field by specifying its name, value and how it will * be saved in the index. 
* * @param name The name of the field * @param value The string to process * @param store Whether <code>value</code> should be stored in the index * @param index Whether the field should be indexed, and if so, if it should * be tokenized before indexing * @param termVector Whether term vector should be stored * @throws NullPointerException if name or value is <code>null</code> * @throws IllegalArgumentException in any of the following situations: * <ul> * <li>the field is neither stored nor indexed</li> * <li>the field is not indexed but termVector is <code>TermVector.YES</code></li> * </ul> */ public Field(String name, String value, Store store, Index index, TermVector termVector) { this(name, true, value, store, index, termVector); } /** * Create a field by specifying its name, value and how it will * be saved in the index. * * @param name The name of the field * @param internName Whether to .intern() name or not * @param value The string to process * @param store Whether <code>value</code> should be stored in the index * @param index Whether the field should be indexed, and if so, if it should * be tokenized before indexing * @param termVector Whether term vector should be stored * @throws NullPointerException if name or value is <code>null</code> * @throws IllegalArgumentException in any of the following situations: * <ul> * <li>the field is neither stored nor indexed</li> * <li>the field is not indexed but termVector is <code>TermVector.YES</code></li> * </ul> */ public Field(String name, boolean internName, String value, Store store, Index index, TermVector termVector) { if (name == null) throw new NullPointerException("name cannot be null"); if (value == null) throw new NullPointerException("value cannot be null"); if (name.length() == 0 && value.length() == 0) throw new IllegalArgumentException("name and value cannot both be empty"); if (index == Index.NO && store == Store.NO) throw new IllegalArgumentException("it doesn't make sense to have a field that " 
+ "is neither indexed nor stored"); if (index == Index.NO && termVector != TermVector.NO) throw new IllegalArgumentException("cannot store term vector information " + "for a field that is not indexed"); if (internName) // field names are optionally interned name = StringHelper.intern(name); this.name = name; this.fieldsData = value; this.isStored = store.isStored(); this.isIndexed = index.isIndexed(); this.isTokenized = index.isAnalyzed(); this.omitNorms = index.omitNorms(); if (index == Index.NO) { this.omitTermFreqAndPositions = false; } this.isBinary = false; setStoreTermVector(termVector); } /** * Create a tokenized and indexed field that is not stored. Term vectors will * not be stored. The Reader is read only when the Document is added to the index, * i.e. you may not close the Reader until {@link IndexWriter#addDocument(Document)} * has been called. * * @param name The name of the field * @param reader The reader with the content * @throws NullPointerException if name or reader is <code>null</code> */ public Field(String name, Reader reader) { this(name, reader, TermVector.NO); } /** * Create a tokenized and indexed field that is not stored, optionally with * storing term vectors. The Reader is read only when the Document is added to the index, * i.e. you may not close the Reader until {@link IndexWriter#addDocument(Document)} * has been called. 
* * @param name The name of the field * @param reader The reader with the content * @param termVector Whether term vector should be stored * @throws NullPointerException if name or reader is <code>null</code> */ public Field(String name, Reader reader, TermVector termVector) { if (name == null) throw new NullPointerException("name cannot be null"); if (reader == null) throw new NullPointerException("reader cannot be null"); this.name = StringHelper.intern(name); // field names are interned this.fieldsData = reader; this.isStored = false; this.isIndexed = true; this.isTokenized = true; this.isBinary = false; setStoreTermVector(termVector); } /** * Create a tokenized and indexed field that is not stored. Term vectors will * not be stored. This is useful for pre-analyzed fields. * The TokenStream is read only when the Document is added to the index, * i.e. you may not close the TokenStream until {@link IndexWriter#addDocument(Document)} * has been called. * * @param name The name of the field * @param tokenStream The TokenStream with the content * @throws NullPointerException if name or tokenStream is <code>null</code> */ public Field(String name, TokenStream tokenStream) { this(name, tokenStream, TermVector.NO); } /** * Create a tokenized and indexed field that is not stored, optionally with * storing term vectors. This is useful for pre-analyzed fields. * The TokenStream is read only when the Document is added to the index, * i.e. you may not close the TokenStream until {@link IndexWriter#addDocument(Document)} * has been called. 
* * @param name The name of the field * @param tokenStream The TokenStream with the content * @param termVector Whether term vector should be stored * @throws NullPointerException if name or tokenStream is <code>null</code> */ public Field(String name, TokenStream tokenStream, TermVector termVector) { if (name == null) throw new NullPointerException("name cannot be null"); if (tokenStream == null) throw new NullPointerException("tokenStream cannot be null"); this.name = StringHelper.intern(name); // field names are interned this.fieldsData = null; this.tokenStream = tokenStream; this.isStored = false; this.isIndexed = true; this.isTokenized = true; this.isBinary = false; setStoreTermVector(termVector); } /** * Create a stored field with binary value. Optionally the value may be compressed. * * @param name The name of the field * @param value The binary value * @param store How <code>value</code> should be stored (compressed or not) * @throws IllegalArgumentException if store is <code>Store.NO</code> */ public Field(String name, byte[] value, Store store) { this(name, value, 0, value.length, store); } /** * Create a stored field with binary value. Optionally the value may be compressed. 
* * @param name The name of the field * @param value The binary value * @param offset Starting offset in value where this Field's bytes are * @param length Number of bytes to use for this Field, starting at offset * @param store How <code>value</code> should be stored (compressed or not) * @throws IllegalArgumentException if store is <code>Store.NO</code> */ public Field(String name, byte[] value, int offset, int length, Store store) { if (name == null) throw new IllegalArgumentException("name cannot be null"); if (value == null) throw new IllegalArgumentException("value cannot be null"); this.name = StringHelper.intern(name); // field names are interned fieldsData = value; if (store == Store.NO) throw new IllegalArgumentException("binary values can't be unstored"); isStored = store.isStored(); isIndexed = false; isTokenized = false; omitTermFreqAndPositions = false; omitNorms = true; isBinary = true; binaryLength = length; binaryOffset = offset; setStoreTermVector(TermVector.NO); } }
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/document/Field.java
Java
art
20,382
package org.apache.lucene.document; /** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import java.util.*; // for javadoc import org.apache.lucene.search.ScoreDoc; // for javadoc import org.apache.lucene.search.Searcher; // for javadoc import org.apache.lucene.index.IndexReader; // for javadoc /** Documents are the unit of indexing and search. * * A Document is a set of fields. Each field has a name and a textual value. * A field may be {@link Fieldable#isStored() stored} with the document, in which * case it is returned with search hits on the document. Thus each document * should typically contain one or more stored fields which uniquely identify * it. * * <p>Note that fields which are <i>not</i> {@link Fieldable#isStored() stored} are * <i>not</i> available in documents retrieved from the index, e.g. with {@link * ScoreDoc#doc}, {@link Searcher#doc(int)} or {@link * IndexReader#document(int)}. */ public final class Document implements java.io.Serializable { List<Fieldable> fields = new ArrayList<Fieldable>(); private float boost = 1.0f; /** Constructs a new document with no fields. */ public Document() {} /** Sets a boost factor for hits on any field of this document. This value * will be multiplied into the score of all hits on this document. 
* * <p>The default value is 1.0. * * <p>Values are multiplied into the value of {@link Fieldable#getBoost()} of * each field in this document. Thus, this method in effect sets a default * boost for the fields of this document. * * @see Fieldable#setBoost(float) */ public void setBoost(float boost) { this.boost = boost; } /** Returns, at indexing time, the boost factor as set by {@link #setBoost(float)}. * * <p>Note that once a document is indexed this value is no longer available * from the index. At search time, for retrieved documents, this method always * returns 1. This however does not mean that the boost value set at indexing * time was ignored - it was just combined with other indexing time factors and * stored elsewhere, for better indexing and search performance. (For more * information see the "norm(t,d)" part of the scoring formula in * {@link org.apache.lucene.search.Similarity Similarity}.) * * @see #setBoost(float) */ public float getBoost() { return boost; } /** * <p>Adds a field to a document. Several fields may be added with * the same name. In this case, if the fields are indexed, their text is * treated as though appended for the purposes of search.</p> * <p> Note that add like the removeField(s) methods only makes sense * prior to adding a document to an index. These methods cannot * be used to change the content of an existing index! In order to achieve this, * a document has to be deleted from an index and a new changed version of that * document has to be added.</p> */ public final void add(Fieldable field) { fields.add(field); } /** * <p>Removes field with the specified name from the document. * If multiple fields exist with this name, this method removes the first field that has been added. * If there is no field with the specified name, the document remains unchanged.</p> * <p> Note that the removeField(s) methods like the add method only make sense * prior to adding a document to an index. 
These methods cannot * be used to change the content of an existing index! In order to achieve this, * a document has to be deleted from an index and a new changed version of that * document has to be added.</p> */ public final void removeField(String name) { Iterator<Fieldable> it = fields.iterator(); while (it.hasNext()) { Fieldable field = it.next(); if (field.name().equals(name)) { it.remove(); return; } } } /** * <p>Removes all fields with the given name from the document. * If there is no field with the specified name, the document remains unchanged.</p> * <p> Note that the removeField(s) methods like the add method only make sense * prior to adding a document to an index. These methods cannot * be used to change the content of an existing index! In order to achieve this, * a document has to be deleted from an index and a new changed version of that * document has to be added.</p> */ public final void removeFields(String name) { Iterator<Fieldable> it = fields.iterator(); while (it.hasNext()) { Fieldable field = it.next(); if (field.name().equals(name)) { it.remove(); } } } /** Returns a field with the given name if any exist in this document, or * null. If multiple fields exists with this name, this method returns the * first value added. * Do not use this method with lazy loaded fields. */ public final Field getField(String name) { return (Field) getFieldable(name); } /** Returns a field with the given name if any exist in this document, or * null. If multiple fields exists with this name, this method returns the * first value added. */ public Fieldable getFieldable(String name) { for (Fieldable field : fields) { if (field.name().equals(name)) return field; } return null; } /** Returns the string value of the field with the given name if any exist in * this document, or null. If multiple fields exist with this name, this * method returns the first value added. If only binary fields with this name * exist, returns null. 
*/ public final String get(String name) { for (Fieldable field : fields) { if (field.name().equals(name) && (!field.isBinary())) return field.stringValue(); } return null; } /** Returns a List of all the fields in a document. * <p>Note that fields which are <i>not</i> {@link Fieldable#isStored() stored} are * <i>not</i> available in documents retrieved from the * index, e.g. {@link Searcher#doc(int)} or {@link * IndexReader#document(int)}. */ public final List<Fieldable> getFields() { return fields; } private final static Field[] NO_FIELDS = new Field[0]; /** * Returns an array of {@link Field}s with the given name. * Do not use with lazy loaded fields. * This method returns an empty array when there are no * matching fields. It never returns null. * * @param name the name of the field * @return a <code>Field[]</code> array */ public final Field[] getFields(String name) { List<Field> result = new ArrayList<Field>(); for (Fieldable field : fields) { if (field.name().equals(name)) { result.add((Field) field); } } if (result.size() == 0) return NO_FIELDS; return result.toArray(new Field[result.size()]); } private final static Fieldable[] NO_FIELDABLES = new Fieldable[0]; /** * Returns an array of {@link Fieldable}s with the given name. * This method returns an empty array when there are no * matching fields. It never returns null. * * @param name the name of the field * @return a <code>Fieldable[]</code> array */ public Fieldable[] getFieldables(String name) { List<Fieldable> result = new ArrayList<Fieldable>(); for (Fieldable field : fields) { if (field.name().equals(name)) { result.add(field); } } if (result.size() == 0) return NO_FIELDABLES; return result.toArray(new Fieldable[result.size()]); } private final static String[] NO_STRINGS = new String[0]; /** * Returns an array of values of the field specified as the method parameter. * This method returns an empty array when there are no * matching fields. It never returns null. 
* @param name the name of the field * @return a <code>String[]</code> of field values */ public final String[] getValues(String name) { List<String> result = new ArrayList<String>(); for (Fieldable field : fields) { if (field.name().equals(name) && (!field.isBinary())) result.add(field.stringValue()); } if (result.size() == 0) return NO_STRINGS; return result.toArray(new String[result.size()]); } private final static byte[][] NO_BYTES = new byte[0][]; /** * Returns an array of byte arrays for of the fields that have the name specified * as the method parameter. This method returns an empty * array when there are no matching fields. It never * returns null. * * @param name the name of the field * @return a <code>byte[][]</code> of binary field values */ public final byte[][] getBinaryValues(String name) { List<byte[]> result = new ArrayList<byte[]>(); for (Fieldable field : fields) { if (field.name().equals(name) && (field.isBinary())) result.add(field.getBinaryValue()); } if (result.size() == 0) return NO_BYTES; return result.toArray(new byte[result.size()][]); } /** * Returns an array of bytes for the first (or only) field that has the name * specified as the method parameter. This method will return <code>null</code> * if no binary fields with the specified name are available. * There may be non-binary fields with the same name. * * @param name the name of the field. * @return a <code>byte[]</code> containing the binary field value or <code>null</code> */ public final byte[] getBinaryValue(String name) { for (Fieldable field : fields) { if (field.name().equals(name) && (field.isBinary())) return field.getBinaryValue(); } return null; } /** Prints the fields of a document for human consumption. 
*/ @Override public final String toString() { StringBuilder buffer = new StringBuilder(); buffer.append("Document<"); for (int i = 0; i < fields.size(); i++) { Fieldable field = fields.get(i); buffer.append(field.toString()); if (i != fields.size()-1) buffer.append(" "); } buffer.append(">"); return buffer.toString(); } }
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/document/Document.java
Java
art
10,862
package org.apache.lucene.document;

/**
 * Copyright 2004 The Apache Software Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.index.FieldInvertState; // for javadocs
import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.spans.SpanQuery;

import java.io.Reader;
import java.io.Serializable;

/**
 * Synonymous with {@link Field}.
 *
 * <p><b>WARNING</b>: This interface may change within minor versions, despite Lucene's
 * backward compatibility requirements. This means new methods may be added from version
 * to version. This change only affects the Fieldable API; other backwards compatibility
 * promises remain intact. For example, Lucene can still read and write indices created
 * within the same major version.</p>
 **/
public interface Fieldable extends Serializable {

  /**
   * Sets the boost factor for hits on this field. This value will be
   * multiplied into the score of all hits on this field of this document.
   *
   * <p>The boost is multiplied by {@link org.apache.lucene.document.Document#getBoost()}
   * of the document containing this field. If a document has multiple fields with the
   * same name, all such values are multiplied together. This product is then used to
   * compute the norm factor for the field. By default, in the
   * {@link org.apache.lucene.search.Similarity#computeNorm(String, FieldInvertState)}
   * method, the boost value is multiplied by the
   * {@link org.apache.lucene.search.Similarity#lengthNorm(String, int)} and then rounded
   * by {@link org.apache.lucene.search.Similarity#encodeNorm(float)} before it is stored
   * in the index. One should attempt to ensure that this product does not overflow the
   * range of that encoding.
   *
   * @see org.apache.lucene.document.Document#setBoost(float)
   * @see org.apache.lucene.search.Similarity#computeNorm(String, FieldInvertState)
   * @see org.apache.lucene.search.Similarity#encodeNorm(float)
   */
  void setBoost(float boost);

  /**
   * Returns the boost factor for hits for this field.
   *
   * <p>The default value is 1.0.
   *
   * <p>Note: this value is not stored directly with the document in the index.
   * Documents returned from {@link org.apache.lucene.index.IndexReader#document(int)}
   * and {@link org.apache.lucene.search.Searcher#doc(int)} may thus not have the same
   * value present as when this field was indexed.
   *
   * @see #setBoost(float)
   */
  float getBoost();

  /**
   * Returns the name of the field as an interned string.
   * For example "date", "title", "body", ...
   */
  String name();

  /**
   * The value of the field as a String, or null.
   * <p>
   * For indexing, if isStored()==true, the stringValue() will be used as the stored
   * field value unless isBinary()==true, in which case getBinaryValue() will be used.
   *
   * If isIndexed()==true and isTokenized()==false, this String value will be indexed
   * as a single token. If isIndexed()==true and isTokenized()==true, then
   * tokenStreamValue() will be used to generate indexed tokens if not null, else
   * readerValue() will be used to generate indexed tokens if not null, else
   * stringValue() will be used to generate tokens.
   */
  String stringValue();

  /**
   * The value of the field as a Reader, which can be used at index time to generate
   * indexed tokens.
   * @see #stringValue()
   */
  Reader readerValue();

  /**
   * The TokenStream for this field to be used when indexing, or null.
   * @see #stringValue()
   */
  TokenStream tokenStreamValue();

  /**
   * True if the value of the field is to be stored in the index for return with
   * search hits.
   */
  boolean isStored();

  /**
   * True if the value of the field is to be indexed, so that it may be searched on.
   */
  boolean isIndexed();

  /**
   * True if the value of the field should be tokenized as text prior to indexing.
   * Un-tokenized fields are indexed as a single word and may not be Reader-valued.
   */
  boolean isTokenized();

  /**
   * True if the term or terms used to index this field are stored as a term vector,
   * available from {@link org.apache.lucene.index.IndexReader#getTermFreqVector(int,String)}.
   * These methods do not provide access to the original content of the field, only to
   * terms used to index it. If the original content must be preserved, use the
   * <code>stored</code> attribute instead.
   *
   * @see org.apache.lucene.index.IndexReader#getTermFreqVector(int, String)
   */
  boolean isTermVectorStored();

  /**
   * True if terms are stored as term vector together with their offsets
   * (start and end position in source text).
   */
  boolean isStoreOffsetWithTermVector();

  /**
   * True if terms are stored as term vector together with their token positions.
   */
  boolean isStorePositionWithTermVector();

  /** True if the value of the field is stored as binary. */
  boolean isBinary();

  /** True if norms are omitted for this indexed field. */
  boolean getOmitNorms();

  /**
   * Expert:
   *
   * If set, omit normalization factors associated with this indexed field.
   * This effectively disables indexing boosts and length normalization for this field.
   */
  void setOmitNorms(boolean omitNorms);

  /**
   * Indicates whether a Field is Lazy or not. The semantics of Lazy loading are such
   * that if a Field is lazily loaded, retrieving its values via {@link #stringValue()}
   * or {@link #getBinaryValue()} is only valid as long as the
   * {@link org.apache.lucene.index.IndexReader} that retrieved the {@link Document}
   * is still open.
   *
   * @return true if this field can be loaded lazily
   */
  boolean isLazy();

  /**
   * Returns offset into byte[] segment that is used as value; if Field is not binary
   * the returned value is undefined.
   * @return index of the first character in byte[] segment that represents this Field value
   */
  int getBinaryOffset();

  /**
   * Returns length of byte[] segment that is used as value; if Field is not binary
   * the returned value is undefined.
   * @return length of byte[] segment that represents this Field value
   */
  int getBinaryLength();

  /**
   * Return the raw byte[] for the binary field. Note that you must also call
   * {@link #getBinaryLength} and {@link #getBinaryOffset} to know which range of bytes
   * in this returned array belong to the field.
   * @return reference to the Field value as byte[].
   */
  byte[] getBinaryValue();

  /**
   * Return the raw byte[] for the binary field. Note that you must also call
   * {@link #getBinaryLength} and {@link #getBinaryOffset} to know which range of bytes
   * in this returned array belong to the field.<p>
   * About reuse: if you pass in the result byte[] and it is used, likely the
   * underlying implementation will hold onto this byte[] and return it in future calls
   * to {@link #getBinaryValue()}. So if you subsequently re-use the same byte[]
   * elsewhere it will alter this Fieldable's value.
   * @param result User defined buffer that will be used if possible. If this is null
   * or not large enough, a new buffer is allocated
   * @return reference to the Field value as byte[].
   */
  byte[] getBinaryValue(byte[] result);

  /** @see #setOmitTermFreqAndPositions */
  boolean getOmitTermFreqAndPositions();

  /**
   * Expert:
   *
   * If set, omit term freq, positions and payloads from postings for this field.
   *
   * <p><b>NOTE</b>: While this option reduces storage space required in the index, it
   * also means any query requiring positional information, such as {@link PhraseQuery}
   * or {@link SpanQuery} subclasses will silently fail to find results.
   */
  void setOmitTermFreqAndPositions(boolean omitTermFreqAndPositions);
}
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/document/Fieldable.java
Java
art
8,383
package org.apache.lucene.document;

/**
 * Copyright 2004 The Apache Software Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * Provides information about what should be done with this Field
 **/
// Constant order is preserved; do not reorder (ordinals may be relied upon).
public enum FieldSelectorResult {

  /**
   * Load this {@link Field} every time the {@link Document} is loaded, reading in
   * the data as it is encountered. {@link Document#getField(String)} and
   * {@link Document#getFieldable(String)} should not return null.
   * <p/>
   * {@link Document#add(Fieldable)} should be called by the Reader.
   */
  LOAD,

  /**
   * Lazily load this {@link Field}. This means the {@link Field} is valid, but it may
   * not actually contain its data until invoked. {@link Document#getField(String)}
   * SHOULD NOT BE USED. {@link Document#getFieldable(String)} is safe to use and
   * should return a valid instance of a {@link Fieldable}.
   * <p/>
   * {@link Document#add(Fieldable)} should be called by the Reader.
   */
  LAZY_LOAD,

  /**
   * Do not load the {@link Field}. {@link Document#getField(String)} and
   * {@link Document#getFieldable(String)} should return null.
   * {@link Document#add(Fieldable)} is not called.
   * <p/>
   * {@link Document#add(Fieldable)} should not be called by the Reader.
   */
  NO_LOAD,

  /**
   * Load this field as in the {@link #LOAD} case, but immediately return from
   * {@link Field} loading for the {@link Document}. Thus, the Document may not have
   * its complete set of Fields. {@link Document#getField(String)} and
   * {@link Document#getFieldable(String)} should both be valid for this {@link Field}.
   * <p/>
   * {@link Document#add(Fieldable)} should be called by the Reader.
   */
  LOAD_AND_BREAK,

  /**
   * Expert: Load the size of this {@link Field} rather than its value. Size is
   * measured as number of bytes required to store the field == bytes for a binary
   * or any compressed value, and 2*chars for a String value. The size is stored as
   * a binary value, represented as an int in a byte[], with the higher order byte
   * first in [0].
   */
  SIZE,

  /**
   * Expert: Like {@link #SIZE} but immediately break from the field loading loop,
   * i.e., stop loading further fields, after the size is loaded.
   */
  SIZE_AND_BREAK
}
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/document/FieldSelectorResult.java
Java
art
2,815
package org.apache.lucene.document; /** * Copyright 2006 The Apache Software Foundation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import org.apache.lucene.search.PhraseQuery; // for javadocs import org.apache.lucene.search.spans.SpanQuery; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.util.StringHelper; // for javadocs /** * * **/ public abstract class AbstractField implements Fieldable { protected String name = "body"; protected boolean storeTermVector = false; protected boolean storeOffsetWithTermVector = false; protected boolean storePositionWithTermVector = false; protected boolean omitNorms = false; protected boolean isStored = false; protected boolean isIndexed = true; protected boolean isTokenized = true; protected boolean isBinary = false; protected boolean lazy = false; protected boolean omitTermFreqAndPositions = false; protected float boost = 1.0f; // the data object for all different kind of field values protected Object fieldsData = null; // pre-analyzed tokenStream for indexed fields protected TokenStream tokenStream; // length/offset for all primitive types protected int binaryLength; protected int binaryOffset; protected AbstractField() { } protected AbstractField(String name, Field.Store store, Field.Index index, Field.TermVector termVector) { if (name == null) throw new NullPointerException("name cannot be null"); this.name = StringHelper.intern(name); // field names are interned this.isStored = store.isStored(); this.isIndexed 
= index.isIndexed(); this.isTokenized = index.isAnalyzed(); this.omitNorms = index.omitNorms(); this.isBinary = false; setStoreTermVector(termVector); } /** Sets the boost factor hits on this field. This value will be * multiplied into the score of all hits on this this field of this * document. * * <p>The boost is multiplied by {@link org.apache.lucene.document.Document#getBoost()} of the document * containing this field. If a document has multiple fields with the same * name, all such values are multiplied together. This product is then * used to compute the norm factor for the field. By * default, in the {@link * org.apache.lucene.search.Similarity#computeNorm(String, * FieldInvertState)} method, the boost value is multipled * by the {@link * org.apache.lucene.search.Similarity#lengthNorm(String, * int)} and then * rounded by {@link org.apache.lucene.search.Similarity#encodeNorm(float)} before it is stored in the * index. One should attempt to ensure that this product does not overflow * the range of that encoding. * * @see org.apache.lucene.document.Document#setBoost(float) * @see org.apache.lucene.search.Similarity#computeNorm(String, org.apache.lucene.index.FieldInvertState) * @see org.apache.lucene.search.Similarity#encodeNorm(float) */ public void setBoost(float boost) { this.boost = boost; } /** Returns the boost factor for hits for this field. * * <p>The default value is 1.0. * * <p>Note: this value is not stored directly with the document in the index. * Documents returned from {@link org.apache.lucene.index.IndexReader#document(int)} and * {@link org.apache.lucene.search.Searcher#doc(int)} may thus not have the same value present as when * this field was indexed. * * @see #setBoost(float) */ public float getBoost() { return boost; } /** Returns the name of the field as an interned string. * For example "date", "title", "body", ... 
*/ public String name() { return name; } protected void setStoreTermVector(Field.TermVector termVector) { this.storeTermVector = termVector.isStored(); this.storePositionWithTermVector = termVector.withPositions(); this.storeOffsetWithTermVector = termVector.withOffsets(); } /** True iff the value of the field is to be stored in the index for return with search hits. It is an error for this to be true if a field is Reader-valued. */ public final boolean isStored() { return isStored; } /** True iff the value of the field is to be indexed, so that it may be searched on. */ public final boolean isIndexed() { return isIndexed; } /** True iff the value of the field should be tokenized as text prior to indexing. Un-tokenized fields are indexed as a single word and may not be Reader-valued. */ public final boolean isTokenized() { return isTokenized; } /** True iff the term or terms used to index this field are stored as a term * vector, available from {@link org.apache.lucene.index.IndexReader#getTermFreqVector(int,String)}. * These methods do not provide access to the original content of the field, * only to terms used to index it. If the original content must be * preserved, use the <code>stored</code> attribute instead. * * @see org.apache.lucene.index.IndexReader#getTermFreqVector(int, String) */ public final boolean isTermVectorStored() { return storeTermVector; } /** * True iff terms are stored as term vector together with their offsets * (start and end position in source text). */ public boolean isStoreOffsetWithTermVector(){ return storeOffsetWithTermVector; } /** * True iff terms are stored as term vector together with their token positions. */ public boolean isStorePositionWithTermVector(){ return storePositionWithTermVector; } /** True iff the value of the filed is stored as binary */ public final boolean isBinary() { return isBinary; } /** * Return the raw byte[] for the binary field. 
Note that * you must also call {@link #getBinaryLength} and {@link * #getBinaryOffset} to know which range of bytes in this * returned array belong to the field. * @return reference to the Field value as byte[]. */ public byte[] getBinaryValue() { return getBinaryValue(null); } public byte[] getBinaryValue(byte[] result){ if (isBinary || fieldsData instanceof byte[]) return (byte[]) fieldsData; else return null; } /** * Returns length of byte[] segment that is used as value, if Field is not binary * returned value is undefined * @return length of byte[] segment that represents this Field value */ public int getBinaryLength() { if (isBinary) { return binaryLength; } else if (fieldsData instanceof byte[]) return ((byte[]) fieldsData).length; else return 0; } /** * Returns offset into byte[] segment that is used as value, if Field is not binary * returned value is undefined * @return index of the first character in byte[] segment that represents this Field value */ public int getBinaryOffset() { return binaryOffset; } /** True if norms are omitted for this indexed field */ public boolean getOmitNorms() { return omitNorms; } /** @see #setOmitTermFreqAndPositions */ public boolean getOmitTermFreqAndPositions() { return omitTermFreqAndPositions; } /** Expert: * * If set, omit normalization factors associated with this indexed field. * This effectively disables indexing boosts and length normalization for this field. */ public void setOmitNorms(boolean omitNorms) { this.omitNorms=omitNorms; } /** Expert: * * If set, omit term freq, positions and payloads from * postings for this field. * * <p><b>NOTE</b>: While this option reduces storage space * required in the index, it also means any query * requiring positional information, such as {@link * PhraseQuery} or {@link SpanQuery} subclasses will * silently fail to find results. 
*/ public void setOmitTermFreqAndPositions(boolean omitTermFreqAndPositions) { this.omitTermFreqAndPositions=omitTermFreqAndPositions; } public boolean isLazy() { return lazy; } /** Prints a Field for human consumption. */ @Override public final String toString() { StringBuilder result = new StringBuilder(); if (isStored) { result.append("stored"); } if (isIndexed) { if (result.length() > 0) result.append(","); result.append("indexed"); } if (isTokenized) { if (result.length() > 0) result.append(","); result.append("tokenized"); } if (storeTermVector) { if (result.length() > 0) result.append(","); result.append("termVector"); } if (storeOffsetWithTermVector) { if (result.length() > 0) result.append(","); result.append("termVectorOffsets"); } if (storePositionWithTermVector) { if (result.length() > 0) result.append(","); result.append("termVectorPosition"); } if (isBinary) { if (result.length() > 0) result.append(","); result.append("binary"); } if (omitNorms) { result.append(",omitNorms"); } if (omitTermFreqAndPositions) { result.append(",omitTermFreqAndPositions"); } if (lazy){ result.append(",lazy"); } result.append('<'); result.append(name); result.append(':'); if (fieldsData != null && lazy == false) { result.append(fieldsData); } result.append('>'); return result.toString(); } }
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/document/AbstractField.java
Java
art
9,829
package org.apache.lucene.document;

import java.io.Serializable;

/**
 * Copyright 2004 The Apache Software Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * Similar to a {@link java.io.FileFilter}, the FieldSelector allows one to make
 * decisions about what Fields get loaded on a {@link Document} by
 * {@link org.apache.lucene.index.IndexReader#document(int,org.apache.lucene.document.FieldSelector)}.
 **/
public interface FieldSelector extends Serializable {

  /**
   * Decides how the {@link Field} named <code>fieldName</code> should be loaded.
   *
   * @param fieldName the field to accept or reject
   * @return an instance of {@link FieldSelectorResult}
   * if the {@link Field} named <code>fieldName</code> should be loaded.
   */
  FieldSelectorResult accept(String fieldName);
}
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/document/FieldSelector.java
Java
art
1,245
package org.apache.lucene.document;

/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import java.io.Reader;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.NumericTokenStream;
import org.apache.lucene.util.NumericUtils;
import org.apache.lucene.search.NumericRangeQuery; // javadocs
import org.apache.lucene.search.NumericRangeFilter; // javadocs
import org.apache.lucene.search.SortField; // javadocs
import org.apache.lucene.search.FieldCache; // javadocs

/**
 * <p>A {@link Field} that enables indexing of numeric values for efficient range
 * filtering and sorting. Example, adding an int value:
 * <pre>
 *  document.add(new NumericField(name).setIntValue(value));
 * </pre>
 *
 * For best performance, re-use a single <code>NumericField</code> and
 * {@link Document} instance across documents, calling a
 * set<em>???</em>Value() method before each {@code addDocument} call.
 *
 * <p>The java native types <code>int</code>, <code>long</code>, <code>float</code>
 * and <code>double</code> are directly supported; any value convertible to them
 * (e.g. {@link java.util.Date#getTime} for dates, possibly quantized) can be
 * indexed as well.</p>
 *
 * <p>Query/filter with {@link NumericRangeQuery} or {@link NumericRangeFilter}
 * (created with a <code>precisionStep</code> congruent to this field's); sort with
 * the normal numeric sort types, e.g. {@link SortField#INT}; values can also be
 * loaded directly from {@link FieldCache}.</p>
 *
 * <p>By default the value is indexed but not stored; use
 * {@link #NumericField(String,Field.Store,boolean)} to change that. Adding the
 * same field name more than once makes range queries/filters match the logical OR
 * of all values, but leaves sort behavior undefined — index a single-valued
 * <code>NumericField</code> separately if you need to sort.</p>
 *
 * <p>Internally each value is indexed as a <em>trie</em> structure of
 * progressively lower-precision terms. The step between successive precisions,
 * <code>precisionStep</code> (in bits, default 4 from
 * {@link NumericUtils#PRECISION_STEP_DEFAULT}), trades index size against range
 * search speed; use the expert constructor
 * {@link #NumericField(String,int,Field.Store,boolean)} to change it. For low
 * cardinality fields, or when only sorting (never range searching) is needed,
 * larger steps up to {@link Integer#MAX_VALUE} (one term per value) reduce disk
 * use. See {@link NumericRangeQuery} and {@link NumericUtils} for the trie and
 * encoding details, or use {@link NumericTokenStream} directly for advanced
 * cases.</p>
 *
 * <p><b>NOTE:</b> This class is only used during indexing. Stored values come
 * back from search as a conventional {@link Fieldable} whose value is the
 * <code>toString(value)</code> of the numeric type.
 *
 * <p><font color="red"><b>NOTE:</b> This API is experimental and might change in
 * incompatible ways in the next release.</font>
 *
 * @since 2.9
 */
public final class NumericField extends AbstractField {

  // Re-used stream that encodes the current numeric value into trie terms.
  private final NumericTokenStream tokenStream;

  /**
   * Creates an indexed, not stored field using the default
   * <code>precisionStep</code> {@link NumericUtils#PRECISION_STEP_DEFAULT} (4).
   * A value must be set via a set<em>???</em>Value() method before indexing a
   * document containing this field.
   * @param name the field name
   */
  public NumericField(String name) {
    this(name, NumericUtils.PRECISION_STEP_DEFAULT, Field.Store.NO, true);
  }

  /**
   * Creates a field using the default <code>precisionStep</code>
   * {@link NumericUtils#PRECISION_STEP_DEFAULT} (4). A value must be set via a
   * set<em>???</em>Value() method before indexing a document containing this field.
   * @param name the field name
   * @param store if the field should be stored in plain text form
   *  (according to <code>toString(value)</code> of the used data type)
   * @param index if the field should be indexed using {@link NumericTokenStream}
   */
  public NumericField(String name, Field.Store store, boolean index) {
    this(name, NumericUtils.PRECISION_STEP_DEFAULT, store, index);
  }

  /**
   * Creates an indexed, not stored field with the specified
   * <code>precisionStep</code>. A value must be set via a
   * set<em>???</em>Value() method before indexing a document containing this field.
   * @param name the field name
   * @param precisionStep the used <a href="../search/NumericRangeQuery.html#precisionStepDesc">precision step</a>
   */
  public NumericField(String name, int precisionStep) {
    this(name, precisionStep, Field.Store.NO, true);
  }

  /**
   * Creates a field with the specified <code>precisionStep</code>. A value must
   * be set via a set<em>???</em>Value() method before indexing a document
   * containing this field.
   * @param name the field name
   * @param precisionStep the used <a href="../search/NumericRangeQuery.html#precisionStepDesc">precision step</a>
   * @param store if the field should be stored in plain text form
   *  (according to <code>toString(value)</code> of the used data type)
   * @param index if the field should be indexed using {@link NumericTokenStream}
   */
  public NumericField(String name, int precisionStep, Field.Store store, boolean index) {
    super(name, store, index ? Field.Index.ANALYZED_NO_NORMS : Field.Index.NO, Field.TermVector.NO);
    setOmitTermFreqAndPositions(true);
    tokenStream = new NumericTokenStream(precisionStep);
  }

  /** Returns a {@link NumericTokenStream} for indexing the numeric value. */
  public TokenStream tokenStreamValue() {
    if (!isIndexed()) {
      return null;
    }
    return tokenStream;
  }

  /** Returns always <code>null</code> for numeric fields */
  @Override
  public byte[] getBinaryValue(byte[] result) {
    return null;
  }

  /** Returns always <code>null</code> for numeric fields */
  public Reader readerValue() {
    return null;
  }

  /** Returns the numeric value as a string (how it is stored, when {@link Field.Store#YES} is chosen). */
  public String stringValue() {
    if (fieldsData == null) {
      return null;
    }
    return fieldsData.toString();
  }

  /** Returns the current numeric value as a subclass of {@link Number}, <code>null</code> if not yet initialized. */
  public Number getNumericValue() {
    return (Number) fieldsData;
  }

  /**
   * Initializes the field with the supplied <code>long</code> value.
   * @param value the numeric value
   * @return this instance, because of this you can use it the following way:
   * <code>document.add(new NumericField(name, precisionStep).setLongValue(value))</code>
   */
  public NumericField setLongValue(final long value) {
    tokenStream.setLongValue(value);
    fieldsData = Long.valueOf(value);
    return this;
  }

  /**
   * Initializes the field with the supplied <code>int</code> value.
   * @param value the numeric value
   * @return this instance, because of this you can use it the following way:
   * <code>document.add(new NumericField(name, precisionStep).setIntValue(value))</code>
   */
  public NumericField setIntValue(final int value) {
    tokenStream.setIntValue(value);
    fieldsData = Integer.valueOf(value);
    return this;
  }

  /**
   * Initializes the field with the supplied <code>double</code> value.
   * @param value the numeric value
   * @return this instance, because of this you can use it the following way:
   * <code>document.add(new NumericField(name, precisionStep).setDoubleValue(value))</code>
   */
  public NumericField setDoubleValue(final double value) {
    tokenStream.setDoubleValue(value);
    fieldsData = Double.valueOf(value);
    return this;
  }

  /**
   * Initializes the field with the supplied <code>float</code> value.
   * @param value the numeric value
   * @return this instance, because of this you can use it the following way:
   * <code>document.add(new NumericField(name, precisionStep).setFloatValue(value))</code>
   */
  public NumericField setFloatValue(final float value) {
    tokenStream.setFloatValue(value);
    fieldsData = Float.valueOf(value);
    return this;
  }
}
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/document/NumericField.java
Java
art
11,736
package org.apache.lucene.document;

/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import java.util.zip.Deflater;
import java.util.zip.Inflater;
import java.util.zip.DataFormatException;
import java.io.ByteArrayOutputStream;

import org.apache.lucene.util.UnicodeUtil;

/** Static helpers that compress and decompress binary data for stored
 *  fields, implemented on top of java.util.zip's Deflater and Inflater. */
public class CompressionTools {

  // Utility class: static methods only, no instances.
  private CompressionTools() {}

  /** Compresses the byte range [offset, offset+length) using the given
   *  compressionLevel (constants are defined in java.util.zip.Deflater).
   *  @return a newly allocated array holding the compressed bytes */
  public static byte[] compress(byte[] value, int offset, int length, int compressionLevel) {
    // The destination must be growable: zlib gives no guarantee that the
    // compressed form is smaller than the input, so a fixed array sized to
    // the input would not be safe.
    ByteArrayOutputStream out = new ByteArrayOutputStream(length);

    Deflater deflater = new Deflater();
    try {
      deflater.setLevel(compressionLevel);
      deflater.setInput(value, offset, length);
      deflater.finish();

      // Drain the deflater in fixed-size chunks until it reports completion.
      byte[] chunk = new byte[1024];
      while (!deflater.finished()) {
        int produced = deflater.deflate(chunk);
        out.write(chunk, 0, produced);
      }
    } finally {
      // Release the native zlib resources even on failure.
      deflater.end();
    }

    return out.toByteArray();
  }

  /** Compresses the specified byte range, with default BEST_COMPRESSION level */
  public static byte[] compress(byte[] value, int offset, int length) {
    return compress(value, offset, length, Deflater.BEST_COMPRESSION);
  }

  /** Compresses all bytes in the array, with default BEST_COMPRESSION level */
  public static byte[] compress(byte[] value) {
    return compress(value, 0, value.length, Deflater.BEST_COMPRESSION);
  }

  /** Compresses the String value, with default BEST_COMPRESSION level */
  public static byte[] compressString(String value) {
    return compressString(value, Deflater.BEST_COMPRESSION);
  }

  /** Compresses the String value (as its UTF-8 encoding) using the
   *  specified compressionLevel (constants are defined in
   *  java.util.zip.Deflater). */
  public static byte[] compressString(String value, int compressionLevel) {
    UnicodeUtil.UTF8Result utf8 = new UnicodeUtil.UTF8Result();
    UnicodeUtil.UTF16toUTF8(value, 0, value.length(), utf8);
    return compress(utf8.result, 0, utf8.length, compressionLevel);
  }

  /** Decompress the byte array previously returned by compress.
   *  @throws DataFormatException if the input is not valid zlib data */
  public static byte[] decompress(byte[] value) throws DataFormatException {
    // Expandable destination; decompressed data is usually larger than value.
    ByteArrayOutputStream out = new ByteArrayOutputStream(value.length);

    Inflater inflater = new Inflater();
    try {
      inflater.setInput(value);

      // Drain the inflater in fixed-size chunks until it reports completion.
      byte[] chunk = new byte[1024];
      while (!inflater.finished()) {
        int produced = inflater.inflate(chunk);
        out.write(chunk, 0, produced);
      }
    } finally {
      // Release the native zlib resources even on failure.
      inflater.end();
    }

    return out.toByteArray();
  }

  /** Decompress the byte array previously returned by compressString
   *  back into a String.
   *  @throws DataFormatException if the input is not valid zlib data */
  public static String decompressString(byte[] value) throws DataFormatException {
    UnicodeUtil.UTF16Result utf16 = new UnicodeUtil.UTF16Result();
    byte[] utf8 = decompress(value);
    UnicodeUtil.UTF8toUTF16(utf8, 0, utf8.length, utf16);
    return new String(utf16.result, 0, utf16.length);
  }
}
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/document/CompressionTools.java
Java
art
4,490
package org.apache.lucene.document;

/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import org.apache.lucene.search.PrefixQuery;
import org.apache.lucene.search.TermRangeQuery;
import org.apache.lucene.search.NumericRangeQuery; // for javadocs
import org.apache.lucene.util.NumericUtils; // for javadocs

import java.util.Date;     // for javadoc
import java.util.Calendar; // for javadoc

// do not remove in 3.0, needed for reading old indexes!

/**
 * Provides support for converting dates to strings and vice-versa.
 * The strings are structured so that lexicographic sorting orders by date,
 * which makes them suitable for use as field values and search terms.
 *
 * <P>Note that this class saves dates with millisecond granularity,
 * which is bad for {@link TermRangeQuery} and {@link PrefixQuery}, as those
 * queries are expanded to a BooleanQuery with a potentially large number
 * of terms when searching. Thus you might want to use
 * {@link DateTools} instead.
 *
 * <P>
 * Note: dates before 1970 cannot be used, and therefore cannot be
 * indexed when using this class. See {@link DateTools} for an
 * alternative without such a limitation.
 *
 * <P>
 * Another approach is {@link NumericUtils}, which provides
 * a sortable binary representation (prefix encoded) of numeric values, which
 * date/time are.
 * For indexing a {@link Date} or {@link Calendar}, just get the unix timestamp as
 * <code>long</code> using {@link Date#getTime} or {@link Calendar#getTimeInMillis} and
 * index this as a numeric value with {@link NumericField}
 * and use {@link NumericRangeQuery} to query it.
 *
 * @deprecated If you build a new index, use {@link DateTools} or
 * {@link NumericField} instead.
 * This class is included for use with existing
 * indices and will be removed in a future release (possibly Lucene 4.0).
 */
public class DateField {

  // Utility class: static methods only, no instances.
  private DateField() {}

  // Width (in MAX_RADIX digits) of a millennium of milliseconds; every
  // encoded time is zero-padded to this length so that lexicographic order
  // equals chronological order.  Declared final: it is a derived constant
  // and must never be reassigned.
  private static final int DATE_LEN = Long.toString(1000L*365*24*60*60*1000,
                                                    Character.MAX_RADIX).length();

  /** Returns the encoded form of the earliest representable time (0 ms). */
  public static String MIN_DATE_STRING() {
    return timeToString(0);
  }

  /** Returns the lexicographically largest possible encoded date string:
   *  DATE_LEN copies of the highest MAX_RADIX digit. */
  public static String MAX_DATE_STRING() {
    char[] buffer = new char[DATE_LEN];
    char c = Character.forDigit(Character.MAX_RADIX-1, Character.MAX_RADIX);
    for (int i = 0 ; i < DATE_LEN; i++)
      buffer[i] = c;
    return new String(buffer);
  }

  /**
   * Converts a Date to a string suitable for indexing.
   * @throws RuntimeException if the date specified in the
   * method argument is before 1970
   */
  public static String dateToString(Date date) {
    return timeToString(date.getTime());
  }

  /**
   * Converts a millisecond time to a string suitable for indexing.
   * @throws RuntimeException if the time specified in the
   * method argument is negative, that is, before 1970
   */
  public static String timeToString(long time) {
    if (time < 0)
      throw new RuntimeException("time '" + time + "' is too early, must be >= 0");

    String s = Long.toString(time, Character.MAX_RADIX);

    if (s.length() > DATE_LEN)
      throw new RuntimeException("time '" + time + "' is too late, length of string " +
          "representation must be <= " + DATE_LEN);

    // Pad with leading zeros so lexicographic order matches numeric order.
    if (s.length() < DATE_LEN) {
      StringBuilder sb = new StringBuilder(s);
      while (sb.length() < DATE_LEN)
        sb.insert(0, '0');  // insert the character '0' (was insert(0, 0), which
                            // relied on int-to-"0" conversion; same output)
      s = sb.toString();
    }

    return s;
  }

  /** Converts a string-encoded date into a millisecond time. */
  public static long stringToTime(String s) {
    return Long.parseLong(s, Character.MAX_RADIX);
  }

  /** Converts a string-encoded date into a Date object. */
  public static Date stringToDate(String s) {
    return new Date(stringToTime(s));
  }
}
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/document/DateField.java
Java
art
4,518
package org.apache.lucene.document;

import java.util.Set;

/**
 * Copyright 2004 The Apache Software Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * Declare what fields to load normally and what fields to load lazily
 *
 **/
public class SetBasedFieldSelector implements FieldSelector {

  // Both sets are supplied at construction and never reassigned; declared
  // final to make that invariant explicit.  Per the constructor contract
  // they may be empty but must not be null.
  private final Set<String> fieldsToLoad;
  private final Set<String> lazyFieldsToLoad;

  /**
   * Pass in the Set of {@link Field} names to load and the Set of {@link Field} names to load lazily. If both are null, the
   * Document will not have any {@link Field} on it.
   * @param fieldsToLoad A Set of {@link String} field names to load. May be empty, but not null
   * @param lazyFieldsToLoad A Set of {@link String} field names to load lazily. May be empty, but not null
   */
  public SetBasedFieldSelector(Set<String> fieldsToLoad, Set<String> lazyFieldsToLoad) {
    this.fieldsToLoad = fieldsToLoad;
    this.lazyFieldsToLoad = lazyFieldsToLoad;
  }

  /**
   * Indicate whether to load the field with the given name or not. If the {@link Field#name()} is not in either of the
   * initializing Sets, then {@link org.apache.lucene.document.FieldSelectorResult#NO_LOAD} is returned. If a Field name
   * is in both <code>fieldsToLoad</code> and <code>lazyFieldsToLoad</code>, lazy has precedence.
   *
   * @param fieldName The {@link Field} name to check
   * @return The {@link FieldSelectorResult}
   */
  public FieldSelectorResult accept(String fieldName) {
    // Check the lazy set first: lazy takes precedence when a name appears in
    // both sets (the original checked fieldsToLoad first and then overwrote
    // the result; a single early return per set gives the same answer with
    // at most the same number of lookups).
    if (lazyFieldsToLoad.contains(fieldName)) {
      return FieldSelectorResult.LAZY_LOAD;
    }
    if (fieldsToLoad.contains(fieldName)) {
      return FieldSelectorResult.LOAD;
    }
    return FieldSelectorResult.NO_LOAD;
  }
}
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/document/SetBasedFieldSelector.java
Java
art
2,330
package org.apache.lucene.document;

/**
 * Copyright 2004 The Apache Software Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * Load the First field and break.
 * <p/>
 * See {@link FieldSelectorResult#LOAD_AND_BREAK}
 */
public class LoadFirstFieldSelector implements FieldSelector {

  /**
   * Always answers {@link FieldSelectorResult#LOAD_AND_BREAK}, regardless of
   * the field name: the first field encountered is loaded and field reading
   * then stops.
   *
   * @param fieldName the field name being considered (ignored)
   * @return {@link FieldSelectorResult#LOAD_AND_BREAK} unconditionally
   */
  public FieldSelectorResult accept(String fieldName) {
    return FieldSelectorResult.LOAD_AND_BREAK;
  }
}
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/document/LoadFirstFieldSelector.java
Java
art
926
package org.apache.lucene.util;

/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import java.io.Serializable;
import java.lang.reflect.Field;
import java.lang.reflect.Modifier;

/**
 * Base class for Attributes that can be added to a
 * {@link org.apache.lucene.util.AttributeSource}.
 * <p>
 * Attributes are used to add data in a dynamic, yet type-safe way to a source
 * of usually streamed objects, e. g. a {@link org.apache.lucene.analysis.TokenStream}.
 */
public abstract class AttributeImpl implements Cloneable, Serializable, Attribute {
  /**
   * Clears the values in this AttributeImpl and resets it to its
   * default value. If this implementation implements more than one Attribute interface
   * it clears all.
   */
  public abstract void clear();

  /**
   * The default implementation of this method accesses all declared
   * fields of this object and prints the values in the following syntax:
   *
   * <pre>
   *   public String toString() {
   *     return "start=" + startOffset + ",end=" + endOffset;
   *   }
   * </pre>
   *
   * This method may be overridden by subclasses.
   */
  @Override
  public String toString() {
    StringBuilder buffer = new StringBuilder();
    // Parameterized Class<?> instead of the raw type (unchecked-warning-free).
    Class<?> clazz = this.getClass();
    // Note: only fields declared directly on the concrete class are reported;
    // inherited fields are intentionally not included (matches original behavior).
    Field[] fields = clazz.getDeclaredFields();
    try {
      for (Field f : fields) {
        if (Modifier.isStatic(f.getModifiers())) continue;
        // Private attribute fields must be made accessible for reflection.
        f.setAccessible(true);
        Object value = f.get(this);
        if (buffer.length() > 0) {
          buffer.append(',');
        }
        // append(Object) renders null as "null", so both branches of the
        // original null check collapse into one chained append (and avoid
        // building intermediate concatenated Strings inside append()).
        buffer.append(f.getName()).append('=').append(value);
      }
    } catch (IllegalAccessException e) {
      // this should never happen, because we're just accessing fields
      // from 'this'
      throw new RuntimeException(e);
    }

    return buffer.toString();
  }

  /**
   * Subclasses must implement this method and should compute
   * a hashCode similar to this:
   * <pre>
   *   public int hashCode() {
   *     int code = startOffset;
   *     code = code * 31 + endOffset;
   *     return code;
   *   }
   * </pre>
   *
   * see also {@link #equals(Object)}
   */
  @Override
  public abstract int hashCode();

  /**
   * All values used for computation of {@link #hashCode()}
   * should be checked here for equality.
   *
   * see also {@link Object#equals(Object)}
   */
  @Override
  public abstract boolean equals(Object other);

  /**
   * Copies the values from this Attribute into the passed-in
   * target attribute. The target implementation must support all the
   * Attributes this implementation supports.
   */
  public abstract void copyTo(AttributeImpl target);

  /**
   * Shallow clone. Subclasses must override this if they
   * need to clone any members deeply.
   */
  @Override
  public Object clone() {
    Object clone = null;
    try {
      clone = super.clone();
    } catch (CloneNotSupportedException e) {
      throw new RuntimeException(e);  // shouldn't happen: we implement Cloneable
    }
    return clone;
  }
}
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/util/AttributeImpl.java
Java
art
3,892
package org.apache.lucene.util;

/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import java.io.IOException;

import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;

/** Optimized implementation of a vector of bits.  This is more-or-less like
  java.util.BitSet, but also includes the following:
  <ul>
  <li>a count() method, which efficiently computes the number of one bits;</li>
  <li>optimized read from and write to disk;</li>
  <li>inlinable get() method;</li>
  <li>store and load, as bit set or d-gaps, depending on sparseness;</li>
  </ul>
  */
public final class BitVector implements Cloneable {

  // Packed bit storage, 8 bits per byte; bit i lives at bits[i >> 3], mask 1 << (i & 7).
  private byte[] bits;
  // Logical number of bits in the vector.
  private int size;
  // Cached number of set bits; -1 means "stale, recompute lazily in count()".
  private int count;

  /** Constructs a vector capable of holding <code>n</code> bits. */
  public BitVector(int n) {
    size = n;
    bits = new byte[(size >> 3) + 1];
    count = 0;  // freshly allocated array has no bits set
  }

  // Package-private: wraps an existing byte array without copying.
  // Count is unknown for arbitrary bytes, so the cache starts stale (-1).
  BitVector(byte[] bits, int size) {
    this.bits = bits;
    this.size = size;
    count = -1;
  }

  // Deep copy: the byte array is duplicated, and the (possibly stale) count
  // cache is carried over unchanged.
  @Override
  public Object clone() {
    byte[] copyBits = new byte[bits.length];
    System.arraycopy(bits, 0, copyBits, 0, bits.length);
    BitVector clone = new BitVector(copyBits, size);
    clone.count = count;
    return clone;
  }

  /** Sets the value of <code>bit</code> to one.
   * @throws ArrayIndexOutOfBoundsException if bit &gt;= size()
   */
  public final void set(int bit) {
    if (bit >= size) {
      throw new ArrayIndexOutOfBoundsException(bit);
    }
    bits[bit >> 3] |= 1 << (bit & 7);
    count = -1;  // invalidate the cached count
  }

  /** Sets the value of <code>bit</code> to true, and
   *  returns true if bit was already set */
  public final boolean getAndSet(int bit) {
    if (bit >= size) {
      throw new ArrayIndexOutOfBoundsException(bit);
    }
    final int pos = bit >> 3;
    final int v = bits[pos];
    final int flag = 1 << (bit & 7);
    if ((flag & v) != 0)
      return true;  // already set: nothing changes, count cache stays valid
    else {
      bits[pos] = (byte) (v | flag);
      // Incrementally maintain the count cache when it is currently valid.
      if (count != -1)
        count++;
      return false;
    }
  }

  /** Sets the value of <code>bit</code> to zero.
   * @throws ArrayIndexOutOfBoundsException if bit &gt;= size()
   */
  public final void clear(int bit) {
    if (bit >= size) {
      throw new ArrayIndexOutOfBoundsException(bit);
    }
    bits[bit >> 3] &= ~(1 << (bit & 7));
    count = -1;  // invalidate the cached count
  }

  /** Returns <code>true</code> if <code>bit</code> is one and
    <code>false</code> if it is zero. */
  public final boolean get(int bit) {
    assert bit >= 0 && bit < size: "bit " + bit + " is out of bounds 0.." + (size-1);
    return (bits[bit >> 3] & (1 << (bit & 7))) != 0;
  }

  /** Returns the number of bits in this vector.  This is also one greater than
    the number of the largest valid bit number. */
  public final int size() {
    return size;
  }

  /** Returns the total number of one bits in this vector.  This is efficiently
    computed and cached, so that, if the vector is not changed, no
    recomputation is done for repeated calls. */
  public final int count() {
    // if the vector has been modified
    if (count == -1) {
      int c = 0;
      int end = bits.length;
      for (int i = 0; i < end; i++)
        c += BYTE_COUNTS[bits[i] & 0xFF];	  // sum bits per byte
      count = c;
    }
    return count;
  }

  /** For testing: recomputes the bit count from scratch, bypassing the cache. */
  public final int getRecomputedCount() {
    int c = 0;
    int end = bits.length;
    for (int i = 0; i < end; i++)
      c += BYTE_COUNTS[bits[i] & 0xFF];	  // sum bits per byte
    return c;
  }

  // Popcount lookup table: BYTE_COUNTS[b] is the number of one bits in byte b.
  private static final byte[] BYTE_COUNTS = {	  // table of bits/byte
    0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4,
    1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
    1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
    2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
    1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
    2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
    2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
    3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
    1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
    2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
    2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
    3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
    2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
    3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
    3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
    4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8
  };


  /** Writes this vector to the file <code>name</code> in Directory
    <code>d</code>, in a format that can be read by the constructor {@link
    #BitVector(Directory, String)}.  */
  public final void write(Directory d, String name) throws IOException {
    IndexOutput output = d.createOutput(name);
    try {
      // Format selection: sparse vectors are stored as d-gaps (marked by a
      // leading -1), dense vectors as the raw bit set.
      if (isSparse()) {
        writeDgaps(output); // sparse bit-set more efficiently saved as d-gaps.
      } else {
        writeBits(output);
      }
    } finally {
      output.close();
    }
  }

  /** Write as a bit set: size, count, then the raw byte array. */
  private void writeBits(IndexOutput output) throws IOException {
    output.writeInt(size());        // write size
    output.writeInt(count());       // write count
    output.writeBytes(bits, bits.length);
  }

  /** Write as a d-gaps list: for each non-zero byte, the gap (delta) from the
      previous non-zero byte's index as a vint, followed by the byte itself. */
  private void writeDgaps(IndexOutput output) throws IOException {
    output.writeInt(-1);            // mark using d-gaps
    output.writeInt(size());        // write size
    output.writeInt(count());       // write count
    int last=0;
    int n = count();
    int m = bits.length;
    // Stop early once all set bits have been accounted for (n reaches 0).
    for (int i=0; i<m && n>0; i++) {
      if (bits[i]!=0) {
        output.writeVInt(i-last);
        output.writeByte(bits[i]);
        last = i;
        n -= BYTE_COUNTS[bits[i] & 0xFF];
      }
    }
  }

  /** Indicates if the bit vector is sparse and should be saved as a d-gaps list, or dense, and should be saved as a bit set. */
  private boolean isSparse() {
    // note: order of comparisons below set to favor smaller values (no binary range search.)
    // note: adding 4 because we start with ((int) -1) to indicate d-gaps format.
    // note: we write the d-gap for the byte number, and the byte (bits[i]) itself, therefore
    //       multiplying count by (8+8) or (8+16) or (8+24) etc.:
    //       - first 8 for writing bits[i] (1 byte vs. 1 bit), and
    //       - second part for writing the byte-number d-gap as vint.
    // note: factor is for read/write of byte-arrays being faster than vints.
    int factor = 10;
    if (bits.length < (1<< 7)) return factor * (4 + (8+ 8)*count()) < size();
    if (bits.length < (1<<14)) return factor * (4 + (8+16)*count()) < size();
    if (bits.length < (1<<21)) return factor * (4 + (8+24)*count()) < size();
    if (bits.length < (1<<28)) return factor * (4 + (8+32)*count()) < size();
    return                            factor * (4 + (8+40)*count()) < size();
  }

  /** Constructs a bit vector from the file <code>name</code> in Directory
    <code>d</code>, as written by the {@link #write} method.
    */
  public BitVector(Directory d, String name) throws IOException {
    IndexInput input = d.openInput(name);
    try {
      size = input.readInt();       // read size
      // A leading -1 is the d-gaps format marker written by writeDgaps();
      // otherwise the first int is the actual size and the raw bits follow.
      if (size == -1) {
        readDgaps(input);
      } else {
        readBits(input);
      }
    } finally {
      input.close();
    }
  }

  /** Read as a bit set */
  private void readBits(IndexInput input) throws IOException {
    count = input.readInt();        // read count
    bits = new byte[(size >> 3) + 1];     // allocate bits
    input.readBytes(bits, 0, bits.length);
  }

  /** read as a d-gaps list */
  private void readDgaps(IndexInput input) throws IOException {
    size = input.readInt();       // (re)read size
    count = input.readInt();        // read count
    bits = new byte[(size >> 3) + 1];     // allocate bits
    int last=0;
    int n = count();
    // Each entry is (vint gap, byte): reconstruct byte positions by running sum.
    while (n>0) {
      last += input.readVInt();
      bits[last] = input.readByte();
      n -= BYTE_COUNTS[bits[last] & 0xFF];
    }
  }

  /**
   * Retrieve a subset of this BitVector.
   *
   * @param start
   *            starting index, inclusive
   * @param end
   *            ending index, exclusive
   * @return subset
   */
  public BitVector subset(int start, int end) {
    if (start < 0 || end > size() || end < start)
      throw new IndexOutOfBoundsException();
    // Special case -- return empty vector if start == end
    if (end == start) return new BitVector(0);
    byte[] bits = new byte[((end - start - 1) >>> 3) + 1];
    int s = start >>> 3;
    for (int i = 0; i < bits.length; i++) {
      // Each destination byte is assembled from two adjacent source bytes,
      // shifted to align the subset's bit 0 with the destination's bit 0.
      int cur = 0xFF & this.bits[i + s];
      int next = i + s + 1 >= this.bits.length ? 0 : 0xFF & this.bits[i + s + 1];
      bits[i] = (byte) ((cur >>> (start & 7)) | ((next << (8 - (start & 7)))));
    }
    // Zero out the unused high-order bits of the final byte so they cannot
    // leak set bits from beyond 'end'.
    int bitsToClear = (bits.length * 8 - (end - start)) % 8;
    bits[bits.length - 1] &= ~(0xFF << (8 - bitsToClear));
    return new BitVector(bits, end - start);
  }
}
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/util/BitVector.java
Java
art
9,596
package org.apache.lucene.util; /** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * <p/> * http://www.apache.org/licenses/LICENSE-2.0 * <p/> * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * Methods for manipulating arrays. */ public final class ArrayUtil { /* Begin Apache Harmony code Revision taken on Friday, June 12. https://svn.apache.org/repos/asf/harmony/enhanced/classlib/archive/java6/modules/luni/src/main/java/java/lang/Integer.java */ /** * Parses the string argument as if it was an int value and returns the * result. Throws NumberFormatException if the string does not represent an * int quantity. * * @param chars a string representation of an int quantity. * @return int the value represented by the argument * @throws NumberFormatException if the argument could not be parsed as an int quantity. */ public static int parseInt(char[] chars) throws NumberFormatException { return parseInt(chars, 0, chars.length, 10); } /** * Parses a char array into an int. 
* @param chars the character array * @param offset The offset into the array * @param len The length * @return the int * @throws NumberFormatException if it can't parse */ public static int parseInt(char[] chars, int offset, int len) throws NumberFormatException { return parseInt(chars, offset, len, 10); } /** * Parses the string argument as if it was an int value and returns the * result. Throws NumberFormatException if the string does not represent an * int quantity. The second argument specifies the radix to use when parsing * the value. * * @param chars a string representation of an int quantity. * @param radix the base to use for conversion. * @return int the value represented by the argument * @throws NumberFormatException if the argument could not be parsed as an int quantity. */ public static int parseInt(char[] chars, int offset, int len, int radix) throws NumberFormatException { if (chars == null || radix < Character.MIN_RADIX || radix > Character.MAX_RADIX) { throw new NumberFormatException(); } int i = 0; if (len == 0) { throw new NumberFormatException("chars length is 0"); } boolean negative = chars[offset + i] == '-'; if (negative && ++i == len) { throw new NumberFormatException("can't convert to an int"); } if (negative == true){ offset++; len--; } return parse(chars, offset, len, radix, negative); } private static int parse(char[] chars, int offset, int len, int radix, boolean negative) throws NumberFormatException { int max = Integer.MIN_VALUE / radix; int result = 0; for (int i = 0; i < len; i++){ int digit = Character.digit(chars[i + offset], radix); if (digit == -1) { throw new NumberFormatException("Unable to parse"); } if (max > result) { throw new NumberFormatException("Unable to parse"); } int next = result * radix - digit; if (next > result) { throw new NumberFormatException("Unable to parse"); } result = next; } /*while (offset < len) { }*/ if (!negative) { result = -result; if (result < 0) { throw new NumberFormatException("Unable to 
parse"); } } return result; } /* END APACHE HARMONY CODE */ public static int getNextSize(int targetSize) { /* This over-allocates proportional to the list size, making room * for additional growth. The over-allocation is mild, but is * enough to give linear-time amortized behavior over a long * sequence of appends() in the presence of a poorly-performing * system realloc(). * The growth pattern is: 0, 4, 8, 16, 25, 35, 46, 58, 72, 88, ... */ return (targetSize >> 3) + (targetSize < 9 ? 3 : 6) + targetSize; } public static int getShrinkSize(int currentSize, int targetSize) { final int newSize = getNextSize(targetSize); // Only reallocate if we are "substantially" smaller. // This saves us from "running hot" (constantly making a // bit bigger then a bit smaller, over and over): if (newSize < currentSize / 2) return newSize; else return currentSize; } public static int[] grow(int[] array, int minSize) { if (array.length < minSize) { int[] newArray = new int[getNextSize(minSize)]; System.arraycopy(array, 0, newArray, 0, array.length); return newArray; } else return array; } public static int[] grow(int[] array) { return grow(array, 1 + array.length); } public static int[] shrink(int[] array, int targetSize) { final int newSize = getShrinkSize(array.length, targetSize); if (newSize != array.length) { int[] newArray = new int[newSize]; System.arraycopy(array, 0, newArray, 0, newSize); return newArray; } else return array; } public static long[] grow(long[] array, int minSize) { if (array.length < minSize) { long[] newArray = new long[getNextSize(minSize)]; System.arraycopy(array, 0, newArray, 0, array.length); return newArray; } else return array; } public static long[] grow(long[] array) { return grow(array, 1 + array.length); } public static long[] shrink(long[] array, int targetSize) { final int newSize = getShrinkSize(array.length, targetSize); if (newSize != array.length) { long[] newArray = new long[newSize]; System.arraycopy(array, 0, newArray, 0, newSize); 
return newArray; } else return array; } public static byte[] grow(byte[] array, int minSize) { if (array.length < minSize) { byte[] newArray = new byte[getNextSize(minSize)]; System.arraycopy(array, 0, newArray, 0, array.length); return newArray; } else return array; } public static byte[] grow(byte[] array) { return grow(array, 1 + array.length); } public static byte[] shrink(byte[] array, int targetSize) { final int newSize = getShrinkSize(array.length, targetSize); if (newSize != array.length) { byte[] newArray = new byte[newSize]; System.arraycopy(array, 0, newArray, 0, newSize); return newArray; } else return array; } /** * Returns hash of chars in range start (inclusive) to * end (inclusive) */ public static int hashCode(char[] array, int start, int end) { int code = 0; for (int i = end - 1; i >= start; i--) code = code * 31 + array[i]; return code; } /** * Returns hash of chars in range start (inclusive) to * end (inclusive) */ public static int hashCode(byte[] array, int start, int end) { int code = 0; for (int i = end - 1; i >= start; i--) code = code * 31 + array[i]; return code; } }
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/util/ArrayUtil.java
Java
art
7,552
package org.apache.lucene.util;

/**
 * Copyright 2009 The Apache Software Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.FieldCache;
import org.apache.lucene.search.FieldCache.CacheEntry;

/**
 * <p>
 * Provides methods for sanity checking that entries in the FieldCache
 * are not wasteful or inconsistent.
 * </p>
 * <p>
 * Lucene 2.9 Introduced numerous enhancements into how the FieldCache
 * is used by the low levels of Lucene searching (for Sorting and
 * ValueSourceQueries) to improve both the speed for Sorting, as well
 * as reopening of IndexReaders.  But these changes have shifted the
 * usage of FieldCache from "top level" IndexReaders (frequently a
 * MultiReader or DirectoryReader) down to the leaf level SegmentReaders.
 * As a result, existing applications that directly access the FieldCache
 * may find RAM usage increase significantly when upgrading to 2.9 or
 * Later.  This class provides an API for these applications (or their
 * Unit tests) to check at run time if the FieldCache contains "insane"
 * usages of the FieldCache.
 * </p>
 * <p>
 * <b>EXPERIMENTAL API:</b> This API is considered extremely advanced and
 * experimental.  It may be removed or altered w/o warning in future releases
 * of Lucene.
 * </p>
 * @see FieldCache
 * @see FieldCacheSanityChecker.Insanity
 * @see FieldCacheSanityChecker.InsanityType
 */
public final class FieldCacheSanityChecker {

  private RamUsageEstimator ramCalc = null;

  public FieldCacheSanityChecker() {
    /* NOOP */
  }

  /**
   * If set, will be used to estimate size for all CacheEntry objects
   * dealt with.
   */
  public void setRamUsageEstimator(RamUsageEstimator r) {
    ramCalc = r;
  }

  /**
   * Quick and dirty convenience method
   * @see #check
   */
  public static Insanity[] checkSanity(FieldCache cache) {
    return checkSanity(cache.getCacheEntries());
  }

  /**
   * Quick and dirty convenience method that instantiates an instance with
   * "good defaults" and uses it to test the CacheEntrys
   * @see #check
   */
  public static Insanity[] checkSanity(CacheEntry... cacheEntries) {
    FieldCacheSanityChecker sanityChecker = new FieldCacheSanityChecker();
    // doesn't check for interned
    sanityChecker.setRamUsageEstimator(new RamUsageEstimator(false));
    return sanityChecker.check(cacheEntries);
  }

  /**
   * Tests a CacheEntry[] for indication of "insane" cache usage.
   * <p>
   * <b>NOTE:</b> FieldCache CreationPlaceholder objects are ignored.
   * (:TODO: is this a bad idea? are we masking a real problem?)
   * </p>
   */
  public Insanity[] check(CacheEntry... cacheEntries) {
    if (null == cacheEntries || 0 == cacheEntries.length)
      return new Insanity[0];

    if (null != ramCalc) {
      for (int i = 0; i < cacheEntries.length; i++) {
        cacheEntries[i].estimateSize(ramCalc);
      }
    }

    // the indirect mapping lets MapOfSet dedup identical valIds for us
    //
    // maps the (valId) identityhashCode of cache values to
    // sets of CacheEntry instances
    final MapOfSets<Integer, CacheEntry> valIdToItems =
      new MapOfSets<Integer, CacheEntry>(new HashMap<Integer, Set<CacheEntry>>(17));
    // maps ReaderField keys to Sets of ValueIds
    final MapOfSets<ReaderField, Integer> readerFieldToValIds =
      new MapOfSets<ReaderField, Integer>(new HashMap<ReaderField, Set<Integer>>(17));
    // any keys that we know result in more than one valId
    final Set<ReaderField> valMismatchKeys = new HashSet<ReaderField>();

    // iterate over all the cacheEntries to get the mappings we'll need
    for (int i = 0; i < cacheEntries.length; i++) {
      final CacheEntry item = cacheEntries[i];
      final Object val = item.getValue();

      if (val instanceof FieldCache.CreationPlaceholder)
        continue;

      final ReaderField rf = new ReaderField(item.getReaderKey(),
                                             item.getFieldName());

      final Integer valId = Integer.valueOf(System.identityHashCode(val));

      // indirect mapping, so the MapOfSet will dedup identical valIds for us
      valIdToItems.put(valId, item);
      if (1 < readerFieldToValIds.put(rf, valId)) {
        valMismatchKeys.add(rf);
      }
    }

    final List<Insanity> insanity =
      new ArrayList<Insanity>(valMismatchKeys.size() * 3);

    insanity.addAll(checkValueMismatch(valIdToItems,
                                       readerFieldToValIds,
                                       valMismatchKeys));
    insanity.addAll(checkSubreaders(valIdToItems,
                                    readerFieldToValIds));

    return insanity.toArray(new Insanity[insanity.size()]);
  }

  /**
   * Internal helper method used by check that iterates over
   * valMismatchKeys and generates a Collection of Insanity
   * instances accordingly.  The MapOfSets are used to populate
   * the Insanity objects.
   * @see InsanityType#VALUEMISMATCH
   */
  private Collection<Insanity> checkValueMismatch(MapOfSets<Integer, CacheEntry> valIdToItems,
                                                  MapOfSets<ReaderField, Integer> readerFieldToValIds,
                                                  Set<ReaderField> valMismatchKeys) {

    final List<Insanity> insanity =
      new ArrayList<Insanity>(valMismatchKeys.size() * 3);

    if (! valMismatchKeys.isEmpty() ) {
      // we have multiple values for some ReaderFields

      final Map<ReaderField, Set<Integer>> rfMap = readerFieldToValIds.getMap();
      final Map<Integer, Set<CacheEntry>> valMap = valIdToItems.getMap();
      for (final ReaderField rf : valMismatchKeys) {
        final List<CacheEntry> badEntries =
          new ArrayList<CacheEntry>(valMismatchKeys.size() * 2);
        for (final Integer value : rfMap.get(rf)) {
          for (final CacheEntry cacheEntry : valMap.get(value)) {
            badEntries.add(cacheEntry);
          }
        }

        CacheEntry[] badness = new CacheEntry[badEntries.size()];
        badness = badEntries.toArray(badness);

        insanity.add(new Insanity(InsanityType.VALUEMISMATCH,
                                  "Multiple distinct value objects for " +
                                  rf.toString(), badness));
      }
    }
    return insanity;
  }

  /**
   * Internal helper method used by check that iterates over
   * the keys of readerFieldToValIds and generates a Collection
   * of Insanity instances whenever two (or more) ReaderField instances are
   * found that have an ancestry relationships.
   *
   * @see InsanityType#SUBREADER
   */
  private Collection<Insanity> checkSubreaders(MapOfSets<Integer, CacheEntry> valIdToItems,
                                               MapOfSets<ReaderField, Integer> readerFieldToValIds) {

    final List<Insanity> insanity = new ArrayList<Insanity>(23);

    Map<ReaderField, Set<ReaderField>> badChildren =
      new HashMap<ReaderField, Set<ReaderField>>(17);
    MapOfSets<ReaderField, ReaderField> badKids =
      new MapOfSets<ReaderField, ReaderField>(badChildren); // wrapper

    Map<Integer, Set<CacheEntry>> viToItemSets = valIdToItems.getMap();
    Map<ReaderField, Set<Integer>> rfToValIdSets = readerFieldToValIds.getMap();

    Set<ReaderField> seen = new HashSet<ReaderField>(17);

    Set<ReaderField> readerFields = rfToValIdSets.keySet();
    for (final ReaderField rf : readerFields) {

      if (seen.contains(rf)) continue;

      // was a raw List; parameterized to avoid unchecked warnings
      List<Object> kids = getAllDecendentReaderKeys(rf.readerKey);
      for (Object kidKey : kids) {
        ReaderField kid = new ReaderField(kidKey, rf.fieldName);

        if (badChildren.containsKey(kid)) {
          // we've already processed this kid as RF and found other problems
          // track those problems as our own
          badKids.put(rf, kid);
          badKids.putAll(rf, badChildren.get(kid));
          badChildren.remove(kid);

        } else if (rfToValIdSets.containsKey(kid)) {
          // we have cache entries for the kid
          badKids.put(rf, kid);
        }
        seen.add(kid);
      }
      seen.add(rf);
    }

    // every mapping in badKids represents an Insanity
    for (final ReaderField parent : badChildren.keySet()) {
      Set<ReaderField> kids = badChildren.get(parent);

      List<CacheEntry> badEntries = new ArrayList<CacheEntry>(kids.size() * 2);

      // put parent entr(ies) in first
      {
        for (final Integer value : rfToValIdSets.get(parent)) {
          badEntries.addAll(viToItemSets.get(value));
        }
      }

      // now the entries for the descendants
      for (final ReaderField kid : kids) {
        for (final Integer value : rfToValIdSets.get(kid)) {
          badEntries.addAll(viToItemSets.get(value));
        }
      }

      CacheEntry[] badness = new CacheEntry[badEntries.size()];
      badness = badEntries.toArray(badness);

      insanity.add(new Insanity(InsanityType.SUBREADER,
                                "Found caches for decendents of " +
                                parent.toString(),
                                badness));
    }

    return insanity;
  }

  /**
   * Checks if the seed is an IndexReader, and if so will walk
   * the hierarchy of subReaders building up a list of the objects
   * returned by obj.getFieldCacheKey()
   */
  private List<Object> getAllDecendentReaderKeys(Object seed) {
    List<Object> all = new ArrayList<Object>(17); // will grow as we iter
    all.add(seed);
    for (int i = 0; i < all.size(); i++) {
      Object obj = all.get(i);
      if (obj instanceof IndexReader) {
        IndexReader[] subs = ((IndexReader)obj).getSequentialSubReaders();
        for (int j = 0; (null != subs) && (j < subs.length); j++) {
          all.add(subs[j].getFieldCacheKey());
        }
      }
    }
    // need to skip the first, because it was the seed
    return all.subList(1, all.size());
  }

  /**
   * Simple pair object for using "readerKey + fieldName" as a Map key
   */
  private final static class ReaderField {
    public final Object readerKey;
    public final String fieldName;
    public ReaderField(Object readerKey, String fieldName) {
      this.readerKey = readerKey;
      this.fieldName = fieldName;
    }
    @Override
    public int hashCode() {
      return System.identityHashCode(readerKey) * fieldName.hashCode();
    }
    @Override
    public boolean equals(Object that) {
      if (! (that instanceof ReaderField)) return false;

      ReaderField other = (ReaderField) that;
      return (this.readerKey == other.readerKey &&
              this.fieldName.equals(other.fieldName));
    }
    @Override
    public String toString() {
      return readerKey.toString() + "+" + fieldName;
    }
  }

  /**
   * Simple container for a collection of related CacheEntry objects that
   * in conjunction with each other represent some "insane" usage of the
   * FieldCache.
   */
  public final static class Insanity {
    private final InsanityType type;
    private final String msg;
    private final CacheEntry[] entries;
    public Insanity(InsanityType type, String msg, CacheEntry... entries) {
      if (null == type) {
        throw new IllegalArgumentException
          ("Insanity requires non-null InsanityType");
      }
      if (null == entries || 0 == entries.length) {
        throw new IllegalArgumentException
          ("Insanity requires non-null/non-empty CacheEntry[]");
      }
      this.type = type;
      this.msg = msg;
      this.entries = entries;
    }
    /**
     * Type of insane behavior this object represents
     */
    public InsanityType getType() { return type; }
    /**
     * Description of the insane behavior
     */
    public String getMsg() { return msg; }
    /**
     * CacheEntry objects which suggest a problem
     */
    public CacheEntry[] getCacheEntries() { return entries; }
    /**
     * Multi-Line representation of this Insanity object, starting with
     * the Type and Msg, followed by each CacheEntry.toString() on its
     * own line prefaced by a tab character
     */
    @Override
    public String toString() {
      StringBuilder buf = new StringBuilder();
      buf.append(getType()).append(": ");

      String m = getMsg();
      if (null != m) buf.append(m);

      buf.append('\n');

      CacheEntry[] ce = getCacheEntries();
      for (int i = 0; i < ce.length; i++) {
        buf.append('\t').append(ce[i].toString()).append('\n');
      }

      return buf.toString();
    }
  }

  /**
   * An Enumeration of the different types of "insane" behavior that
   * may be detected in a FieldCache.
   *
   * @see InsanityType#SUBREADER
   * @see InsanityType#VALUEMISMATCH
   * @see InsanityType#EXPECTED
   */
  public final static class InsanityType {
    private final String label;
    private InsanityType(final String label) {
      this.label = label;
    }
    @Override
    public String toString() { return label; }

    /**
     * Indicates an overlap in cache usage on a given field
     * in sub/super readers.
     */
    public final static InsanityType SUBREADER =
      new InsanityType("SUBREADER");

    /**
     * <p>
     * Indicates entries have the same reader+fieldname but
     * different cached values.  This can happen if different datatypes,
     * or parsers are used -- and while it's not necessarily a bug
     * it's typically an indication of a possible problem.
     * </p>
     * <p>
     * <b>NOTE:</b> Only the reader, fieldname, and cached value are actually
     * tested -- if two cache entries have different parsers or datatypes but
     * the cached values are the same Object (== not just equal()) this method
     * does not consider that a red flag.  This allows for subtle variations
     * in the way a Parser is specified (null vs DEFAULT_LONG_PARSER, etc...)
     * </p>
     */
    public final static InsanityType VALUEMISMATCH =
      new InsanityType("VALUEMISMATCH");

    /**
     * Indicates an expected bit of "insanity".  This may be useful for
     * clients that wish to preserve/log information about insane usage
     * but indicate that it was expected.
     */
    public final static InsanityType EXPECTED =
      new InsanityType("EXPECTED");
  }

}
zzh-simple-hr
Zlucene/src/java/org/apache/lucene/util/FieldCacheSanityChecker.java
Java
art
14,957