code stringlengths 1 2.01M | repo_name stringlengths 3 62 | path stringlengths 1 267 | language stringclasses 231
values | license stringclasses 13
values | size int64 1 2.01M |
|---|---|---|---|---|---|
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.File;
import java.io.FilenameFilter;
import java.util.HashSet;
import java.util.regex.Pattern;
/**
* Filename filter that accept filenames and extensions only created by Lucene.
*/
public class IndexFileNameFilter implements FilenameFilter {

  private static final IndexFileNameFilter singleton = new IndexFileNameFilter();

  /** All known non-compound index file extensions. */
  private final HashSet<String> extensions;
  /** Subset of extensions whose files live inside a compound (CFS) file. */
  private final HashSet<String> extensionsInCFS;

  // Generation-suffixed extensions: "fN" and "sN" (N = one or more digits).
  // Compiled once instead of re-compiling the regex on every accept() call.
  private static final Pattern F_GEN_PATTERN = Pattern.compile("f\\d+");
  private static final Pattern S_GEN_PATTERN = Pattern.compile("s\\d+");

  // Prevent instantiation.
  private IndexFileNameFilter() {
    extensions = new HashSet<String>();
    for (int i = 0; i < IndexFileNames.INDEX_EXTENSIONS.length; i++) {
      extensions.add(IndexFileNames.INDEX_EXTENSIONS[i]);
    }
    extensionsInCFS = new HashSet<String>();
    for (int i = 0; i < IndexFileNames.INDEX_EXTENSIONS_IN_COMPOUND_FILE.length; i++) {
      extensionsInCFS.add(IndexFileNames.INDEX_EXTENSIONS_IN_COMPOUND_FILE[i]);
    }
  }

  /* (non-Javadoc)
   * @see java.io.FilenameFilter#accept(java.io.File, java.lang.String)
   */
  public boolean accept(File dir, String name) {
    int i = name.lastIndexOf('.');
    if (i != -1) {
      // Known static extension, or a generation-suffixed "fN"/"sN" file.
      String extension = name.substring(1 + i);
      if (extensions.contains(extension)) {
        return true;
      } else if (F_GEN_PATTERN.matcher(extension).matches()) {
        return true;
      } else if (S_GEN_PATTERN.matcher(extension).matches()) {
        return true;
      }
    } else {
      // Extensionless names: the legacy "deletable" file and segments files.
      if (name.equals(IndexFileNames.DELETABLE)) return true;
      else if (name.startsWith(IndexFileNames.SEGMENTS)) return true;
    }
    return false;
  }

  /**
   * Returns true if this is a file that would be contained
   * in a CFS file.  This function should only be called on
   * files that pass the above "accept" (ie, are already
   * known to be a Lucene index file).
   */
  public boolean isCFSFile(String name) {
    int i = name.lastIndexOf('.');
    if (i != -1) {
      String extension = name.substring(1 + i);
      if (extensionsInCFS.contains(extension)) {
        return true;
      }
      if (F_GEN_PATTERN.matcher(extension).matches()) {
        return true;
      }
    }
    return false;
  }

  /** Returns the shared singleton filter instance. */
  public static IndexFileNameFilter getFilter() {
    return singleton;
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/IndexFileNameFilter.java | Java | art | 3,070 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.util.Collection;
import java.util.Map;
import java.io.IOException;
import org.apache.lucene.store.Directory;
/**
* <p>Expert: represents a single commit into an index as seen by the
* {@link IndexDeletionPolicy} or {@link IndexReader}.</p>
*
* <p> Changes to the content of an index are made visible
* only after the writer who made that change commits by
* writing a new segments file
* (<code>segments_N</code>). This point in time, when the
* action of writing of a new segments file to the directory
* is completed, is an index commit.</p>
*
* <p>Each index commit point has a unique segments file
* associated with it. The segments file associated with a
* later index commit point would have a larger N.</p>
*
* <p><b>WARNING</b>: This API is a new and experimental and
* may suddenly change. </p>
*/
public abstract class IndexCommit {

  /**
   * Name of the segments file (<code>segments_N</code>) that this
   * commit point corresponds to.
   */
  public abstract String getSegmentsFileName();

  /**
   * All index files that this commit point references.
   */
  public abstract Collection<String> getFileNames() throws IOException;

  /**
   * The {@link Directory} holding the index for this commit.
   */
  public abstract Directory getDirectory();

  /**
   * Delete this commit point.  This only applies when using
   * the commit point in the context of IndexWriter's
   * IndexDeletionPolicy.
   * <p>
   * Upon calling this, the writer is notified that this commit
   * point should be deleted.
   * <p>
   * Decision that a commit-point should be deleted is taken by the {@link IndexDeletionPolicy} in effect
   * and therefore this should only be called by its {@link IndexDeletionPolicy#onInit onInit()} or
   * {@link IndexDeletionPolicy#onCommit onCommit()} methods.
   */
  public abstract void delete();

  public abstract boolean isDeleted();

  /** Returns true if this commit is an optimized index. */
  public abstract boolean isOptimized();

  /**
   * Two IndexCommits are equal when both their Directory and
   * version are equal.
   */
  @Override
  public boolean equals(Object other) {
    if (!(other instanceof IndexCommit)) {
      return false;
    }
    final IndexCommit that = (IndexCommit) other;
    return getDirectory().equals(that.getDirectory())
        && getVersion() == that.getVersion();
  }

  @Override
  public int hashCode() {
    // Consistent with equals: combines the Directory's hash and the version.
    return (int) (getDirectory().hashCode() + getVersion());
  }

  /** Returns the version for this IndexCommit.  This is the
   *  same value that {@link IndexReader#getVersion} would
   *  return if it were opened on this commit. */
  public abstract long getVersion();

  /** Returns the generation (the _N in segments_N) for this
   *  IndexCommit */
  public abstract long getGeneration();

  /**
   * Convenience method: last-modified time of this commit's
   * segments_N file, i.e.
   * getDirectory().fileModified(getSegmentsFileName()).
   */
  public long getTimestamp() throws IOException {
    final Directory dir = getDirectory();
    return dir.fileModified(getSegmentsFileName());
  }

  /** Returns userData, previously passed to {@link
   *  IndexWriter#commit(Map)} for this commit.  Map is
   *  String -> String. */
  public abstract Map<String,String> getUserData() throws IOException;
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/IndexCommit.java | Java | art | 4,177 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.ChecksumIndexOutput;
import org.apache.lucene.store.ChecksumIndexInput;
import org.apache.lucene.store.NoSuchDirectoryException;
import org.apache.lucene.util.ThreadInterruptedException;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.PrintStream;
import java.util.Vector;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.HashMap;
import java.util.Map;
/**
* A collection of segmentInfo objects with methods for operating on
* those segments in relation to the file system.
*
* <p><b>NOTE:</b> This API is new and still experimental
* (subject to change suddenly in the next release)</p>
*/
public final class SegmentInfos extends Vector<SegmentInfo> {
  /** The file format version, a negative number. */
  /* Works since counter, the old 1st entry, is always >= 0 */
  public static final int FORMAT = -1;

  /** This format adds details used for lockless commits.  It differs
   * slightly from the previous format in that file names
   * are never re-used (write once).  Instead, each file is
   * written to the next generation.  For example,
   * segments_1, segments_2, etc.  This allows us to not use
   * a commit lock.  See <a
   * href="http://lucene.apache.org/java/docs/fileformats.html">file
   * formats</a> for details.
   */
  public static final int FORMAT_LOCKLESS = -2;

  /** This format adds a "hasSingleNormFile" flag into each segment info.
   * See <a href="http://issues.apache.org/jira/browse/LUCENE-756">LUCENE-756</a>
   * for details.
   */
  public static final int FORMAT_SINGLE_NORM_FILE = -3;

  /** This format allows multiple segments to share a single
   * vectors and stored fields file. */
  public static final int FORMAT_SHARED_DOC_STORE = -4;

  /** This format adds a checksum at the end of the file to
   * ensure all bytes were successfully written. */
  public static final int FORMAT_CHECKSUM = -5;

  /** This format adds the deletion count for each segment.
   * This way IndexWriter can efficiently report numDocs(). */
  public static final int FORMAT_DEL_COUNT = -6;

  /** This format adds the boolean hasProx to record if any
   * fields in the segment store prox information (ie, have
   * omitTermFreqAndPositions==false) */
  public static final int FORMAT_HAS_PROX = -7;

  /** This format adds optional commit userData (String) storage. */
  public static final int FORMAT_USER_DATA = -8;

  /** This format adds optional per-segment String
   * diagnostics storage, and switches userData to Map */
  public static final int FORMAT_DIAGNOSTICS = -9;

  /* This must always point to the most recent file format.
   * NOTE: formats are negative and DECREASE as they get newer, so
   * "format <= FORMAT_X" elsewhere in this class means "X or newer". */
  static final int CURRENT_FORMAT = FORMAT_DIAGNOSTICS;

  public int counter = 0;    // used to name new segments

  /**
   * counts how often the index has been changed by adding or deleting docs.
   * starting with the current time in milliseconds forces to create unique version numbers.
   */
  private long version = System.currentTimeMillis();

  private long generation = 0;     // generation of the "segments_N" for the next commit
  private long lastGeneration = 0; // generation of the "segments_N" file we last successfully read
                                   // or wrote; this is normally the same as generation except if
                                   // there was an IOException that had interrupted a commit

  private Map<String,String> userData = Collections.<String,String>emptyMap();       // Opaque Map<String, String> that user can specify during IndexWriter.commit

  /**
   * If non-null, information about loading segments_N files
   * will be printed here.  @see #setInfoStream.
   */
  private static PrintStream infoStream;
public final SegmentInfo info(int i) {
return get(i);
}
/**
* Get the generation (N) of the current segments_N file
* from a list of files.
*
* @param files -- array of file names to check
*/
public static long getCurrentSegmentGeneration(String[] files) {
if (files == null) {
return -1;
}
long max = -1;
for (int i = 0; i < files.length; i++) {
String file = files[i];
if (file.startsWith(IndexFileNames.SEGMENTS) && !file.equals(IndexFileNames.SEGMENTS_GEN)) {
long gen = generationFromSegmentsFileName(file);
if (gen > max) {
max = gen;
}
}
}
return max;
}
/**
* Get the generation (N) of the current segments_N file
* in the directory.
*
* @param directory -- directory to search for the latest segments_N file
*/
public static long getCurrentSegmentGeneration(Directory directory) throws IOException {
try {
return getCurrentSegmentGeneration(directory.listAll());
} catch (NoSuchDirectoryException nsde) {
return -1;
}
}
/**
* Get the filename of the current segments_N file
* from a list of files.
*
* @param files -- array of file names to check
*/
public static String getCurrentSegmentFileName(String[] files) throws IOException {
return IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS,
"",
getCurrentSegmentGeneration(files));
}
/**
* Get the filename of the current segments_N file
* in the directory.
*
* @param directory -- directory to search for the latest segments_N file
*/
public static String getCurrentSegmentFileName(Directory directory) throws IOException {
return IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS,
"",
getCurrentSegmentGeneration(directory));
}
/**
* Get the segments_N filename in use by this segment infos.
*/
public String getCurrentSegmentFileName() {
return IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS,
"",
lastGeneration);
}
/**
* Parse the generation off the segments file name and
* return it.
*/
public static long generationFromSegmentsFileName(String fileName) {
if (fileName.equals(IndexFileNames.SEGMENTS)) {
return 0;
} else if (fileName.startsWith(IndexFileNames.SEGMENTS)) {
return Long.parseLong(fileName.substring(1+IndexFileNames.SEGMENTS.length()),
Character.MAX_RADIX);
} else {
throw new IllegalArgumentException("fileName \"" + fileName + "\" is not a segments file");
}
}
/**
* Get the next segments_N filename that will be written.
*/
public String getNextSegmentFileName() {
long nextGeneration;
if (generation == -1) {
nextGeneration = 1;
} else {
nextGeneration = generation+1;
}
return IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS,
"",
nextGeneration);
}
  /**
   * Read a particular segmentFileName.  Note that this may
   * throw an IOException if a commit is in process.
   *
   * @param directory -- directory containing the segments file
   * @param segmentFileName -- segment file to load
   * @throws CorruptIndexException if the index is corrupt
   * @throws IOException if there is a low-level IO error
   */
  public final void read(Directory directory, String segmentFileName) throws CorruptIndexException, IOException {
    boolean success = false;

    // Clear any previous segments:
    clear();

    ChecksumIndexInput input = new ChecksumIndexInput(directory.openInput(segmentFileName));

    generation = generationFromSegmentsFileName(segmentFileName);
    lastGeneration = generation;

    try {
      int format = input.readInt();
      if(format < 0){     // file contains explicit format info
        // check that it is a format we can understand
        // (formats are negative and decrease as they get newer, so a
        // value below CURRENT_FORMAT comes from a future version)
        if (format < CURRENT_FORMAT)
          throw new CorruptIndexException("Unknown format version: " + format);
        version = input.readLong(); // read version
        counter = input.readInt(); // read counter
      }
      else{     // file is in old format without explicit format info;
                // the first int was actually the counter
        counter = format;
      }

      for (int i = input.readInt(); i > 0; i--) { // read segmentInfos
        add(new SegmentInfo(directory, format, input));
      }

      if(format >= 0){    // in old format the version number may be at the end of the file
        if (input.getFilePointer() >= input.length())
          version = System.currentTimeMillis(); // old file format without version number
        else
          version = input.readLong(); // read version
      }

      // userData storage: a full Map for FORMAT_DIAGNOSTICS and newer, a
      // single optional String for FORMAT_USER_DATA, nothing before that.
      if (format <= FORMAT_USER_DATA) {
        if (format <= FORMAT_DIAGNOSTICS) {
          userData = input.readStringStringMap();
        } else if (0 != input.readByte()) {
          userData = Collections.singletonMap("userData", input.readString());
        } else {
          userData = Collections.<String,String>emptyMap();
        }
      } else {
        userData = Collections.<String,String>emptyMap();
      }

      if (format <= FORMAT_CHECKSUM) {
        // Compare the checksum accumulated while reading against the
        // trailing stored checksum to detect truncated/corrupt files.
        final long checksumNow = input.getChecksum();
        final long checksumThen = input.readLong();
        if (checksumNow != checksumThen)
          throw new CorruptIndexException("checksum mismatch in segments file");
      }
      success = true;
    }
    finally {
      input.close();
      if (!success) {
        // Clear any segment infos we had loaded so we
        // have a clean slate on retry:
        clear();
      }
    }
  }
  /**
   * This version of read uses the retry logic (for lock-less
   * commits) to find the right segments file to load.
   * @throws CorruptIndexException if the index is corrupt
   * @throws IOException if there is a low-level IO error
   */
  public final void read(Directory directory) throws CorruptIndexException, IOException {

    // -1 marks "nothing read yet"; FindSegmentsFile locates the current
    // segments_N file and invokes doBody with its name (retrying as needed).
    generation = lastGeneration = -1;

    new FindSegmentsFile(directory) {

      @Override
      protected Object doBody(String segmentFileName) throws CorruptIndexException, IOException {
        read(directory, segmentFileName);
        return null;
      }
    }.run();
  }

  // Only non-null after prepareCommit has been called and
  // before finishCommit is called
  ChecksumIndexOutput pendingSegnOutput;
  /**
   * Writes this SegmentInfos to the next segments_N file as the first
   * phase of a two-phase commit: the output is left pending (checksum not
   * yet finalized) in {@link #pendingSegnOutput} until finishCommit or
   * rollbackCommit is called.  On failure the partial file is deleted.
   */
  private final void write(Directory directory) throws IOException {

    String segmentFileName = getNextSegmentFileName();

    // Always advance the generation on write:
    if (generation == -1) {
      generation = 1;
    } else {
      generation++;
    }

    ChecksumIndexOutput segnOutput = new ChecksumIndexOutput(directory.createOutput(segmentFileName));

    boolean success = false;

    try {
      segnOutput.writeInt(CURRENT_FORMAT); // write FORMAT
      segnOutput.writeLong(++version); // every write changes
                                       // the index
      segnOutput.writeInt(counter); // write counter
      segnOutput.writeInt(size()); // write infos
      for (int i = 0; i < size(); i++) {
        info(i).write(segnOutput);
      }
      segnOutput.writeStringStringMap(userData);
      segnOutput.prepareCommit();
      success = true;
      pendingSegnOutput = segnOutput;
    } finally {
      if (!success) {
        // We hit an exception above; try to close the file
        // but suppress any exception:
        try {
          segnOutput.close();
        } catch (Throwable t) {
          // Suppress so we keep throwing the original exception
        }
        try {
          // Try not to leave a truncated segments_N file in
          // the index:
          directory.deleteFile(segmentFileName);
        } catch (Throwable t) {
          // Suppress so we keep throwing the original exception
        }
      }
    }
  }
/**
* Returns a copy of this instance, also copying each
* SegmentInfo.
*/
@Override
public Object clone() {
SegmentInfos sis = (SegmentInfos) super.clone();
for(int i=0;i<sis.size();i++) {
sis.set(i, (SegmentInfo) sis.info(i).clone());
}
sis.userData = new HashMap<String, String>(userData);
return sis;
}
  /**
   * version number when this SegmentInfos was generated.
   */
  public long getVersion() {
    return version;
  }

  /** Returns the generation of the segments_N file for the next commit. */
  public long getGeneration() {
    return generation;
  }

  /** Returns the generation of the segments_N file last successfully
   *  read or written. */
  public long getLastGeneration() {
    return lastGeneration;
  }
/**
* Current version number from segments file.
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
*/
public static long readCurrentVersion(Directory directory)
throws CorruptIndexException, IOException {
// Fully read the segments file: this ensures that it's
// completely written so that if
// IndexWriter.prepareCommit has been called (but not
// yet commit), then the reader will still see itself as
// current:
SegmentInfos sis = new SegmentInfos();
sis.read(directory);
return sis.version;
}
/**
* Returns userData from latest segments file
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
*/
public static Map<String,String> readCurrentUserData(Directory directory)
throws CorruptIndexException, IOException {
SegmentInfos sis = new SegmentInfos();
sis.read(directory);
return sis.getUserData();
}
  /** If non-null, information about retries when loading
   * the segments file will be printed to this.
   */
  public static void setInfoStream(PrintStream infoStream) {
    SegmentInfos.infoStream = infoStream;
  }

  /* Advanced configuration of retry logic in loading
     segments_N file */
  private static int defaultGenFileRetryCount = 10;
  private static int defaultGenFileRetryPauseMsec = 50;
  private static int defaultGenLookaheadCount = 10;

  /**
   * Advanced: set how many times to try loading the
   * segments.gen file contents to determine current segment
   * generation.  This file is only referenced when the
   * primary method (listing the directory) fails.
   */
  public static void setDefaultGenFileRetryCount(int count) {
    defaultGenFileRetryCount = count;
  }

  /**
   * @see #setDefaultGenFileRetryCount
   */
  public static int getDefaultGenFileRetryCount() {
    return defaultGenFileRetryCount;
  }

  /**
   * Advanced: set how many milliseconds to pause in between
   * attempts to load the segments.gen file.
   */
  public static void setDefaultGenFileRetryPauseMsec(int msec) {
    defaultGenFileRetryPauseMsec = msec;
  }

  /**
   * @see #setDefaultGenFileRetryPauseMsec
   */
  public static int getDefaultGenFileRetryPauseMsec() {
    return defaultGenFileRetryPauseMsec;
  }

  /**
   * Advanced: set how many times to try incrementing the
   * gen when loading the segments file.  This only runs if
   * the primary (listing directory) and secondary (opening
   * segments.gen file) methods fail to find the segments
   * file.
   */
  public static void setDefaultGenLookaheadCount(int count) {
    defaultGenLookaheadCount = count;
  }

  /**
   * @see #setDefaultGenLookaheadCount
   */
  // NOTE: method name contains a typo ("Lookahed"); kept as-is because it
  // is part of the public API.
  public static int getDefaultGenLookahedCount() {
    return defaultGenLookaheadCount;
  }
/**
* @see #setInfoStream
*/
public static PrintStream getInfoStream() {
return infoStream;
}
private static void message(String message) {
if (infoStream != null) {
infoStream.println("SIS [" + Thread.currentThread().getName() + "]: " + message);
}
}
  /**
   * Utility class for executing code that needs to do
   * something with the current segments file.  This is
   * necessary with lock-less commits because from the time
   * you locate the current segments file name, until you
   * actually open it, read its contents, or check modified
   * time, etc., it could have been deleted due to a writer
   * commit finishing.
   */
  public abstract static class FindSegmentsFile {

    final Directory directory;

    public FindSegmentsFile(Directory directory) {
      this.directory = directory;
    }

    /** Runs {@link #doBody} against the current segments file, with retry. */
    public Object run() throws CorruptIndexException, IOException {
      return run(null);
    }

    /** Runs {@link #doBody} against a specific commit's segments file if
     *  given, otherwise against the current one (with retry logic). */
    public Object run(IndexCommit commit) throws CorruptIndexException, IOException {
      if (commit != null) {
        if (directory != commit.getDirectory())
          throw new IOException("the specified commit does not match the specified Directory");
        return doBody(commit.getSegmentsFileName());
      }

      String segmentFileName = null;
      long lastGen = -1;
      long gen = 0;
      int genLookaheadCount = 0;
      IOException exc = null;
      boolean retry = false;

      int method = 0;

      // Loop until we succeed in calling doBody() without
      // hitting an IOException.  An IOException most likely
      // means a commit was in process and has finished, in
      // the time it took us to load the now-old infos files
      // (and segments files).  It's also possible it's a
      // true error (corrupt index).  To distinguish these,
      // on each retry we must see "forward progress" on
      // which generation we are trying to load.  If we
      // don't, then the original error is real and we throw
      // it.

      // We have three methods for determining the current
      // generation.  We try the first two in parallel, and
      // fall back to the third when necessary.

      while(true) {

        if (0 == method) {

          // Method 1: list the directory and use the highest
          // segments_N file.  This method works well as long
          // as there is no stale caching on the directory
          // contents (NOTE: NFS clients often have such stale
          // caching):
          String[] files = null;

          long genA = -1;

          files = directory.listAll();

          if (files != null)
            genA = getCurrentSegmentGeneration(files);

          message("directory listing genA=" + genA);

          // Method 2: open segments.gen and read its
          // contents.  Then we take the larger of the two
          // gen's.  This way, if either approach is hitting
          // a stale cache (NFS) we have a better chance of
          // getting the right generation.
          long genB = -1;
          for(int i=0;i<defaultGenFileRetryCount;i++) {
            IndexInput genInput = null;
            try {
              genInput = directory.openInput(IndexFileNames.SEGMENTS_GEN);
            } catch (FileNotFoundException e) {
              message("segments.gen open: FileNotFoundException " + e);
              break;
            } catch (IOException e) {
              message("segments.gen open: IOException " + e);
            }

            if (genInput != null) {
              try {
                int version = genInput.readInt();
                if (version == FORMAT_LOCKLESS) {
                  // segments.gen stores the generation twice; they must
                  // agree, otherwise we caught a partial write.
                  long gen0 = genInput.readLong();
                  long gen1 = genInput.readLong();
                  message("fallback check: " + gen0 + "; " + gen1);
                  if (gen0 == gen1) {
                    // The file is consistent.
                    genB = gen0;
                    break;
                  }
                }
              } catch (IOException err2) {
                // will retry
              } finally {
                genInput.close();
              }
            }
            try {
              Thread.sleep(defaultGenFileRetryPauseMsec);
            } catch (InterruptedException ie) {
              throw new ThreadInterruptedException(ie);
            }
          }

          message(IndexFileNames.SEGMENTS_GEN + " check: genB=" + genB);

          // Pick the larger of the two gen's:
          if (genA > genB)
            gen = genA;
          else
            gen = genB;

          if (gen == -1) {
            // Neither approach found a generation
            throw new FileNotFoundException("no segments* file found in " + directory + ": files: " + Arrays.toString(files));
          }
        }

        // Third method (fallback if first & second methods
        // are not reliable): since both directory cache and
        // file contents cache seem to be stale, just
        // advance the generation.
        if (1 == method || (0 == method && lastGen == gen && retry)) {

          method = 1;

          if (genLookaheadCount < defaultGenLookaheadCount) {
            gen++;
            genLookaheadCount++;
            message("look ahead increment gen to " + gen);
          }
        }

        if (lastGen == gen) {

          // This means we're about to try the same
          // segments_N last tried.  This is allowed,
          // exactly once, because writer could have been in
          // the process of writing segments_N last time.

          if (retry) {
            // OK, we've tried the same segments_N file
            // twice in a row, so this must be a real
            // error.  We throw the original exception we
            // got.
            throw exc;
          } else {
            retry = true;
          }

        } else if (0 == method) {
          // Segment file has advanced since our last loop, so
          // reset retry:
          retry = false;
        }

        lastGen = gen;

        segmentFileName = IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS,
                                                                "",
                                                                gen);

        try {
          Object v = doBody(segmentFileName);
          if (exc != null) {
            message("success on " + segmentFileName);
          }
          return v;
        } catch (IOException err) {

          // Save the original root cause:
          if (exc == null) {
            exc = err;
          }

          message("primary Exception on '" + segmentFileName + "': " + err + "'; will retry: retry=" + retry + "; gen = " + gen);

          if (!retry && gen > 1) {

            // This is our first time trying this segments
            // file (because retry is false), and, there is
            // possibly a segments_(N-1) (because gen > 1).
            // So, check if the segments_(N-1) exists and
            // try it if so:
            String prevSegmentFileName = IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS,
                                                                               "",
                                                                               gen-1);

            final boolean prevExists;
            prevExists = directory.fileExists(prevSegmentFileName);

            if (prevExists) {
              message("fallback to prior segment file '" + prevSegmentFileName + "'");
              try {
                Object v = doBody(prevSegmentFileName);
                if (exc != null) {
                  message("success on fallback " + prevSegmentFileName);
                }
                return v;
              } catch (IOException err2) {
                message("secondary Exception on '" + prevSegmentFileName + "': " + err2 + "'; will retry");
              }
            }
          }
        }
      }
    }

    /**
     * Subclass must implement this.  The assumption is an
     * IOException will be thrown if something goes wrong
     * during the processing that could have been caused by
     * a writer committing.
     */
    protected abstract Object doBody(String segmentFileName) throws CorruptIndexException, IOException;
  }
/**
* Returns a new SegmentInfos containing the SegmentInfo
* instances in the specified range first (inclusive) to
* last (exclusive), so total number of segments returned
* is last-first.
*/
public SegmentInfos range(int first, int last) {
SegmentInfos infos = new SegmentInfos();
infos.addAll(super.subList(first, last));
return infos;
}
// Carry over generation numbers from another SegmentInfos
void updateGeneration(SegmentInfos other) {
lastGeneration = other.lastGeneration;
generation = other.generation;
version = other.version;
}
  /**
   * Aborts a pending two-phase commit started by {@link #prepareCommit}:
   * closes the pending output and deletes the partially-written segments_N
   * file.  All cleanup exceptions are suppressed so the caller's original
   * exception keeps propagating.
   */
  final void rollbackCommit(Directory dir) throws IOException {
    if (pendingSegnOutput != null) {
      try {
        pendingSegnOutput.close();
      } catch (Throwable t) {
        // Suppress so we keep throwing the original exception
        // in our caller
      }

      // Must carefully compute fileName from "generation"
      // since lastGeneration isn't incremented:
      try {
        final String segmentFileName = IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS,
                                                                             "",
                                                                             generation);
        dir.deleteFile(segmentFileName);
      } catch (Throwable t) {
        // Suppress so we keep throwing the original exception
        // in our caller
      }
      pendingSegnOutput = null;
    }
  }
/** Call this to start a commit. This writes the new
* segments file, but writes an invalid checksum at the
* end, so that it is not visible to readers. Once this
* is called you must call {@link #finishCommit} to complete
* the commit or {@link #rollbackCommit} to abort it. */
final void prepareCommit(Directory dir) throws IOException {
if (pendingSegnOutput != null)
throw new IllegalStateException("prepareCommit was already called");
write(dir);
}
/** Returns all file names referenced by SegmentInfo
* instances matching the provided Directory (ie files
* associated with any "external" segments are skipped).
* The returned collection is recomputed on each
* invocation. */
public Collection<String> files(Directory dir, boolean includeSegmentsFile) throws IOException {
HashSet<String> files = new HashSet<String>();
if (includeSegmentsFile) {
files.add(getCurrentSegmentFileName());
}
final int size = size();
for(int i=0;i<size;i++) {
final SegmentInfo info = info(i);
if (info.dir == dir) {
files.addAll(info(i).files());
}
}
return files;
}
  /**
   * Completes a two-phase commit started by {@link #prepareCommit}:
   * finalizes the checksum, syncs the segments_N file, updates
   * lastGeneration, and best-effort writes segments.gen.  Rolls back
   * on failure before the sync succeeds.
   */
  final void finishCommit(Directory dir) throws IOException {
    if (pendingSegnOutput == null)
      throw new IllegalStateException("prepareCommit was not called");
    boolean success = false;
    try {
      pendingSegnOutput.finishCommit();
      pendingSegnOutput.close();
      pendingSegnOutput = null;
      success = true;
    } finally {
      if (!success)
        rollbackCommit(dir);
    }

    // NOTE: if we crash here, we have left a segments_N
    // file in the directory in a possibly corrupt state (if
    // some bytes made it to stable storage and others
    // didn't).  But, the segments_N file includes checksum
    // at the end, which should catch this case.  So when a
    // reader tries to read it, it will throw a
    // CorruptIndexException, which should cause the retry
    // logic in SegmentInfos to kick in and load the last
    // good (previous) segments_N-1 file.

    final String fileName = IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS,
                                                                  "",
                                                                  generation);
    success = false;
    try {
      dir.sync(fileName);
      success = true;
    } finally {
      if (!success) {
        try {
          dir.deleteFile(fileName);
        } catch (Throwable t) {
          // Suppress so we keep throwing the original exception
        }
      }
    }

    lastGeneration = generation;

    try {
      IndexOutput genOutput = dir.createOutput(IndexFileNames.SEGMENTS_GEN);
      try {
        // Write the generation twice; readers require both copies to
        // match, which detects a partially-written segments.gen.
        genOutput.writeInt(FORMAT_LOCKLESS);
        genOutput.writeLong(generation);
        genOutput.writeLong(generation);
      } finally {
        genOutput.close();
      }
    } catch (Throwable t) {
      // It's OK if we fail to write this file since it's
      // used only as one of the retry fallbacks.
    }
  }
/** Writes & syncs to the Directory dir, taking care to
* remove the segments file on exception */
final void commit(Directory dir) throws IOException {
prepareCommit(dir);
finishCommit(dir);
}
public synchronized String segString(Directory directory) {
StringBuilder buffer = new StringBuilder();
final int count = size();
for(int i = 0; i < count; i++) {
if (i > 0) {
buffer.append(' ');
}
final SegmentInfo info = info(i);
buffer.append(info.segString(directory));
if (info.dir != directory)
buffer.append("**");
}
return buffer.toString();
}
public Map<String,String> getUserData() {
return userData;
}
void setUserData(Map<String,String> data) {
if (data == null) {
userData = Collections.<String,String>emptyMap();
} else {
userData = data;
}
}
/** Replaces all segments in this instance, but keeps
 * generation, version, counter so that future commits
 * remain write once.
 */
void replace(SegmentInfos other) {
  // Swap in the other instance's segments wholesale...
  clear();
  addAll(other);
  // ...and adopt its last committed generation so the next commit
  // continues the segments_N sequence from the right point.
  lastGeneration = other.lastGeneration;
}
// Used only for testing
/** Returns true if any segment in this set lives in a Directory
 *  other than {@code dir}. */
public boolean hasExternalSegments(Directory dir) {
  final int count = size();
  for (int idx = 0; idx < count; idx++) {
    if (info(idx).dir != dir) {
      return true;
    }
  }
  return false;
}
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/SegmentInfos.java | Java | art | 30,642 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
/** This is a DocFieldConsumer that inverts each field,
 * separately, from a Document, and accepts a
 * InvertedTermsConsumer to process those terms. */
final class DocInverter extends DocFieldConsumer {

  final InvertedDocConsumer consumer;
  final InvertedDocEndConsumer endConsumer;

  public DocInverter(InvertedDocConsumer consumer, InvertedDocEndConsumer endConsumer) {
    this.consumer = consumer;
    this.endConsumer = endConsumer;
  }

  @Override
  void setFieldInfos(FieldInfos fieldInfos) {
    // Forward the field metadata to the base class and to both
    // downstream consumer chains.
    super.setFieldInfos(fieldInfos);
    consumer.setFieldInfos(fieldInfos);
    endConsumer.setFieldInfos(fieldInfos);
  }

  @Override
  void flush(Map<DocFieldConsumerPerThread, Collection<DocFieldConsumerPerField>> threadsAndFields, SegmentWriteState state) throws IOException {
    // Re-key the per-thread/per-field maps onto the two wrapped
    // consumer chains, then flush each chain with its own map.
    Map<InvertedDocConsumerPerThread,Collection<InvertedDocConsumerPerField>> invThreads = new HashMap<InvertedDocConsumerPerThread,Collection<InvertedDocConsumerPerField>>();
    Map<InvertedDocEndConsumerPerThread,Collection<InvertedDocEndConsumerPerField>> endThreads = new HashMap<InvertedDocEndConsumerPerThread,Collection<InvertedDocEndConsumerPerField>>();

    for (Map.Entry<DocFieldConsumerPerThread,Collection<DocFieldConsumerPerField>> entry : threadsAndFields.entrySet()) {
      final DocInverterPerThread invPerThread = (DocInverterPerThread) entry.getKey();

      final Collection<InvertedDocConsumerPerField> invFields = new HashSet<InvertedDocConsumerPerField>();
      final Collection<InvertedDocEndConsumerPerField> endFields = new HashSet<InvertedDocEndConsumerPerField>();
      for (final DocFieldConsumerPerField docField : entry.getValue()) {
        final DocInverterPerField invPerField = (DocInverterPerField) docField;
        invFields.add(invPerField.consumer);
        endFields.add(invPerField.endConsumer);
      }

      invThreads.put(invPerThread.consumer, invFields);
      endThreads.put(invPerThread.endConsumer, endFields);
    }

    consumer.flush(invThreads, state);
    endConsumer.flush(endThreads, state);
  }

  @Override
  public void closeDocStore(SegmentWriteState state) throws IOException {
    consumer.closeDocStore(state);
    endConsumer.closeDocStore(state);
  }

  @Override
  void abort() {
    consumer.abort();
    endConsumer.abort();
  }

  @Override
  public boolean freeRAM() {
    // Only the primary consumer is asked to free RAM; the end
    // consumer is not consulted here.
    return consumer.freeRAM();
  }

  @Override
  public DocFieldConsumerPerThread addThread(DocFieldProcessorPerThread docFieldProcessorPerThread) {
    return new DocInverterPerThread(docFieldProcessorPerThread, this);
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/DocInverter.java | Java | art | 3,596 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.Arrays;
import org.apache.lucene.store.BufferedIndexInput;
import org.apache.lucene.store.IndexInput;
/**
 * This abstract class reads skip lists with multiple levels.
 *
 * See {@link MultiLevelSkipListWriter} for the information about the encoding
 * of the multi level skip lists.
 *
 * Subclasses must implement the abstract method {@link #readSkipData(int, IndexInput)}
 * which defines the actual format of the skip data.
 */
abstract class MultiLevelSkipListReader {
  // the maximum number of skip levels possible for this index
  private int maxNumberOfSkipLevels;

  // number of levels in this skip list
  private int numberOfSkipLevels;

  // Expert: defines the number of top skip levels to buffer in memory.
  // Reducing this number results in less memory usage, but possibly
  // slower performance due to more random I/Os.
  // Please notice that the space each level occupies is limited by
  // the skipInterval. The top level can not contain more than
  // skipLevel entries, the second top level can not contain more
  // than skipLevel^2 entries and so forth.
  private int numberOfLevelsToBuffer = 1;

  private int docCount;          // document frequency passed to init(); upper bound for skipping
  private boolean haveSkipped;   // set once the levels were loaded by the first skipTo call

  private IndexInput[] skipStream;    // skipStream for each level
  private long skipPointer[];         // the start pointer of each skip level
  private int skipInterval[];         // skipInterval of each level
  private int[] numSkipped;           // number of docs skipped per level

  private int[] skipDoc;              // doc id of current skip entry per level
  private int lastDoc;                // doc id of last read skip entry with docId <= target
  private long[] childPointer;        // child pointer of current skip entry per level
  private long lastChildPointer;      // childPointer of last read skip entry with docId <= target

  private boolean inputIsBuffered;    // true if the base stream is a BufferedIndexInput

  public MultiLevelSkipListReader(IndexInput skipStream, int maxSkipLevels, int skipInterval) {
    this.skipStream = new IndexInput[maxSkipLevels];
    this.skipPointer = new long[maxSkipLevels];
    this.childPointer = new long[maxSkipLevels];
    this.numSkipped = new int[maxSkipLevels];
    this.maxNumberOfSkipLevels = maxSkipLevels;
    this.skipInterval = new int[maxSkipLevels];
    // level 0 reads directly from the supplied stream; higher levels are
    // cloned/buffered lazily in loadSkipLevels()
    this.skipStream [0]= skipStream;
    this.inputIsBuffered = (skipStream instanceof BufferedIndexInput);
    this.skipInterval[0] = skipInterval;
    for (int i = 1; i < maxSkipLevels; i++) {
      // cache skip intervals: level i covers skipInterval^(i+1) docs per entry
      this.skipInterval[i] = this.skipInterval[i - 1] * skipInterval;
    }
    skipDoc = new int[maxSkipLevels];
  }

  /** Returns the id of the doc to which the last call of {@link #skipTo(int)}
   * has skipped. */
  int getDoc() {
    return lastDoc;
  }

  /** Skips entries to the first beyond the current whose document number is
   * greater than or equal to <i>target</i>. Returns the current doc count.
   */
  int skipTo(int target) throws IOException {
    if (!haveSkipped) {
      // first time, load skip levels
      loadSkipLevels();
      haveSkipped = true;
    }

    // walk up the levels until highest level is found that has a skip
    // for this target
    int level = 0;
    while (level < numberOfSkipLevels - 1 && target > skipDoc[level + 1]) {
      level++;
    }

    // then descend: advance along each level as far as possible, seeking
    // the child level to the last usable entry before dropping down
    while (level >= 0) {
      if (target > skipDoc[level]) {
        if (!loadNextSkip(level)) {
          continue;
        }
      } else {
        // no more skips on this level, go down one level
        if (level > 0 && lastChildPointer > skipStream[level - 1].getFilePointer()) {
          seekChild(level - 1);
        }
        level--;
      }
    }

    return numSkipped[0] - skipInterval[0] - 1;
  }

  /** Advances one entry on the given level; returns false (and caps the
   *  level) when the level is exhausted. */
  private boolean loadNextSkip(int level) throws IOException {
    // we have to skip, the target document is greater than the current
    // skip list entry
    setLastSkipData(level);

    numSkipped[level] += skipInterval[level];

    if (numSkipped[level] > docCount) {
      // this skip list is exhausted
      skipDoc[level] = Integer.MAX_VALUE;
      if (numberOfSkipLevels > level) numberOfSkipLevels = level;
      return false;
    }

    // read next skip entry
    skipDoc[level] += readSkipData(level, skipStream[level]);

    if (level != 0) {
      // read the child pointer if we are not on the leaf level
      childPointer[level] = skipStream[level].readVLong() + skipPointer[level - 1];
    }

    return true;
  }

  /** Seeks the skip entry on the given level */
  protected void seekChild(int level) throws IOException {
    skipStream[level].seek(lastChildPointer);
    numSkipped[level] = numSkipped[level + 1] - skipInterval[level + 1];
    skipDoc[level] = lastDoc;
    if (level > 0) {
      childPointer[level] = skipStream[level].readVLong() + skipPointer[level - 1];
    }
  }

  /** Closes the cloned/buffered streams of the upper levels.
   *  Note: skipStream[0] (the stream passed to the constructor) is
   *  intentionally not closed here. */
  void close() throws IOException {
    for (int i = 1; i < skipStream.length; i++) {
      if (skipStream[i] != null) {
        skipStream[i].close();
      }
    }
  }

  /** initializes the reader */
  void init(long skipPointer, int df) {
    this.skipPointer[0] = skipPointer;
    this.docCount = df;
    Arrays.fill(skipDoc, 0);
    Arrays.fill(numSkipped, 0);
    Arrays.fill(childPointer, 0);

    // drop the per-level streams; they are re-created on the next skipTo
    haveSkipped = false;
    for (int i = 1; i < numberOfSkipLevels; i++) {
      skipStream[i] = null;
    }
  }

  /** Loads the skip levels */
  private void loadSkipLevels() throws IOException {
    // derive the level count from df and the base skip interval,
    // capped at the configured maximum
    numberOfSkipLevels = docCount == 0 ? 0 : (int) Math.floor(Math.log(docCount) / Math.log(skipInterval[0]));
    if (numberOfSkipLevels > maxNumberOfSkipLevels) {
      numberOfSkipLevels = maxNumberOfSkipLevels;
    }

    skipStream[0].seek(skipPointer[0]);

    int toBuffer = numberOfLevelsToBuffer;

    // levels are stored top-down in the stream: highest level first
    for (int i = numberOfSkipLevels - 1; i > 0; i--) {
      // the length of the current level
      long length = skipStream[0].readVLong();

      // the start pointer of the current level
      skipPointer[i] = skipStream[0].getFilePointer();
      if (toBuffer > 0) {
        // buffer this level
        skipStream[i] = new SkipBuffer(skipStream[0], (int) length);
        toBuffer--;
      } else {
        // clone this stream, it is already at the start of the current level
        skipStream[i] = (IndexInput) skipStream[0].clone();
        if (inputIsBuffered && length < BufferedIndexInput.BUFFER_SIZE) {
          ((BufferedIndexInput) skipStream[i]).setBufferSize((int) length);
        }

        // move base stream beyond the current level
        skipStream[0].seek(skipStream[0].getFilePointer() + length);
      }
    }

    // use base stream for the lowest level
    skipPointer[0] = skipStream[0].getFilePointer();
  }

  /**
   * Subclasses must implement the actual skip data encoding in this method.
   *
   * @param level the level skip data shall be read from
   * @param skipStream the skip stream to read from
   */
  protected abstract int readSkipData(int level, IndexInput skipStream) throws IOException;

  /** Copies the values of the last read skip entry on this level */
  protected void setLastSkipData(int level) {
    lastDoc = skipDoc[level];
    lastChildPointer = childPointer[level];
  }

  /** used to buffer the top skip levels */
  private final static class SkipBuffer extends IndexInput {
    private byte[] data;     // the fully-read level bytes
    private long pointer;    // file position of data[0] in the original input
    private int pos;         // current read position within data

    SkipBuffer(IndexInput input, int length) throws IOException {
      data = new byte[length];
      pointer = input.getFilePointer();
      input.readBytes(data, 0, length);
    }

    @Override
    public void close() throws IOException {
      data = null;
    }

    @Override
    public long getFilePointer() {
      return pointer + pos;
    }

    @Override
    public long length() {
      return data.length;
    }

    @Override
    public byte readByte() throws IOException {
      return data[pos++];
    }

    @Override
    public void readBytes(byte[] b, int offset, int len) throws IOException {
      System.arraycopy(data, pos, b, offset, len);
      pos += len;
    }

    @Override
    public void seek(long pos) throws IOException {
      // translate an absolute file position back into a buffer offset
      this.pos = (int) (pos - pointer);
    }
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/MultiLevelSkipListReader.java | Java | art | 9,149 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** Per-thread view of an InvertedDocEndConsumer. Provides lifecycle
 *  hooks bracketing a document (startDocument/finishDocument), a
 *  per-field consumer factory, and abort for discarding state.
 *  Exact calling protocol is defined by the caller — see
 *  DocInverterPerThread (not visible in this file chunk). */
abstract class InvertedDocEndConsumerPerThread {
  abstract void startDocument();
  abstract InvertedDocEndConsumerPerField addField(DocInverterPerField docInverterPerField, FieldInfo fieldInfo);
  abstract void finishDocument();
  abstract void abort();
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/InvertedDocEndConsumerPerThread.java | Java | art | 1,095 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
/** Per-thread document consumer: processes one document at a time and
 *  can abort, discarding any buffered state. */
abstract class DocConsumerPerThread {

  /** Process the document. If there is
   * something for this document to be done in docID order,
   * you should encapsulate that as a
   * DocumentsWriter.DocWriter and return it.
   * DocumentsWriter then calls finish() on this object
   * when it's its turn. */
  abstract DocumentsWriter.DocWriter processDocument() throws IOException;

  /** Discards all state for the in-flight document(s); called on error. */
  abstract void abort();
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/DocConsumerPerThread.java | Java | art | 1,282 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.Collection;
import java.util.Map;
/** Consumer of the in-memory postings produced by TermsHash. Concrete
 *  subclasses define the per-posting byte layout (bytesPerPosting /
 *  createPostings) and the per-thread processing chain. */
abstract class TermsHashConsumer {
  /** Bytes each RawPostingList instance occupies, used for sizing the pool. */
  abstract int bytesPerPosting();
  /** Allocates {@code count} posting instances into postings[start..start+count). */
  abstract void createPostings(RawPostingList[] postings, int start, int count);
  abstract TermsHashConsumerPerThread addThread(TermsHashPerThread perThread);
  abstract void flush(Map<TermsHashConsumerPerThread,Collection<TermsHashConsumerPerField>> threadsAndFields, final SegmentWriteState state) throws IOException;
  abstract void abort();
  abstract void closeDocStore(SegmentWriteState state) throws IOException;

  // Shared field metadata, installed by the owning chain via setFieldInfos.
  FieldInfos fieldInfos;

  void setFieldInfos(FieldInfos fieldInfos) {
    this.fieldInfos = fieldInfos;
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/TermsHashConsumer.java | Java | art | 1,521 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.util.Collection;
import java.util.HashSet;
import java.util.List;
import java.io.IOException;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Fieldable;
import org.apache.lucene.util.ArrayUtil;
/**
 * Gathers all Fieldables for a document under the same
 * name, updates FieldInfos, and calls per-field consumers
 * to process field by field.
 *
 * Currently, only a single thread visits the fields,
 * sequentially, for processing.
 */
final class DocFieldProcessorPerThread extends DocConsumerPerThread {

  float docBoost;
  int fieldGen;                          // generation counter; bumped once per processed document
  final DocFieldProcessor docFieldProcessor;
  final FieldInfos fieldInfos;
  final DocFieldConsumerPerThread consumer;

  // Holds all fields seen in current doc
  DocFieldProcessorPerField[] fields = new DocFieldProcessorPerField[1];
  int fieldCount;

  // Hash table for all fields ever seen (chained via DocFieldProcessorPerField.next)
  DocFieldProcessorPerField[] fieldHash = new DocFieldProcessorPerField[2];
  int hashMask = 1;                      // always fieldHash.length - 1 (power-of-two table)
  int totalFieldCount;

  final StoredFieldsWriterPerThread fieldsWriter;

  final DocumentsWriter.DocState docState;

  public DocFieldProcessorPerThread(DocumentsWriterThreadState threadState, DocFieldProcessor docFieldProcessor) throws IOException {
    this.docState = threadState.docState;
    this.docFieldProcessor = docFieldProcessor;
    this.fieldInfos = docFieldProcessor.fieldInfos;
    this.consumer = docFieldProcessor.consumer.addThread(this);
    fieldsWriter = docFieldProcessor.fieldsWriter.addThread(docState);
  }

  @Override
  public void abort() {
    // Abort every per-field consumer in every hash chain, then the
    // stored-fields writer and the downstream consumer.
    for(int i=0;i<fieldHash.length;i++) {
      DocFieldProcessorPerField field = fieldHash[i];
      while(field != null) {
        final DocFieldProcessorPerField next = field.next;
        field.abort();
        field = next;
      }
    }
    fieldsWriter.abort();
    consumer.abort();
  }

  /** Returns the set of all per-field consumers currently in the hash table. */
  public Collection<DocFieldConsumerPerField> fields() {
    Collection<DocFieldConsumerPerField> fields = new HashSet<DocFieldConsumerPerField>();
    for(int i=0;i<fieldHash.length;i++) {
      DocFieldProcessorPerField field = fieldHash[i];
      while(field != null) {
        fields.add(field.consumer);
        field = field.next;
      }
    }
    assert fields.size() == totalFieldCount;
    return fields;
  }

  /** If there are fields we've seen but did not see again
   * in the last run, then free them up. */
  void trimFields(SegmentWriteState state) {

    for(int i=0;i<fieldHash.length;i++) {
      DocFieldProcessorPerField perField = fieldHash[i];
      DocFieldProcessorPerField lastPerField = null;

      while (perField != null) {

        if (perField.lastGen == -1) {

          // This field was not seen since the previous
          // flush, so, free up its resources now

          // Unhash: unlink the entry from its chain
          if (lastPerField == null)
            fieldHash[i] = perField.next;
          else
            lastPerField.next = perField.next;

          if (state.docWriter.infoStream != null)
            state.docWriter.infoStream.println("  purge field=" + perField.fieldInfo.name);

          totalFieldCount--;

        } else {
          // Reset so the next flush-cycle can detect unused fields
          perField.lastGen = -1;
          lastPerField = perField;
        }

        perField = perField.next;
      }
    }
  }

  /** Doubles the hash table size and re-chains every entry. */
  private void rehash() {
    final int newHashSize = (fieldHash.length*2);
    assert newHashSize > fieldHash.length;

    final DocFieldProcessorPerField newHashArray[] = new DocFieldProcessorPerField[newHashSize];

    // Rehash
    int newHashMask = newHashSize-1;
    for(int j=0;j<fieldHash.length;j++) {
      DocFieldProcessorPerField fp0 = fieldHash[j];
      while(fp0 != null) {
        final int hashPos2 = fp0.fieldInfo.name.hashCode() & newHashMask;
        DocFieldProcessorPerField nextFP0 = fp0.next;
        // push-front into the new chain
        fp0.next = newHashArray[hashPos2];
        newHashArray[hashPos2] = fp0;
        fp0 = nextFP0;
      }
    }

    fieldHash = newHashArray;
    hashMask = newHashMask;
  }

  @Override
  public DocumentsWriter.DocWriter processDocument() throws IOException {

    consumer.startDocument();
    fieldsWriter.startDocument();

    final Document doc = docState.doc;

    assert docFieldProcessor.docWriter.writer.testPoint("DocumentsWriter.ThreadState.init start");

    fieldCount = 0;

    final int thisFieldGen = fieldGen++;

    final List<Fieldable> docFields = doc.getFields();
    final int numDocFields = docFields.size();

    // Absorb any new fields first seen in this document.
    // Also absorb any changes to fields we had already
    // seen before (eg suddenly turning on norms or
    // vectors, etc.):

    for(int i=0;i<numDocFields;i++) {
      Fieldable field = docFields.get(i);
      final String fieldName = field.name();

      // Make sure we have a PerField allocated
      final int hashPos = fieldName.hashCode() & hashMask;
      DocFieldProcessorPerField fp = fieldHash[hashPos];
      while(fp != null && !fp.fieldInfo.name.equals(fieldName))
        fp = fp.next;

      if (fp == null) {

        // TODO FI: we need to genericize the "flags" that a
        // field holds, and, how these flags are merged; it
        // needs to be more "pluggable" such that if I want
        // to have a new "thing" my Fields can do, I can
        // easily add it
        FieldInfo fi = fieldInfos.add(fieldName, field.isIndexed(), field.isTermVectorStored(),
                                      field.isStorePositionWithTermVector(), field.isStoreOffsetWithTermVector(),
                                      field.getOmitNorms(), false, field.getOmitTermFreqAndPositions());

        fp = new DocFieldProcessorPerField(this, fi);
        fp.next = fieldHash[hashPos];
        fieldHash[hashPos] = fp;
        totalFieldCount++;

        // grow at 50% load factor
        if (totalFieldCount >= fieldHash.length/2)
          rehash();
      } else
        // known field: merge any changed flags into the existing FieldInfo
        fp.fieldInfo.update(field.isIndexed(), field.isTermVectorStored(),
                            field.isStorePositionWithTermVector(), field.isStoreOffsetWithTermVector(),
                            field.getOmitNorms(), false, field.getOmitTermFreqAndPositions());

      if (thisFieldGen != fp.lastGen) {

        // First time we're seeing this field for this doc
        fp.fieldCount = 0;

        if (fieldCount == fields.length) {
          final int newSize = fields.length*2;
          DocFieldProcessorPerField newArray[] = new DocFieldProcessorPerField[newSize];
          System.arraycopy(fields, 0, newArray, 0, fieldCount);
          fields = newArray;
        }

        fields[fieldCount++] = fp;
        fp.lastGen = thisFieldGen;
      }

      if (fp.fieldCount == fp.fields.length) {
        Fieldable[] newArray = new Fieldable[fp.fields.length*2];
        System.arraycopy(fp.fields, 0, newArray, 0, fp.fieldCount);
        fp.fields = newArray;
      }

      fp.fields[fp.fieldCount++] = field;
      if (field.isStored()) {
        fieldsWriter.addField(field, fp.fieldInfo);
      }
    }

    // If we are writing vectors then we must visit
    // fields in sorted order so they are written in
    // sorted order.  TODO: we actually only need to
    // sort the subset of fields that have vectors
    // enabled; we could save [small amount of] CPU
    // here.
    quickSort(fields, 0, fieldCount-1);

    for(int i=0;i<fieldCount;i++)
      fields[i].consumer.processFields(fields[i].fields, fields[i].fieldCount);

    if (docState.maxTermPrefix != null && docState.infoStream != null) {
      docState.infoStream.println("WARNING: document contains at least one immense term (longer than the max length " + DocumentsWriter.MAX_TERM_LENGTH + "), all of which were skipped.  Please correct the analyzer to not produce such terms.  The prefix of the first immense term is: '" + docState.maxTermPrefix + "...'");
      docState.maxTermPrefix = null;
    }

    // Combine the stored-fields and inverted-fields DocWriters into a
    // single PerDoc when both produced output.
    final DocumentsWriter.DocWriter one = fieldsWriter.finishDocument();
    final DocumentsWriter.DocWriter two = consumer.finishDocument();
    if (one == null) {
      return two;
    } else if (two == null) {
      return one;
    } else {
      PerDoc both = getPerDoc();
      both.docID = docState.docID;
      assert one.docID == docState.docID;
      assert two.docID == docState.docID;
      both.one = one;
      both.two = two;
      return both;
    }
  }

  /** In-place quicksort of array[lo..hi] by field name (median-of-three pivot). */
  void quickSort(DocFieldProcessorPerField[] array, int lo, int hi) {
    if (lo >= hi)
      return;
    else if (hi == 1+lo) {
      // two elements: swap if out of order
      if (array[lo].fieldInfo.name.compareTo(array[hi].fieldInfo.name) > 0) {
        final DocFieldProcessorPerField tmp = array[lo];
        array[lo] = array[hi];
        array[hi] = tmp;
      }
      return;
    }

    // median-of-three: order array[lo], array[mid], array[hi]
    int mid = (lo + hi) >>> 1;

    if (array[lo].fieldInfo.name.compareTo(array[mid].fieldInfo.name) > 0) {
      DocFieldProcessorPerField tmp = array[lo];
      array[lo] = array[mid];
      array[mid] = tmp;
    }

    if (array[mid].fieldInfo.name.compareTo(array[hi].fieldInfo.name) > 0) {
      DocFieldProcessorPerField tmp = array[mid];
      array[mid] = array[hi];
      array[hi] = tmp;

      if (array[lo].fieldInfo.name.compareTo(array[mid].fieldInfo.name) > 0) {
        DocFieldProcessorPerField tmp2 = array[lo];
        array[lo] = array[mid];
        array[mid] = tmp2;
      }
    }

    int left = lo + 1;
    int right = hi - 1;

    if (left >= right)
      return;

    DocFieldProcessorPerField partition = array[mid];

    for (; ;) {
      while (array[right].fieldInfo.name.compareTo(partition.fieldInfo.name) > 0)
        --right;

      while (left < right && array[left].fieldInfo.name.compareTo(partition.fieldInfo.name) <= 0)
        ++left;

      if (left < right) {
        DocFieldProcessorPerField tmp = array[left];
        array[left] = array[right];
        array[right] = tmp;
        --right;
      } else {
        break;
      }
    }

    quickSort(array, lo, left);
    quickSort(array, left + 1, hi);
  }

  // Free-list of recycled PerDoc instances (see getPerDoc/freePerDoc).
  PerDoc[] docFreeList = new PerDoc[1];
  int freeCount;
  int allocCount;

  synchronized PerDoc getPerDoc() {
    if (freeCount == 0) {
      allocCount++;
      if (allocCount > docFreeList.length) {
        // Grow our free list up front to make sure we have
        // enough space to recycle all outstanding PerDoc
        // instances
        assert allocCount == 1+docFreeList.length;
        docFreeList = new PerDoc[ArrayUtil.getNextSize(allocCount)];
      }
      return new PerDoc();
    } else
      return docFreeList[--freeCount];
  }

  synchronized void freePerDoc(PerDoc perDoc) {
    assert freeCount < docFreeList.length;
    docFreeList[freeCount++] = perDoc;
  }

  /** Pairs the stored-fields DocWriter with the inverted-fields DocWriter
   *  for one document; delegates finish/abort to both and recycles itself. */
  class PerDoc extends DocumentsWriter.DocWriter {

    DocumentsWriter.DocWriter one;
    DocumentsWriter.DocWriter two;

    @Override
    public long sizeInBytes() {
      return one.sizeInBytes() + two.sizeInBytes();
    }

    @Override
    public void finish() throws IOException {
      try {
        try {
          one.finish();
        } finally {
          // always finish the second writer, even if the first threw
          two.finish();
        }
      } finally {
        freePerDoc(this);
      }
    }

    @Override
    public void abort() {
      try {
        try {
          one.abort();
        } finally {
          two.abort();
        }
      } finally {
        freePerDoc(this);
      }
    }
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/DocFieldProcessorPerThread.java | Java | art | 12,070 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.document.Document;
import org.apache.lucene.document.FieldSelector;
import org.apache.lucene.document.FieldSelectorResult;
import org.apache.lucene.document.Fieldable;
import org.apache.lucene.search.FieldCache; // not great (circular); used only to purge FieldCache entry on close
import java.io.IOException;
import java.util.*;
/** An IndexReader which reads multiple, parallel indexes. Each index added
* must have the same number of documents, but typically each contains
* different fields. Each document contains the union of the fields of all
* documents with the same document number. When searching, matches for a
* query term are from the first index added that has the field.
*
* <p>This is useful, e.g., with collections that have large fields which
* change rarely and small fields that change more frequently. The smaller
* fields may be re-indexed in a new index and both indexes may be searched
* together.
*
* <p><strong>Warning:</strong> It is up to you to make sure all indexes
* are created and modified the same way. For example, if you add
* documents to one index, you need to add the same documents in the
* same order to the other indexes. <em>Failure to do so will result in
* undefined behavior</em>.
*/
public class ParallelReader extends IndexReader {
private List<IndexReader> readers = new ArrayList<IndexReader>();             // subreaders, in add() order
private List<Boolean> decrefOnClose = new ArrayList<Boolean>(); // remember which subreaders to decRef on close
boolean incRefReaders = false;                                                // true when subreaders are shared (closeSubReaders == false)
private SortedMap<String,IndexReader> fieldToReader = new TreeMap<String,IndexReader>();  // each field -> first reader that declared it
private Map<IndexReader,Collection<String>> readerToFields = new HashMap<IndexReader,Collection<String>>();  // cached field names per reader
private List<IndexReader> storedFieldReaders = new ArrayList<IndexReader>();  // readers whose stored fields are returned
// The three stats below are captured from the first reader added; see add().
private int maxDoc;
private int numDocs;
private boolean hasDeletions;
/** Construct a ParallelReader.
 * <p>Note that all subreaders are closed if this ParallelReader is closed.</p>
 */
public ParallelReader() throws IOException { this(true); }
/** Construct a ParallelReader.
 * @param closeSubReaders indicates whether the subreaders should be closed
 * when this ParallelReader is closed
 */
public ParallelReader(boolean closeSubReaders) throws IOException {
  super();
  // When subreaders are NOT closed by us, we instead incRef each one on
  // add() and decRef on close (tracked per reader in decrefOnClose).
  this.incRefReaders = !closeSubReaders;
}
/** Add an IndexReader.
 * @throws IOException if there is a low-level IO error
 */
public void add(IndexReader reader) throws IOException {
  ensureOpen();
  // Convenience overload: stored fields of this reader ARE returned.
  add(reader, false);
}
/** Add an IndexReader whose stored fields will not be returned. This can
 * accelerate search when stored fields are only needed from a subset of
 * the IndexReaders.
 *
 * @throws IllegalArgumentException if not all indexes contain the same number
 *     of documents
 * @throws IllegalArgumentException if not all indexes have the same value
 *     of {@link IndexReader#maxDoc()}
 * @throws IOException if there is a low-level IO error
 */
public void add(IndexReader reader, boolean ignoreStoredFields)
  throws IOException {
  ensureOpen();
  if (readers.size() == 0) {
    // First reader defines the shape of the parallel index.
    // NOTE(review): hasDeletions is captured from the first reader only;
    // later readers are validated for maxDoc/numDocs but their own
    // hasDeletions() is never consulted — confirm this is intended.
    this.maxDoc = reader.maxDoc();
    this.numDocs = reader.numDocs();
    this.hasDeletions = reader.hasDeletions();
  }

  if (reader.maxDoc() != maxDoc)                // check compatibility
    throw new IllegalArgumentException
      ("All readers must have same maxDoc: "+maxDoc+"!="+reader.maxDoc());
  if (reader.numDocs() != numDocs)
    throw new IllegalArgumentException
      ("All readers must have same numDocs: "+numDocs+"!="+reader.numDocs());

  Collection<String> fields = reader.getFieldNames(IndexReader.FieldOption.ALL);
  readerToFields.put(reader, fields);
  for (final String field : fields) {           // update fieldToReader map
    // first reader to declare a field wins; later readers never override
    if (fieldToReader.get(field) == null)
      fieldToReader.put(field, reader);
  }

  if (!ignoreStoredFields)
    storedFieldReaders.add(reader);             // add to storedFieldReaders
  readers.add(reader);

  if (incRefReaders) {
    reader.incRef();
  }
  decrefOnClose.add(Boolean.valueOf(incRefReaders));
}
@Override
public synchronized Object clone() {
  try {
    // Cloning is implemented as a forced reopen (doClone == true).
    return doReopen(true);
  } catch (Exception ex) {
    // clone() cannot declare checked exceptions; wrap, keeping the cause.
    throw new RuntimeException(ex);
  }
}
/**
 * Tries to reopen the subreaders.
 * <br>
 * If one or more subreaders could be re-opened (i. e. subReader.reopen()
 * returned a new instance != subReader), then a new ParallelReader instance
 * is returned, otherwise this instance is returned.
 * <p>
 * A re-opened instance might share one or more subreaders with the old
 * instance. Index modification operations result in undefined behavior
 * when performed before the old instance is closed.
 * (see {@link IndexReader#reopen()}).
 * <p>
 * If subreaders are shared, then the reference count of those
 * readers is increased to ensure that the subreaders remain open
 * until the last referring reader is closed.
 *
 * @throws CorruptIndexException if the index is corrupt
 * @throws IOException if there is a low-level IO error
 */
@Override
public synchronized IndexReader reopen() throws CorruptIndexException, IOException {
  return doReopen(false);
}
  /**
   * Reopens (or clones, when {@code doClone} is true) every subreader and,
   * if any of them changed, returns a new ParallelReader wrapping the new
   * set of subreaders; otherwise returns this same instance.
   */
  protected IndexReader doReopen(boolean doClone) throws CorruptIndexException, IOException {
    ensureOpen();
    boolean reopened = false;
    List<IndexReader> newReaders = new ArrayList<IndexReader>();
    boolean success = false;
    try {
      for (final IndexReader oldReader : readers) {
        IndexReader newReader = null;
        if (doClone) {
          newReader = (IndexReader) oldReader.clone();
        } else {
          newReader = oldReader.reopen();
        }
        newReaders.add(newReader);
        // if at least one of the subreaders was updated we remember that
        // and return a new ParallelReader
        if (newReader != oldReader) {
          reopened = true;
        }
      }
      success = true;
    } finally {
      // On failure, close only the instances we newly obtained; subreaders
      // that were returned unchanged are still owned by this reader.
      if (!success && reopened) {
        for (int i = 0; i < newReaders.size(); i++) {
          IndexReader r = newReaders.get(i);
          if (r != readers.get(i)) {
            try {
              r.close();
            } catch (IOException ignore) {
              // keep going - we want to clean up as much as possible
            }
          }
        }
      }
    }
    if (reopened) {
      List<Boolean> newDecrefOnClose = new ArrayList<Boolean>();
      ParallelReader pr = new ParallelReader();
      for (int i = 0; i < readers.size(); i++) {
        IndexReader oldReader = readers.get(i);
        IndexReader newReader = newReaders.get(i);
        if (newReader == oldReader) {
          // Subreader is shared with the old ParallelReader: bump its
          // refcount and remember to decRef (not close) it on close().
          newDecrefOnClose.add(Boolean.TRUE);
          newReader.incRef();
        } else {
          // this is a new subreader instance, so on close() we don't
          // decRef but close it
          newDecrefOnClose.add(Boolean.FALSE);
        }
        pr.add(newReader, !storedFieldReaders.contains(oldReader));
      }
      pr.decrefOnClose = newDecrefOnClose;
      pr.incRefReaders = incRefReaders;
      return pr;
    } else {
      // No subreader was refreshed
      return this;
    }
  }
@Override
public int numDocs() {
// Don't call ensureOpen() here (it could affect performance)
return numDocs;
}
@Override
public int maxDoc() {
// Don't call ensureOpen() here (it could affect performance)
return maxDoc;
}
@Override
public boolean hasDeletions() {
// Don't call ensureOpen() here (it could affect performance)
return hasDeletions;
}
// check first reader
@Override
public boolean isDeleted(int n) {
// Don't call ensureOpen() here (it could affect performance)
if (readers.size() > 0)
return readers.get(0).isDeleted(n);
return false;
}
// delete in all readers
@Override
protected void doDelete(int n) throws CorruptIndexException, IOException {
for (final IndexReader reader : readers) {
reader.deleteDocument(n);
}
hasDeletions = true;
}
// undeleteAll in all readers
@Override
protected void doUndeleteAll() throws CorruptIndexException, IOException {
for (final IndexReader reader : readers) {
reader.undeleteAll();
}
hasDeletions = false;
}
  // append fields from storedFieldReaders
  @Override
  public Document document(int n, FieldSelector fieldSelector) throws CorruptIndexException, IOException {
    ensureOpen();
    Document result = new Document();
    for (final IndexReader reader: storedFieldReaders) {
      // With no selector every stored-field reader contributes; otherwise
      // only visit a reader when the selector would load at least one of
      // its fields.
      boolean include = (fieldSelector==null);
      if (!include) {
        Collection<String> fields = readerToFields.get(reader);
        for (final String field : fields)
          if (fieldSelector.accept(field) != FieldSelectorResult.NO_LOAD) {
            include = true;
            break;
          }
      }
      if (include) {
        // Merge this subreader's stored fields into the combined document.
        List<Fieldable> fields = reader.document(n, fieldSelector).getFields();
        for (Fieldable field : fields) {
          result.add(field);
        }
      }
    }
    return result;
  }
// get all vectors
@Override
public TermFreqVector[] getTermFreqVectors(int n) throws IOException {
ensureOpen();
ArrayList<TermFreqVector> results = new ArrayList<TermFreqVector>();
for (final Map.Entry<String,IndexReader> e: fieldToReader.entrySet()) {
String field = e.getKey();
IndexReader reader = e.getValue();
TermFreqVector vector = reader.getTermFreqVector(n, field);
if (vector != null)
results.add(vector);
}
return results.toArray(new TermFreqVector[results.size()]);
}
@Override
public TermFreqVector getTermFreqVector(int n, String field)
throws IOException {
ensureOpen();
IndexReader reader = fieldToReader.get(field);
return reader==null ? null : reader.getTermFreqVector(n, field);
}
@Override
public void getTermFreqVector(int docNumber, String field, TermVectorMapper mapper) throws IOException {
ensureOpen();
IndexReader reader = fieldToReader.get(field);
if (reader != null) {
reader.getTermFreqVector(docNumber, field, mapper);
}
}
@Override
public void getTermFreqVector(int docNumber, TermVectorMapper mapper) throws IOException {
ensureOpen();
for (final Map.Entry<String,IndexReader> e : fieldToReader.entrySet()) {
String field = e.getKey();
IndexReader reader = e.getValue();
reader.getTermFreqVector(docNumber, field, mapper);
}
}
@Override
public boolean hasNorms(String field) throws IOException {
ensureOpen();
IndexReader reader = fieldToReader.get(field);
return reader==null ? false : reader.hasNorms(field);
}
@Override
public byte[] norms(String field) throws IOException {
ensureOpen();
IndexReader reader = fieldToReader.get(field);
return reader==null ? null : reader.norms(field);
}
@Override
public void norms(String field, byte[] result, int offset)
throws IOException {
ensureOpen();
IndexReader reader = fieldToReader.get(field);
if (reader!=null)
reader.norms(field, result, offset);
}
@Override
protected void doSetNorm(int n, String field, byte value)
throws CorruptIndexException, IOException {
IndexReader reader = fieldToReader.get(field);
if (reader!=null)
reader.doSetNorm(n, field, value);
}
@Override
public TermEnum terms() throws IOException {
ensureOpen();
return new ParallelTermEnum();
}
@Override
public TermEnum terms(Term term) throws IOException {
ensureOpen();
return new ParallelTermEnum(term);
}
@Override
public int docFreq(Term term) throws IOException {
ensureOpen();
IndexReader reader = fieldToReader.get(term.field());
return reader==null ? 0 : reader.docFreq(term);
}
@Override
public TermDocs termDocs(Term term) throws IOException {
ensureOpen();
return new ParallelTermDocs(term);
}
@Override
public TermDocs termDocs() throws IOException {
ensureOpen();
return new ParallelTermDocs();
}
@Override
public TermPositions termPositions(Term term) throws IOException {
ensureOpen();
return new ParallelTermPositions(term);
}
@Override
public TermPositions termPositions() throws IOException {
ensureOpen();
return new ParallelTermPositions();
}
/**
* Checks recursively if all subreaders are up to date.
*/
@Override
public boolean isCurrent() throws CorruptIndexException, IOException {
for (final IndexReader reader : readers) {
if (!reader.isCurrent()) {
return false;
}
}
// all subreaders are up to date
return true;
}
/**
* Checks recursively if all subindexes are optimized
*/
@Override
public boolean isOptimized() {
for (final IndexReader reader : readers) {
if (!reader.isOptimized()) {
return false;
}
}
// all subindexes are optimized
return true;
}
/** Not implemented.
* @throws UnsupportedOperationException
*/
@Override
public long getVersion() {
throw new UnsupportedOperationException("ParallelReader does not support this method.");
}
// for testing
IndexReader[] getSubReaders() {
return readers.toArray(new IndexReader[readers.size()]);
}
@Override
protected void doCommit(Map<String,String> commitUserData) throws IOException {
for (final IndexReader reader : readers)
reader.commit(commitUserData);
}
@Override
protected synchronized void doClose() throws IOException {
for (int i = 0; i < readers.size(); i++) {
if (decrefOnClose.get(i).booleanValue()) {
readers.get(i).decRef();
} else {
readers.get(i).close();
}
}
FieldCache.DEFAULT.purge(this);
}
@Override
public Collection<String> getFieldNames (IndexReader.FieldOption fieldNames) {
ensureOpen();
Set<String> fieldSet = new HashSet<String>();
for (final IndexReader reader : readers) {
Collection<String> names = reader.getFieldNames(fieldNames);
fieldSet.addAll(names);
}
return fieldSet;
}
  /**
   * Enumerates all terms field by field, delegating each field's terms to
   * the subreader that owns the field (fields are walked in the sorted
   * order of the fieldToReader map).
   * <p>
   * NOTE(review): field names are compared with {@code ==}, not equals();
   * this relies on the field strings being interned — confirm before
   * changing the comparison.
   */
  private class ParallelTermEnum extends TermEnum {
    private String field;                     // field currently being enumerated
    private Iterator<String> fieldIterator;   // remaining fields, lazily created
    private TermEnum termEnum;                // enum over the current field's owner
    public ParallelTermEnum() throws IOException {
      try {
        field = fieldToReader.firstKey();
      } catch(NoSuchElementException e) {
        // No fields, so keep field == null, termEnum == null
        return;
      }
      if (field != null)
        termEnum = fieldToReader.get(field).terms();
    }
    public ParallelTermEnum(Term term) throws IOException {
      // Start at the given term; termEnum stays null for unknown fields,
      // which makes next()/term()/docFreq() report exhaustion.
      field = term.field();
      IndexReader reader = fieldToReader.get(field);
      if (reader!=null)
        termEnum = reader.terms(term);
    }
    @Override
    public boolean next() throws IOException {
      if (termEnum==null)
        return false;
      // another term in this field?
      if (termEnum.next() && termEnum.term().field()==field)
        return true; // yes, keep going
      termEnum.close(); // close old termEnum
      // find the next field with terms, if any
      if (fieldIterator==null) {
        fieldIterator = fieldToReader.tailMap(field).keySet().iterator();
        fieldIterator.next(); // Skip field to get next one
      }
      while (fieldIterator.hasNext()) {
        field = fieldIterator.next();
        termEnum = fieldToReader.get(field).terms(new Term(field));
        Term term = termEnum.term();
        // A seeked enum may land on a different field when this field has
        // no terms; in that case skip to the next field.
        if (term!=null && term.field()==field)
          return true;
        else
          termEnum.close();
      }
      return false; // no more fields
    }
    @Override
    public Term term() {
      if (termEnum==null)
        return null;
      return termEnum.term();
    }
    @Override
    public int docFreq() {
      if (termEnum==null)
        return 0;
      return termEnum.docFreq();
    }
    @Override
    public void close() throws IOException {
      if (termEnum!=null)
        termEnum.close();
    }
  }
  // wrap a TermDocs in order to support seek(Term)
  private class ParallelTermDocs implements TermDocs {
    // Delegate over the subreader that owns the current term's field;
    // null when unpositioned or when the sought field is unknown (in
    // which case next()/read()/skipTo() report exhaustion).
    protected TermDocs termDocs;
    public ParallelTermDocs() {}
    public ParallelTermDocs(Term term) throws IOException {
      if (term == null)
        termDocs = readers.isEmpty() ? null : readers.get(0).termDocs(null);
      else
        seek(term);
    }
    // NOTE: doc()/freq() assume a successful seek; they NPE if termDocs is null.
    public int doc() { return termDocs.doc(); }
    public int freq() { return termDocs.freq(); }
    public void seek(Term term) throws IOException {
      IndexReader reader = fieldToReader.get(term.field());
      termDocs = reader!=null ? reader.termDocs(term) : null;
    }
    public void seek(TermEnum termEnum) throws IOException {
      seek(termEnum.term());
    }
    public boolean next() throws IOException {
      if (termDocs==null)
        return false;
      return termDocs.next();
    }
    public int read(final int[] docs, final int[] freqs) throws IOException {
      if (termDocs==null)
        return 0;
      return termDocs.read(docs, freqs);
    }
    public boolean skipTo(int target) throws IOException {
      if (termDocs==null)
        return false;
      return termDocs.skipTo(target);
    }
    public void close() throws IOException {
      if (termDocs!=null)
        termDocs.close();
    }
  }
  /**
   * TermDocs wrapper that also exposes positions/payloads by delegating
   * to the owning subreader's TermPositions.
   */
  private class ParallelTermPositions
    extends ParallelTermDocs implements TermPositions {
    public ParallelTermPositions() {}
    public ParallelTermPositions(Term term) throws IOException { seek(term); }
    @Override
    public void seek(Term term) throws IOException {
      // Unlike the base class, obtain a TermPositions so the casts below hold.
      IndexReader reader = fieldToReader.get(term.field());
      termDocs = reader!=null ? reader.termPositions(term) : null;
    }
    public int nextPosition() throws IOException {
      // It is an error to call this if there is no next position, e.g. if termDocs==null
      return ((TermPositions)termDocs).nextPosition();
    }
    public int getPayloadLength() {
      return ((TermPositions)termDocs).getPayloadLength();
    }
    public byte[] getPayload(byte[] data, int offset) throws IOException {
      return ((TermPositions)termDocs).getPayload(data, offset);
    }
    // TODO: Remove warning after API has been finalized
    public boolean isPayloadAvailable() {
      return ((TermPositions) termDocs).isPayloadAvailable();
    }
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/ParallelReader.java | Java | art | 19,527 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import org.apache.lucene.util.AttributeSource;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
/** This is a DocFieldConsumer that inverts each field,
* separately, from a Document, and accepts a
* InvertedTermsConsumer to process those terms. */
final class DocInverterPerThread extends DocFieldConsumerPerThread {
  final DocInverter docInverter;
  final InvertedDocConsumerPerThread consumer;
  final InvertedDocEndConsumerPerThread endConsumer;
  // Reused to feed a single pre-tokenized value through the attribute API.
  final SingleTokenAttributeSource singleToken = new SingleTokenAttributeSource();
  /**
   * Minimal AttributeSource holding exactly one term + offset pair,
   * reinitialized per value via {@link #reinit}.
   */
  static class SingleTokenAttributeSource extends AttributeSource {
    final TermAttribute termAttribute;
    final OffsetAttribute offsetAttribute;
    private SingleTokenAttributeSource() {
      termAttribute = addAttribute(TermAttribute.class);
      offsetAttribute = addAttribute(OffsetAttribute.class);
    }
    /** Points this source at a new term text and offset range. */
    public void reinit(String stringValue, int startOffset, int endOffset) {
      termAttribute.setTermBuffer(stringValue);
      offsetAttribute.setOffset(startOffset, endOffset);
    }
  }
  final DocumentsWriter.DocState docState;
  final FieldInvertState fieldState = new FieldInvertState();
  // Used to read a string value for a field
  final ReusableStringReader stringReader = new ReusableStringReader();
  public DocInverterPerThread(DocFieldProcessorPerThread docFieldProcessorPerThread, DocInverter docInverter) {
    this.docInverter = docInverter;
    docState = docFieldProcessorPerThread.docState;
    consumer = docInverter.consumer.addThread(this);
    endConsumer = docInverter.endConsumer.addThread(this);
  }
  @Override
  public void startDocument() throws IOException {
    consumer.startDocument();
    endConsumer.startDocument();
  }
  @Override
  public DocumentsWriter.DocWriter finishDocument() throws IOException {
    // TODO: allow endConsumer.finishDocument to also return
    // a DocWriter
    endConsumer.finishDocument();
    return consumer.finishDocument();
  }
  @Override
  void abort() {
    // try/finally ensures the end consumer is aborted even if the
    // primary consumer's abort throws.
    try {
      consumer.abort();
    } finally {
      endConsumer.abort();
    }
  }
  @Override
  public DocFieldConsumerPerField addField(FieldInfo fi) {
    return new DocInverterPerField(this, fi);
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/DocInverterPerThread.java | Java | art | 3,170 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.FieldSelector;
import org.apache.lucene.search.DefaultSimilarity;
import org.apache.lucene.store.BufferedIndexInput;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.util.BitVector;
import org.apache.lucene.util.CloseableThreadLocal;
import org.apache.lucene.search.FieldCache; // not great (circular); used only to purge FieldCache entry on close
/** @version $Id */
/**
* <p><b>NOTE:</b> This API is new and still experimental
* (subject to change suddenly in the next release)</p>
*/
public class SegmentReader extends IndexReader implements Cloneable {
protected boolean readOnly;
private SegmentInfo si;
private int readBufferSize;
CloseableThreadLocal<FieldsReader> fieldsReaderLocal = new FieldsReaderLocal();
CloseableThreadLocal<TermVectorsReader> termVectorsLocal = new CloseableThreadLocal<TermVectorsReader>();
BitVector deletedDocs = null;
Ref deletedDocsRef = null;
private boolean deletedDocsDirty = false;
private boolean normsDirty = false;
private int pendingDeleteCount;
private boolean rollbackHasChanges = false;
private boolean rollbackDeletedDocsDirty = false;
private boolean rollbackNormsDirty = false;
private int rollbackPendingDeleteCount;
// optionally used for the .nrm file shared by multiple norms
private IndexInput singleNormStream;
private Ref singleNormRef;
CoreReaders core;
// Holds core readers that are shared (unchanged) when
// SegmentReader is cloned or reopened
  static final class CoreReaders {
    // Counts how many other reader share the core objects
    // (freqStream, proxStream, tis, etc.) of this reader;
    // when coreRef drops to 0, these core objects may be
    // closed. A given instance of SegmentReader may be
    // closed, even those it shares core objects with other
    // SegmentReaders:
    private final Ref ref = new Ref();
    final String segment;
    final FieldInfos fieldInfos;
    final IndexInput freqStream;
    final IndexInput proxStream;
    // Set only when termsIndexDivisor == -1 (terms index disabled);
    // otherwise the index-backed reader lives in 'tis'.
    final TermInfosReader tisNoIndex;
    final Directory dir;
    final Directory cfsDir;
    final int readBufferSize;
    final int termsIndexDivisor;
    private final SegmentReader origInstance;
    TermInfosReader tis;
    FieldsReader fieldsReaderOrig;
    TermVectorsReader termVectorsReaderOrig;
    CompoundFileReader cfsReader;
    CompoundFileReader storeCFSReader;
    /**
     * Opens the per-segment core files (field infos, terms dict, freq/prox
     * streams), reading through a compound-file reader when the segment
     * uses CFS.  On any failure everything opened so far is released via
     * decRef().
     */
    CoreReaders(SegmentReader origInstance, Directory dir, SegmentInfo si, int readBufferSize, int termsIndexDivisor) throws IOException {
      segment = si.name;
      this.readBufferSize = readBufferSize;
      this.dir = dir;
      boolean success = false;
      try {
        Directory dir0 = dir;
        if (si.getUseCompoundFile()) {
          cfsReader = new CompoundFileReader(dir, segment + "." + IndexFileNames.COMPOUND_FILE_EXTENSION, readBufferSize);
          dir0 = cfsReader;
        }
        cfsDir = dir0;
        fieldInfos = new FieldInfos(cfsDir, segment + "." + IndexFileNames.FIELD_INFOS_EXTENSION);
        this.termsIndexDivisor = termsIndexDivisor;
        TermInfosReader reader = new TermInfosReader(cfsDir, segment, fieldInfos, readBufferSize, termsIndexDivisor);
        if (termsIndexDivisor == -1) {
          tisNoIndex = reader;
        } else {
          tis = reader;
          tisNoIndex = null;
        }
        // make sure that all index files have been read or are kept open
        // so that if an index update removes them we'll still have them
        freqStream = cfsDir.openInput(segment + "." + IndexFileNames.FREQ_EXTENSION, readBufferSize);
        if (fieldInfos.hasProx()) {
          proxStream = cfsDir.openInput(segment + "." + IndexFileNames.PROX_EXTENSION, readBufferSize);
        } else {
          proxStream = null;
        }
        success = true;
      } finally {
        if (!success) {
          decRef();
        }
      }
      // Must assign this at the end -- if we hit an
      // exception above core, we don't want to attempt to
      // purge the FieldCache (will hit NPE because core is
      // not assigned yet).
      this.origInstance = origInstance;
    }
    synchronized TermVectorsReader getTermVectorsReaderOrig() {
      return termVectorsReaderOrig;
    }
    synchronized FieldsReader getFieldsReaderOrig() {
      return fieldsReaderOrig;
    }
    synchronized void incRef() {
      ref.incRef();
    }
    synchronized Directory getCFSReader() {
      return cfsReader;
    }
    // Returns whichever terms reader exists: the indexed one if loaded,
    // else the no-index fallback.
    synchronized TermInfosReader getTermsReader() {
      if (tis != null) {
        return tis;
      } else {
        return tisNoIndex;
      }
    }
    synchronized boolean termsIndexIsLoaded() {
      return tis != null;
    }
    // NOTE: only called from IndexWriter when a near
    // real-time reader is opened, or applyDeletes is run,
    // sharing a segment that's still being merged. This
    // method is not fully thread safe, and relies on the
    // synchronization in IndexWriter
    synchronized void loadTermsIndex(SegmentInfo si, int termsIndexDivisor) throws IOException {
      if (tis == null) {
        Directory dir0;
        if (si.getUseCompoundFile()) {
          // In some cases, we were originally opened when CFS
          // was not used, but then we are asked to open the
          // terms reader with index, the segment has switched
          // to CFS
          if (cfsReader == null) {
            cfsReader = new CompoundFileReader(dir, segment + "." + IndexFileNames.COMPOUND_FILE_EXTENSION, readBufferSize);
          }
          dir0 = cfsReader;
        } else {
          dir0 = dir;
        }
        tis = new TermInfosReader(dir0, segment, fieldInfos, readBufferSize, termsIndexDivisor);
      }
    }
    synchronized void decRef() throws IOException {
      if (ref.decRef() == 0) {
        // close everything, nothing is shared anymore with other readers
        if (tis != null) {
          tis.close();
          // null so if an app hangs on to us we still free most ram
          tis = null;
        }
        if (tisNoIndex != null) {
          tisNoIndex.close();
        }
        if (freqStream != null) {
          freqStream.close();
        }
        if (proxStream != null) {
          proxStream.close();
        }
        if (termVectorsReaderOrig != null) {
          termVectorsReaderOrig.close();
        }
        if (fieldsReaderOrig != null) {
          fieldsReaderOrig.close();
        }
        if (cfsReader != null) {
          cfsReader.close();
        }
        if (storeCFSReader != null) {
          storeCFSReader.close();
        }
        // Force FieldCache to evict our entries at this point
        if (origInstance != null) {
          FieldCache.DEFAULT.purge(origInstance);
        }
      }
    }
    /**
     * Lazily opens the stored-fields (and, when present, term-vector)
     * readers for this segment, resolving which directory holds the doc
     * stores: a shared doc-store CFS, the segment's own CFS, or the plain
     * directory.  Idempotent: a second call is a no-op.
     */
    synchronized void openDocStores(SegmentInfo si) throws IOException {
      assert si.name.equals(segment);
      if (fieldsReaderOrig == null) {
        final Directory storeDir;
        if (si.getDocStoreOffset() != -1) {
          if (si.getDocStoreIsCompoundFile()) {
            assert storeCFSReader == null;
            storeCFSReader = new CompoundFileReader(dir,
                                                    si.getDocStoreSegment() + "." + IndexFileNames.COMPOUND_FILE_STORE_EXTENSION,
                                                    readBufferSize);
            storeDir = storeCFSReader;
            assert storeDir != null;
          } else {
            storeDir = dir;
            assert storeDir != null;
          }
        } else if (si.getUseCompoundFile()) {
          // In some cases, we were originally opened when CFS
          // was not used, but then we are asked to open doc
          // stores after the segment has switched to CFS
          if (cfsReader == null) {
            cfsReader = new CompoundFileReader(dir, segment + "." + IndexFileNames.COMPOUND_FILE_EXTENSION, readBufferSize);
          }
          storeDir = cfsReader;
          assert storeDir != null;
        } else {
          storeDir = dir;
          assert storeDir != null;
        }
        final String storesSegment;
        if (si.getDocStoreOffset() != -1) {
          storesSegment = si.getDocStoreSegment();
        } else {
          storesSegment = segment;
        }
        fieldsReaderOrig = new FieldsReader(storeDir, storesSegment, fieldInfos, readBufferSize,
                                            si.getDocStoreOffset(), si.docCount);
        // Verify two sources of "maxDoc" agree:
        if (si.getDocStoreOffset() == -1 && fieldsReaderOrig.size() != si.docCount) {
          throw new CorruptIndexException("doc counts differ for segment " + segment + ": fieldsReader shows " + fieldsReaderOrig.size() + " but segmentInfo shows " + si.docCount);
        }
        if (fieldInfos.hasVectors()) { // open term vector files only as needed
          termVectorsReaderOrig = new TermVectorsReader(storeDir, storesSegment, fieldInfos, readBufferSize, si.getDocStoreOffset(), si.docCount);
        }
      }
    }
  }
/**
* Sets the initial value
*/
private class FieldsReaderLocal extends CloseableThreadLocal<FieldsReader> {
@Override
protected FieldsReader initialValue() {
return (FieldsReader) core.getFieldsReaderOrig().clone();
}
}
static class Ref {
private int refCount = 1;
@Override
public String toString() {
return "refcount: "+refCount;
}
public synchronized int refCount() {
return refCount;
}
public synchronized int incRef() {
assert refCount > 0;
refCount++;
return refCount;
}
public synchronized int decRef() {
assert refCount > 0;
refCount--;
return refCount;
}
}
  /**
   * Byte[] referencing is used because a new norm object needs
   * to be created for each clone, and the byte array is all
   * that is needed for sharing between cloned readers. The
   * current norm referencing is for sharing between readers
   * whereas the byte[] referencing is for copy on write which
   * is independent of reader references (i.e. incRef, decRef).
   */
  final class Norm implements Cloneable {
    private int refCount = 1;
    // If this instance is a clone, the originalNorm
    // references the Norm that has a real open IndexInput:
    private Norm origNorm;
    private IndexInput in;
    private long normSeek;
    // null until bytes is set
    private Ref bytesRef;
    private byte[] bytes;
    private boolean dirty;
    private int number;
    private boolean rollbackDirty;
    public Norm(IndexInput in, int number, long normSeek) {
      this.in = in;
      this.number = number;
      this.normSeek = normSeek;
    }
    public synchronized void incRef() {
      assert refCount > 0 && (origNorm == null || origNorm.refCount > 0);
      refCount++;
    }
    // Closes our IndexInput, taking care not to close the shared
    // single-norm stream until its last user releases it.
    private void closeInput() throws IOException {
      if (in != null) {
        if (in != singleNormStream) {
          // It's private to us -- just close it
          in.close();
        } else {
          // We are sharing this with others -- decRef and
          // maybe close the shared norm stream
          if (singleNormRef.decRef() == 0) {
            singleNormStream.close();
            singleNormStream = null;
          }
        }
        in = null;
      }
    }
    // Drops one reference; on the last release this also releases the
    // origNorm (for clones) or the input (for originals) and the bytes ref.
    public synchronized void decRef() throws IOException {
      assert refCount > 0 && (origNorm == null || origNorm.refCount > 0);
      if (--refCount == 0) {
        if (origNorm != null) {
          origNorm.decRef();
          origNorm = null;
        } else {
          closeInput();
        }
        if (bytes != null) {
          assert bytesRef != null;
          bytesRef.decRef();
          bytes = null;
          bytesRef = null;
        } else {
          assert bytesRef == null;
        }
      }
    }
    // Load bytes but do not cache them if they were not
    // already cached
    public synchronized void bytes(byte[] bytesOut, int offset, int len) throws IOException {
      assert refCount > 0 && (origNorm == null || origNorm.refCount > 0);
      if (bytes != null) {
        // Already cached -- copy from cache:
        assert len <= maxDoc();
        System.arraycopy(bytes, 0, bytesOut, offset, len);
      } else {
        // Not cached
        if (origNorm != null) {
          // Ask origNorm to load
          origNorm.bytes(bytesOut, offset, len);
        } else {
          // We are orig -- read ourselves from disk:
          synchronized(in) {
            in.seek(normSeek);
            in.readBytes(bytesOut, offset, len, false);
          }
        }
      }
    }
    // Load & cache full bytes array. Returns bytes.
    public synchronized byte[] bytes() throws IOException {
      assert refCount > 0 && (origNorm == null || origNorm.refCount > 0);
      if (bytes == null) { // value not yet read
        assert bytesRef == null;
        if (origNorm != null) {
          // Ask origNorm to load so that for a series of
          // reopened readers we share a single read-only
          // byte[]
          bytes = origNorm.bytes();
          bytesRef = origNorm.bytesRef;
          bytesRef.incRef();
          // Once we've loaded the bytes we no longer need
          // origNorm:
          origNorm.decRef();
          origNorm = null;
        } else {
          // We are the origNorm, so load the bytes for real
          // ourself:
          final int count = maxDoc();
          bytes = new byte[count];
          // Since we are orig, in must not be null
          assert in != null;
          // Read from disk.
          synchronized(in) {
            in.seek(normSeek);
            in.readBytes(bytes, 0, count, false);
          }
          bytesRef = new Ref();
          closeInput();
        }
      }
      return bytes;
    }
    // Only for testing
    Ref bytesRef() {
      return bytesRef;
    }
    // Called if we intend to change a norm value. We make a
    // private copy of bytes if it's shared with others:
    public synchronized byte[] copyOnWrite() throws IOException {
      assert refCount > 0 && (origNorm == null || origNorm.refCount > 0);
      bytes();
      assert bytes != null;
      assert bytesRef != null;
      if (bytesRef.refCount() > 1) {
        // I cannot be the origNorm for another norm
        // instance if I'm being changed. Ie, only the
        // "head Norm" can be changed:
        assert refCount == 1;
        final Ref oldRef = bytesRef;
        bytes = cloneNormBytes(bytes);
        bytesRef = new Ref();
        oldRef.decRef();
      }
      dirty = true;
      return bytes;
    }
    // Returns a copy of this Norm instance that shares
    // IndexInput & bytes with the original one
    @Override
    public synchronized Object clone() {
      assert refCount > 0 && (origNorm == null || origNorm.refCount > 0);
      Norm clone;
      try {
        clone = (Norm) super.clone();
      } catch (CloneNotSupportedException cnse) {
        // Cannot happen
        throw new RuntimeException("unexpected CloneNotSupportedException", cnse);
      }
      clone.refCount = 1;
      if (bytes != null) {
        assert bytesRef != null;
        assert origNorm == null;
        // Clone holds a reference to my bytes:
        clone.bytesRef.incRef();
      } else {
        assert bytesRef == null;
        if (origNorm == null) {
          // I become the origNorm for the clone:
          clone.origNorm = this;
        }
        clone.origNorm.incRef();
      }
      // Only the origNorm will actually readBytes from in:
      clone.in = null;
      return clone;
    }
    // Flush all pending changes to the next generation
    // separate norms file.
    public void reWrite(SegmentInfo si) throws IOException {
      assert refCount > 0 && (origNorm == null || origNorm.refCount > 0): "refCount=" + refCount + " origNorm=" + origNorm;
      // NOTE: norms are re-written in regular directory, not cfs
      si.advanceNormGen(this.number);
      IndexOutput out = directory().createOutput(si.getNormFileName(this.number));
      try {
        out.writeBytes(bytes, maxDoc());
      } finally {
        out.close();
      }
      this.dirty = false;
    }
  }
Map<String,Norm> norms = new HashMap<String,Norm>();
/**
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
*/
public static SegmentReader get(boolean readOnly, SegmentInfo si, int termInfosIndexDivisor) throws CorruptIndexException, IOException {
return get(readOnly, si.dir, si, BufferedIndexInput.BUFFER_SIZE, true, termInfosIndexDivisor);
}
  /**
   * Opens a SegmentReader over the given segment, optionally read-only and
   * optionally opening its stored-fields/term-vector stores eagerly.
   *
   * @throws CorruptIndexException if the index is corrupt
   * @throws IOException if there is a low-level IO error
   */
  public static SegmentReader get(boolean readOnly,
                                  Directory dir,
                                  SegmentInfo si,
                                  int readBufferSize,
                                  boolean doOpenStores,
                                  int termInfosIndexDivisor)
    throws CorruptIndexException, IOException {
    SegmentReader instance = readOnly ? new ReadOnlySegmentReader() : new SegmentReader();
    instance.readOnly = readOnly;
    instance.si = si;
    instance.readBufferSize = readBufferSize;
    boolean success = false;
    try {
      instance.core = new CoreReaders(instance, dir, si, readBufferSize, termInfosIndexDivisor);
      if (doOpenStores) {
        instance.core.openDocStores(si);
      }
      instance.loadDeletedDocs();
      instance.openNorms(instance.core.cfsDir, readBufferSize);
      success = true;
    } finally {
      // With lock-less commits, it's entirely possible (and
      // fine) to hit a FileNotFound exception above. In
      // this case, we want to explicitly close any subset
      // of things that were opened so that we don't have to
      // wait for a GC to do so.
      if (!success) {
        instance.doClose();
      }
    }
    return instance;
  }
  // Lazily opens this segment's stored-fields/term-vector readers
  // (idempotent; see CoreReaders.openDocStores).
  void openDocStores() throws IOException {
    core.openDocStores(si);
  }
private boolean checkDeletedCounts() throws IOException {
final int recomputedCount = deletedDocs.getRecomputedCount();
assert deletedDocs.count() == recomputedCount : "deleted count=" + deletedDocs.count() + " vs recomputed count=" + recomputedCount;
assert si.getDelCount() == recomputedCount :
"delete count mismatch: info=" + si.getDelCount() + " vs BitVector=" + recomputedCount;
// Verify # deletes does not exceed maxDoc for this
// segment:
assert si.getDelCount() <= maxDoc() :
"delete count mismatch: " + recomputedCount + ") exceeds max doc (" + maxDoc() + ") for segment " + si.name;
return true;
}
private void loadDeletedDocs() throws IOException {
// NOTE: the bitvector is stored using the regular directory, not cfs
if (hasDeletions(si)) {
deletedDocs = new BitVector(directory(), si.getDelFileName());
deletedDocsRef = new Ref();
assert checkDeletedCounts();
} else
assert si.getDelCount() == 0;
}
/**
* Clones the norm bytes. May be overridden by subclasses. New and experimental.
* @param bytes Byte array to clone
* @return New BitVector
*/
protected byte[] cloneNormBytes(byte[] bytes) {
byte[] cloneBytes = new byte[bytes.length];
System.arraycopy(bytes, 0, cloneBytes, 0, bytes.length);
return cloneBytes;
}
/**
* Clones the deleteDocs BitVector. May be overridden by subclasses. New and experimental.
* @param bv BitVector to clone
* @return New BitVector
*/
protected BitVector cloneDeletedDocs(BitVector bv) {
return (BitVector)bv.clone();
}
@Override
public final synchronized Object clone() {
try {
return clone(readOnly); // Preserve current readOnly
} catch (Exception ex) {
throw new RuntimeException(ex);
}
}
  // Clones this reader, switching to the requested read-only mode;
  // implemented as a forced (doClone=true) reopenSegment.
  @Override
  public final synchronized IndexReader clone(boolean openReadOnly) throws CorruptIndexException, IOException {
    return reopenSegment(si, true, openReadOnly);
  }
/**
 * Shared implementation of clone() and reopen() for a single segment.
 *
 * @param si          the (possibly updated) SegmentInfo to read against
 * @param doClone     true when called from clone(): share deletions/norms
 *                    verbatim instead of reloading changed files
 * @param openReadOnly whether the returned reader is read-only
 * @return this reader (when nothing changed and no clone was requested)
 *         or a new SegmentReader sharing this reader's core
 */
synchronized SegmentReader reopenSegment(SegmentInfo si, boolean doClone, boolean openReadOnly) throws CorruptIndexException, IOException {
  // Deletions are up to date when both infos agree on having deletions and,
  // if they do, point at the same del file generation.
  boolean deletionsUpToDate = (this.si.hasDeletions() == si.hasDeletions())
                                && (!si.hasDeletions() || this.si.getDelFileName().equals(si.getDelFileName()));

  // Per-field check: a differing norm file name means that field's norms
  // were updated and must be reloaded (unless cloning).
  boolean normsUpToDate = true;

  boolean[] fieldNormsChanged = new boolean[core.fieldInfos.size()];
  final int fieldCount = core.fieldInfos.size();
  for (int i = 0; i < fieldCount; i++) {
    if (!this.si.getNormFileName(i).equals(si.getNormFileName(i))) {
      normsUpToDate = false;
      fieldNormsChanged[i] = true;
    }
  }

  // if we're cloning we need to run through the reopenSegment logic
  // also if both old and new readers aren't readonly, we clone to avoid sharing modifications
  if (normsUpToDate && deletionsUpToDate && !doClone && openReadOnly && readOnly) {
    return this;
  }

  // When cloning, the incoming SegmentInfos should not
  // have any changes in it:
  assert !doClone || (normsUpToDate && deletionsUpToDate);

  // clone reader
  SegmentReader clone = openReadOnly ? new ReadOnlySegmentReader() : new SegmentReader();

  boolean success = false;
  try {
    core.incRef();
    clone.core = core;
    clone.readOnly = openReadOnly;
    clone.si = si;
    clone.readBufferSize = readBufferSize;

    if (!openReadOnly && hasChanges) {
      // My pending changes transfer to the new reader
      clone.pendingDeleteCount = pendingDeleteCount;
      clone.deletedDocsDirty = deletedDocsDirty;
      clone.normsDirty = normsDirty;
      clone.hasChanges = hasChanges;
      hasChanges = false;
    }

    if (doClone) {
      if (deletedDocs != null) {
        // Share the deletions bit-for-bit; copy-on-write happens later in
        // doDelete if either reader mutates them.
        deletedDocsRef.incRef();
        clone.deletedDocs = deletedDocs;
        clone.deletedDocsRef = deletedDocsRef;
      }
    } else {
      if (!deletionsUpToDate) {
        // load deleted docs
        assert clone.deletedDocs == null;
        clone.loadDeletedDocs();
      } else if (deletedDocs != null) {
        deletedDocsRef.incRef();
        clone.deletedDocs = deletedDocs;
        clone.deletedDocsRef = deletedDocsRef;
      }
    }

    clone.norms = new HashMap<String,Norm>();

    // Clone norms
    for (int i = 0; i < fieldNormsChanged.length; i++) {

      // Clone unchanged norms to the cloned reader
      if (doClone || !fieldNormsChanged[i]) {
        final String curField = core.fieldInfos.fieldInfo(i).name;
        Norm norm = this.norms.get(curField);
        if (norm != null)
          clone.norms.put(curField, (Norm) norm.clone());
      }
    }

    // If we are not cloning, then this will open anew
    // any norms that have changed:
    clone.openNorms(si.getUseCompoundFile() ? core.getCFSReader() : directory(), readBufferSize);

    success = true;
  } finally {
    if (!success) {
      // An exception occurred during reopen, we have to decRef the norms
      // that we incRef'ed already and close singleNormsStream and FieldsReader
      clone.decRef();
    }
  }

  return clone;
}
/**
 * Persists pending deletions and norm updates for this segment.
 * Writes a new .del file (advancing the delete generation) when deletions
 * are dirty and rewrites dirty norms, then clears all dirty flags.
 *
 * @param commitUserData opaque user data (unused at the segment level)
 */
@Override
protected void doCommit(Map<String,String> commitUserData) throws IOException {
  if (hasChanges) {
    if (deletedDocsDirty) {               // re-write deleted
      si.advanceDelGen();

      // We can write directly to the actual name (vs to a
      // .tmp & renaming it) because the file is not live
      // until segments file is written:
      deletedDocs.write(directory(), si.getDelFileName());

      si.setDelCount(si.getDelCount()+pendingDeleteCount);
      pendingDeleteCount = 0;
      assert deletedDocs.count() == si.getDelCount(): "delete count mismatch during commit: info=" + si.getDelCount() + " vs BitVector=" + deletedDocs.count();
    } else {
      assert pendingDeleteCount == 0;
    }

    if (normsDirty) {               // re-write norms
      si.setNumFields(core.fieldInfos.size());
      for (final Norm norm : norms.values()) {
        if (norm.dirty) {
          norm.reWrite(si);
        }
      }
    }
    deletedDocsDirty = false;
    normsDirty = false;
    hasChanges = false;
  }
}
/** Returns the per-thread FieldsReader clone for this reader. */
FieldsReader getFieldsReader() {
  return fieldsReaderLocal.get();
}

/**
 * Releases this reader's resources: per-thread readers, the deletions
 * BitVector reference, norms, and finally the shared core.
 */
@Override
protected void doClose() throws IOException {
  termVectorsLocal.close();
  fieldsReaderLocal.close();

  if (deletedDocs != null) {
    deletedDocsRef.decRef();
    // null so if an app hangs on to us we still free most ram
    deletedDocs = null;
  }

  for (final Norm norm : norms.values()) {
    norm.decRef();
  }
  if (core != null) {
    core.decRef();
  }
}
/** Static variant: asks the SegmentInfo (not an open reader) for deletions. */
static boolean hasDeletions(SegmentInfo si) throws IOException {
  // Don't call ensureOpen() here (it could affect performance)
  return si.hasDeletions();
}

/** True when this reader currently holds a deletions BitVector. */
@Override
public boolean hasDeletions() {
  // Don't call ensureOpen() here (it could affect performance)
  return deletedDocs != null;
}

/** Whether the given segment is stored as a compound (.cfs) file. */
static boolean usesCompoundFile(SegmentInfo si) throws IOException {
  return si.getUseCompoundFile();
}

/** Whether the given segment has separate norm (.sN) files. */
static boolean hasSeparateNorms(SegmentInfo si) throws IOException {
  return si.hasSeparateNorms();
}
/**
 * Marks a document deleted in this reader's BitVector.
 * Lazily allocates the BitVector, and copy-on-writes it when it is still
 * shared with other (cloned/reopened) readers so their view is unchanged.
 *
 * @param docNum document number to delete
 */
@Override
protected void doDelete(int docNum) {
  if (deletedDocs == null) {
    deletedDocs = new BitVector(maxDoc());
    deletedDocsRef = new Ref();
  }
  // there is more than 1 SegmentReader with a reference to this
  // deletedDocs BitVector so decRef the current deletedDocsRef,
  // clone the BitVector, create a new deletedDocsRef
  if (deletedDocsRef.refCount() > 1) {
    Ref oldRef = deletedDocsRef;
    deletedDocs = cloneDeletedDocs(deletedDocs);
    deletedDocsRef = new Ref();
    oldRef.decRef();
  }
  deletedDocsDirty = true;
  // getAndSet returns the previous bit; only count genuinely new deletions.
  if (!deletedDocs.getAndSet(docNum))
    pendingDeleteCount++;
}
/**
 * Discards all deletions for this segment: drops the BitVector (and its
 * ref), resets pending counts, and clears the delete generation and count
 * on the SegmentInfo.
 */
@Override
protected void doUndeleteAll() {
  deletedDocsDirty = false;
  if (deletedDocs != null) {
    assert deletedDocsRef != null;
    deletedDocsRef.decRef();
    deletedDocs = null;
    deletedDocsRef = null;
    pendingDeleteCount = 0;
    si.clearDelGen();
    si.setDelCount(0);
  } else {
    assert deletedDocsRef == null;
    assert pendingDeleteCount == 0;
  }
}
/** Returns a mutable copy of the file names belonging to this segment. */
List<String> files() throws IOException {
  return new ArrayList<String>(si.files());
}

/** Returns an enumeration over all terms in this segment. */
@Override
public TermEnum terms() {
  ensureOpen();
  return core.getTermsReader().terms();
}

/** Returns an enumeration over terms starting at or after {@code t}. */
@Override
public TermEnum terms(Term t) throws IOException {
  ensureOpen();
  return core.getTermsReader().terms(t);
}

/** Exposes the shared FieldInfos of this segment. */
FieldInfos fieldInfos() {
  return core.fieldInfos;
}
/**
 * Loads the stored fields for document {@code n}, restricted by the
 * given FieldSelector.
 */
@Override
public Document document(int n, FieldSelector fieldSelector) throws CorruptIndexException, IOException {
  ensureOpen();
  return getFieldsReader().doc(n, fieldSelector);
}

/** True when document {@code n} is marked deleted in this reader. */
@Override
public synchronized boolean isDeleted(int n) {
  return (deletedDocs != null && deletedDocs.get(n));
}
/**
 * Returns a TermDocs for the given term.  A null term is the documented
 * "match every non-deleted document" request and is served by AllTermDocs.
 */
@Override
public TermDocs termDocs(Term term) throws IOException {
  if (term != null) {
    return super.termDocs(term);
  }
  return new AllTermDocs(this);
}
/** Returns a fresh TermDocs positioned on no term. */
@Override
public TermDocs termDocs() throws IOException {
  ensureOpen();
  return new SegmentTermDocs(this);
}

/** Returns a fresh TermPositions positioned on no term. */
@Override
public TermPositions termPositions() throws IOException {
  ensureOpen();
  return new SegmentTermPositions(this);
}
/**
 * Returns the number of documents containing term {@code t}, or 0 when the
 * term does not occur in this segment.
 */
@Override
public int docFreq(Term t) throws IOException {
  ensureOpen();
  final TermInfo info = core.getTermsReader().get(t);
  return info == null ? 0 : info.docFreq;
}
/** Returns the number of non-deleted documents in this segment. */
@Override
public int numDocs() {
  // Don't call ensureOpen() here (it could affect performance)
  final int deleted = (deletedDocs == null) ? 0 : deletedDocs.count();
  return maxDoc() - deleted;
}
/** Returns one greater than the largest possible document number. */
@Override
public int maxDoc() {
  // Don't call ensureOpen() here (it could affect performance)
  return si.docCount;
}
/**
 * Collects the names of all fields matching the given option by scanning
 * this segment's FieldInfos once; each branch below encodes one option's
 * predicate over the per-field flags.
 * @see IndexReader#getFieldNames(org.apache.lucene.index.IndexReader.FieldOption)
 */
@Override
public Collection<String> getFieldNames(IndexReader.FieldOption fieldOption) {
  ensureOpen();

  Set<String> fieldSet = new HashSet<String>();
  for (int i = 0; i < core.fieldInfos.size(); i++) {
    FieldInfo fi = core.fieldInfos.fieldInfo(i);
    if (fieldOption == IndexReader.FieldOption.ALL) {
      fieldSet.add(fi.name);
    }
    else if (!fi.isIndexed && fieldOption == IndexReader.FieldOption.UNINDEXED) {
      fieldSet.add(fi.name);
    }
    else if (fi.omitTermFreqAndPositions && fieldOption == IndexReader.FieldOption.OMIT_TERM_FREQ_AND_POSITIONS) {
      fieldSet.add(fi.name);
    }
    else if (fi.storePayloads && fieldOption == IndexReader.FieldOption.STORES_PAYLOADS) {
      fieldSet.add(fi.name);
    }
    else if (fi.isIndexed && fieldOption == IndexReader.FieldOption.INDEXED) {
      fieldSet.add(fi.name);
    }
    else if (fi.isIndexed && fi.storeTermVector == false && fieldOption == IndexReader.FieldOption.INDEXED_NO_TERMVECTOR) {
      fieldSet.add(fi.name);
    }
    // TERMVECTOR means "plain" term vectors: no positions, no offsets.
    else if (fi.storeTermVector == true &&
             fi.storePositionWithTermVector == false &&
             fi.storeOffsetWithTermVector == false &&
             fieldOption == IndexReader.FieldOption.TERMVECTOR) {
      fieldSet.add(fi.name);
    }
    else if (fi.isIndexed && fi.storeTermVector && fieldOption == IndexReader.FieldOption.INDEXED_WITH_TERMVECTOR) {
      fieldSet.add(fi.name);
    }
    else if (fi.storePositionWithTermVector && fi.storeOffsetWithTermVector == false && fieldOption == IndexReader.FieldOption.TERMVECTOR_WITH_POSITION) {
      fieldSet.add(fi.name);
    }
    else if (fi.storeOffsetWithTermVector && fi.storePositionWithTermVector == false && fieldOption == IndexReader.FieldOption.TERMVECTOR_WITH_OFFSET) {
      fieldSet.add(fi.name);
    }
    else if ((fi.storeOffsetWithTermVector && fi.storePositionWithTermVector) &&
              fieldOption == IndexReader.FieldOption.TERMVECTOR_WITH_POSITION_OFFSET) {
      fieldSet.add(fi.name);
    }
  }
  return fieldSet;
}
/** True when this segment stores norms for {@code field}. */
@Override
public synchronized boolean hasNorms(String field) {
  ensureOpen();
  return norms.containsKey(field);
}

// can return null if norms aren't stored
/** Returns the norm bytes for {@code field}, or null when the field is not
 *  indexed or norms were omitted. */
protected synchronized byte[] getNorms(String field) throws IOException {
  Norm norm = norms.get(field);
  if (norm == null) return null;  // not indexed, or norms not stored
  return norm.bytes();
}
// returns fake norms if norms aren't available
/** Public accessor for a field's norms; simply forwards to getNorms. */
@Override
public synchronized byte[] norms(String field) throws IOException {
  ensureOpen();
  return getNorms(field);
}
/**
 * Sets a single norm byte for {@code doc} on {@code field}; silently a
 * no-op for fields without norms.  Copy-on-write protects shared norms.
 */
@Override
protected void doSetNorm(int doc, String field, byte value)
        throws IOException {
  Norm norm = norms.get(field);
  if (norm == null)                             // not an indexed field
    return;

  normsDirty = true;
  norm.copyOnWrite()[doc] = value;                    // set the value
}

/** Read norms into a pre-allocated array. */
@Override
public synchronized void norms(String field, byte[] bytes, int offset)
  throws IOException {

  ensureOpen();
  Norm norm = norms.get(field);
  if (norm == null) {
    // Field has no norms: fill with the encoding of boost 1.0 as a default.
    Arrays.fill(bytes, offset, bytes.length, DefaultSimilarity.encodeNorm(1.0f));
    return;
  }

  norm.bytes(bytes, offset, maxDoc());
}
/**
 * Opens the Norm for every indexed, non-omitting field that is not already
 * present in {@code norms} (reopen may reuse instances).
 *
 * @param cfsDir         directory for norms stored inside the compound file
 * @param readBufferSize buffer size for the underlying IndexInput
 */
private void openNorms(Directory cfsDir, int readBufferSize) throws IOException {
  long nextNormSeek = SegmentMerger.NORMS_HEADER.length; //skip header (header unused for now)
  int maxDoc = maxDoc();
  for (int i = 0; i < core.fieldInfos.size(); i++) {
    FieldInfo fi = core.fieldInfos.fieldInfo(i);
    if (norms.containsKey(fi.name)) {
      // in case this SegmentReader is being re-opened, we might be able to
      // reuse some norm instances and skip loading them here
      continue;
    }
    if (fi.isIndexed && !fi.omitNorms) {
      // Separate norms live in the main directory; shared norms may live in
      // the compound file.
      Directory d = directory();
      String fileName = si.getNormFileName(fi.number);
      if (!si.hasSeparateNorms(fi.number)) {
        d = cfsDir;
      }

      // singleNormFile means multiple norms share this file
      boolean singleNormFile = fileName.endsWith("." + IndexFileNames.NORMS_EXTENSION);
      IndexInput normInput = null;
      long normSeek;

      if (singleNormFile) {
        normSeek = nextNormSeek;
        if (singleNormStream == null) {
          singleNormStream = d.openInput(fileName, readBufferSize);
          singleNormRef = new Ref();
        } else {
          singleNormRef.incRef();
        }
        // All norms in the .nrm file can share a single IndexInput since
        // they are only used in a synchronized context.
        // If this were to change in the future, a clone could be done here.
        normInput = singleNormStream;
      } else {
        normSeek = 0;
        normInput = d.openInput(fileName);
      }

      norms.put(fi.name, new Norm(normInput, fi.number, normSeek));
      nextNormSeek += maxDoc; // increment also if some norms are separate
    }
  }
}
/** True once the terms dictionary index has been loaded into RAM. */
boolean termsIndexLoaded() {
  return core.termsIndexIsLoaded();
}

// NOTE: only called from IndexWriter when a near
// real-time reader is opened, or applyDeletes is run,
// sharing a segment that's still being merged.  This
// method is not thread safe, and relies on the
// synchronization in IndexWriter
void loadTermsIndex(int termsIndexDivisor) throws IOException {
  core.loadTermsIndex(si, termsIndexDivisor);
}

// for testing only
/** Returns true only when every norm stream has been fully released. */
boolean normsClosed() {
  if (singleNormStream != null) {
    return false;
  }
  for (final Norm norm : norms.values()) {
    if (norm.refCount > 0) {
      return false;
    }
  }
  return true;
}

// for testing only
/** Returns true when the given field's norm has been fully released. */
boolean normsClosed(String field) {
  return norms.get(field).refCount == 0;
}
/**
 * Create a clone from the initial TermVectorsReader and store it in the ThreadLocal.
 * Returns null when this segment stores no term vectors or cloning fails.
 * @return TermVectorsReader
 */
TermVectorsReader getTermVectorsReader() {
  TermVectorsReader tvReader = termVectorsLocal.get();
  if (tvReader == null) {
    TermVectorsReader orig = core.getTermVectorsReaderOrig();
    if (orig == null) {
      return null;
    } else {
      try {
        tvReader = (TermVectorsReader) orig.clone();
      } catch (CloneNotSupportedException cnse) {
        return null;
      }
    }
    termVectorsLocal.set(tvReader);
  }
  return tvReader;
}

/** Exposes the shared (un-cloned) TermVectorsReader of this segment. */
TermVectorsReader getTermVectorsReaderOrig() {
  return core.getTermVectorsReaderOrig();
}
/** Return a term frequency vector for the specified document and field. The
 *  vector returned contains term numbers and frequencies for all terms in
 *  the specified field of this document, if the field had storeTermVector
 *  flag set.  If the flag was not set, the method returns null.
 * @throws IOException
 */
@Override
public TermFreqVector getTermFreqVector(int docNumber, String field) throws IOException {
  // Check if this field is invalid or has no stored term vector
  ensureOpen();
  FieldInfo fi = core.fieldInfos.fieldInfo(field);
  if (fi == null || !fi.storeTermVector)
    return null;

  TermVectorsReader termVectorsReader = getTermVectorsReader();
  if (termVectorsReader == null)
    return null;

  return termVectorsReader.get(docNumber, field);
}

/** Streams the named field's term vector for {@code docNumber} into the
 *  given mapper; silently a no-op when no vector is stored. */
@Override
public void getTermFreqVector(int docNumber, String field, TermVectorMapper mapper) throws IOException {
  ensureOpen();
  FieldInfo fi = core.fieldInfos.fieldInfo(field);
  if (fi == null || !fi.storeTermVector)
    return;

  TermVectorsReader termVectorsReader = getTermVectorsReader();
  if (termVectorsReader == null) {
    return;
  }

  termVectorsReader.get(docNumber, field, mapper);
}

/** Streams all stored term vectors for {@code docNumber} into the mapper. */
@Override
public void getTermFreqVector(int docNumber, TermVectorMapper mapper) throws IOException {
  ensureOpen();

  TermVectorsReader termVectorsReader = getTermVectorsReader();
  if (termVectorsReader == null)
    return;

  termVectorsReader.get(docNumber, mapper);
}
/** Return an array of term frequency vectors for the specified document.
 *  The array contains a vector for each vectorized field in the document.
 *  Each vector contains term numbers and frequencies for all terms
 *  in a given vectorized field.
 *  If no such fields existed, the method returns null.
 * @throws IOException
 */
@Override
public TermFreqVector[] getTermFreqVectors(int docNumber) throws IOException {
  ensureOpen();

  TermVectorsReader termVectorsReader = getTermVectorsReader();
  if (termVectorsReader == null)
    return null;

  return termVectorsReader.get(docNumber);
}
/**
 * Return the name of the segment this reader is reading.
 */
public String getSegmentName() {
  return core.segment;
}

/**
 * Return the SegmentInfo of the segment this reader is reading.
 */
SegmentInfo getSegmentInfo() {
  return si;
}

/** Swaps in a new SegmentInfo (used by the writer when infos are cloned). */
void setSegmentInfo(SegmentInfo info) {
  si = info;
}
/** Snapshots all dirty-state flags so a failed commit can be rolled back. */
void startCommit() {
  rollbackHasChanges = hasChanges;
  rollbackDeletedDocsDirty = deletedDocsDirty;
  rollbackNormsDirty = normsDirty;
  rollbackPendingDeleteCount = pendingDeleteCount;
  for (Norm norm : norms.values()) {
    norm.rollbackDirty = norm.dirty;
  }
}

/** Restores the dirty-state flags captured by {@link #startCommit}. */
void rollbackCommit() {
  hasChanges = rollbackHasChanges;
  deletedDocsDirty = rollbackDeletedDocsDirty;
  normsDirty = rollbackNormsDirty;
  pendingDeleteCount = rollbackPendingDeleteCount;
  for (Norm norm : norms.values()) {
    norm.dirty = norm.rollbackDirty;
  }
}
/** Returns the directory this index resides in. */
@Override
public Directory directory() {
  // Don't ensureOpen here -- in certain cases, when a
  // cloned/reopened reader needs to commit, it may call
  // this method on the closed original reader
  return core.dir;
}

// This is necessary so that cloned SegmentReaders (which
// share the underlying postings data) will map to the
// same entry in the FieldCache.  See LUCENE-1579.
@Override
public final Object getFieldCacheKey() {
  return core.freqStream;
}

/** Deletions-sensitive cache key: the BitVector instance itself. */
@Override
public Object getDeletesCacheKey() {
  return deletedDocs;
}

/** Number of unique terms in this segment's dictionary. */
@Override
public long getUniqueTermCount() {
  return core.getTermsReader().size();
}
/**
 * Lotsa tests did hacks like:<br/>
 * SegmentReader reader = (SegmentReader) IndexReader.open(dir);<br/>
 * They broke. This method serves as a hack to keep hacks working
 * We do it with R/W access for the tests (BW compatibility)
 * @deprecated Remove this when tests are fixed!
 */
static SegmentReader getOnlySegmentReader(Directory dir) throws IOException {
  return getOnlySegmentReader(IndexReader.open(dir,false));
}

/** Unwraps a reader known to be (or to wrap exactly one) SegmentReader;
 *  throws IllegalArgumentException otherwise. */
static SegmentReader getOnlySegmentReader(IndexReader reader) {
  if (reader instanceof SegmentReader)
    return (SegmentReader) reader;

  if (reader instanceof DirectoryReader) {
    IndexReader[] subReaders = reader.getSequentialSubReaders();
    if (subReaders.length != 1)
      throw new IllegalArgumentException(reader + " has " + subReaders.length + " segments instead of exactly one");

    return (SegmentReader) subReaders[0];
  }

  throw new IllegalArgumentException(reader + " is not a SegmentReader or a single-segment DirectoryReader");
}

/** The terms-index divisor this reader's core was opened with. */
@Override
public int getTermInfosIndexDivisor() {
  return core.termsIndexDivisor;
}
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/SegmentReader.java | Java | art | 41,323 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.lucene.index;
import java.io.IOException;
/**
* This exception is thrown when Lucene detects
* an inconsistency in the index.
*/
/**
 * This exception is thrown when Lucene detects
 * an inconsistency in the index.
 */
public class CorruptIndexException extends IOException {
  /**
   * Creates a new CorruptIndexException describing the detected corruption.
   * @param message description of the inconsistency
   */
  public CorruptIndexException(String message) {
    super(message);
  }

  /**
   * Creates a new CorruptIndexException that preserves the root cause, so
   * the underlying stack trace is not lost when low-level failures are
   * wrapped.  Uses initCause because IOException gained no (String,
   * Throwable) constructor until Java 6.
   * @param message description of the inconsistency
   * @param cause the underlying exception
   */
  public CorruptIndexException(String message, Throwable cause) {
    super(message);
    initCause(cause);
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/CorruptIndexException.java | Java | art | 1,089 |
package org.apache.lucene.index;
/**
* Copyright 2007 The Apache Software Foundation
* <p/>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Convenience class for holding TermVector information.
*/
/**
 * Convenience class for holding TermVector information.
 * Note: equality and hash code are based on the term text only (see
 * {@link #equals} / {@link #hashCode}); field, frequency, offsets and
 * positions do not participate.
 */
public class TermVectorEntry {
  private String field;
  private String term;
  private int frequency;
  private TermVectorOffsetInfo [] offsets;
  int [] positions;


  public TermVectorEntry() {
  }

  public TermVectorEntry(String field, String term, int frequency, TermVectorOffsetInfo[] offsets, int[] positions) {
    this.field = field;
    this.term = term;
    this.frequency = frequency;
    this.offsets = offsets;
    this.positions = positions;
  }

  /** Field the term occurred in. */
  public String getField() {
    return field;
  }

  /** Number of occurrences of the term in the field. */
  public int getFrequency() {
    return frequency;
  }

  /** Character offsets for each occurrence, or null when not stored. */
  public TermVectorOffsetInfo[] getOffsets() {
    return offsets;
  }

  /** Token positions for each occurrence, or null when not stored. */
  public int[] getPositions() {
    return positions;
  }

  /** The term text itself. */
  public String getTerm() {
    return term;
  }

  //Keep package local
  void setFrequency(int frequency) {
    this.frequency = frequency;
  }

  void setOffsets(TermVectorOffsetInfo[] offsets) {
    this.offsets = offsets;
  }

  void setPositions(int[] positions) {
    this.positions = positions;
  }

  /** Term-only equality, consistent with {@link #hashCode}. */
  @Override
  public boolean equals(Object o) {
    if (this == o) return true;
    if (o == null || getClass() != o.getClass()) return false;

    TermVectorEntry that = (TermVectorEntry) o;

    if (term != null ? !term.equals(that.term) : that.term != null) return false;

    return true;
  }

  @Override
  public int hashCode() {
    return (term != null ? term.hashCode() : 0);
  }

  @Override
  public String toString() {
    return "TermVectorEntry{" +
            "field='" + field + '\'' +
            ", term='" + term + '\'' +
            ", frequency=" + frequency +
            '}';
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/TermVectorEntry.java | Java | art | 2,344 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.cache.Cache;
import org.apache.lucene.util.cache.SimpleLRUCache;
import org.apache.lucene.util.CloseableThreadLocal;
/** This stores a monotonically increasing set of <Term, TermInfo> pairs in a
* Directory. Pairs are accessed either by Term or by ordinal position the
* set. */
/** This stores a monotonically increasing set of <Term, TermInfo> pairs in a
 * Directory.  Pairs are accessed either by Term or by ordinal position the
 * set.  Thread safety is achieved with per-thread enumerators and LRU
 * caches held in a CloseableThreadLocal. */
final class TermInfosReader {
  private final Directory directory;
  private final String segment;
  private final FieldInfos fieldInfos;

  private final CloseableThreadLocal<ThreadResources> threadResources = new CloseableThreadLocal<ThreadResources>();
  private final SegmentTermEnum origEnum;
  private final long size;

  // In-RAM terms index: every totalIndexInterval'th term, or null when the
  // index was not loaded (indexDivisor == -1).
  private final Term[] indexTerms;
  private final TermInfo[] indexInfos;
  private final long[] indexPointers;

  private final int totalIndexInterval;

  private final static int DEFAULT_CACHE_SIZE = 1024;

  /**
   * Per-thread resources managed by ThreadLocal
   */
  private static final class ThreadResources {
    SegmentTermEnum termEnum;

    // Used for caching the least recently looked-up Terms
    Cache<Term,TermInfo> termInfoCache;
  }

  /**
   * Opens the .tis/.tii files for the named segment.
   *
   * @param indexDivisor load only every indexDivisor'th index term, or -1
   *        to skip loading the terms index entirely
   */
  TermInfosReader(Directory dir, String seg, FieldInfos fis, int readBufferSize, int indexDivisor)
       throws CorruptIndexException, IOException {
    boolean success = false;

    if (indexDivisor < 1 && indexDivisor != -1) {
      throw new IllegalArgumentException("indexDivisor must be -1 (don't load terms index) or greater than 0: got " + indexDivisor);
    }

    try {
      directory = dir;
      segment = seg;
      fieldInfos = fis;

      origEnum = new SegmentTermEnum(directory.openInput(segment + "." + IndexFileNames.TERMS_EXTENSION,
                                                         readBufferSize), fieldInfos, false);
      size = origEnum.size;


      if (indexDivisor != -1) {
        // Load terms index
        totalIndexInterval = origEnum.indexInterval * indexDivisor;
        final SegmentTermEnum indexEnum = new SegmentTermEnum(directory.openInput(segment + "." + IndexFileNames.TERMS_INDEX_EXTENSION,
                                                                                  readBufferSize), fieldInfos, true);

        try {
          int indexSize = 1+((int)indexEnum.size-1)/indexDivisor;  // otherwise read index

          indexTerms = new Term[indexSize];
          indexInfos = new TermInfo[indexSize];
          indexPointers = new long[indexSize];

          for (int i = 0; indexEnum.next(); i++) {
            indexTerms[i] = indexEnum.term();
            indexInfos[i] = indexEnum.termInfo();
            indexPointers[i] = indexEnum.indexPointer;

            // Skip indexDivisor-1 entries between sampled index terms.
            for (int j = 1; j < indexDivisor; j++)
              if (!indexEnum.next())
                break;
          }
        } finally {
          indexEnum.close();
        }
      } else {
        // Do not load terms index:
        totalIndexInterval = -1;
        indexTerms = null;
        indexInfos = null;
        indexPointers = null;
      }

      success = true;
    } finally {
      // With lock-less commits, it's entirely possible (and
      // fine) to hit a FileNotFound exception above. In
      // this case, we want to explicitly close any subset
      // of things that were opened so that we don't have to
      // wait for a GC to do so.
      if (!success) {
        close();
      }
    }
  }

  public int getSkipInterval() {
    return origEnum.skipInterval;
  }

  public int getMaxSkipLevels() {
    return origEnum.maxSkipLevels;
  }

  final void close() throws IOException {
    if (origEnum != null)
      origEnum.close();
    threadResources.close();
  }

  /** Returns the number of term/value pairs in the set. */
  final long size() {
    return size;
  }

  /** Lazily creates this thread's private enumerator and lookup cache. */
  private ThreadResources getThreadResources() {
    ThreadResources resources = threadResources.get();
    if (resources == null) {
      resources = new ThreadResources();
      resources.termEnum = terms();
      // Cache does not have to be thread-safe, it is only used by one thread at the same time
      resources.termInfoCache = new SimpleLRUCache<Term,TermInfo>(DEFAULT_CACHE_SIZE);
      threadResources.set(resources);
    }
    return resources;
  }


  /** Returns the offset of the greatest index entry which is less than or equal to term.*/
  private final int getIndexOffset(Term term) {
    int lo = 0;					  // binary search indexTerms[]
    int hi = indexTerms.length - 1;

    while (hi >= lo) {
      int mid = (lo + hi) >>> 1;              // unsigned shift avoids overflow
      int delta = term.compareTo(indexTerms[mid]);
      if (delta < 0)
	hi = mid - 1;
      else if (delta > 0)
	lo = mid + 1;
      else
	return mid;
    }
    return hi;
  }

  /** Positions the enumerator at the given index entry. */
  private final void seekEnum(SegmentTermEnum enumerator, int indexOffset) throws IOException {
    enumerator.seek(indexPointers[indexOffset],
                    ((long) indexOffset * totalIndexInterval) - 1,
                    indexTerms[indexOffset], indexInfos[indexOffset]);
  }

  /** Returns the TermInfo for a Term in the set, or null. */
  TermInfo get(Term term) throws IOException {
    return get(term, true);
  }

  /** Returns the TermInfo for a Term in the set, or null. */
  private TermInfo get(Term term, boolean useCache) throws IOException {
    if (size == 0) return null;

    ensureIndexIsRead();

    TermInfo ti;
    ThreadResources resources = getThreadResources();
    Cache<Term,TermInfo> cache = null;

    if (useCache) {
      cache = resources.termInfoCache;
      // check the cache first if the term was recently looked up
      ti = cache.get(term);
      if (ti != null) {
        return ti;
      }
    }

    // optimize sequential access: first try scanning cached enum w/o seeking
    SegmentTermEnum enumerator = resources.termEnum;
    if (enumerator.term() != null                 // term is at or past current
	&& ((enumerator.prev() != null && term.compareTo(enumerator.prev())> 0)
	    || term.compareTo(enumerator.term()) >= 0)) {
      int enumOffset = (int)(enumerator.position/totalIndexInterval)+1;
      if (indexTerms.length == enumOffset	  // but before end of block
	  || term.compareTo(indexTerms[enumOffset]) < 0) {
       // no need to seek

        int numScans = enumerator.scanTo(term);
        if (enumerator.term() != null && term.compareTo(enumerator.term()) == 0) {
          ti = enumerator.termInfo();
          if (cache != null && numScans > 1) {
            // we only want to put this TermInfo into the cache if
            // scanEnum skipped more than one dictionary entry.
            // This prevents RangeQueries or WildcardQueries to
            // wipe out the cache when they iterate over a large numbers
            // of terms in order
            cache.put(term, ti);
          }
        } else {
          ti = null;
        }

        return ti;
      }
    }

    // random-access: must seek
    seekEnum(enumerator, getIndexOffset(term));
    enumerator.scanTo(term);
    if (enumerator.term() != null && term.compareTo(enumerator.term()) == 0) {
      ti = enumerator.termInfo();
      if (cache != null) {
        cache.put(term, ti);
      }
    } else {
      ti = null;
    }
    return ti;
  }

  private void ensureIndexIsRead() {
    if (indexTerms == null) {
      throw new IllegalStateException("terms index was not loaded when this reader was created");
    }
  }

  /** Returns the position of a Term in the set or -1. */
  final long getPosition(Term term) throws IOException {
    if (size == 0) return -1;

    ensureIndexIsRead();
    int indexOffset = getIndexOffset(term);

    SegmentTermEnum enumerator = getThreadResources().termEnum;
    seekEnum(enumerator, indexOffset);

    while(term.compareTo(enumerator.term()) > 0 && enumerator.next()) {}

    if (term.compareTo(enumerator.term()) == 0)
      return enumerator.position;
    else
      return -1;
  }

  /** Returns an enumeration of all the Terms and TermInfos in the set. */
  public SegmentTermEnum terms() {
    return (SegmentTermEnum)origEnum.clone();
  }

  /** Returns an enumeration of terms starting at or after the named term. */
  public SegmentTermEnum terms(Term term) throws IOException {
    // don't use the cache in this call because we want to reposition the
    // enumeration
    get(term, false);
    return (SegmentTermEnum)getThreadResources().termEnum.clone();
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/TermInfosReader.java | Java | art | 9,200 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.util.Collection;
import java.util.Map;
import java.io.IOException;
/** Abstract consumer in the indexing chain that receives the inverted form
 *  of each document (e.g. the terms-hash / freq-prox writers). */
abstract class InvertedDocConsumer {

  /** Add a new thread */
  abstract InvertedDocConsumerPerThread addThread(DocInverterPerThread docInverterPerThread);

  /** Abort (called after hitting AbortException) */
  abstract void abort();

  /** Flush a new segment */
  abstract void flush(Map<InvertedDocConsumerPerThread,Collection<InvertedDocConsumerPerField>> threadsAndFields, SegmentWriteState state) throws IOException;

  /** Close doc stores */
  abstract void closeDocStore(SegmentWriteState state) throws IOException;

  /** Attempt to free RAM, returning true if any RAM was
   *  freed */
  abstract boolean freeRAM();

  // Shared field metadata, injected by the indexing chain before use.
  FieldInfos fieldInfos;

  void setFieldInfos(FieldInfos fieldInfos) {
    this.fieldInfos = fieldInfos;
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/InvertedDocConsumer.java | Java | art | 1,662 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
// TODO FI: some of this is "generic" to TermsHash* so we
// should factor it out so other consumers don't have to
// duplicate this code
/** Used by DocumentsWriter to merge the postings from
* multiple ThreadStates when creating a segment */
// TODO FI: some of this is "generic" to TermsHash* so we
// should factor it out so other consumers don't have to
// duplicate this code

/** Used by DocumentsWriter to merge the postings from
 *  multiple ThreadStates when creating a segment.
 *  Acts as a cursor over one field's sorted postings: nextTerm() advances
 *  to the next term, nextDoc() decodes the next doc/freq for that term. */
final class FreqProxFieldMergeState {

  final FreqProxTermsWriterPerField field;
  final int numPostings;
  final CharBlockPool charPool;
  final RawPostingList[] postings;

  private FreqProxTermsWriter.PostingList p;
  // Current term text lives in a shared char block at [textOffset...].
  char[] text;
  int textOffset;

  private int postingUpto = -1;

  final ByteSliceReader freq = new ByteSliceReader();
  final ByteSliceReader prox = new ByteSliceReader();

  int docID;
  int termFreq;

  public FreqProxFieldMergeState(FreqProxTermsWriterPerField field) {
    this.field = field;
    this.charPool = field.perThread.termsHashPerThread.charPool;
    this.numPostings = field.termsHashPerField.numPostings;
    this.postings = field.termsHashPerField.sortPostings();
  }

  /** Advances to the next term; returns false when all terms are consumed.
   *  On success the cursor is positioned on that term's first document. */
  boolean nextTerm() throws IOException {
    postingUpto++;
    if (postingUpto == numPostings)
      return false;

    p = (FreqProxTermsWriter.PostingList) postings[postingUpto];
    docID = 0;

    text = charPool.buffers[p.textStart >> DocumentsWriter.CHAR_BLOCK_SHIFT];
    textOffset = p.textStart & DocumentsWriter.CHAR_BLOCK_MASK;

    // Stream 0 holds doc/freq deltas; stream 1 holds prox data.
    field.termsHashPerField.initReader(freq, p, 0);
    if (!field.fieldInfo.omitTermFreqAndPositions)
      field.termsHashPerField.initReader(prox, p, 1);

    // Should always be true
    boolean result = nextDoc();
    assert result;

    return true;
  }

  /** Decodes the next doc (and freq) for the current term; returns false at
   *  end.  The final document is stored un-flushed in the PostingList and is
   *  returned once when the byte slice is exhausted. */
  public boolean nextDoc() throws IOException {
    if (freq.eof()) {
      if (p.lastDocCode != -1) {
        // Return last doc
        docID = p.lastDocID;
        if (!field.omitTermFreqAndPositions)
          termFreq = p.docFreq;
        p.lastDocCode = -1;
        return true;
      } else
        // EOF
        return false;
    }

    final int code = freq.readVInt();
    if (field.omitTermFreqAndPositions)
      docID += code;
    else {
      // Low bit of the delta-coded docID flags freq==1 (no explicit vInt).
      docID += code >>> 1;
      if ((code & 1) != 0)
        termFreq = 1;
      else
        termFreq = freq.readVInt();
    }

    assert docID != p.lastDocID;

    return true;
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/FreqProxFieldMergeState.java | Java | art | 3,066 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.util.HashMap;
import java.util.Map;
import java.util.TreeMap;
import java.util.ArrayList;
import java.util.List;
import java.util.Map.Entry;
import org.apache.lucene.search.Query;
/** Holds buffered deletes, by docID, term or query. We
* hold two instances of this class: one for the deletes
* prior to the last flush, the other for deletes after
* the last flush. This is so if we need to abort
* (discard all buffered docs) we can also discard the
* buffered deletes yet keep the deletes done during
* previously flushed segments. */
class BufferedDeletes {
  // Count of buffered delete-by-term operations, counting duplicates of
  // the same term (the terms map holds each term only once; see size()).
  int numTerms;
  // Delete Term -> Num; Num records a docID bound for the delete
  // (NOTE(review): appears to be the max docID the delete applies to —
  // confirm against DocumentsWriter).
  Map<Term,Num> terms;
  // Delete Query -> docID bound, analogous to terms.
  Map<Query,Integer> queries = new HashMap<Query,Integer>();
  // Individual docIDs buffered for deletion.
  List<Integer> docIDs = new ArrayList<Integer>();
  // Approximate RAM consumed by the buffered deletes.
  long bytesUsed;
  // When true, terms is a sorted TreeMap; otherwise a HashMap.
  private final boolean doTermSort;

  public BufferedDeletes(boolean doTermSort) {
    this.doTermSort = doTermSort;
    if (doTermSort) {
      terms = new TreeMap<Term,Num>();
    } else {
      terms = new HashMap<Term,Num>();
    }
  }
// Number of documents a delete term applies to.
final static class Num {
  private int num;

  Num(int num) {
    this.num = num;
  }

  int getNum() {
    return num;
  }

  void setNum(int value) {
    // Never move backwards: multiple threads replacing the same doc at
    // nearly the same time may call us out of order, and we must keep
    // the highest value seen so far.
    this.num = Math.max(this.num, value);
  }
}
// Total number of buffered delete operations.  Uses numTerms rather than
// terms.size() on purpose: repeated deletes of the same term each count
// toward a flush-every-N-deletes trigger even though the map dedupes them.
int size() {
  return docIDs.size() + queries.size() + numTerms;
}
// Absorbs every delete buffered in `in` into this instance, then empties
// `in`.  Map entries from `in` overwrite ours on key collision (putAll).
void update(BufferedDeletes in) {
  terms.putAll(in.terms);
  queries.putAll(in.queries);
  docIDs.addAll(in.docIDs);
  numTerms += in.numTerms;
  bytesUsed += in.bytesUsed;
  in.clear();
}
// Drops all buffered deletes and resets the counters.
void clear() {
  numTerms = 0;
  bytesUsed = 0;
  terms.clear();
  queries.clear();
  docIDs.clear();
}
// Accumulates approximate RAM usage attributed to the buffered deletes.
void addBytesUsed(long b) {
  this.bytesUsed = this.bytesUsed + b;
}
// True if any delete (by term, docID, or query) is currently buffered.
boolean any() {
  return !terms.isEmpty() || !docIDs.isEmpty() || !queries.isEmpty();
}
// Remaps all buffered deletes based on a completed
// merge: the docID recorded with each buffered delete must be translated
// into the post-merge docID space via the supplied MergeDocIDRemapper.
// Each of the three delete kinds is rebuilt into a fresh collection, then
// swapped in at the end.
synchronized void remap(MergeDocIDRemapper mapper,
                        SegmentInfos infos,
                        int[][] docMaps,
                        int[] delCounts,
                        MergePolicy.OneMerge merge,
                        int mergeDocCount) {

  final Map<Term,Num> newDeleteTerms;

  // Remap delete-by-term
  if (terms.size() > 0) {
    // Preserve the original map flavor (sorted vs hashed)
    if (doTermSort) {
      newDeleteTerms = new TreeMap<Term,Num>();
    } else {
      newDeleteTerms = new HashMap<Term,Num>();
    }
    for(Entry<Term,Num> entry : terms.entrySet()) {
      Num num = entry.getValue();
      newDeleteTerms.put(entry.getKey(),
                         new Num(mapper.remap(num.getNum())));
    }
  } else
    newDeleteTerms = null;

  // Remap delete-by-docID
  final List<Integer> newDeleteDocIDs;

  if (docIDs.size() > 0) {
    newDeleteDocIDs = new ArrayList<Integer>(docIDs.size());
    for (Integer num : docIDs) {
      newDeleteDocIDs.add(Integer.valueOf(mapper.remap(num.intValue())));
    }
  } else
    newDeleteDocIDs = null;

  // Remap delete-by-query
  final HashMap<Query,Integer> newDeleteQueries;

  if (queries.size() > 0) {
    newDeleteQueries = new HashMap<Query, Integer>(queries.size());
    for(Entry<Query,Integer> entry: queries.entrySet()) {
      Integer num = entry.getValue();
      newDeleteQueries.put(entry.getKey(),
                           Integer.valueOf(mapper.remap(num.intValue())));
    }
  } else
    newDeleteQueries = null;

  // Install the rebuilt collections only where a remap actually occurred.
  if (newDeleteTerms != null)
    terms = newDeleteTerms;
  if (newDeleteDocIDs != null)
    docIDs = newDeleteDocIDs;
  if (newDeleteQueries != null)
    queries = newDeleteQueries;
}
} | zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/BufferedDeletes.java | Java | art | 5,046 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** Per-thread state for the norms-writing end of the inverting chain:
 *  hands out one {@link NormsWriterPerField} per indexed field and keeps
 *  no per-document state of its own. */
final class NormsWriterPerThread extends InvertedDocEndConsumerPerThread {
  final NormsWriter normsWriter;
  final DocumentsWriter.DocState docState;

  public NormsWriterPerThread(DocInverterPerThread docInverterPerThread, NormsWriter normsWriter) {
    this.docState = docInverterPerThread.docState;
    this.normsWriter = normsWriter;
  }

  @Override
  InvertedDocEndConsumerPerField addField(DocInverterPerField docInverterPerField, final FieldInfo fieldInfo) {
    // One norms accumulator per (thread, field) pair.
    return new NormsWriterPerField(docInverterPerField, this, fieldInfo);
  }

  @Override
  void abort() {
    // No per-thread buffers to discard.
  }

  @Override
  void startDocument() {
    // No per-document setup required.
  }

  @Override
  void finishDocument() {
    // No per-document teardown required.
  }

  boolean freeRAM() {
    // This consumer holds no freeable buffers.
    return false;
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/NormsWriterPerThread.java | Java | art | 1,535 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** Per-thread state for {@link FreqProxTermsWriter}: creates the
 *  per-field postings writers and needs no work at document
 *  boundaries. */
final class FreqProxTermsWriterPerThread extends TermsHashConsumerPerThread {
  final TermsHashPerThread termsHashPerThread;
  final DocumentsWriter.DocState docState;

  public FreqProxTermsWriterPerThread(TermsHashPerThread perThread) {
    this.termsHashPerThread = perThread;
    this.docState = perThread.docState;
  }

  @Override
  public TermsHashConsumerPerField addField(TermsHashPerField termsHashPerField, FieldInfo fieldInfo) {
    return new FreqProxTermsWriterPerField(termsHashPerField, this, fieldInfo);
  }

  @Override
  void startDocument() {
    // Nothing to reset per document.
  }

  @Override
  DocumentsWriter.DocWriter finishDocument() {
    // Postings are written at segment flush, not per document.
    return null;
  }

  @Override
  public void abort() {}
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/FreqProxTermsWriterPerThread.java | Java | art | 1,518 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** This is the base class for an in-memory posting list,
* keyed by a Token. {@link TermsHash} maintains a hash
* table holding one instance of this per unique Token.
* Consumers of TermsHash ({@link TermsHashConsumer}) must
* subclass this class with its own concrete class.
* FreqProxTermsWriter.PostingList is a private inner class used
* for the freq/prox postings, and
* TermVectorsTermsWriter.PostingList is a private inner class
* used to hold TermVectors postings. */
abstract class RawPostingList {
  // Estimated RAM per posting: object header plus the three int fields below.
  final static int BYTES_SIZE = DocumentsWriter.OBJECT_HEADER_BYTES + 3*DocumentsWriter.INT_NUM_BYTE;
  // Offset of this term's text within the shared CharBlockPool.
  int textStart;
  // Offset into the int pool — presumably the per-stream write pointers;
  // TODO confirm against TermsHashPerField.
  int intStart;
  // Offset into the byte pool where this term's byte slices begin —
  // TODO confirm against TermsHashPerField.
  int byteStart;
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/RawPostingList.java | Java | art | 1,522 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
abstract class DocFieldConsumerPerThread {
  // Called once before each document's fields are processed.
  abstract void startDocument() throws IOException;
  // Called after each document; may return a DocWriter carrying output to
  // be written later, or null when there is nothing to write.
  abstract DocumentsWriter.DocWriter finishDocument() throws IOException;
  // Creates the per-field consumer for the given field.
  abstract DocFieldConsumerPerField addField(FieldInfo fi);
  // Discards any buffered state.
  abstract void abort();
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/DocFieldConsumerPerThread.java | Java | art | 1,123 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.util.Collection;
import java.util.Map;
import java.util.HashMap;
import java.util.Iterator;
import java.util.HashSet;
import java.util.Arrays;
import java.io.IOException;
import org.apache.lucene.util.ArrayUtil;
/** This class implements {@link InvertedDocConsumer}, which
* is passed each token produced by the analyzer on each
* field. It stores these tokens in a hash table, and
* allocates separate byte streams per token. Consumers of
* this class, eg {@link FreqProxTermsWriter} and {@link
* TermVectorsTermsWriter}, write their own byte streams
* under each term.
*/
final class TermsHash extends InvertedDocConsumer {

  final TermsHashConsumer consumer;
  // Optional chained TermsHash fed the same tokens (e.g. term vectors).
  final TermsHash nextTermsHash;
  // Estimated RAM per posting, including hash-table pointer overhead.
  final int bytesPerPosting;
  // How many recycled postings to release per freeRAM() round.
  final int postingsFreeChunk;
  final DocumentsWriter docWriter;

  // Pool of recycled postings; entries [0, postingsFreeCount) are valid.
  private RawPostingList[] postingsFreeList = new RawPostingList[1];
  private int postingsFreeCount;
  // Total postings allocated so far (free + handed out).
  private int postingsAllocCount;
  // When true, report byte usage/allocation to docWriter.
  boolean trackAllocations;

  public TermsHash(final DocumentsWriter docWriter, boolean trackAllocations, final TermsHashConsumer consumer, final TermsHash nextTermsHash) {
    this.docWriter = docWriter;
    this.consumer = consumer;
    this.nextTermsHash = nextTermsHash;
    this.trackAllocations = trackAllocations;

    // Why + 4*POINTER_NUM_BYTE below?
    //   +1: Posting is referenced by postingsFreeList array
    //   +3: Posting is referenced by hash, which
    //       targets 25-50% fill factor; approximate this
    //       as 3X # pointers
    bytesPerPosting = consumer.bytesPerPosting() + 4*DocumentsWriter.POINTER_NUM_BYTE;
    postingsFreeChunk = (int) (DocumentsWriter.BYTE_BLOCK_SIZE / bytesPerPosting);
  }

  @Override
  InvertedDocConsumerPerThread addThread(DocInverterPerThread docInverterPerThread) {
    return new TermsHashPerThread(docInverterPerThread, this, nextTermsHash, null);
  }

  // Variant used by a secondary TermsHash so its per-thread state can
  // share the primary's per-thread state.
  TermsHashPerThread addThread(DocInverterPerThread docInverterPerThread, TermsHashPerThread primaryPerThread) {
    return new TermsHashPerThread(docInverterPerThread, this, nextTermsHash, primaryPerThread);
  }

  @Override
  void setFieldInfos(FieldInfos fieldInfos) {
    this.fieldInfos = fieldInfos;
    consumer.setFieldInfos(fieldInfos);
  }

  // Discards all buffered state down the consumer chain.
  @Override
  synchronized public void abort() {
    consumer.abort();
    if (nextTermsHash != null)
      nextTermsHash.abort();
  }

  // After a flush, shrink the free-postings pool back to its minimal size
  // and return the reclaimed bytes to the allocation accounting.
  void shrinkFreePostings(Map<InvertedDocConsumerPerThread,Collection<InvertedDocConsumerPerField>> threadsAndFields, SegmentWriteState state) {

    // At flush time every posting must have been recycled.
    assert postingsFreeCount == postingsAllocCount: Thread.currentThread().getName() + ": postingsFreeCount=" + postingsFreeCount + " postingsAllocCount=" + postingsAllocCount + " consumer=" + consumer;

    final int newSize = 1;
    if (newSize != postingsFreeList.length) {
      if (postingsFreeCount > newSize) {
        if (trackAllocations) {
          docWriter.bytesAllocated(-(postingsFreeCount-newSize) * bytesPerPosting);
        }
        postingsFreeCount = newSize;
        postingsAllocCount = newSize;
      }

      RawPostingList[] newArray = new RawPostingList[newSize];
      System.arraycopy(postingsFreeList, 0, newArray, 0, postingsFreeCount);
      postingsFreeList = newArray;
    }
  }

  @Override
  synchronized void closeDocStore(SegmentWriteState state) throws IOException {
    consumer.closeDocStore(state);
    if (nextTermsHash != null)
      nextTermsHash.closeDocStore(state);
  }

  // Flush: translate the incoming (thread -> fields) map into the
  // consumer's own per-thread/per-field types, flush the consumer, shrink
  // the postings pool, then recursively flush the chained TermsHash.
  @Override
  synchronized void flush(Map<InvertedDocConsumerPerThread,Collection<InvertedDocConsumerPerField>> threadsAndFields, final SegmentWriteState state) throws IOException {
    Map<TermsHashConsumerPerThread,Collection<TermsHashConsumerPerField>> childThreadsAndFields = new HashMap<TermsHashConsumerPerThread,Collection<TermsHashConsumerPerField>>();
    Map<InvertedDocConsumerPerThread,Collection<InvertedDocConsumerPerField>> nextThreadsAndFields;

    if (nextTermsHash != null)
      nextThreadsAndFields = new HashMap<InvertedDocConsumerPerThread,Collection<InvertedDocConsumerPerField>>();
    else
      nextThreadsAndFields = null;

    for (final Map.Entry<InvertedDocConsumerPerThread,Collection<InvertedDocConsumerPerField>> entry : threadsAndFields.entrySet()) {

      TermsHashPerThread perThread = (TermsHashPerThread) entry.getKey();

      Collection<InvertedDocConsumerPerField> fields = entry.getValue();

      Iterator<InvertedDocConsumerPerField> fieldsIt = fields.iterator();
      Collection<TermsHashConsumerPerField> childFields = new HashSet<TermsHashConsumerPerField>();
      Collection<InvertedDocConsumerPerField> nextChildFields;

      if (nextTermsHash != null)
        nextChildFields = new HashSet<InvertedDocConsumerPerField>();
      else
        nextChildFields = null;

      while(fieldsIt.hasNext()) {
        TermsHashPerField perField = (TermsHashPerField) fieldsIt.next();
        childFields.add(perField.consumer);
        if (nextTermsHash != null)
          nextChildFields.add(perField.nextPerField);
      }

      childThreadsAndFields.put(perThread.consumer, childFields);
      if (nextTermsHash != null)
        nextThreadsAndFields.put(perThread.nextPerThread, nextChildFields);
    }

    consumer.flush(childThreadsAndFields, state);

    shrinkFreePostings(threadsAndFields, state);

    if (nextTermsHash != null)
      nextTermsHash.flush(nextThreadsAndFields, state);
  }

  // Releases up to postingsFreeChunk recycled postings from the pool,
  // returning true if any RAM was freed here or by the chained hash.
  @Override
  synchronized public boolean freeRAM() {

    if (!trackAllocations)
      return false;

    boolean any;
    final int numToFree;
    if (postingsFreeCount >= postingsFreeChunk)
      numToFree = postingsFreeChunk;
    else
      numToFree = postingsFreeCount;
    any = numToFree > 0;
    if (any) {
      // Null out freed slots so the GC can reclaim the postings
      Arrays.fill(postingsFreeList, postingsFreeCount-numToFree, postingsFreeCount, null);
      postingsFreeCount -= numToFree;
      postingsAllocCount -= numToFree;
      docWriter.bytesAllocated(-numToFree * bytesPerPosting);
      any = true;
    }

    if (nextTermsHash != null)
      any |= nextTermsHash.freeRAM();

    return any;
  }

  // Returns postings from a finished ThreadState to the free pool.
  synchronized public void recyclePostings(final RawPostingList[] postings, final int numPostings) {

    assert postings.length >= numPostings;

    // Move all Postings from this ThreadState back to our
    // free list.  We pre-allocated this array while we were
    // creating Postings to make sure it's large enough
    assert postingsFreeCount + numPostings <= postingsFreeList.length;
    System.arraycopy(postings, 0, postingsFreeList, postingsFreeCount, numPostings);
    postingsFreeCount += numPostings;
  }

  // Fills `postings` completely: first from the recycled pool, then by
  // asking the consumer to allocate the remainder, keeping the RAM
  // accounting and the free-list capacity invariant up to date.
  synchronized public void getPostings(final RawPostingList[] postings) {

    assert docWriter.writer.testPoint("TermsHash.getPostings start");

    assert postingsFreeCount <= postingsFreeList.length;
    assert postingsFreeCount <= postingsAllocCount: "postingsFreeCount=" + postingsFreeCount + " postingsAllocCount=" + postingsAllocCount;

    final int numToCopy;
    if (postingsFreeCount < postings.length)
      numToCopy = postingsFreeCount;
    else
      numToCopy = postings.length;
    final int start = postingsFreeCount-numToCopy;
    assert start >= 0;
    assert start + numToCopy <= postingsFreeList.length;
    assert numToCopy <= postings.length;
    System.arraycopy(postingsFreeList, start,
                     postings, 0, numToCopy);

    // Directly allocate the remainder if any
    if (numToCopy != postings.length) {
      final int extra = postings.length - numToCopy;
      final int newPostingsAllocCount = postingsAllocCount + extra;

      consumer.createPostings(postings, numToCopy, extra);
      assert docWriter.writer.testPoint("TermsHash.getPostings after create");
      postingsAllocCount += extra;

      if (trackAllocations)
        docWriter.bytesAllocated(extra * bytesPerPosting);

      if (newPostingsAllocCount > postingsFreeList.length)
        // Pre-allocate the postingsFreeList so it's large
        // enough to hold all postings we've given out
        postingsFreeList = new RawPostingList[ArrayUtil.getNextSize(newPostingsAllocCount)];
    }

    postingsFreeCount -= numToCopy;

    if (trackAllocations)
      docWriter.bytesUsed(postings.length * bytesPerPosting);
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/TermsHash.java | Java | art | 9,042 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.store.BufferedIndexInput;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IndexInput;
import java.io.IOException;
import java.util.Arrays;
class TermVectorsReader implements Cloneable {

  // NOTE: if you make a new format, it must be larger than
  // the current format
  static final int FORMAT_VERSION = 2;

  // Changes to speed up bulk merging of term vectors:
  static final int FORMAT_VERSION2 = 3;

  // Changed strings to UTF8 with length-in-bytes not length-in-chars
  static final int FORMAT_UTF8_LENGTH_IN_BYTES = 4;

  // NOTE: always change this if you switch to a new format!
  static final int FORMAT_CURRENT = FORMAT_UTF8_LENGTH_IN_BYTES;

  // The size in bytes that the FORMAT_VERSION will take up at the
  // beginning of each file
  static final int FORMAT_SIZE = 4;

  // Per-field flag bits stored in the tvf file (FORMAT_VERSION and later).
  static final byte STORE_POSITIONS_WITH_TERMVECTOR = 0x1;
  static final byte STORE_OFFSET_WITH_TERMVECTOR = 0x2;

  private FieldInfos fieldInfos;

  private IndexInput tvx;  // term-vector index stream (.tvx)
  private IndexInput tvd;  // term-vector documents stream (.tvd)
  private IndexInput tvf;  // term-vector fields stream (.tvf)
  private int size;          // number of docs visible through this reader
  private int numTotalDocs;  // total docs present in the underlying files

  // The docID offset where our docs begin in the index
  // file.  This will be 0 if we have our own private file.
  private int docStoreOffset;

  // Format version read from the tvx header; 0 when the segment has no vectors.
  private final int format;
// Opens the reader with the default buffer size and no doc-store offset.
TermVectorsReader(Directory d, String segment, FieldInfos fieldInfos)
  throws CorruptIndexException, IOException {
  this(d, segment, fieldInfos, BufferedIndexInput.BUFFER_SIZE);
}

// Opens the reader with an explicit read buffer size.
TermVectorsReader(Directory d, String segment, FieldInfos fieldInfos, int readBufferSize)
  throws CorruptIndexException, IOException {
  this(d, segment, fieldInfos, readBufferSize, -1, 0);
}

// Main constructor.  docStoreOffset == -1 means the segment has its own
// private vector files; otherwise docStoreOffset/size select this
// segment's slice of shared doc-store files.
TermVectorsReader(Directory d, String segment, FieldInfos fieldInfos, int readBufferSize, int docStoreOffset, int size)
  throws CorruptIndexException, IOException {
  boolean success = false;

  try {
    if (d.fileExists(segment + "." + IndexFileNames.VECTORS_INDEX_EXTENSION)) {
      tvx = d.openInput(segment + "." + IndexFileNames.VECTORS_INDEX_EXTENSION, readBufferSize);
      format = checkValidFormat(tvx);
      tvd = d.openInput(segment + "." + IndexFileNames.VECTORS_DOCUMENTS_EXTENSION, readBufferSize);
      final int tvdFormat = checkValidFormat(tvd);
      tvf = d.openInput(segment + "." + IndexFileNames.VECTORS_FIELDS_EXTENSION, readBufferSize);
      final int tvfFormat = checkValidFormat(tvf);

      // All three files must carry the same format version
      assert format == tvdFormat;
      assert format == tvfFormat;

      if (format >= FORMAT_VERSION2) {
        // 16 bytes per doc in tvx: one long each for the tvd and tvf positions
        assert (tvx.length()-FORMAT_SIZE) % 16 == 0;
        numTotalDocs = (int) (tvx.length() >> 4);
      } else {
        // 8 bytes per doc in tvx: a single long tvd position
        assert (tvx.length()-FORMAT_SIZE) % 8 == 0;
        numTotalDocs = (int) (tvx.length() >> 3);
      }

      if (-1 == docStoreOffset) {
        this.docStoreOffset = 0;
        this.size = numTotalDocs;
        assert size == 0 || numTotalDocs == size;
      } else {
        this.docStoreOffset = docStoreOffset;
        this.size = size;
        // Verify the file is long enough to hold all of our
        // docs
        assert numTotalDocs >= size + docStoreOffset: "numTotalDocs=" + numTotalDocs + " size=" + size + " docStoreOffset=" + docStoreOffset;
      }
    } else
      // Segment has no term vectors
      format = 0;

    this.fieldInfos = fieldInfos;
    success = true;
  } finally {
    // With lock-less commits, it's entirely possible (and
    // fine) to hit a FileNotFound exception above. In
    // this case, we want to explicitly close any subset
    // of things that were opened so that we don't have to
    // wait for a GC to do so.
    if (!success) {
      close();
    }
  }
}

// Used for bulk copy when merging; exposes the raw tvd stream.
IndexInput getTvdStream() {
  return tvd;
}

// Used for bulk copy when merging; exposes the raw tvf stream.
IndexInput getTvfStream() {
  return tvf;
}
/** Positions tvx at the index entry for docNum (adjusted by
 *  docStoreOffset), skipping the leading format header.  The per-doc
 *  entry width depends on the format: 8 bytes before FORMAT_VERSION2,
 *  16 bytes from FORMAT_VERSION2 on. */
final private void seekTvx(final int docNum) throws IOException {
  final long entry = docNum + docStoreOffset;
  final long entryWidth = (format < FORMAT_VERSION2) ? 8L : 16L;
  tvx.seek(entry * entryWidth + FORMAT_SIZE);
}
// True if this format stores terms as raw UTF-8 bytes, which lets merges
// bulk-copy documents without re-encoding.
boolean canReadRawDocs() {
  return format >= FORMAT_UTF8_LENGTH_IN_BYTES;
}

/** Retrieve the length (in bytes) of the tvd and tvf
 *  entries for the next numDocs starting with
 *  startDocID.  This is used for bulk copying when
 *  merging segments, if the field numbers are
 *  congruent.  Once this returns, the tvf and tvd streams
 *  are seeked to the startDocID. */
final void rawDocs(int[] tvdLengths, int[] tvfLengths, int startDocID, int numDocs) throws IOException {

  if (tvx == null) {
    // Segment has no term vectors at all: report zero lengths
    Arrays.fill(tvdLengths, 0);
    Arrays.fill(tvfLengths, 0);
    return;
  }

  // SegmentMerger calls canReadRawDocs() first and should
  // not call us if that returns false.
  if (format < FORMAT_VERSION2)
    throw new IllegalStateException("cannot read raw docs with older term vector formats");

  seekTvx(startDocID);

  // Seek both data streams to the first doc's entries
  long tvdPosition = tvx.readLong();
  tvd.seek(tvdPosition);

  long tvfPosition = tvx.readLong();
  tvf.seek(tvfPosition);

  long lastTvdPosition = tvdPosition;
  long lastTvfPosition = tvfPosition;

  int count = 0;
  while (count < numDocs) {
    // Each doc's length is the delta to the next doc's start position;
    // for the final doc in the file, use the stream length as the end.
    final int docID = docStoreOffset + startDocID + count + 1;
    assert docID <= numTotalDocs;
    if (docID < numTotalDocs)  {
      tvdPosition = tvx.readLong();
      tvfPosition = tvx.readLong();
    } else {
      tvdPosition = tvd.length();
      tvfPosition = tvf.length();
      assert count == numDocs-1;
    }
    tvdLengths[count] = (int) (tvdPosition-lastTvdPosition);
    tvfLengths[count] = (int) (tvfPosition-lastTvfPosition);
    count++;
    lastTvdPosition = tvdPosition;
    lastTvfPosition = tvfPosition;
  }
}
// Reads and validates a file's leading format int; rejects versions newer
// than this reader understands.
private int checkValidFormat(IndexInput in) throws CorruptIndexException, IOException {
  final int fmt = in.readInt();
  if (fmt > FORMAT_CURRENT) {
    throw new CorruptIndexException("Incompatible format version: " + fmt + " expected "
                                    + FORMAT_CURRENT + " or less");
  }
  return fmt;
}
// Closes all three streams, remembering only the first failure and
// rethrowing it (with a refreshed stack trace) after attempting the rest.
void close() throws IOException {
  IOException firstExc = null;
  try { if (tvx != null) tvx.close(); } catch (IOException e) { firstExc = e; }
  try { if (tvd != null) tvd.close(); } catch (IOException e) { if (firstExc == null) firstExc = e; }
  try { if (tvf != null) tvf.close(); } catch (IOException e) { if (firstExc == null) firstExc = e; }
  if (firstExc != null)
    throw (IOException) firstExc.fillInStackTrace();
}
/**
 * @return The number of documents visible through this reader (the slice
 *         size when sharing doc-store files, otherwise the total doc count)
 */
int size() {
  return size;
}

/** Visits the term vector for one field of one document, feeding it to
 *  the given mapper.  No-op if the segment has no term vectors or this
 *  doc has none for the field. */
public void get(int docNum, String field, TermVectorMapper mapper) throws IOException {
  if (tvx != null) {
    int fieldNumber = fieldInfos.fieldNumber(field);
    //We need to account for the FORMAT_SIZE at when seeking in the tvx
    //We don't need to do this in other seeks because we already have the
    // file pointer
    //that was written in another file
    seekTvx(docNum);
    //System.out.println("TVX Pointer: " + tvx.getFilePointer());
    long tvdPosition = tvx.readLong();

    tvd.seek(tvdPosition);
    int fieldCount = tvd.readVInt();
    //System.out.println("Num Fields: " + fieldCount);
    // There are only a few fields per document. We opt for a full scan
    // rather then requiring that they be ordered. We need to read through
    // all of the fields anyway to get to the tvf pointers.
    int number = 0;
    int found = -1;
    for (int i = 0; i < fieldCount; i++) {
      // FORMAT_VERSION and later store absolute field numbers; older
      // formats store deltas from the previous field number.
      if (format >= FORMAT_VERSION)
        number = tvd.readVInt();
      else
        number += tvd.readVInt();

      if (number == fieldNumber)
        found = i;
    }

    // This field, although valid in the segment, was not found in this
    // document
    if (found != -1) {
      // Compute position in the tvf file
      long position;
      if (format >= FORMAT_VERSION2)
        position = tvx.readLong();
      else
        position = tvd.readVLong();
      // Sum per-field deltas up to the entry for the found field
      for (int i = 1; i <= found; i++)
        position += tvd.readVLong();

      mapper.setDocumentNumber(docNum);
      readTermVector(field, position, mapper);
    } else {
      //System.out.println("Fieldable not found");
    }
  } else {
    //System.out.println("No tvx file");
  }
}
/**
* Retrieve the term vector for the given document and field
* @param docNum The document number to retrieve the vector for
* @param field The field within the document to retrieve
* @return The TermFreqVector for the document and field or null if there is no termVector for this field.
* @throws IOException if there is an error reading the term vector files
*/
TermFreqVector get(int docNum, String field) throws IOException {
  // Delegate to the mapper-based variant, collecting into a
  // ParallelArrayTermVectorMapper and materializing the result.
  final ParallelArrayTermVectorMapper collector = new ParallelArrayTermVectorMapper();
  get(docNum, field, collector);
  return collector.materializeVector();
}
// Reads the String[] fields; you have to pre-seek tvd to
// the right point
final private String[] readFields(int fieldCount) throws IOException {
  int number = 0;
  String[] fields = new String[fieldCount];

  for (int i = 0; i < fieldCount; i++) {
    // FORMAT_VERSION and later store absolute field numbers; older
    // formats store deltas from the previous field number.
    if (format >= FORMAT_VERSION)
      number = tvd.readVInt();
    else
      number += tvd.readVInt();

    fields[i] = fieldInfos.fieldName(number);
  }

  return fields;
}

// Reads the long[] offsets into TVF; you have to pre-seek
// tvx/tvd to the right point
final private long[] readTvfPointers(int fieldCount) throws IOException {
  // Compute position in the tvf file: the first pointer comes from tvx
  // (FORMAT_VERSION2+) or tvd (older formats)
  long position;
  if (format >= FORMAT_VERSION2)
    position = tvx.readLong();
  else
    position = tvd.readVLong();

  long[] tvfPointers = new long[fieldCount];
  tvfPointers[0] = position;

  // Remaining pointers are stored as deltas from the previous one
  for (int i = 1; i < fieldCount; i++) {
    position += tvd.readVLong();
    tvfPointers[i] = position;
  }

  return tvfPointers;
}
/**
* Return all term vectors stored for this document or null if the could not be read in.
*
* @param docNum The document number to retrieve the vector for
* @return All term frequency vectors
* @throws IOException if there is an error reading the term vector files
*/
TermFreqVector[] get(int docNum) throws IOException {
  TermFreqVector[] result = null;
  if (tvx != null) {
    // Follow the tvx index entry for this doc into tvd
    seekTvx(docNum);
    long tvdPosition = tvx.readLong();

    tvd.seek(tvdPosition);
    int fieldCount = tvd.readVInt();

    // No fields are vectorized for this document
    if (fieldCount != 0) {
      final String[] fields = readFields(fieldCount);
      final long[] tvfPointers = readTvfPointers(fieldCount);
      result = readTermVectors(docNum, fields, tvfPointers);
    }
  } else {
    //System.out.println("No tvx file");
  }
  return result;
}

/** Visits every vectorized field of the given document with the mapper;
 *  no-op when the segment has no term vectors. */
public void get(int docNumber, TermVectorMapper mapper) throws IOException {
  // Check if no term vectors are available for this segment at all
  if (tvx != null) {
    // Follow the tvx index entry for this doc into tvd
    seekTvx(docNumber);
    long tvdPosition = tvx.readLong();

    tvd.seek(tvdPosition);
    int fieldCount = tvd.readVInt();

    // No fields are vectorized for this document
    if (fieldCount != 0) {
      final String[] fields = readFields(fieldCount);
      final long[] tvfPointers = readTvfPointers(fieldCount);
      mapper.setDocumentNumber(docNumber);
      readTermVectors(fields, tvfPointers, mapper);
    }
  } else {
    //System.out.println("No tvx file");
  }
}
// Materializes one SegmentTermVector per field, reading each field's
// postings from its tvf pointer.
private SegmentTermVector[] readTermVectors(int docNum, String fields[], long tvfPointers[])
    throws IOException {
  final SegmentTermVector[] vectors = new SegmentTermVector[fields.length];
  for (int i = 0; i < fields.length; i++) {
    final ParallelArrayTermVectorMapper collector = new ParallelArrayTermVectorMapper();
    collector.setDocumentNumber(docNum);
    readTermVector(fields[i], tvfPointers[i], collector);
    vectors[i] = (SegmentTermVector) collector.materializeVector();
  }
  return vectors;
}
// Streams every field's term vector through the caller-supplied mapper.
private void readTermVectors(String fields[], long tvfPointers[], TermVectorMapper mapper)
    throws IOException {
  for (int i = 0; i < fields.length; i++)
    readTermVector(fields[i], tvfPointers[i], mapper);
}
  /**
   * Reads one field's term vector from the tvf stream and hands each term,
   * with its frequency and any stored positions/offsets, to the mapper.
   *
   * @param field The field to read in
   * @param tvfPointer The pointer within the tvf file where we should start reading
   * @param mapper The mapper used to map the TermVector
   * @throws IOException
   */
  private void readTermVector(String field, long tvfPointer, TermVectorMapper mapper)
    throws IOException {
    // Now read the data from specified position
    //We don't need to offset by the FORMAT here since the pointer already includes the offset
    tvf.seek(tvfPointer);
    int numTerms = tvf.readVInt();
    //System.out.println("Num Terms: " + numTerms);
    // If no terms - return a constant empty termvector. However, this should never occur!
    // NOTE(review): in this case the mapper is never told setExpectations —
    // callers see no callbacks at all for the field.
    if (numTerms == 0)
      return;
    boolean storePositions;
    boolean storeOffsets;
    // Newer formats store a bit mask; older formats wrote a VInt here that is
    // read and discarded, with positions/offsets implicitly absent.
    if (format >= FORMAT_VERSION){
      byte bits = tvf.readByte();
      storePositions = (bits & STORE_POSITIONS_WITH_TERMVECTOR) != 0;
      storeOffsets = (bits & STORE_OFFSET_WITH_TERMVECTOR) != 0;
    }
    else{
      tvf.readVInt();
      storePositions = false;
      storeOffsets = false;
    }
    mapper.setExpectations(field, numTerms, storeOffsets, storePositions);
    int start = 0;
    int deltaLength = 0;
    int totalLength = 0;
    byte[] byteBuffer;
    char[] charBuffer;
    final boolean preUTF8 = format < FORMAT_UTF8_LENGTH_IN_BYTES;
    // init the buffers: pre-UTF8 formats stored terms as java chars, newer
    // formats as UTF-8 bytes, so only one buffer is ever used.
    if (preUTF8) {
      charBuffer = new char[10];
      byteBuffer = null;
    } else {
      charBuffer = null;
      byteBuffer = new byte[20];
    }
    for (int i = 0; i < numTerms; i++) {
      // Terms are prefix-compressed: 'start' units are shared with the
      // previous term (already in the buffer), 'deltaLength' units follow.
      start = tvf.readVInt();
      deltaLength = tvf.readVInt();
      totalLength = start + deltaLength;
      final String term;
      if (preUTF8) {
        // Term stored as java chars
        if (charBuffer.length < totalLength) {
          // Grow by 1.5x, keeping the shared prefix of the previous term.
          char[] newCharBuffer = new char[(int) (1.5*totalLength)];
          System.arraycopy(charBuffer, 0, newCharBuffer, 0, start);
          charBuffer = newCharBuffer;
        }
        tvf.readChars(charBuffer, start, deltaLength);
        term = new String(charBuffer, 0, totalLength);
      } else {
        // Term stored as utf8 bytes
        if (byteBuffer.length < totalLength) {
          byte[] newByteBuffer = new byte[(int) (1.5*totalLength)];
          System.arraycopy(byteBuffer, 0, newByteBuffer, 0, start);
          byteBuffer = newByteBuffer;
        }
        tvf.readBytes(byteBuffer, start, deltaLength);
        term = new String(byteBuffer, 0, totalLength, "UTF-8");
      }
      int freq = tvf.readVInt();
      int [] positions = null;
      if (storePositions) { //read in the positions
        //does the mapper even care about positions?
        if (mapper.isIgnoringPositions() == false) {
          positions = new int[freq];
          // Positions are delta-encoded relative to the previous position.
          int prevPosition = 0;
          for (int j = 0; j < freq; j++)
          {
            positions[j] = prevPosition + tvf.readVInt();
            prevPosition = positions[j];
          }
        } else {
          //we need to skip over the positions. Since these are VInts, I don't believe there is anyway to know for sure how far to skip
          //
          for (int j = 0; j < freq; j++)
          {
            tvf.readVInt();
          }
        }
      }
      TermVectorOffsetInfo[] offsets = null;
      if (storeOffsets) {
        //does the mapper even care about offsets?
        if (mapper.isIgnoringOffsets() == false) {
          offsets = new TermVectorOffsetInfo[freq];
          // Start offsets are deltas from the previous end offset; end
          // offsets are deltas from their own start.
          int prevOffset = 0;
          for (int j = 0; j < freq; j++) {
            int startOffset = prevOffset + tvf.readVInt();
            int endOffset = startOffset + tvf.readVInt();
            offsets[j] = new TermVectorOffsetInfo(startOffset, endOffset);
            prevOffset = endOffset;
          }
        } else {
          // Skip the two VInts (start delta, end delta) per occurrence.
          for (int j = 0; j < freq; j++){
            tvf.readVInt();
            tvf.readVInt();
          }
        }
      }
      mapper.map(term, freq, offsets, positions);
    }
  }
@Override
protected Object clone() throws CloneNotSupportedException {
final TermVectorsReader clone = (TermVectorsReader) super.clone();
// These are null when a TermVectorsReader was created
// on a segment that did not have term vectors saved
if (tvx != null && tvd != null && tvf != null) {
clone.tvx = (IndexInput) tvx.clone();
clone.tvd = (IndexInput) tvd.clone();
clone.tvf = (IndexInput) tvf.clone();
}
return clone;
}
}
/**
 * Collects term-vector callbacks into the parallel-array layout consumed by
 * {@link SegmentTermVector} and {@link SegmentTermPositionVector}.
 */
class ParallelArrayTermVectorMapper extends TermVectorMapper
{
  // One slot per distinct term, filled sequentially by map().
  private String[] termTexts;
  private int[] freqs;
  private int[][] termPositions;
  private TermVectorOffsetInfo[][] termOffsets;
  private int nextSlot;
  private boolean storingOffsets;
  private boolean storingPositions;
  private String field;

  @Override
  public void setExpectations(String field, int numTerms, boolean storeOffsets, boolean storePositions) {
    this.field = field;
    this.storingOffsets = storeOffsets;
    this.storingPositions = storePositions;
    termTexts = new String[numTerms];
    freqs = new int[numTerms];
    // Allocate the outer arrays only for the data we were told to expect.
    if (storePositions) {
      this.termPositions = new int[numTerms][];
    }
    if (storeOffsets) {
      this.termOffsets = new TermVectorOffsetInfo[numTerms][];
    }
  }

  @Override
  public void map(String term, int frequency, TermVectorOffsetInfo[] offsets, int[] positions) {
    termTexts[nextSlot] = term;
    freqs[nextSlot] = frequency;
    if (storingOffsets) {
      this.termOffsets[nextSlot] = offsets;
    }
    if (storingPositions) {
      this.termPositions[nextSlot] = positions;
    }
    nextSlot++;
  }

  /**
   * Construct the vector
   * @return The {@link TermFreqVector} based on the mappings, or null when
   *         no expectations were ever set.
   */
  public TermFreqVector materializeVector() {
    if (field == null || termTexts == null) {
      return null;
    }
    if (storingPositions || storingOffsets) {
      return new SegmentTermPositionVector(field, termTexts, freqs, termPositions, termOffsets);
    }
    return new SegmentTermVector(field, termTexts, freqs);
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/TermVectorsReader.java | Java | art | 19,671 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.util.List;
import java.io.IOException;
/**
 * <p>Expert: policy controlling when stale {@link IndexCommit index commits}
 * are removed from the index directory.</p>
 *
 * <p>Implement this interface and pass it to one of the {@link IndexWriter}
 * or {@link IndexReader} constructors to customize deletion of older
 * {@link IndexCommit point-in-time commits}. The default policy,
 * {@link KeepOnlyLastCommitDeletionPolicy}, removes every commit except the
 * newest as soon as a new commit is done (matching the behavior before
 * 2.2).</p>
 *
 * <p>The motivating use case is an index accessed through a filesystem such
 * as NFS that lacks the "delete on last close" semantics Lucene's
 * point-in-time search normally relies on. A custom policy such as "only
 * delete a commit once it has been stale for more than X minutes" gives open
 * readers time to refresh to the new commit before {@link IndexWriter}
 * removes the old ones, at the cost of extra storage. See <a
 * target="top"
 * href="http://issues.apache.org/jira/browse/LUCENE-710">LUCENE-710</a>
 * for details.</p>
 */
public interface IndexDeletionPolicy {

  /**
   * <p>Called exactly once when a writer is first instantiated, giving the
   * policy an early chance to remove old commit points.</p>
   *
   * <p>The writer locates every index commit present in the directory and
   * passes them here; the policy may delete any of them by calling
   * {@link IndexCommit#delete delete()} on the corresponding
   * {@link IndexCommit}.</p>
   *
   * <p><u>Note:</u> the last commit in the list is the most recent one —
   * the "front index state". Do not delete it unless you know exactly what
   * you are doing and can afford to lose the index content.</p>
   *
   * @param commits List of current
   *  {@link IndexCommit point-in-time commits},
   *  sorted by age (the 0th one is the oldest commit).
   */
  public void onInit(List<? extends IndexCommit> commits) throws IOException;

  /**
   * <p>Called every time the writer completes a commit, giving the policy a
   * chance to remove old commit points as the index evolves.</p>
   *
   * <p>The policy may delete old commit points by calling
   * {@link IndexCommit#delete delete()} on the corresponding
   * {@link IndexCommit}.</p>
   *
   * <p>This method is only invoked from {@link IndexWriter#commit} or
   * {@link IndexWriter#close}, and possibly not at all if
   * {@link IndexWriter#rollback} is called.</p>
   *
   * <p><u>Note:</u> the last commit in the list is the most recent one —
   * the "front index state". Do not delete it unless you know exactly what
   * you are doing and can afford to lose the index content.</p>
   *
   * @param commits List of {@link IndexCommit},
   *  sorted by age (the 0th one is the oldest commit).
   */
  public void onCommit(List<? extends IndexCommit> commits) throws IOException;
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/IndexDeletionPolicy.java | Java | art | 4,087 |
package org.apache.lucene.index;
import java.io.IOException;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** A {@link LogMergePolicy} that measures a segment's size by its document
 *  count, without taking deletions into account. */
public class LogDocMergePolicy extends LogMergePolicy {

  /** Default minimum segment size. @see setMinMergeDocs */
  public static final int DEFAULT_MIN_MERGE_DOCS = 1000;

  public LogDocMergePolicy(IndexWriter writer) {
    super(writer);
    // This policy never consults maxMergeSize; Long.MAX_VALUE disables it.
    maxMergeSize = Long.MAX_VALUE;
    minMergeSize = DEFAULT_MIN_MERGE_DOCS;
  }

  @Override
  protected long size(SegmentInfo info) throws IOException {
    // "Size" is simply the segment's document count.
    return sizeDocs(info);
  }

  /** Sets the minimum size (in documents) for the lowest level segments.
   *  Segments below this size are all considered to be on the same level
   *  (even if they vary drastically in size) and are merged whenever
   *  mergeFactor of them accumulate, truncating the "long tail" of small
   *  segments. Setting this too large can greatly increase merging cost
   *  during indexing if many small segments are flushed. */
  public void setMinMergeDocs(int minMergeDocs) {
    minMergeSize = minMergeDocs;
  }

  /** Get the minimum size for a segment to remain
   *  un-merged.
   *  @see #setMinMergeDocs **/
  public int getMinMergeDocs() {
    return (int) minMergeSize;
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/LogDocMergePolicy.java | Java | art | 2,311 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.Serializable;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.util.ArrayUtil;
/**
 * A Payload is metadata that can be stored together with each occurrence
 * of a term. This metadata is stored inline in the posting list of the
 * specific term.
 * <p>
 * To store payloads in the index a {@link TokenStream} has to be used that
 * produces payload data.
 * <p>
 * Use {@link TermPositions#getPayloadLength()} and {@link TermPositions#getPayload(byte[], int)}
 * to retrieve the payloads from the index.<br>
 *
 */
public class Payload implements Serializable, Cloneable {

  /** the byte array containing the payload data */
  protected byte[] data;

  /** the offset within the byte array */
  protected int offset;

  /** the length of the payload data */
  protected int length;

  /** Creates an empty payload and does not allocate a byte array. */
  public Payload() {
    // nothing to do
  }

  /**
   * Creates a new payload with the the given array as data.
   * A reference to the passed-in array is held, i. e. no
   * copy is made.
   *
   * @param data the data of this payload
   */
  public Payload(byte[] data) {
    this(data, 0, data.length);
  }

  /**
   * Creates a new payload with the the given array as data.
   * A reference to the passed-in array is held, i. e. no
   * copy is made.
   *
   * @param data the data of this payload
   * @param offset the offset in the data byte array
   * @param length the length of the data
   * @throws IllegalArgumentException if the offset/length window does not
   *         lie within the array
   */
  public Payload(byte[] data, int offset, int length) {
    // Also reject negative lengths; previously a negative length could
    // slip through and produce an invalid payload.
    if (offset < 0 || length < 0 || offset + length > data.length) {
      throw new IllegalArgumentException();
    }
    this.data = data;
    this.offset = offset;
    this.length = length;
  }

  /**
   * Sets this payloads data.
   * A reference to the passed-in array is held, i. e. no
   * copy is made.
   */
  public void setData(byte[] data) {
    setData(data, 0, data.length);
  }

  /**
   * Sets this payloads data.
   * A reference to the passed-in array is held, i. e. no
   * copy is made.
   */
  public void setData(byte[] data, int offset, int length) {
    this.data = data;
    this.offset = offset;
    this.length = length;
  }

  /**
   * Returns a reference to the underlying byte array
   * that holds this payloads data.
   */
  public byte[] getData() {
    return this.data;
  }

  /**
   * Returns the offset in the underlying byte array
   */
  public int getOffset() {
    return this.offset;
  }

  /**
   * Returns the length of the payload data.
   */
  public int length() {
    return this.length;
  }

  /**
   * Returns the byte at the given index.
   * @throws ArrayIndexOutOfBoundsException if index is outside [0, length)
   */
  public byte byteAt(int index) {
    if (0 <= index && index < this.length) {
      return this.data[this.offset + index];
    }
    throw new ArrayIndexOutOfBoundsException(index);
  }

  /**
   * Allocates a new byte array, copies the payload data into it and returns it.
   */
  public byte[] toByteArray() {
    byte[] retArray = new byte[this.length];
    System.arraycopy(this.data, this.offset, retArray, 0, this.length);
    return retArray;
  }

  /**
   * Copies the payload data to a byte array.
   *
   * @param target the target byte array
   * @param targetOffset the offset in the target byte array
   * @throws ArrayIndexOutOfBoundsException if the payload does not fit into
   *         the target array starting at targetOffset
   */
  public void copyTo(byte[] target, int targetOffset) {
    // Bug fix: the room available after targetOffset is
    // target.length - targetOffset. The previous check compared against
    // target.length + targetOffset, which almost never triggered and left
    // System.arraycopy to throw instead.
    if (this.length > target.length - targetOffset) {
      throw new ArrayIndexOutOfBoundsException();
    }
    System.arraycopy(this.data, this.offset, target, targetOffset, this.length);
  }

  /**
   * Clones this payload by creating a copy of the underlying
   * byte array.
   */
  @Override
  public Object clone() {
    try {
      // Start with a shallow copy of data
      Payload clone = (Payload) super.clone();
      // Only copy the part of data that belongs to this Payload
      if (offset == 0 && length == data.length) {
        // It is the whole thing, so just clone it.
        clone.data = (byte[]) data.clone();
      }
      else {
        // Just get the part
        clone.data = this.toByteArray();
        clone.offset = 0;
      }
      return clone;
    } catch (CloneNotSupportedException e) {
      throw new RuntimeException(e); // shouldn't happen
    }
  }

  /**
   * Two payloads are equal when their visible byte windows have the same
   * length and content; the backing arrays and offsets may differ.
   */
  @Override
  public boolean equals(Object obj) {
    if (obj == this)
      return true;
    if (obj instanceof Payload) {
      Payload other = (Payload) obj;
      if (length == other.length) {
        for(int i=0;i<length;i++)
          if (data[offset+i] != other.data[other.offset+i])
            return false;
        return true;
      } else
        return false;
    } else
      return false;
  }

  @Override
  public int hashCode() {
    // Hash only the visible window so equal payloads hash equally.
    return ArrayUtil.hashCode(data, offset, offset+length);
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/Payload.java | Java | art | 5,632 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.lucene.index;
import org.apache.lucene.util.BitVector;
/** TermDocs over every document in a segment, honoring deletions. */
class AllTermDocs extends AbstractAllTermDocs {

  protected BitVector deletedDocs;

  protected AllTermDocs(SegmentReader parent) {
    super(parent.maxDoc());
    // Snapshot the parent's deletion bits under its monitor so we capture
    // a consistent reference.
    synchronized (parent) {
      this.deletedDocs = parent.deletedDocs;
    }
  }

  public boolean isDeleted(int doc) {
    final BitVector deleted = deletedDocs;
    // No BitVector means the segment has no deletions at all.
    return deleted == null ? false : deleted.get(doc);
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/AllTermDocs.java | Java | art | 1,225 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.util.UnicodeUtil;
/**
 * Per-thread consumer in the TermsHash chain that buffers the current
 * document's term vectors into a TermVectorsTermsWriter.PerDoc.
 */
final class TermVectorsTermsWriterPerThread extends TermsHashConsumerPerThread {
  final TermVectorsTermsWriter termsWriter;
  final TermsHashPerThread termsHashPerThread;
  final DocumentsWriter.DocState docState;
  // State for the document currently being processed; handed off (and
  // nulled) by finishDocument().
  TermVectorsTermsWriter.PerDoc doc;
  public TermVectorsTermsWriterPerThread(TermsHashPerThread termsHashPerThread, TermVectorsTermsWriter termsWriter) {
    this.termsWriter = termsWriter;
    this.termsHashPerThread = termsHashPerThread;
    docState = termsHashPerThread.docState;
  }
  // Used by perField when serializing the term vectors
  final ByteSliceReader vectorSliceReader = new ByteSliceReader();
  final UnicodeUtil.UTF8Result utf8Results[] = {new UnicodeUtil.UTF8Result(),
                                                new UnicodeUtil.UTF8Result()};
  @Override
  public void startDocument() {
    assert clearLastVectorFieldName();
    if (doc != null) {
      doc.reset();
      doc.docID = docState.docID;
    }
  }
  @Override
  public DocumentsWriter.DocWriter finishDocument() {
    // Returns the buffered PerDoc and transfers ownership to the caller;
    // the finally block clears our reference before the method returns.
    try {
      return doc;
    } finally {
      doc = null;
    }
  }
  @Override
  public TermsHashConsumerPerField addField(TermsHashPerField termsHashPerField, FieldInfo fieldInfo) {
    return new TermVectorsTermsWriterPerField(termsHashPerField, this, fieldInfo);
  }
  @Override
  public void abort() {
    // Discard any partially-buffered document state.
    if (doc != null) {
      doc.abort();
      doc = null;
    }
  }
  // Called only by assert
  final boolean clearLastVectorFieldName() {
    lastVectorFieldName = null;
    return true;
  }
  // Called only by assert
  String lastVectorFieldName;
  // Assert-only check that vectorized fields arrive in increasing
  // field-name order; always records the current name via the finally.
  final boolean vectorFieldsInOrder(FieldInfo fi) {
    try {
      if (lastVectorFieldName != null)
        return lastVectorFieldName.compareTo(fi.name) < 0;
      else
        return true;
    } finally {
      lastVectorFieldName = fi.name;
    }
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/TermVectorsTermsWriterPerThread.java | Java | art | 2,734 |
package org.apache.lucene.index;
import java.io.Serializable;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* The TermVectorOffsetInfo class holds information pertaining to a Term in a {@link org.apache.lucene.index.TermPositionVector}'s
* offset information. This offset information is the character offset as set during the Analysis phase (and thus may not be the actual offset in the
* original content).
*/
public class TermVectorOffsetInfo implements Serializable {
/**
* Convenience declaration when creating a {@link org.apache.lucene.index.TermPositionVector} that stores only position information.
*/
public transient static final TermVectorOffsetInfo[] EMPTY_OFFSET_INFO = new TermVectorOffsetInfo[0];
private int startOffset;
private int endOffset;
public TermVectorOffsetInfo() {
}
public TermVectorOffsetInfo(int startOffset, int endOffset) {
this.endOffset = endOffset;
this.startOffset = startOffset;
}
/**
* The accessor for the ending offset for the term
* @return The offset
*/
public int getEndOffset() {
return endOffset;
}
public void setEndOffset(int endOffset) {
this.endOffset = endOffset;
}
/**
* The accessor for the starting offset of the term.
*
* @return The offset
*/
public int getStartOffset() {
return startOffset;
}
public void setStartOffset(int startOffset) {
this.startOffset = startOffset;
}
/**
* Two TermVectorOffsetInfos are equals if both the start and end offsets are the same
* @param o The comparison Object
* @return true if both {@link #getStartOffset()} and {@link #getEndOffset()} are the same for both objects.
*/
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (!(o instanceof TermVectorOffsetInfo)) return false;
final TermVectorOffsetInfo termVectorOffsetInfo = (TermVectorOffsetInfo) o;
if (endOffset != termVectorOffsetInfo.endOffset) return false;
if (startOffset != termVectorOffsetInfo.startOffset) return false;
return true;
}
@Override
public int hashCode() {
int result;
result = startOffset;
result = 29 * result + endOffset;
return result;
}
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/TermVectorOffsetInfo.java | Java | art | 2,967 |
package org.apache.lucene.index;
/**
* Copyright 2004 The Apache Software Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
import java.io.IOException;
import java.util.List;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Fieldable;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMOutputStream;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.IndexInput;
/**
 * Writes stored fields: per-document data to the .fdt stream and, for each
 * document, a pointer into it to the .fdx index stream.
 */
final class FieldsWriter
{
  static final byte FIELD_IS_TOKENIZED = 0x1;
  static final byte FIELD_IS_BINARY = 0x2;
  /** @deprecated Kept for backwards-compatibility with <3.0 indexes; will be removed in 4.0 */
  static final byte FIELD_IS_COMPRESSED = 0x4;

  // Original format
  static final int FORMAT = 0;

  // Changed strings to UTF8
  static final int FORMAT_VERSION_UTF8_LENGTH_IN_BYTES = 1;

  // Lucene 3.0: Removal of compressed fields
  static final int FORMAT_LUCENE_3_0_NO_COMPRESSED_FIELDS = 2;

  // NOTE: if you introduce a new format, make it 1 higher
  // than the current one, and always change this if you
  // switch to a new format!
  static final int FORMAT_CURRENT = FORMAT_LUCENE_3_0_NO_COMPRESSED_FIELDS;

  private FieldInfos fieldInfos;
  private IndexOutput fieldsStream;  // .fdt: stored field data
  private IndexOutput indexStream;   // .fdx: per-doc pointer into .fdt
  private boolean doClose;           // true when this writer owns the streams

  FieldsWriter(Directory d, String segment, FieldInfos fn) throws IOException {
    fieldInfos = fn;

    // Bug fix: this constructor opens the streams itself, so it owns them.
    // Ownership must be declared before opening anything: close() is a
    // no-op unless doClose is true, and previously doClose was only set at
    // the very end of the constructor, so the cleanup close() calls in the
    // failure paths below silently leaked an already-opened output.
    doClose = true;

    boolean success = false;
    final String fieldsName = segment + "." + IndexFileNames.FIELDS_EXTENSION;
    try {
      fieldsStream = d.createOutput(fieldsName);
      fieldsStream.writeInt(FORMAT_CURRENT);
      success = true;
    } finally {
      if (!success) {
        try {
          close();
        } catch (Throwable t) {
          // Suppress so we keep throwing the original exception
        }
        try {
          d.deleteFile(fieldsName);
        } catch (Throwable t) {
          // Suppress so we keep throwing the original exception
        }
      }
    }

    success = false;
    final String indexName = segment + "." + IndexFileNames.FIELDS_INDEX_EXTENSION;
    try {
      indexStream = d.createOutput(indexName);
      indexStream.writeInt(FORMAT_CURRENT);
      success = true;
    } finally {
      if (!success) {
        try {
          close();
        } catch (Throwable t) {
          // Suppress so we keep throwing the original exception
        }
        try {
          d.deleteFile(fieldsName);
        } catch (Throwable t) {
          // Suppress so we keep throwing the original exception
        }
        try {
          d.deleteFile(indexName);
        } catch (Throwable t) {
          // Suppress so we keep throwing the original exception
        }
      }
    }
  }

  /** Wraps externally-owned streams; close() will not close them. */
  FieldsWriter(IndexOutput fdx, IndexOutput fdt, FieldInfos fn) {
    fieldInfos = fn;
    fieldsStream = fdt;
    indexStream = fdx;
    doClose = false;
  }

  void setFieldsStream(IndexOutput stream) {
    this.fieldsStream = stream;
  }

  // Writes the contents of buffer into the fields stream
  // and adds a new entry for this document into the index
  // stream. This assumes the buffer was already written
  // in the correct fields format.
  void flushDocument(int numStoredFields, RAMOutputStream buffer) throws IOException {
    indexStream.writeLong(fieldsStream.getFilePointer());
    fieldsStream.writeVInt(numStoredFields);
    buffer.writeTo(fieldsStream);
  }

  /** Records a document with zero stored fields. */
  void skipDocument() throws IOException {
    indexStream.writeLong(fieldsStream.getFilePointer());
    fieldsStream.writeVInt(0);
  }

  void flush() throws IOException {
    indexStream.flush();
    fieldsStream.flush();
  }

  /**
   * Closes both streams when this writer owns them. Always nulls the
   * references; if both closes fail, the first exception is thrown and the
   * second suppressed.
   */
  final void close() throws IOException {
    if (doClose) {
      try {
        if (fieldsStream != null) {
          try {
            fieldsStream.close();
          } finally {
            fieldsStream = null;
          }
        }
      } catch (IOException ioe) {
        try {
          if (indexStream != null) {
            try {
              indexStream.close();
            } finally {
              indexStream = null;
            }
          }
        } catch (IOException ioe2) {
          // Ignore so we throw only first IOException hit
        }
        throw ioe;
      } finally {
        if (indexStream != null) {
          try {
            indexStream.close();
          } finally {
            indexStream = null;
          }
        }
      }
    }
  }

  /** Writes one stored field: field number, flag bits, then the value. */
  final void writeField(FieldInfo fi, Fieldable field) throws IOException {
    fieldsStream.writeVInt(fi.number);
    byte bits = 0;
    if (field.isTokenized())
      bits |= FieldsWriter.FIELD_IS_TOKENIZED;
    if (field.isBinary())
      bits |= FieldsWriter.FIELD_IS_BINARY;
    fieldsStream.writeByte(bits);
    if (field.isBinary()) {
      final byte[] data = field.getBinaryValue();
      final int len = field.getBinaryLength();
      final int offset = field.getBinaryOffset();
      fieldsStream.writeVInt(len);
      fieldsStream.writeBytes(data, offset, len);
    }
    else {
      fieldsStream.writeString(field.stringValue());
    }
  }

  /** Bulk write a contiguous series of documents. The
   * lengths array is the length (in bytes) of each raw
   * document. The stream IndexInput is the
   * fieldsStream from which we should bulk-copy all
   * bytes. */
  final void addRawDocuments(IndexInput stream, int[] lengths, int numDocs) throws IOException {
    long position = fieldsStream.getFilePointer();
    long start = position;
    for(int i=0;i<numDocs;i++) {
      indexStream.writeLong(position);
      position += lengths[i];
    }
    fieldsStream.copyBytes(stream, position-start);
    assert fieldsStream.getFilePointer() == position;
  }

  /** Writes all stored fields of one document. */
  final void addDocument(Document doc) throws IOException {
    indexStream.writeLong(fieldsStream.getFilePointer());

    // First pass: count stored fields so the count can precede the data.
    int storedCount = 0;
    List<Fieldable> fields = doc.getFields();
    for (Fieldable field : fields) {
      if (field.isStored())
        storedCount++;
    }
    fieldsStream.writeVInt(storedCount);

    for (Fieldable field : fields) {
      if (field.isStored())
        writeField(fieldInfos.fieldInfo(field.name()), field);
    }
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/FieldsWriter.java | Java | art | 7,207 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.io.Reader;
import org.apache.lucene.document.Fieldable;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
/**
* Holds state for inverting all occurrences of a single
* field in the document. This class doesn't do anything
* itself; instead, it forwards the tokens produced by
* analysis to its own consumer
* (InvertedDocConsumerPerField). It also interacts with an
* endConsumer (InvertedDocEndConsumerPerField).
*/
final class DocInverterPerField extends DocFieldConsumerPerField {
final private DocInverterPerThread perThread;
final private FieldInfo fieldInfo;
final InvertedDocConsumerPerField consumer;
final InvertedDocEndConsumerPerField endConsumer;
final DocumentsWriter.DocState docState;
final FieldInvertState fieldState;
  // Wires this field into the inversion chain: registers with both the
  // main consumer and the end consumer so each can attach per-field state.
  public DocInverterPerField(DocInverterPerThread perThread, FieldInfo fieldInfo) {
    this.perThread = perThread;
    this.fieldInfo = fieldInfo;
    // Shared per-thread state, borrowed rather than owned.
    docState = perThread.docState;
    fieldState = perThread.fieldState;
    this.consumer = perThread.consumer.addField(this, fieldInfo);
    this.endConsumer = perThread.endConsumer.addField(this, fieldInfo);
  }
  @Override
  void abort() {
    // Forward the abort to both downstream consumers.
    // NOTE(review): if consumer.abort() throws, endConsumer.abort() is
    // never called — confirm whether that is intended.
    consumer.abort();
    endConsumer.abort();
  }
@Override
public void processFields(final Fieldable[] fields,
final int count) throws IOException {
fieldState.reset(docState.doc.getBoost());
final int maxFieldLength = docState.maxFieldLength;
final boolean doInvert = consumer.start(fields, count);
for(int i=0;i<count;i++) {
final Fieldable field = fields[i];
// TODO FI: this should be "genericized" to querying
// consumer if it wants to see this particular field
// tokenized.
if (field.isIndexed() && doInvert) {
final boolean anyToken;
if (fieldState.length > 0)
fieldState.position += docState.analyzer.getPositionIncrementGap(fieldInfo.name);
if (!field.isTokenized()) { // un-tokenized field
String stringValue = field.stringValue();
final int valueLength = stringValue.length();
perThread.singleToken.reinit(stringValue, 0, valueLength);
fieldState.attributeSource = perThread.singleToken;
consumer.start(field);
boolean success = false;
try {
consumer.add();
success = true;
} finally {
if (!success)
docState.docWriter.setAborting();
}
fieldState.offset += valueLength;
fieldState.length++;
fieldState.position++;
anyToken = valueLength > 0;
} else { // tokenized field
final TokenStream stream;
final TokenStream streamValue = field.tokenStreamValue();
if (streamValue != null)
stream = streamValue;
else {
// the field does not have a TokenStream,
// so we have to obtain one from the analyzer
final Reader reader; // find or make Reader
final Reader readerValue = field.readerValue();
if (readerValue != null)
reader = readerValue;
else {
String stringValue = field.stringValue();
if (stringValue == null)
throw new IllegalArgumentException("field must have either TokenStream, String or Reader value");
perThread.stringReader.init(stringValue);
reader = perThread.stringReader;
}
// Tokenize field and add to postingTable
stream = docState.analyzer.reusableTokenStream(fieldInfo.name, reader);
}
// reset the TokenStream to the first token
stream.reset();
final int startLength = fieldState.length;
try {
int offsetEnd = fieldState.offset-1;
boolean hasMoreTokens = stream.incrementToken();
fieldState.attributeSource = stream;
OffsetAttribute offsetAttribute = fieldState.attributeSource.addAttribute(OffsetAttribute.class);
PositionIncrementAttribute posIncrAttribute = fieldState.attributeSource.addAttribute(PositionIncrementAttribute.class);
consumer.start(field);
for(;;) {
// If we hit an exception in stream.next below
// (which is fairly common, eg if analyzer
// chokes on a given document), then it's
// non-aborting and (above) this one document
// will be marked as deleted, but still
// consume a docID
if (!hasMoreTokens) break;
final int posIncr = posIncrAttribute.getPositionIncrement();
fieldState.position += posIncr;
if (fieldState.position > 0) {
fieldState.position--;
}
if (posIncr == 0)
fieldState.numOverlap++;
boolean success = false;
try {
// If we hit an exception in here, we abort
// all buffered documents since the last
// flush, on the likelihood that the
// internal state of the consumer is now
// corrupt and should not be flushed to a
// new segment:
consumer.add();
success = true;
} finally {
if (!success)
docState.docWriter.setAborting();
}
fieldState.position++;
offsetEnd = fieldState.offset + offsetAttribute.endOffset();
if (++fieldState.length >= maxFieldLength) {
if (docState.infoStream != null)
docState.infoStream.println("maxFieldLength " +maxFieldLength+ " reached for field " + fieldInfo.name + ", ignoring following tokens");
break;
}
hasMoreTokens = stream.incrementToken();
}
// trigger streams to perform end-of-stream operations
stream.end();
fieldState.offset += offsetAttribute.endOffset();
anyToken = fieldState.length > startLength;
} finally {
stream.close();
}
}
if (anyToken)
fieldState.offset += docState.analyzer.getOffsetGap(field);
fieldState.boost *= field.getBoost();
}
// LUCENE-2387: don't hang onto the field, so GC can
// reclaim
fields[i] = null;
}
consumer.finish();
endConsumer.finish();
}
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/DocInverterPerField.java | Java | art | 7,722 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
final class TermsHashPerThread extends InvertedDocConsumerPerThread {

  final TermsHash termsHash;
  final TermsHashConsumerPerThread consumer;
  final TermsHashPerThread nextPerThread;

  final CharBlockPool charPool;
  final IntBlockPool intPool;
  final ByteBlockPool bytePool;
  final boolean primary;
  final DocumentsWriter.DocState docState;

  final RawPostingList freePostings[] = new RawPostingList[256];
  int freePostingsCount;

  public TermsHashPerThread(DocInverterPerThread docInverterPerThread, final TermsHash termsHash, final TermsHash nextTermsHash, final TermsHashPerThread primaryPerThread) {
    docState = docInverterPerThread.docState;

    this.termsHash = termsHash;
    this.consumer = termsHash.consumer.addThread(this);

    // A non-null nextTermsHash marks this as the primary per-thread hash;
    // secondary (chained) instances borrow the primary's char pool.
    final boolean isPrimary = nextTermsHash != null;
    primary = isPrimary;
    charPool = isPrimary ? new CharBlockPool(termsHash.docWriter) : primaryPerThread.charPool;

    intPool = new IntBlockPool(termsHash.docWriter, termsHash.trackAllocations);
    bytePool = new ByteBlockPool(termsHash.docWriter.byteBlockAllocator, termsHash.trackAllocations);

    nextPerThread = isPrimary ? nextTermsHash.addThread(docInverterPerThread, this) : null;
  }

  @Override
  InvertedDocConsumerPerField addField(DocInverterPerField docInverterPerField, final FieldInfo fieldInfo) {
    return new TermsHashPerField(docInverterPerField, this, nextPerThread, fieldInfo);
  }

  @Override
  synchronized public void abort() {
    reset(true);
    consumer.abort();
    if (nextPerThread != null) {
      nextPerThread.abort();
    }
  }

  /** Called by a perField when it runs out of postings; refills the
   *  free-postings buffer from the shared TermsHash. */
  void morePostings() throws IOException {
    assert freePostingsCount == 0;
    termsHash.getPostings(freePostings);
    freePostingsCount = freePostings.length;
    assert noNullPostings(freePostings, freePostingsCount, "consumer=" + consumer);
  }

  /** Always returns true; the per-element asserts do the actual checking,
   *  so this is free when assertions are disabled. */
  private static boolean noNullPostings(RawPostingList[] postings, int count, String details) {
    for (int idx = 0; idx < count; idx++) {
      assert postings[idx] != null: "postings[" + idx + "] of " + count + " is null: " + details;
    }
    return true;
  }

  @Override
  public void startDocument() throws IOException {
    consumer.startDocument();
    if (nextPerThread != null) {
      nextPerThread.consumer.startDocument();
    }
  }

  @Override
  public DocumentsWriter.DocWriter finishDocument() throws IOException {
    final DocumentsWriter.DocWriter head = consumer.finishDocument();
    final DocumentsWriter.DocWriter tail =
        nextPerThread != null ? nextPerThread.consumer.finishDocument() : null;

    // Chain the secondary consumer's writer behind the primary's, if any.
    if (head == null) {
      return tail;
    }
    head.setNext(tail);
    return head;
  }

  /** Clears all pooled state; optionally returns buffered free postings
   *  to the shared TermsHash for recycling. */
  void reset(boolean recyclePostings) {
    intPool.reset();
    bytePool.reset();

    // Only the owner of the shared char pool may reset it.
    if (primary) {
      charPool.reset();
    }

    if (recyclePostings) {
      termsHash.recyclePostings(freePostings, freePostingsCount);
      freePostingsCount = 0;
    }
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/TermsHashPerThread.java | Java | art | 3,959 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import org.apache.lucene.document.Fieldable;
/** Per-field hook in the doc-field-processing chain: receives every
 *  occurrence of one field within a document. */
abstract class DocFieldConsumerPerField {
  /** Processes all occurrences of a single field */
  abstract void processFields(Fieldable[] fields, int count) throws IOException;
  /** Discards any buffered state for this field after an aborting
   *  exception (see DocInverterPerField.abort for a concrete chain). */
  abstract void abort();
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/DocFieldConsumerPerField.java | Java | art | 1,115 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** Extends <code>TermFreqVector</code> to provide additional information about
* positions in which each of the terms is found. A TermPositionVector not necessarily
* contains both positions and offsets, but at least one of these arrays exists.
*/
public interface TermPositionVector extends TermFreqVector {

    /** Returns an array of positions in which the term is found.
     *  Terms are identified by the index at which its number appears in the
     *  term String array obtained from the <code>indexOf</code> method.
     *  May return null if positions have not been stored.
     *
     *  @param index index of the term in the term String array
     *  @return positions for that term, or null if not stored
     */
    public int[] getTermPositions(int index);

    /**
     * Returns an array of TermVectorOffsetInfo in which the term is found.
     * May return null if offsets have not been stored.
     *
     * @see org.apache.lucene.analysis.Token
     *
     * @param index The position in the array to get the offsets from
     * @return An array of TermVectorOffsetInfo objects or the empty list
     */
    public TermVectorOffsetInfo [] getOffsets(int index);
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/TermPositionVector.java | Java | art | 1,900
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** A TermInfo is the record of information stored for a term: its document
 *  frequency plus file pointers into the freq/prox postings streams. */
final class TermInfo {
  /** The number of documents which contain the term. */
  int docFreq = 0;

  long freqPointer = 0;
  long proxPointer = 0;
  int skipOffset;

  TermInfo() {}

  TermInfo(int df, long fp, long pp) {
    // skipOffset intentionally left at its default of 0
    set(df, fp, pp, 0);
  }

  /** Copy constructor: duplicates every field, including skipOffset. */
  TermInfo(TermInfo ti) {
    set(ti);
  }

  final void set(int docFreq,
                 long freqPointer, long proxPointer, int skipOffset) {
    this.docFreq = docFreq;
    this.freqPointer = freqPointer;
    this.proxPointer = proxPointer;
    this.skipOffset = skipOffset;
  }

  /** Copies all fields from another TermInfo into this one. */
  final void set(TermInfo ti) {
    set(ti.docFreq, ti.freqPointer, ti.proxPointer, ti.skipOffset);
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/TermInfo.java | Java | art | 1,755 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.document.AbstractField; // for javadocs
import org.apache.lucene.document.Document;
import java.text.NumberFormat;
import java.io.PrintStream;
import java.io.IOException;
import java.io.File;
import java.util.Collection;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
/**
* Basic tool and API to check the health of an index and
* write a new segments file that removes reference to
* problematic segments.
*
* <p>As this tool checks every byte in the index, on a large
* index it can take quite a long time to run.
*
* <p><b>WARNING</b>: this tool and API is new and
* experimental and is subject to suddenly change in the
* next release. Please make a complete backup of your
* index before using this to fix your index!
*/
public class CheckIndex {
private PrintStream infoStream;
private Directory dir;
/**
* Returned from {@link #checkIndex()} detailing the health and status of the index.
*
* <p><b>WARNING</b>: this API is new and experimental and is
* subject to suddenly change in the next release.
**/
public static class Status {

    /** True if no problems were found with the index. */
    public boolean clean;

    /** True if we were unable to locate and load the segments_N file. */
    public boolean missingSegments;

    /** True if we were unable to open the segments_N file. */
    public boolean cantOpenSegments;

    /** True if we were unable to read the version number from segments_N file. */
    public boolean missingSegmentVersion;

    /** Name of latest segments_N file in the index. */
    public String segmentsFileName;

    /** Number of segments in the index. */
    public int numSegments;

    /** String description of the version of the index. */
    public String segmentFormat;

    /** Empty unless you passed specific segments list to check as optional 3rd argument.
     *  @see CheckIndex#checkIndex(List) */
    public List<String> segmentsChecked = new ArrayList<String>();

    /** True if the index was created with a newer version of Lucene than the CheckIndex tool. */
    public boolean toolOutOfDate;

    /** List of {@link SegmentInfoStatus} instances, detailing status of each segment. */
    public List<SegmentInfoStatus> segmentInfos = new ArrayList<SegmentInfoStatus>();

    /** Directory index is in. */
    public Directory dir;

    /**
     * SegmentInfos instance containing only segments that
     * had no problems (this is used with the {@link CheckIndex#fixIndex}
     * method to repair the index).
     */
    SegmentInfos newSegments;

    /** How many documents will be lost to bad segments. */
    public int totLoseDocCount;

    /** How many bad segments were found. */
    public int numBadSegments;

    /** True if we checked only specific segments ({@link #checkIndex(List)}
     *  was called with a non-null argument). */
    public boolean partial;

    /** Holds the userData of the last commit in the index */
    public Map<String, String> userData;

    /** Holds the status of each segment in the index.
     *  See {@link #segmentInfos}.
     *
     * <p><b>WARNING</b>: this API is new and experimental and is
     * subject to suddenly change in the next release.
     */
    public static class SegmentInfoStatus {
      /** Name of the segment. */
      public String name;

      /** Document count (does not take deletions into account). */
      public int docCount;

      /** True if segment is compound file format. */
      public boolean compound;

      /** Number of files referenced by this segment. */
      public int numFiles;

      /** Net size (MB) of the files referenced by this
       *  segment. */
      public double sizeMB;

      /** Doc store offset, if this segment shares the doc
       *  store files (stored fields and term vectors) with
       *  other segments.  This is -1 if it does not share. */
      public int docStoreOffset = -1;

      /** String of the shared doc store segment, or null if
       *  this segment does not share the doc store files. */
      public String docStoreSegment;

      /** True if the shared doc store files are compound file
       *  format. */
      public boolean docStoreCompoundFile;

      /** True if this segment has pending deletions. */
      public boolean hasDeletions;

      /** Name of the current deletions file name. */
      public String deletionsFileName;

      /** Number of deleted documents. */
      public int numDeleted;

      /** True if we were able to open a SegmentReader on this
       *  segment. */
      public boolean openReaderPassed;

      /** Number of fields in this segment. */
      int numFields;

      /** True if at least one of the fields in this segment
       *  does not omitTermFreqAndPositions.
       *  @see AbstractField#setOmitTermFreqAndPositions */
      public boolean hasProx;

      /** Map that includes certain
       *  debugging details that IndexWriter records into
       *  each segment it creates */
      public Map<String,String> diagnostics;

      /** Status for testing of field norms (null if field norms could not be tested). */
      public FieldNormStatus fieldNormStatus;

      /** Status for testing of indexed terms (null if indexed terms could not be tested). */
      public TermIndexStatus termIndexStatus;

      /** Status for testing of stored fields (null if stored fields could not be tested). */
      public StoredFieldStatus storedFieldStatus;

      /** Status for testing of term vectors (null if term vectors could not be tested). */
      public TermVectorStatus termVectorStatus;
    }

    /**
     * Status from testing field norms.
     */
    public static final class FieldNormStatus {
      /** Number of fields successfully tested */
      public long totFields = 0L;

      /** Exception thrown during field norm test (null on success) */
      public Throwable error = null;
    }

    /**
     * Status from testing term index.
     */
    public static final class TermIndexStatus {
      /** Total term count */
      public long termCount = 0L;

      /** Total frequency across all terms. */
      public long totFreq = 0L;

      /** Total number of positions. */
      public long totPos = 0L;

      /** Exception thrown during term index test (null on success) */
      public Throwable error = null;
    }

    /**
     * Status from testing stored fields.
     */
    public static final class StoredFieldStatus {

      /** Number of documents tested. */
      public int docCount = 0;

      /** Total number of stored fields tested. */
      public long totFields = 0;

      /** Exception thrown during stored fields test (null on success) */
      public Throwable error = null;
    }

    /**
     * Status from testing term vectors.
     */
    public static final class TermVectorStatus {

      /** Number of documents tested. */
      public int docCount = 0;

      /** Total number of term vectors tested. */
      public long totVectors = 0;

      /** Exception thrown during term vector test (null on success) */
      public Throwable error = null;
    }
  }
/** Create a new CheckIndex on the directory. */
public CheckIndex(Directory dir) {
    this.dir = dir;
    infoStream = null; // silent until setInfoStream is called
  }
/** Set infoStream where messages should go.  If null, no
   *  messages are printed.
   *  @param out destination for progress/diagnostic output */
  public void setInfoStream(PrintStream out) {
    infoStream = out;
  }
// Prints to infoStream if one was set; otherwise the message is dropped.
  private void msg(String msg) {
    if (infoStream != null)
      infoStream.println(msg);
  }
/** SegmentTermDocs subclass that counts, per term, how many docs were
   *  skipped via the skippingDoc() callback (used to reconcile docFreq
   *  against deleted documents — see testTermIndex). */
  private static class MySegmentTermDocs extends SegmentTermDocs {

    int delCount;

    MySegmentTermDocs(SegmentReader p) {
      super(p);
    }

    @Override
    public void seek(Term term) throws IOException {
      super.seek(term);
      // restart the per-term count
      delCount = 0;
    }

    @Override
    protected void skippingDoc() throws IOException {
      delCount++;
    }
  }
/** Returns a {@link Status} instance detailing
* the state of the index.
*
* <p>As this method checks every byte in the index, on a large
* index it can take quite a long time to run.
*
* <p><b>WARNING</b>: make sure
* you only call this when the index is not opened by any
* writer. */
public Status checkIndex() throws IOException {
    // Delegates to the list-based variant; null means "check all segments".
    return checkIndex(null);
  }
/** Returns a {@link Status} instance detailing
* the state of the index.
*
* @param onlySegments list of specific segment names to check
*
* <p>As this method checks every byte in the specified
* segments, on a large index it can take quite a long
* time to run.
*
* <p><b>WARNING</b>: make sure
* you only call this when the index is not opened by any
* writer. */
public Status checkIndex(List<String> onlySegments) throws IOException {
    NumberFormat nf = NumberFormat.getInstance();
    SegmentInfos sis = new SegmentInfos();
    Status result = new Status();
    result.dir = dir;
    // Phase 1: locate and load the latest segments_N file.
    try {
      sis.read(dir);
    } catch (Throwable t) {
      msg("ERROR: could not read any segments file in directory");
      result.missingSegments = true;
      if (infoStream != null)
        t.printStackTrace(infoStream);
      return result;
    }

    final int numSegments = sis.size();
    final String segmentsFileName = sis.getCurrentSegmentFileName();
    IndexInput input = null;
    // Phase 2: open segments_N directly to sniff its on-disk format version.
    try {
      input = dir.openInput(segmentsFileName);
    } catch (Throwable t) {
      msg("ERROR: could not open segments file in directory");
      if (infoStream != null)
        t.printStackTrace(infoStream);
      result.cantOpenSegments = true;
      return result;
    }
    int format = 0;
    try {
      format = input.readInt();
    } catch (Throwable t) {
      msg("ERROR: could not read segment file version in directory");
      if (infoStream != null)
        t.printStackTrace(infoStream);
      result.missingSegmentVersion = true;
      return result;
    } finally {
      if (input != null)
        input.close();
    }

    String sFormat = "";
    boolean skip = false;

    // Map the format constant to a human-readable label.
    // BUGFIX: the FORMAT_LOCKLESS test was a plain "if", so a pre-2.1
    // index (format == SegmentInfos.FORMAT) had its label immediately
    // clobbered by the trailing else ("[Lucene 1.3 or prior]"); the test
    // is now chained with "else if" so the first match wins.
    if (format == SegmentInfos.FORMAT)
      sFormat = "FORMAT [Lucene Pre-2.1]";
    else if (format == SegmentInfos.FORMAT_LOCKLESS)
      sFormat = "FORMAT_LOCKLESS [Lucene 2.1]";
    else if (format == SegmentInfos.FORMAT_SINGLE_NORM_FILE)
      sFormat = "FORMAT_SINGLE_NORM_FILE [Lucene 2.2]";
    else if (format == SegmentInfos.FORMAT_SHARED_DOC_STORE)
      sFormat = "FORMAT_SHARED_DOC_STORE [Lucene 2.3]";
    else {
      if (format == SegmentInfos.FORMAT_CHECKSUM)
        sFormat = "FORMAT_CHECKSUM [Lucene 2.4]";
      else if (format == SegmentInfos.FORMAT_DEL_COUNT)
        sFormat = "FORMAT_DEL_COUNT [Lucene 2.4]";
      else if (format == SegmentInfos.FORMAT_HAS_PROX)
        sFormat = "FORMAT_HAS_PROX [Lucene 2.4]";
      else if (format == SegmentInfos.FORMAT_USER_DATA)
        sFormat = "FORMAT_USER_DATA [Lucene 2.9]";
      else if (format == SegmentInfos.FORMAT_DIAGNOSTICS)
        sFormat = "FORMAT_DIAGNOSTICS [Lucene 2.9]";
      else if (format < SegmentInfos.CURRENT_FORMAT) {
        // Format constants count downward, so "less than current" means
        // the index was written by a newer Lucene than this tool.
        sFormat = "int=" + format + " [newer version of Lucene than this tool]";
        skip = true;
      } else {
        sFormat = format + " [Lucene 1.3 or prior]";
      }
    }

    result.segmentsFileName = segmentsFileName;
    result.numSegments = numSegments;
    result.segmentFormat = sFormat;
    result.userData = sis.getUserData();
    String userDataString;
    if (sis.getUserData().size() > 0) {
      userDataString = " userData=" + sis.getUserData();
    } else {
      userDataString = "";
    }

    msg("Segments file=" + segmentsFileName + " numSegments=" + numSegments + " version=" + sFormat + userDataString);

    if (onlySegments != null) {
      result.partial = true;
      if (infoStream != null)
        infoStream.print("\nChecking only these segments:");
      for (String s : onlySegments) {
        if (infoStream != null)
          infoStream.print(" " + s);
      }
      result.segmentsChecked.addAll(onlySegments);
      msg(":");
    }

    if (skip) {
      msg("\nERROR: this index appears to be created by a newer version of Lucene than this tool was compiled on; please re-compile this tool on the matching version of Lucene; exiting");
      result.toolOutOfDate = true;
      return result;
    }

    result.newSegments = (SegmentInfos) sis.clone();
    result.newSegments.clear();

    // Phase 3: open and deep-check each segment; only clean segments are
    // added to result.newSegments (used later by fixIndex).
    for(int i=0;i<numSegments;i++) {
      final SegmentInfo info = sis.info(i);
      if (onlySegments != null && !onlySegments.contains(info.name))
        continue;
      Status.SegmentInfoStatus segInfoStat = new Status.SegmentInfoStatus();
      result.segmentInfos.add(segInfoStat);
      msg(" " + (1+i) + " of " + numSegments + ": name=" + info.name + " docCount=" + info.docCount);
      segInfoStat.name = info.name;
      segInfoStat.docCount = info.docCount;

      // Assume the whole segment is lost until the reader opens cleanly.
      int toLoseDocCount = info.docCount;

      SegmentReader reader = null;

      try {
        msg(" compound=" + info.getUseCompoundFile());
        segInfoStat.compound = info.getUseCompoundFile();
        msg(" hasProx=" + info.getHasProx());
        segInfoStat.hasProx = info.getHasProx();
        msg(" numFiles=" + info.files().size());
        segInfoStat.numFiles = info.files().size();
        msg(" size (MB)=" + nf.format(info.sizeInBytes()/(1024.*1024.)));
        segInfoStat.sizeMB = info.sizeInBytes()/(1024.*1024.);
        Map<String,String> diagnostics = info.getDiagnostics();
        segInfoStat.diagnostics = diagnostics;
        if (diagnostics.size() > 0) {
          msg(" diagnostics = " + diagnostics);
        }

        final int docStoreOffset = info.getDocStoreOffset();
        if (docStoreOffset != -1) {
          msg(" docStoreOffset=" + docStoreOffset);
          segInfoStat.docStoreOffset = docStoreOffset;
          msg(" docStoreSegment=" + info.getDocStoreSegment());
          segInfoStat.docStoreSegment = info.getDocStoreSegment();
          msg(" docStoreIsCompoundFile=" + info.getDocStoreIsCompoundFile());
          segInfoStat.docStoreCompoundFile = info.getDocStoreIsCompoundFile();
        }
        final String delFileName = info.getDelFileName();
        if (delFileName == null){
          msg(" no deletions");
          segInfoStat.hasDeletions = false;
        }
        else{
          msg(" has deletions [delFileName=" + delFileName + "]");
          segInfoStat.hasDeletions = true;
          segInfoStat.deletionsFileName = delFileName;
        }
        if (infoStream != null)
          infoStream.print(" test: open reader.........");
        reader = SegmentReader.get(true, info, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR);

        segInfoStat.openReaderPassed = true;

        final int numDocs = reader.numDocs();
        toLoseDocCount = numDocs;
        if (reader.hasDeletions()) {
          // Cross-check the three independent sources of the delete count:
          // the deleted-docs bit set, SegmentInfo, and docCount - numDocs.
          if (reader.deletedDocs.count() != info.getDelCount()) {
            throw new RuntimeException("delete count mismatch: info=" + info.getDelCount() + " vs deletedDocs.count()=" + reader.deletedDocs.count());
          }
          if (reader.deletedDocs.count() > reader.maxDoc()) {
            throw new RuntimeException("too many deleted docs: maxDoc()=" + reader.maxDoc() + " vs deletedDocs.count()=" + reader.deletedDocs.count());
          }
          if (info.docCount - numDocs != info.getDelCount()){
            throw new RuntimeException("delete count mismatch: info=" + info.getDelCount() + " vs reader=" + (info.docCount - numDocs));
          }
          segInfoStat.numDeleted = info.docCount - numDocs;
          msg("OK [" + (segInfoStat.numDeleted) + " deleted docs]");
        } else {
          if (info.getDelCount() != 0) {
            throw new RuntimeException("delete count mismatch: info=" + info.getDelCount() + " vs reader=" + (info.docCount - numDocs));
          }
          msg("OK");
        }
        if (reader.maxDoc() != info.docCount)
          throw new RuntimeException("SegmentReader.maxDoc() " + reader.maxDoc() + " != SegmentInfos.docCount " + info.docCount);

        // Test getFieldNames()
        if (infoStream != null) {
          infoStream.print(" test: fields..............");
        }
        Collection<String> fieldNames = reader.getFieldNames(IndexReader.FieldOption.ALL);
        msg("OK [" + fieldNames.size() + " fields]");
        segInfoStat.numFields = fieldNames.size();

        // Test Field Norms
        segInfoStat.fieldNormStatus = testFieldNorms(fieldNames, reader);

        // Test the Term Index
        segInfoStat.termIndexStatus = testTermIndex(info, reader);

        // Test Stored Fields
        segInfoStat.storedFieldStatus = testStoredFields(info, reader, nf);

        // Test Term Vectors
        segInfoStat.termVectorStatus = testTermVectors(info, reader, nf);

        // Rethrow the first exception we encountered
        // This will cause stats for failed segments to be incremented properly
        if (segInfoStat.fieldNormStatus.error != null) {
          throw new RuntimeException("Field Norm test failed");
        } else if (segInfoStat.termIndexStatus.error != null) {
          throw new RuntimeException("Term Index test failed");
        } else if (segInfoStat.storedFieldStatus.error != null) {
          throw new RuntimeException("Stored Field test failed");
        } else if (segInfoStat.termVectorStatus.error != null) {
          throw new RuntimeException("Term Vector test failed");
        }

        msg("");

      } catch (Throwable t) {
        // Any failure marks the segment bad; it is excluded from
        // result.newSegments and its (surviving) docs counted as lost.
        msg("FAILED");
        String comment;
        comment = "fixIndex() would remove reference to this segment";
        msg(" WARNING: " + comment + "; full exception:");
        if (infoStream != null)
          t.printStackTrace(infoStream);
        msg("");
        result.totLoseDocCount += toLoseDocCount;
        result.numBadSegments++;
        continue;
      } finally {
        if (reader != null)
          reader.close();
      }

      // Keeper
      result.newSegments.add((SegmentInfo) info.clone());
    }

    if (0 == result.numBadSegments) {
      result.clean = true;
      msg("No problems were detected with this index.\n");
    } else
      msg("WARNING: " + result.numBadSegments + " broken segments (containing " + result.totLoseDocCount + " documents) detected");

    return result;
  }
/**
   * Test field norms: reads the norms array for every field and records
   * how many fields were read successfully.
   */
  private Status.FieldNormStatus testFieldNorms(Collection<String> fieldNames, SegmentReader reader) {
    final Status.FieldNormStatus status = new Status.FieldNormStatus();

    try {
      if (infoStream != null) {
        infoStream.print(" test: field norms.........");
      }

      // One byte per document; the buffer is reused across fields.
      final byte[] norms = new byte[reader.maxDoc()];
      for (final String fieldName : fieldNames) {
        reader.norms(fieldName, norms, 0);
        status.totFields++;
      }

      msg("OK [" + status.totFields + " fields]");
    } catch (Throwable e) {
      msg("ERROR [" + String.valueOf(e.getMessage()) + "]");
      status.error = e;
      if (infoStream != null) {
        e.printStackTrace(infoStream);
      }
    }

    return status;
  }
/**
   * Test the term index: walks every term and, for each, its postings and
   * positions, checking ordering/range invariants and reconciling docFreq
   * against live plus deleted documents.
   */
  private Status.TermIndexStatus testTermIndex(SegmentInfo info, SegmentReader reader) {
    final Status.TermIndexStatus status = new Status.TermIndexStatus();

    try {
      if (infoStream != null) {
        infoStream.print(" test: terms, freq, prox...");
      }

      final TermEnum termEnum = reader.terms();
      final TermPositions termPositions = reader.termPositions();

      // Used only to count up # deleted docs for this term
      final MySegmentTermDocs myTermDocs = new MySegmentTermDocs(reader);

      final int maxDoc = reader.maxDoc();

      while (termEnum.next()) {
        status.termCount++;
        final Term term = termEnum.term();
        final int docFreq = termEnum.docFreq();
        termPositions.seek(term);
        int lastDoc = -1;
        int freq0 = 0;
        status.totFreq += docFreq;
        while (termPositions.next()) {
          freq0++;
          final int doc = termPositions.doc();
          final int freq = termPositions.freq();
          // Doc ids must be strictly increasing and within [0, maxDoc).
          if (doc <= lastDoc)
            throw new RuntimeException("term " + term + ": doc " + doc + " <= lastDoc " + lastDoc);
          if (doc >= maxDoc)
            throw new RuntimeException("term " + term + ": doc " + doc + " >= maxDoc " + maxDoc);
          lastDoc = doc;
          if (freq <= 0)
            throw new RuntimeException("term " + term + ": doc " + doc + ": freq " + freq + " is out of bounds");

          int lastPos = -1;
          status.totPos += freq;
          // Positions must be non-decreasing; -1 itself is permitted
          // (the check below only rejects values below -1).
          for(int j=0;j<freq;j++) {
            final int pos = termPositions.nextPosition();
            if (pos < -1)
              throw new RuntimeException("term " + term + ": doc " + doc + ": pos " + pos + " is out of bounds");
            if (pos < lastPos)
              throw new RuntimeException("term " + term + ": doc " + doc + ": pos " + pos + " < lastPos " + lastPos);
            lastPos = pos;
          }
        }

        // Now count how many deleted docs occurred in
        // this term:
        final int delCount;
        if (reader.hasDeletions()) {
          myTermDocs.seek(term);
          while(myTermDocs.next()) { }
          delCount = myTermDocs.delCount;
        } else {
          delCount = 0;
        }

        // Live docs seen plus deleted docs must equal the stored docFreq.
        if (freq0 + delCount != docFreq) {
          throw new RuntimeException("term " + term + " docFreq=" +
              docFreq + " != num docs seen " + freq0 + " + num docs deleted " + delCount);
        }
      }

      msg("OK [" + status.termCount + " terms; " + status.totFreq + " terms/docs pairs; " + status.totPos + " tokens]");
    } catch (Throwable e) {
      msg("ERROR [" + String.valueOf(e.getMessage()) + "]");
      status.error = e;
      if (infoStream != null) {
        e.printStackTrace(infoStream);
      }
    }

    return status;
  }
/**
   * Test stored fields for a segment: loads every undeleted document and
   * verifies the undeleted-doc count matches the reader's numDocs().
   */
  private Status.StoredFieldStatus testStoredFields(SegmentInfo info, SegmentReader reader, NumberFormat format) {
    final Status.StoredFieldStatus status = new Status.StoredFieldStatus();

    try {
      if (infoStream != null) {
        infoStream.print(" test: stored fields.......");
      }

      // Scan stored fields for all documents
      for (int j = 0; j < info.docCount; ++j) {
        if (!reader.isDeleted(j)) {
          status.docCount++;
          Document doc = reader.document(j);
          status.totFields += doc.getFields().size();
        }
      }

      // Validate docCount
      if (status.docCount != reader.numDocs()) {
        // BUGFIX: the message previously printed status.docCount on both
        // sides, so the actual mismatch could never be seen in the output;
        // report the reader's claimed count vs the count we observed.
        throw new RuntimeException("docCount=" + reader.numDocs() + " but saw " + status.docCount + " undeleted docs");
      }

      msg("OK [" + status.totFields + " total field count; avg " +
          format.format((((float) status.totFields)/status.docCount)) + " fields per doc]");
    } catch (Throwable e) {
      msg("ERROR [" + String.valueOf(e.getMessage()) + "]");
      status.error = e;
      if (infoStream != null) {
        e.printStackTrace(infoStream);
      }
    }

    return status;
  }
/**
   * Test term vectors for a segment: fetches the term-frequency vectors of
   * every undeleted document and tallies how many were read.
   */
  private Status.TermVectorStatus testTermVectors(SegmentInfo info, SegmentReader reader, NumberFormat format) {
    final Status.TermVectorStatus status = new Status.TermVectorStatus();

    try {
      if (infoStream != null) {
        infoStream.print(" test: term vectors........");
      }

      for (int docID = 0; docID < info.docCount; ++docID) {
        if (reader.isDeleted(docID)) {
          continue;
        }
        status.docCount++;
        final TermFreqVector[] vectors = reader.getTermFreqVectors(docID);
        // A document without stored vectors yields null, not an error.
        if (vectors != null) {
          status.totVectors += vectors.length;
        }
      }

      msg("OK [" + status.totVectors + " total vector count; avg " +
          format.format((((float) status.totVectors) / status.docCount)) + " term/freq vector fields per doc]");
    } catch (Throwable e) {
      msg("ERROR [" + String.valueOf(e.getMessage()) + "]");
      status.error = e;
      if (infoStream != null) {
        e.printStackTrace(infoStream);
      }
    }

    return status;
  }
/** Repairs the index using previously returned result
 * from {@link #checkIndex}. Note that this does not
 * remove any of the unreferenced files after it's done;
 * you must separately open an {@link IndexWriter}, which
 * deletes unreferenced files when it's created.
 *
 * <p><b>WARNING</b>: this writes a
 * new segments file into the index, effectively removing
 * all documents in broken segments from the index.
 * BE CAREFUL.
 *
 * <p><b>WARNING</b>: Make sure you only call this when the
 * index is not opened by any writer. */
public void fixIndex(Status result) throws IOException {
  if (result.partial) {
    throw new IllegalArgumentException("can only fix an index that was fully checked (this status checked a subset of segments)");
  }
  // Write the pruned SegmentInfos; broken segments are simply dropped.
  result.newSegments.commit(result.dir);
}
// Flipped to true, as a side effect of testAsserts(), only when the JVM
// actually evaluates assert statements (i.e. run with -ea).
private static boolean assertsOn;

// Always returns true; setting assertsOn here is the whole point -- this
// method body runs only if the JVM executes the assert in assertsOn().
private static boolean testAsserts() {
  assertsOn = true;
  return true;
}

// Detects whether assertions are enabled: the assert below invokes
// testAsserts() only under -ea, so assertsOn stays false otherwise.
private static boolean assertsOn() {
  assert testAsserts();
  return assertsOn;
}
/** Command-line interface to check and fix an index.
<p>
Run it like this:
<pre>
java -ea:org.apache.lucene... org.apache.lucene.index.CheckIndex pathToIndex [-fix] [-segment X] [-segment Y]
</pre>
<ul>
<li><code>-fix</code>: actually write a new segments_N file, removing any problematic segments
<li><code>-segment X</code>: only check the specified
segment(s). This can be specified multiple times,
to check more than one segment, eg <code>-segment _2
-segment _a</code>. You can't use this with the -fix
option.
</ul>
<p><b>WARNING</b>: <code>-fix</code> should only be used on an emergency basis as it will cause
documents (perhaps many) to be permanently removed from the index. Always make
a backup copy of your index before running this! Do not run this tool on an index
that is actively being written to. You have been warned!
<p> Run without -fix, this tool will open the index, report version information
and report any exceptions it hits and what action it would take if -fix were
specified. With -fix, this tool will remove any segments that have issues and
write a new segments_N file. This means all documents contained in the affected
segments will be removed.
<p>
This tool exits with exit code 1 if the index cannot be opened or has any
corruption, else 0.
*/
/**
 * Command-line entry point: parses {@code -fix} / {@code -segment} options,
 * runs {@link #checkIndex}, optionally repairs the index, and exits with
 * code 1 on any corruption or usage error, else 0.
 *
 * @param args command-line arguments: pathToIndex [-fix] [-segment X]...
 * @throws IOException          if the index directory cannot be read
 * @throws InterruptedException if the pre-fix countdown sleep is interrupted
 */
public static void main(String[] args) throws IOException, InterruptedException {

  boolean doFix = false;
  List<String> onlySegments = new ArrayList<String>();
  String indexPath = null;
  int i = 0;
  while(i < args.length) {
    if (args[i].equals("-fix")) {
      doFix = true;
      i++;
    } else if (args[i].equals("-segment")) {
      if (i == args.length-1) {
        System.out.println("ERROR: missing name for -segment option");
        System.exit(1);
      }
      onlySegments.add(args[i+1]);
      i += 2;
    } else {
      // Bare argument is the index path; only one is allowed.
      if (indexPath != null) {
        System.out.println("ERROR: unexpected extra argument '" + args[i] + "'");
        System.exit(1);
      }
      indexPath = args[i];
      i++;
    }
  }

  if (indexPath == null) {
    System.out.println("\nERROR: index path not specified");
    System.out.println("\nUsage: java org.apache.lucene.index.CheckIndex pathToIndex [-fix] [-segment X] [-segment Y]\n" +
                       "\n" +
                       " -fix: actually write a new segments_N file, removing any problematic segments\n" +
                       " -segment X: only check the specified segments. This can be specified multiple\n" +
                       " times, to check more than one segment, eg '-segment _2 -segment _a'.\n" +
                       " You can't use this with the -fix option\n" +
                       "\n" +
                       "**WARNING**: -fix should only be used on an emergency basis as it will cause\n" +
                       "documents (perhaps many) to be permanently removed from the index. Always make\n" +
                       "a backup copy of your index before running this! Do not run this tool on an index\n" +
                       "that is actively being written to. You have been warned!\n" +
                       "\n" +
                       "Run without -fix, this tool will open the index, report version information\n" +
                       "and report any exceptions it hits and what action it would take if -fix were\n" +
                       "specified. With -fix, this tool will remove any segments that have issues and\n" +
                       "write a new segments_N file. This means all documents contained in the affected\n" +
                       "segments will be removed.\n" +
                       "\n" +
                       "This tool exits with exit code 1 if the index cannot be opened or has any\n" +
                       "corruption, else 0.\n");
    System.exit(1);
  }

  if (!assertsOn())
    System.out.println("\nNOTE: testing will be more thorough if you run java with '-ea:org.apache.lucene...', so assertions are enabled");

  if (onlySegments.isEmpty())
    onlySegments = null;
  else if (doFix) {
    System.out.println("ERROR: cannot specify both -fix and -segment");
    System.exit(1);
  }

  System.out.println("\nOpening index @ " + indexPath + "\n");
  Directory dir = null;
  try {
    dir = FSDirectory.open(new File(indexPath));
  } catch (Throwable t) {
    System.out.println("ERROR: could not open directory \"" + indexPath + "\"; exiting");
    t.printStackTrace(System.out);
    System.exit(1);
  }

  CheckIndex checker = new CheckIndex(dir);
  checker.setInfoStream(System.out);

  Status result = checker.checkIndex(onlySegments);
  if (result.missingSegments) {
    System.exit(1);
  }

  if (!result.clean) {
    if (!doFix) {
      System.out.println("WARNING: would write new segments file, and " + result.totLoseDocCount + " documents would be lost, if -fix were specified\n");
    } else {
      System.out.println("WARNING: " + result.totLoseDocCount + " documents will be lost\n");
      // Give the operator a last chance to abort before destructive repair.
      System.out.println("NOTE: will write new segments file in 5 seconds; this will remove " + result.totLoseDocCount + " docs from the index. THIS IS YOUR LAST CHANCE TO CTRL+C!");
      for(int s=0;s<5;s++) {
        Thread.sleep(1000);
        System.out.println(" " + (5-s) + "...");
      }
      System.out.println("Writing...");
      checker.fixIndex(result);
      System.out.println("OK");
      System.out.println("Wrote new segments file \"" + result.newSegments.getCurrentSegmentFileName() + "\"");
    }
  }
  System.out.println("");

  // result is guaranteed non-null here (checkIndex always returns a Status,
  // and we already dereferenced it above), so the original
  // "result != null && result.clean == true" check was redundant.
  final int exitCode = result.clean ? 0 : 1;
  System.exit(exitCode);
}
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/CheckIndex.java | Java | art | 32,556 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * Pool of char[] blocks handed out by a DocumentsWriter.  Buffers are
 * allocated lazily via {@link #nextBuffer} and recycled wholesale on
 * {@link #reset}.  The sentinel initial values of charUpto/charOffset force
 * the first write to trigger a nextBuffer() call.
 */
final class CharBlockPool {

  public char[][] buffers = new char[10][];
  int numBuffer;

  int bufferUpto = -1; // Which buffer we are upto
  public int charUpto = DocumentsWriter.CHAR_BLOCK_SIZE; // Where we are in head buffer

  public char[] buffer; // Current head buffer
  public int charOffset = -DocumentsWriter.CHAR_BLOCK_SIZE; // Current head offset
  final private DocumentsWriter docWriter;

  public CharBlockPool(DocumentsWriter docWriter) {
    this.docWriter = docWriter;
  }

  /** Returns every allocated buffer to the DocumentsWriter and restores
   *  the initial (empty) state. */
  public void reset() {
    docWriter.recycleCharBlocks(buffers, bufferUpto + 1);
    bufferUpto = -1;
    charUpto = DocumentsWriter.CHAR_BLOCK_SIZE;
    charOffset = -DocumentsWriter.CHAR_BLOCK_SIZE;
  }

  /** Advances to a fresh head buffer, growing the buffers array by 1.5x
   *  when it is full. */
  public void nextBuffer() {
    final int upcoming = bufferUpto + 1;
    if (upcoming == buffers.length) {
      final char[][] grown = new char[(int) (buffers.length*1.5)][];
      System.arraycopy(buffers, 0, grown, 0, buffers.length);
      buffers = grown;
    }
    buffer = buffers[upcoming] = docWriter.getCharBlock();
    bufferUpto = upcoming;

    charUpto = 0;
    charOffset += DocumentsWriter.CHAR_BLOCK_SIZE;
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/CharBlockPool.java | Java | art | 2,008 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Per-field hook at the end of the inverted-document pipeline; concrete
// subclasses are created elsewhere in the indexing chain (not visible here).
abstract class InvertedDocEndConsumerPerField {
  // Called when processing of the field completes normally.
  abstract void finish();
  // Called to discard state after a failure; must not throw.
  abstract void abort();
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/InvertedDocEndConsumerPerField.java | Java | art | 939 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.util.*;
class SegmentTermVector implements TermFreqVector {
private String field;
private String terms[];
private int termFreqs[];
SegmentTermVector(String field, String terms[], int termFreqs[]) {
this.field = field;
this.terms = terms;
this.termFreqs = termFreqs;
}
/**
*
* @return The number of the field this vector is associated with
*/
public String getField() {
return field;
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append('{');
sb.append(field).append(": ");
if(terms != null){
for (int i=0; i<terms.length; i++) {
if (i>0) sb.append(", ");
sb.append(terms[i]).append('/').append(termFreqs[i]);
}
}
sb.append('}');
return sb.toString();
}
public int size() {
return terms == null ? 0 : terms.length;
}
public String [] getTerms() {
return terms;
}
public int[] getTermFrequencies() {
return termFreqs;
}
public int indexOf(String termText) {
if(terms == null)
return -1;
int res = Arrays.binarySearch(terms, termText);
return res >= 0 ? res : -1;
}
public int[] indexesOf(String [] termNumbers, int start, int len) {
// TODO: there must be a more efficient way of doing this.
// At least, we could advance the lower bound of the terms array
// as we find valid indexes. Also, it might be possible to leverage
// this even more by starting in the middle of the termNumbers array
// and thus dividing the terms array maybe in half with each found index.
int res[] = new int[len];
for (int i=0; i < len; i++) {
res[i] = indexOf(termNumbers[start+ i]);
}
return res;
}
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/SegmentTermVector.java | Java | art | 2,605 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.FieldSelector;
import org.apache.lucene.index.DirectoryReader.MultiTermDocs;
import org.apache.lucene.index.DirectoryReader.MultiTermEnum;
import org.apache.lucene.index.DirectoryReader.MultiTermPositions;
import org.apache.lucene.search.DefaultSimilarity;
import org.apache.lucene.search.FieldCache; // not great (circular); used only to purge FieldCache entry on close
/** An IndexReader which reads multiple indexes, appending
* their content. */
public class MultiReader extends IndexReader implements Cloneable {
protected IndexReader[] subReaders;
private int[] starts; // 1st docno for each segment
private boolean[] decrefOnClose; // remember which subreaders to decRef on close
private Map<String,byte[]> normsCache = new HashMap<String,byte[]>();
private int maxDoc = 0;
private int numDocs = -1;
private boolean hasDeletions = false;
/**
* <p>Construct a MultiReader aggregating the named set of (sub)readers.
* Directory locking for delete, undeleteAll, and setNorm operations is
* left to the subreaders. </p>
* <p>Note that all subreaders are closed if this Multireader is closed.</p>
* @param subReaders set of (sub)readers
* @throws IOException
*/
public MultiReader(IndexReader... subReaders) {
initialize(subReaders, true);
}
/**
* <p>Construct a MultiReader aggregating the named set of (sub)readers.
* Directory locking for delete, undeleteAll, and setNorm operations is
* left to the subreaders. </p>
* @param closeSubReaders indicates whether the subreaders should be closed
* when this MultiReader is closed
* @param subReaders set of (sub)readers
* @throws IOException
*/
public MultiReader(IndexReader[] subReaders, boolean closeSubReaders) {
initialize(subReaders, closeSubReaders);
}
private void initialize(IndexReader[] subReaders, boolean closeSubReaders) {
this.subReaders = subReaders.clone();
starts = new int[subReaders.length + 1]; // build starts array
decrefOnClose = new boolean[subReaders.length];
for (int i = 0; i < subReaders.length; i++) {
starts[i] = maxDoc;
maxDoc += subReaders[i].maxDoc(); // compute maxDocs
if (!closeSubReaders) {
subReaders[i].incRef();
decrefOnClose[i] = true;
} else {
decrefOnClose[i] = false;
}
if (subReaders[i].hasDeletions())
hasDeletions = true;
}
starts[subReaders.length] = maxDoc;
}
/**
* Tries to reopen the subreaders.
* <br>
* If one or more subreaders could be re-opened (i. e. subReader.reopen()
* returned a new instance != subReader), then a new MultiReader instance
* is returned, otherwise this instance is returned.
* <p>
* A re-opened instance might share one or more subreaders with the old
* instance. Index modification operations result in undefined behavior
* when performed before the old instance is closed.
* (see {@link IndexReader#reopen()}).
* <p>
* If subreaders are shared, then the reference count of those
* readers is increased to ensure that the subreaders remain open
* until the last referring reader is closed.
*
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
*/
@Override
public synchronized IndexReader reopen() throws CorruptIndexException, IOException {
return doReopen(false);
}
/**
* Clones the subreaders.
* (see {@link IndexReader#clone()}).
* <br>
* <p>
* If subreaders are shared, then the reference count of those
* readers is increased to ensure that the subreaders remain open
* until the last referring reader is closed.
*/
@Override
public synchronized Object clone() {
try {
return doReopen(true);
} catch (Exception ex) {
throw new RuntimeException(ex);
}
}
/**
* If clone is true then we clone each of the subreaders
* @param doClone
* @return New IndexReader, or same one (this) if
* reopen/clone is not necessary
* @throws CorruptIndexException
* @throws IOException
*/
protected IndexReader doReopen(boolean doClone) throws CorruptIndexException, IOException {
ensureOpen();
boolean reopened = false;
IndexReader[] newSubReaders = new IndexReader[subReaders.length];
boolean success = false;
try {
for (int i = 0; i < subReaders.length; i++) {
if (doClone)
newSubReaders[i] = (IndexReader) subReaders[i].clone();
else
newSubReaders[i] = subReaders[i].reopen();
// if at least one of the subreaders was updated we remember that
// and return a new MultiReader
if (newSubReaders[i] != subReaders[i]) {
reopened = true;
}
}
success = true;
} finally {
if (!success && reopened) {
for (int i = 0; i < newSubReaders.length; i++) {
if (newSubReaders[i] != subReaders[i]) {
try {
newSubReaders[i].close();
} catch (IOException ignore) {
// keep going - we want to clean up as much as possible
}
}
}
}
}
if (reopened) {
boolean[] newDecrefOnClose = new boolean[subReaders.length];
for (int i = 0; i < subReaders.length; i++) {
if (newSubReaders[i] == subReaders[i]) {
newSubReaders[i].incRef();
newDecrefOnClose[i] = true;
}
}
MultiReader mr = new MultiReader(newSubReaders);
mr.decrefOnClose = newDecrefOnClose;
return mr;
} else {
return this;
}
}
@Override
public TermFreqVector[] getTermFreqVectors(int n) throws IOException {
ensureOpen();
int i = readerIndex(n); // find segment num
return subReaders[i].getTermFreqVectors(n - starts[i]); // dispatch to segment
}
@Override
public TermFreqVector getTermFreqVector(int n, String field)
throws IOException {
ensureOpen();
int i = readerIndex(n); // find segment num
return subReaders[i].getTermFreqVector(n - starts[i], field);
}
@Override
public void getTermFreqVector(int docNumber, String field, TermVectorMapper mapper) throws IOException {
ensureOpen();
int i = readerIndex(docNumber); // find segment num
subReaders[i].getTermFreqVector(docNumber - starts[i], field, mapper);
}
@Override
public void getTermFreqVector(int docNumber, TermVectorMapper mapper) throws IOException {
ensureOpen();
int i = readerIndex(docNumber); // find segment num
subReaders[i].getTermFreqVector(docNumber - starts[i], mapper);
}
@Override
public boolean isOptimized() {
return false;
}
@Override
public int numDocs() {
// Don't call ensureOpen() here (it could affect performance)
// NOTE: multiple threads may wind up init'ing
// numDocs... but that's harmless
if (numDocs == -1) { // check cache
int n = 0; // cache miss--recompute
for (int i = 0; i < subReaders.length; i++)
n += subReaders[i].numDocs(); // sum from readers
numDocs = n;
}
return numDocs;
}
@Override
public int maxDoc() {
// Don't call ensureOpen() here (it could affect performance)
return maxDoc;
}
// inherit javadoc
@Override
public Document document(int n, FieldSelector fieldSelector) throws CorruptIndexException, IOException {
ensureOpen();
int i = readerIndex(n); // find segment num
return subReaders[i].document(n - starts[i], fieldSelector); // dispatch to segment reader
}
@Override
public boolean isDeleted(int n) {
// Don't call ensureOpen() here (it could affect performance)
int i = readerIndex(n); // find segment num
return subReaders[i].isDeleted(n - starts[i]); // dispatch to segment reader
}
@Override
public boolean hasDeletions() {
// Don't call ensureOpen() here (it could affect performance)
return hasDeletions;
}
@Override
protected void doDelete(int n) throws CorruptIndexException, IOException {
numDocs = -1; // invalidate cache
int i = readerIndex(n); // find segment num
subReaders[i].deleteDocument(n - starts[i]); // dispatch to segment reader
hasDeletions = true;
}
@Override
protected void doUndeleteAll() throws CorruptIndexException, IOException {
for (int i = 0; i < subReaders.length; i++)
subReaders[i].undeleteAll();
hasDeletions = false;
numDocs = -1; // invalidate cache
}
private int readerIndex(int n) { // find reader for doc n:
return DirectoryReader.readerIndex(n, this.starts, this.subReaders.length);
}
@Override
public boolean hasNorms(String field) throws IOException {
ensureOpen();
for (int i = 0; i < subReaders.length; i++) {
if (subReaders[i].hasNorms(field)) return true;
}
return false;
}
@Override
public synchronized byte[] norms(String field) throws IOException {
ensureOpen();
byte[] bytes = normsCache.get(field);
if (bytes != null)
return bytes; // cache hit
if (!hasNorms(field))
return null;
bytes = new byte[maxDoc()];
for (int i = 0; i < subReaders.length; i++)
subReaders[i].norms(field, bytes, starts[i]);
normsCache.put(field, bytes); // update cache
return bytes;
}
@Override
public synchronized void norms(String field, byte[] result, int offset)
throws IOException {
ensureOpen();
byte[] bytes = normsCache.get(field);
for (int i = 0; i < subReaders.length; i++) // read from segments
subReaders[i].norms(field, result, offset + starts[i]);
if (bytes==null && !hasNorms(field)) {
Arrays.fill(result, offset, result.length, DefaultSimilarity.encodeNorm(1.0f));
} else if (bytes != null) { // cache hit
System.arraycopy(bytes, 0, result, offset, maxDoc());
} else {
for (int i = 0; i < subReaders.length; i++) { // read from segments
subReaders[i].norms(field, result, offset + starts[i]);
}
}
}
@Override
protected void doSetNorm(int n, String field, byte value)
throws CorruptIndexException, IOException {
synchronized (normsCache) {
normsCache.remove(field); // clear cache
}
int i = readerIndex(n); // find segment num
subReaders[i].setNorm(n-starts[i], field, value); // dispatch
}
@Override
public TermEnum terms() throws IOException {
ensureOpen();
return new MultiTermEnum(this, subReaders, starts, null);
}
@Override
public TermEnum terms(Term term) throws IOException {
ensureOpen();
return new MultiTermEnum(this, subReaders, starts, term);
}
@Override
public int docFreq(Term t) throws IOException {
ensureOpen();
int total = 0; // sum freqs in segments
for (int i = 0; i < subReaders.length; i++)
total += subReaders[i].docFreq(t);
return total;
}
@Override
public TermDocs termDocs() throws IOException {
ensureOpen();
return new MultiTermDocs(this, subReaders, starts);
}
@Override
public TermPositions termPositions() throws IOException {
ensureOpen();
return new MultiTermPositions(this, subReaders, starts);
}
@Override
protected void doCommit(Map<String,String> commitUserData) throws IOException {
for (int i = 0; i < subReaders.length; i++)
subReaders[i].commit(commitUserData);
}
@Override
protected synchronized void doClose() throws IOException {
for (int i = 0; i < subReaders.length; i++) {
if (decrefOnClose[i]) {
subReaders[i].decRef();
} else {
subReaders[i].close();
}
}
// NOTE: only needed in case someone had asked for
// FieldCache for top-level reader (which is generally
// not a good idea):
FieldCache.DEFAULT.purge(this);
}
@Override
public Collection<String> getFieldNames (IndexReader.FieldOption fieldNames) {
ensureOpen();
return DirectoryReader.getFieldNames(fieldNames, this.subReaders);
}
/**
* Checks recursively if all subreaders are up to date.
*/
@Override
public boolean isCurrent() throws CorruptIndexException, IOException {
for (int i = 0; i < subReaders.length; i++) {
if (!subReaders[i].isCurrent()) {
return false;
}
}
// all subreaders are up to date
return true;
}
/** Not implemented.
* @throws UnsupportedOperationException
*/
@Override
public long getVersion() {
throw new UnsupportedOperationException("MultiReader does not support this method.");
}
@Override
public IndexReader[] getSequentialSubReaders() {
return subReaders;
}
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/MultiReader.java | Java | art | 14,023 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.lucene.index;
import org.apache.lucene.util.AttributeSource;
/**
* This class tracks the number and position / offset parameters of terms
* being added to the index. The information collected in this class is
* also used to calculate the normalization factor for a field.
*
* <p><b>WARNING</b>: This API is new and experimental, and may suddenly
* change.</p>
*/
/**
 * This class tracks the number and position / offset parameters of terms
 * being added to the index. The information collected in this class is
 * also used to calculate the normalization factor for a field.
 *
 * <p><b>WARNING</b>: This API is new and experimental, and may suddenly
 * change.</p>
 */
public final class FieldInvertState {
  int position;
  int length;
  int numOverlap;
  int offset;
  float boost;
  AttributeSource attributeSource;

  public FieldInvertState() {
  }

  public FieldInvertState(int position, int length, int numOverlap, int offset, float boost) {
    this.position = position;
    this.length = length;
    this.numOverlap = numOverlap;
    this.offset = offset;
    this.boost = boost;
  }

  /**
   * Re-initialize the state, using this boost value.
   * @param docBoost boost value to use.
   */
  void reset(float docBoost) {
    // everything except the boost restarts from zero / empty
    boost = docBoost;
    position = 0;
    length = 0;
    numOverlap = 0;
    offset = 0;
    attributeSource = null;
  }

  /**
   * Get the last processed term position.
   * @return the position
   */
  public int getPosition() {
    return position;
  }

  /**
   * Get total number of terms in this field.
   * @return the length
   */
  public int getLength() {
    return length;
  }

  /**
   * Get the number of terms with <code>positionIncrement == 0</code>.
   * @return the numOverlap
   */
  public int getNumOverlap() {
    return numOverlap;
  }

  /**
   * Get end offset of the last processed term.
   * @return the offset
   */
  public int getOffset() {
    return offset;
  }

  /**
   * Get boost value. This is the cumulative product of
   * document boost and field boost for all field instances
   * sharing the same field name.
   * @return the boost
   */
  public float getBoost() {
    return boost;
  }

  public AttributeSource getAttributeSource() {
    return attributeSource;
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/FieldInvertState.java | Java | art | 2,778 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * Per-field indexing options: whether the field is indexed, stores term
 * vectors (with positions/offsets), omits norms, stores payloads, or omits
 * term frequencies and positions.  Once a flag is turned on (or omitNorms
 * turned off) by any document, it stays that way -- see {@link #update}.
 */
final class FieldInfo {
  String name;
  boolean isIndexed;
  int number;

  // true if term vector for this field should be stored
  boolean storeTermVector;
  boolean storeOffsetWithTermVector;
  boolean storePositionWithTermVector;

  boolean omitNorms; // omit norms associated with indexed fields
  boolean omitTermFreqAndPositions;

  boolean storePayloads; // whether this field stores payloads together with term positions

  FieldInfo(String na, boolean tk, int nu, boolean storeTermVector,
            boolean storePositionWithTermVector, boolean storeOffsetWithTermVector,
            boolean omitNorms, boolean storePayloads, boolean omitTermFreqAndPositions) {
    name = na;
    isIndexed = tk;
    number = nu;
    // non-indexed fields keep inert defaults regardless of what was passed
    this.storeTermVector = isIndexed && storeTermVector;
    this.storeOffsetWithTermVector = isIndexed && storeOffsetWithTermVector;
    this.storePositionWithTermVector = isIndexed && storePositionWithTermVector;
    this.storePayloads = isIndexed && storePayloads;
    this.omitNorms = !isIndexed || omitNorms;
    this.omitTermFreqAndPositions = isIndexed && omitTermFreqAndPositions;
  }

  /** Returns an independent copy carrying the same options. */
  @Override
  public Object clone() {
    return new FieldInfo(name, isIndexed, number, storeTermVector, storePositionWithTermVector,
                         storeOffsetWithTermVector, omitNorms, storePayloads, omitTermFreqAndPositions);
  }

  /** Merges options seen for another instance of this field; flags are
   *  sticky: once indexed always indexed, once a vector always a vector,
   *  and norms, once stored, are stored for life. */
  void update(boolean isIndexed, boolean storeTermVector, boolean storePositionWithTermVector,
              boolean storeOffsetWithTermVector, boolean omitNorms, boolean storePayloads, boolean omitTermFreqAndPositions) {
    this.isIndexed |= isIndexed;          // once indexed, always index
    if (isIndexed) { // if updated field data is not for indexing, leave the updates out
      this.storeTermVector |= storeTermVector;                         // once vector, always vector
      this.storePositionWithTermVector |= storePositionWithTermVector; // once vector, always vector
      this.storeOffsetWithTermVector |= storeOffsetWithTermVector;     // once vector, always vector
      this.storePayloads |= storePayloads;
      this.omitNorms &= omitNorms;        // once norms are stored, always store
      // once any doc omits tf/positions, the field omits them for life
      this.omitTermFreqAndPositions |= omitTermFreqAndPositions;
    }
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/FieldInfo.java | Java | art | 3,855 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import org.apache.lucene.store.Directory;
// Writes the postings (terms dictionary + index) for one segment.  Wires a
// TermInfosWriter, a skip-list writer, and a per-terms child writer together,
// and registers the produced files with the flush state.
final class FormatPostingsFieldsWriter extends FormatPostingsFieldsConsumer {

  final Directory dir;                 // directory the segment is written to
  final String segment;                // segment name
  final TermInfosWriter termsOut;      // terms dictionary / index output
  final FieldInfos fieldInfos;
  final FormatPostingsTermsWriter termsWriter;  // child consumer for terms
  final DefaultSkipListWriter skipListWriter;
  final int totalNumDocs;              // doc count of the segment being flushed

  public FormatPostingsFieldsWriter(SegmentWriteState state, FieldInfos fieldInfos) throws IOException {
    super();

    dir = state.directory;
    segment = state.segmentName;
    totalNumDocs = state.numDocs;
    this.fieldInfos = fieldInfos;
    termsOut = new TermInfosWriter(dir,
                                   segment,
                                   fieldInfos,
                                   state.termIndexInterval);

    // TODO: this is a nasty abstraction violation (that we
    // peek down to find freqOut/proxOut) -- we need a
    // better abstraction here whereby these child consumers
    // can provide skip data or not

    // freq/prox outputs are passed as null here; presumably they are wired
    // in later by the child writer -- not visible from this file, verify.
    skipListWriter = new DefaultSkipListWriter(termsOut.skipInterval,
                                               termsOut.maxSkipLevels,
                                               totalNumDocs,
                                               null,
                                               null);

    // record the files this writer will produce so the flush tracks them
    state.flushedFiles.add(state.segmentFileName(IndexFileNames.TERMS_EXTENSION));
    state.flushedFiles.add(state.segmentFileName(IndexFileNames.TERMS_INDEX_EXTENSION));

    termsWriter = new FormatPostingsTermsWriter(state, this);
  }

  /** Add a new field */
  @Override
  FormatPostingsTermsConsumer addField(FieldInfo field) {
    // single shared termsWriter instance, re-targeted per field
    termsWriter.setField(field);
    return termsWriter;
  }

  /** Called when we are done adding everything. */
  @Override
  void finish() throws IOException {
    // close the dictionary output first, then the child writer
    termsOut.close();
    termsWriter.close();
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/FormatPostingsFieldsWriter.java | Java | art | 2,734 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.util.Collection;
import java.util.Map;
import java.io.IOException;
/**
 * Abstract API for the consumer that runs at the end of the document
 * inversion chain: per-thread/per-field state is gathered via
 * {@link #addThread} and written out at segment-flush time.
 */
abstract class InvertedDocEndConsumer {
  /** Create the per-thread instance bound to the given inverter thread state. */
  abstract InvertedDocEndConsumerPerThread addThread(DocInverterPerThread docInverterPerThread);
  /** Write all buffered state for the given threads/fields to the segment. */
  abstract void flush(Map<InvertedDocEndConsumerPerThread,Collection<InvertedDocEndConsumerPerField>> threadsAndFields, SegmentWriteState state) throws IOException;
  /** Close any doc-store outputs held open for the given write state. */
  abstract void closeDocStore(SegmentWriteState state) throws IOException;
  /** Discard all buffered state (error path). */
  abstract void abort();
  /** Supply the FieldInfos this consumer should use. */
  abstract void setFieldInfos(FieldInfos fieldInfos);
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/InvertedDocEndConsumer.java | Java | art | 1,376 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.IndexInput;
import java.util.LinkedList;
import java.util.HashSet;
import java.io.IOException;
/**
* Combines multiple files into a single compound file.
* The file format:<br>
* <ul>
* <li>VInt fileCount</li>
* <li>{Directory}
* fileCount entries with the following structure:</li>
* <ul>
* <li>long dataOffset</li>
* <li>String fileName</li>
* </ul>
* <li>{File Data}
* fileCount entries with the raw data of the corresponding file</li>
* </ul>
*
* The fileCount integer indicates how many files are contained in this compound
* file. The {directory} that follows has that many entries. Each directory entry
* contains a long pointer to the start of this file's data section, and a String
* with that file's name.
*/
final class CompoundFileWriter {

    // Bookkeeping for a single source file to be folded into the compound file.
    private static final class FileEntry {
        /** source file */
        String file;
        /** temporary holder for the start of directory entry for this file */
        long directoryOffset;
        /** temporary holder for the start of this file's data section */
        long dataOffset;
    }

    // Directory holding both the source files and the compound output.
    private Directory directory;
    // Name of the compound file being written (used verbatim; no extension added).
    private String fileName;
    // File names added so far; used to reject duplicate adds.
    private HashSet<String> ids;
    // Entries in insertion order; data sections are written in this same order.
    private LinkedList<FileEntry> entries;
    // Set once close() starts; guards against add-after-merge and double close.
    private boolean merged = false;
    // Optional abort hook checked periodically while copying; may be null.
    private SegmentMerger.CheckAbort checkAbort;

    /** Create the compound stream in the specified file. The file name is the
     * entire name (no extensions are added).
     * @throws NullPointerException if <code>dir</code> or <code>name</code> is null
     */
    public CompoundFileWriter(Directory dir, String name) {
        this(dir, name, null);
    }

    CompoundFileWriter(Directory dir, String name, SegmentMerger.CheckAbort checkAbort) {
        if (dir == null)
            throw new NullPointerException("directory cannot be null");
        if (name == null)
            throw new NullPointerException("name cannot be null");
        this.checkAbort = checkAbort;
        directory = dir;
        fileName = name;
        ids = new HashSet<String>();
        entries = new LinkedList<FileEntry>();
    }

    /** Returns the directory of the compound file. */
    public Directory getDirectory() {
        return directory;
    }

    /** Returns the name of the compound file. */
    public String getName() {
        return fileName;
    }

    /** Add a source stream. <code>file</code> is the string by which the
     * sub-stream will be known in the compound stream.
     *
     * @throws IllegalStateException if this writer is closed
     * @throws NullPointerException if <code>file</code> is null
     * @throws IllegalArgumentException if a file with the same name
     * has been added already
     */
    public void addFile(String file) {
        if (merged)
            throw new IllegalStateException(
                "Can't add extensions after merge has been called");
        if (file == null)
            throw new NullPointerException(
                "file cannot be null");
        if (! ids.add(file))
            throw new IllegalArgumentException(
                "File " + file + " already added");
        FileEntry entry = new FileEntry();
        entry.file = file;
        entries.add(entry);
    }

    /** Merge files with the extensions added up to now.
     * All files with these extensions are combined sequentially into the
     * compound stream. After successful merge, the source files
     * are deleted.
     *
     * <p>Note: despite the name, this method performs the actual merge.
     * It writes the entry count, a directory of (offset, name) pairs with
     * placeholder offsets, copies each file's data, then seeks back and
     * patches the real offsets into the directory.</p>
     *
     * @throws IllegalStateException if close() had been called before or
     * if no file has been added to this object
     */
    public void close() throws IOException {
        if (merged)
            throw new IllegalStateException(
                "Merge already performed");
        if (entries.isEmpty())
            throw new IllegalStateException(
                "No entries to merge have been defined");
        // Flip the flag before writing so a failed merge still blocks reuse.
        merged = true;
        // open the compound stream
        IndexOutput os = null;
        try {
            os = directory.createOutput(fileName);
            // Write the number of entries
            os.writeVInt(entries.size());
            // Write the directory with all offsets at 0.
            // Remember the positions of directory entries so that we can
            // adjust the offsets later
            long totalSize = 0;
            for (FileEntry fe : entries) {
                fe.directoryOffset = os.getFilePointer();
                os.writeLong(0);    // for now
                os.writeString(fe.file);
                totalSize += directory.fileLength(fe.file);
            }
            // Pre-allocate size of file as optimization --
            // this can potentially help IO performance as
            // we write the file and also later during
            // searching.  It also uncovers a disk-full
            // situation earlier and hopefully without
            // actually filling disk to 100%:
            final long finalLength = totalSize+os.getFilePointer();
            os.setLength(finalLength);
            // Open the files and copy their data into the stream.
            // Remember the locations of each file's data section.
            byte buffer[] = new byte[16384];
            for (FileEntry fe : entries) {
                fe.dataOffset = os.getFilePointer();
                copyFile(fe, os, buffer);
            }
            // Write the data offsets into the directory of the compound stream
            for (FileEntry fe : entries) {
                os.seek(fe.directoryOffset);
                os.writeLong(fe.dataOffset);
            }
            assert finalLength == os.length();
            // Close the output stream. Set the os to null before trying to
            // close so that if an exception occurs during the close, the
            // finally clause below will not attempt to close the stream
            // the second time.
            IndexOutput tmp = os;
            os = null;
            tmp.close();
        } finally {
            // Best-effort close on the error path only (os was nulled on the
            // success path above); the secondary IOException is deliberately
            // suppressed so the original exception propagates.
            if (os != null) try { os.close(); } catch (IOException e) { }
        }
    }

    /** Copy the contents of the file with specified extension into the
     * provided output stream. Use the provided buffer for moving data
     * to reduce memory allocation.
     */
    private void copyFile(FileEntry source, IndexOutput os, byte buffer[])
        throws IOException
    {
        IndexInput is = null;
        try {
            long startPtr = os.getFilePointer();
            is = directory.openInput(source.file);
            long length = is.length();
            long remainder = length;
            int chunk = buffer.length;
            while(remainder > 0) {
                int len = (int) Math.min(chunk, remainder);
                // NOTE(review): the 'false' presumably tells the input not to
                // use its internal read buffer for this bulk read — confirm
                // against IndexInput.readBytes.
                is.readBytes(buffer, 0, len, false);
                os.writeBytes(buffer, len);
                remainder -= len;
                if (checkAbort != null)
                    // Roughly every 2 MB we will check if
                    // it's time to abort
                    checkAbort.work(80);
            }
            // Verify that remainder is 0
            if (remainder != 0)
                throw new IOException(
                    "Non-zero remainder length after copying: " + remainder
                    + " (id: " + source.file + ", length: " + length
                    + ", buffer size: " + chunk + ")");
            // Verify that the output length diff is equal to original file
            long endPtr = os.getFilePointer();
            long diff = endPtr - startPtr;
            if (diff != length)
                throw new IOException(
                    "Difference in the output file offsets " + diff
                    + " does not match the original file length " + length);
        } finally {
            if (is != null) is.close();
        }
    }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/CompoundFileWriter.java | Java | art | 8,839 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.util.Collection;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.IOException;
import org.apache.lucene.store.Directory;
/** A {@link IndexDeletionPolicy} that wraps around any other
* {@link IndexDeletionPolicy} and adds the ability to hold and
* later release a single "snapshot" of an index. While
* the snapshot is held, the {@link IndexWriter} will not
* remove any files associated with it even if the index is
* otherwise being actively, arbitrarily changed. Because
* we wrap another arbitrary {@link IndexDeletionPolicy}, this
* gives you the freedom to continue using whatever {@link
* IndexDeletionPolicy} you would normally want to use with your
* index. Note that you can re-use a single instance of
* SnapshotDeletionPolicy across multiple writers as long
* as they are against the same index Directory. Any
* snapshot held when a writer is closed will "survive"
* when the next writer is opened.
*
* <p><b>WARNING</b>: This API is a new and experimental and
* may suddenly change.</p> */
public class SnapshotDeletionPolicy implements IndexDeletionPolicy {

  // Most recent commit seen via onInit/onCommit; null until the first callback.
  private IndexCommit lastCommit;
  // The wrapped policy that makes the actual deletion decisions.
  private IndexDeletionPolicy primary;
  // Segments file name of the held snapshot, or null when none is held.
  private String snapshot;

  public SnapshotDeletionPolicy(IndexDeletionPolicy primary) {
    this.primary = primary;
  }

  public synchronized void onInit(List<? extends IndexCommit> commits) throws IOException {
    primary.onInit(wrapCommits(commits));
    lastCommit = commits.get(commits.size() - 1);
  }

  public synchronized void onCommit(List<? extends IndexCommit> commits) throws IOException {
    primary.onCommit(wrapCommits(commits));
    lastCommit = commits.get(commits.size() - 1);
  }

  /** Take a snapshot of the most recent commit to the
   *  index.  You must call release() to free this snapshot.
   *  Note that while the snapshot is held, the files it
   *  references will not be deleted, which will consume
   *  additional disk space in your index.  If you take a
   *  snapshot at a particularly bad time (say just before
   *  you call optimize()) then in the worst case this could
   *  consume an extra 1X of your total index size, until
   *  you release the snapshot. */
  public synchronized IndexCommit snapshot() {
    if (lastCommit == null) {
      throw new IllegalStateException("no index commits to snapshot !");
    }
    if (snapshot != null) {
      throw new IllegalStateException("snapshot is already set; please call release() first");
    }
    snapshot = lastCommit.getSegmentsFileName();
    return lastCommit;
  }

  /** Release the currently held snapshot. */
  public synchronized void release() {
    if (snapshot == null) {
      throw new IllegalStateException("snapshot was not set; please call snapshot() first");
    }
    snapshot = null;
  }

  /** Commit wrapper that vetoes deletion while its commit is the held snapshot. */
  private class MyCommitPoint extends IndexCommit {
    IndexCommit delegate;

    MyCommitPoint(IndexCommit delegate) {
      this.delegate = delegate;
    }

    @Override
    public String getSegmentsFileName() {
      return delegate.getSegmentsFileName();
    }

    @Override
    public Collection<String> getFileNames() throws IOException {
      return delegate.getFileNames();
    }

    @Override
    public Directory getDirectory() {
      return delegate.getDirectory();
    }

    @Override
    public void delete() {
      synchronized(SnapshotDeletionPolicy.this) {
        // Suppress the delete request while this commit point is the
        // currently held snapshot.
        final String held = snapshot;
        if (held != null && held.equals(getSegmentsFileName())) {
          return;
        }
        delegate.delete();
      }
    }

    @Override
    public boolean isDeleted() {
      return delegate.isDeleted();
    }

    @Override
    public long getVersion() {
      return delegate.getVersion();
    }

    @Override
    public long getGeneration() {
      return delegate.getGeneration();
    }

    @Override
    public Map<String,String> getUserData() throws IOException {
      return delegate.getUserData();
    }

    @Override
    public boolean isOptimized() {
      return delegate.isOptimized();
    }
  }

  /** Wrap every commit in a MyCommitPoint so deletions flow through us. */
  private List<IndexCommit> wrapCommits(List<? extends IndexCommit> commits) {
    final List<IndexCommit> wrapped = new ArrayList<IndexCommit>(commits.size());
    for (final IndexCommit c : commits) {
      wrapped.add(new MyCommitPoint(c));
    }
    return wrapped;
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/SnapshotDeletionPolicy.java | Java | art | 5,179 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.UnicodeUtil;
/** This stores a monotonically increasing set of <Term, TermInfo> pairs in a
Directory. A TermInfos can be written once, in order. */
final class TermInfosWriter {
  /** The file format version, a negative number. */
  public static final int FORMAT = -3;
  // Changed strings to true utf8 with length-in-bytes not
  // length-in-chars
  public static final int FORMAT_VERSION_UTF8_LENGTH_IN_BYTES = -4;
  // NOTE: always change this if you switch to a new format!
  public static final int FORMAT_CURRENT = FORMAT_VERSION_UTF8_LENGTH_IN_BYTES;
  private FieldInfos fieldInfos;
  private IndexOutput output;
  // TermInfo of the previously added term; freq/prox pointers are
  // delta-encoded against it.
  private TermInfo lastTi = new TermInfo();
  // Number of terms written so far; patched into the header on close().
  private long size;
  // TODO: the default values for these two parameters should be settable from
  // IndexWriter.  However, once that's done, folks will start setting them to
  // ridiculous values and complaining that things don't work well, as with
  // mergeFactor.  So, let's wait until a number of folks find that alternate
  // values work better.  Note that both of these values are stored in the
  // segment, so that it's safe to change these w/o rebuilding all indexes.
  /** Expert: The fraction of terms in the "dictionary" which should be stored
   * in RAM.  Smaller values use more memory, but make searching slightly
   * faster, while larger values use less memory and make searching slightly
   * slower.  Searching is typically not dominated by dictionary lookup, so
   * tweaking this is rarely useful.*/
  int indexInterval = 128;
  /** Expert: The fraction of {@link TermDocs} entries stored in skip tables,
   * used to accelerate {@link TermDocs#skipTo(int)}.  Larger values result in
   * smaller indexes, greater acceleration, but fewer accelerable cases, while
   * smaller values result in bigger indexes, less acceleration and more
   * accelerable cases. More detailed experiments would be useful here. */
  int skipInterval = 16;
  /** Expert: The maximum number of skip levels. Smaller values result in
   * slightly smaller indexes, but slower skipping in big posting lists.
   */
  int maxSkipLevels = 10;
  // File pointer into the paired writer's output recorded after the last
  // index entry; lets the index (.tii) writer delta-encode its pointers.
  private long lastIndexPointer;
  // True if this instance writes the term index (.tii), false for the
  // term dictionary (.tis) -- see initialize().
  private boolean isIndex;
  // UTF-8 bytes of the previously written term: used for prefix
  // compression in writeTerm() and (via asserts) order checking.
  // Grown on demand in writeTerm().
  private byte[] lastTermBytes = new byte[10];
  private int lastTermBytesLength = 0;
  private int lastFieldNumber = -1;
  // The paired writer: the dictionary writer and the index writer each
  // hold a reference to the other (set up in the public constructor).
  private TermInfosWriter other;
  private UnicodeUtil.UTF8Result utf8Result = new UnicodeUtil.UTF8Result();
  TermInfosWriter(Directory directory, String segment, FieldInfos fis,
                  int interval)
       throws IOException {
    // Build the dictionary (.tis) writer, then its paired index (.tii)
    // writer, and link the two together.
    initialize(directory, segment, fis, interval, false);
    other = new TermInfosWriter(directory, segment, fis, interval, true);
    other.other = this;
  }
  private TermInfosWriter(Directory directory, String segment, FieldInfos fis,
                          int interval, boolean isIndex) throws IOException {
    initialize(directory, segment, fis, interval, isIndex);
  }
  // Opens the output file and writes the fixed header; the size slot
  // written as 0 here is patched in close().
  private void initialize(Directory directory, String segment, FieldInfos fis,
                          int interval, boolean isi) throws IOException {
    indexInterval = interval;
    fieldInfos = fis;
    isIndex = isi;
    output = directory.createOutput(segment + (isIndex ? ".tii" : ".tis"));
    output.writeInt(FORMAT_CURRENT);              // write format
    output.writeLong(0);                          // leave space for size
    output.writeInt(indexInterval);               // write indexInterval
    output.writeInt(skipInterval);                // write skipInterval
    output.writeInt(maxSkipLevels);               // write maxSkipLevels
    assert initUTF16Results();
  }
  // Converts the term text to UTF-8 and forwards to the byte-level add().
  void add(Term term, TermInfo ti) throws IOException {
    UnicodeUtil.UTF16toUTF8(term.text, 0, term.text.length(), utf8Result);
    add(fieldInfos.fieldNumber(term.field), utf8Result.result, utf8Result.length, ti);
  }
  // Currently used only by assert statements
  UnicodeUtil.UTF16Result utf16Result1;
  UnicodeUtil.UTF16Result utf16Result2;
  // Currently used only by assert statements
  private boolean initUTF16Results() {
    utf16Result1 = new UnicodeUtil.UTF16Result();
    utf16Result2 = new UnicodeUtil.UTF16Result();
    return true;
  }
  // Currently used only by assert statement
  // Compares (field, termBytes) against the previously added term,
  // decoding both terms back to UTF-16 to compare in char order.
  // Returns < 0 when the new term sorts after the last one (i.e. order OK).
  private int compareToLastTerm(int fieldNumber, byte[] termBytes, int termBytesLength) {
    if (lastFieldNumber != fieldNumber) {
      final int cmp = fieldInfos.fieldName(lastFieldNumber).compareTo(fieldInfos.fieldName(fieldNumber));
      // If there is a field named "" (empty string) then we
      // will get 0 on this comparison, yet, it's "OK".  But
      // it's not OK if two different field numbers map to
      // the same name.
      if (cmp != 0 || lastFieldNumber != -1)
        return cmp;
    }
    UnicodeUtil.UTF8toUTF16(lastTermBytes, 0, lastTermBytesLength, utf16Result1);
    UnicodeUtil.UTF8toUTF16(termBytes, 0, termBytesLength, utf16Result2);
    final int len;
    if (utf16Result1.length < utf16Result2.length)
      len = utf16Result1.length;
    else
      len = utf16Result2.length;
    for(int i=0;i<len;i++) {
      final char ch1 = utf16Result1.result[i];
      final char ch2 = utf16Result2.result[i];
      if (ch1 != ch2)
        return ch1-ch2;
    }
    return utf16Result1.length - utf16Result2.length;
  }
  /** Adds a new <<fieldNumber, termBytes>, TermInfo> pair to the set.
    Term must be lexicographically greater than all previous Terms added.
    TermInfo pointers must be positive and greater than all previous.*/
  void add(int fieldNumber, byte[] termBytes, int termBytesLength, TermInfo ti)
    throws IOException {
    assert compareToLastTerm(fieldNumber, termBytes, termBytesLength) < 0 ||
      (isIndex && termBytesLength == 0 && lastTermBytesLength == 0) :
      "Terms are out of order: field=" + fieldInfos.fieldName(fieldNumber) + " (number " + fieldNumber + ")" +
        " lastField=" + fieldInfos.fieldName(lastFieldNumber) + " (number " + lastFieldNumber + ")" +
        " text=" + new String(termBytes, 0, termBytesLength, "UTF-8") + " lastText=" + new String(lastTermBytes, 0, lastTermBytesLength, "UTF-8");
    assert ti.freqPointer >= lastTi.freqPointer: "freqPointer out of order (" + ti.freqPointer + " < " + lastTi.freqPointer + ")";
    assert ti.proxPointer >= lastTi.proxPointer: "proxPointer out of order (" + ti.proxPointer + " < " + lastTi.proxPointer + ")";
    // Every indexInterval'th dictionary term also goes into the paired
    // index (.tii) writer; note we add the *previous* term.
    if (!isIndex && size % indexInterval == 0)
      other.add(lastFieldNumber, lastTermBytes, lastTermBytesLength, lastTi);                      // add an index term
    writeTerm(fieldNumber, termBytes, termBytesLength);                        // write term
    output.writeVInt(ti.docFreq);                       // write doc freq
    output.writeVLong(ti.freqPointer - lastTi.freqPointer); // write pointers
    output.writeVLong(ti.proxPointer - lastTi.proxPointer);
    // Skip data exists only for terms frequent enough to have a skip list.
    if (ti.docFreq >= skipInterval) {
      output.writeVInt(ti.skipOffset);
    }
    if (isIndex) {
      output.writeVLong(other.output.getFilePointer() - lastIndexPointer);
      lastIndexPointer = other.output.getFilePointer(); // write pointer
    }
    lastFieldNumber = fieldNumber;
    lastTi.set(ti);
    size++;
  }
  // Writes one term with front-coding: shared-prefix length, suffix
  // length, suffix bytes, field number; then records the term as the
  // new "last term".
  private void writeTerm(int fieldNumber, byte[] termBytes, int termBytesLength)
    throws IOException {
    // TODO: UTF16toUTF8 could tell us this prefix
    // Compute prefix in common with last term:
    int start = 0;
    final int limit = termBytesLength < lastTermBytesLength ? termBytesLength : lastTermBytesLength;
    while(start < limit) {
      if (termBytes[start] != lastTermBytes[start])
        break;
      start++;
    }
    final int length = termBytesLength - start;
    output.writeVInt(start);                     // write shared prefix length
    output.writeVInt(length);                  // write delta length
    output.writeBytes(termBytes, start, length);  // write delta bytes
    output.writeVInt(fieldNumber); // write field num
    if (lastTermBytes.length < termBytesLength) {
      // Grow by 1.5x; only the shared prefix needs copying because the
      // suffix is overwritten by the arraycopy below.
      byte[] newArray = new byte[(int) (termBytesLength*1.5)];
      System.arraycopy(lastTermBytes, 0, newArray, 0, start);
      lastTermBytes = newArray;
    }
    System.arraycopy(termBytes, start, lastTermBytes, start, length);
    lastTermBytesLength = termBytesLength;
  }
  /** Called to complete TermInfos creation. Patches the size slot that
   *  initialize() reserved (the 4-byte format int precedes it, hence
   *  seek(4)) and closes the paired index writer too. */
  void close() throws IOException {
    output.seek(4);          // write size after format
    output.writeLong(size);
    output.close();
    if (!isIndex)
      other.close();
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/TermInfosWriter.java | Java | art | 9,496 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import org.apache.lucene.util.BitVector;
import org.apache.lucene.store.IndexInput;
/**
 * {@link TermDocs} over a single segment: after a seek(), delta-decodes
 * the (doc, freq) postings of one term from the segment's frequency
 * stream, transparently skipping deleted documents, and uses the term's
 * skip list to accelerate skipTo() when the doc freq is large enough.
 */
class SegmentTermDocs implements TermDocs {
  protected SegmentReader parent;
  // Private clone of the segment's frequency stream; positioned per term.
  protected IndexInput freqStream;
  // Number of postings consumed so far for the current term.
  protected int count;
  // Doc freq of the current term: total postings to read (0 if term absent).
  protected int df;
  protected BitVector deletedDocs;
  // Current doc id; the stream stores deltas which accumulate here.
  int doc = 0;
  int freq;
  private int skipInterval;
  private int maxSkipLevels;
  // Lazily created reader over the current term's skip list (see skipTo).
  private DefaultSkipListReader skipListReader;
  private long freqBasePointer;
  private long proxBasePointer;
  // Start of the current term's skip data within the frequency stream.
  private long skipPointer;
  // Whether skipListReader has been init'ed for the current term.
  private boolean haveSkipped;
  protected boolean currentFieldStoresPayloads;
  protected boolean currentFieldOmitTermFreqAndPositions;
  protected SegmentTermDocs(SegmentReader parent) {
    this.parent = parent;
    this.freqStream = (IndexInput) parent.core.freqStream.clone();
    // Capture deletedDocs under the reader's lock (it can be swapped
    // concurrently -- NOTE(review): presumably by reopen/delete; confirm).
    synchronized (parent) {
      this.deletedDocs = parent.deletedDocs;
    }
    this.skipInterval = parent.core.getTermsReader().getSkipInterval();
    this.maxSkipLevels = parent.core.getTermsReader().getMaxSkipLevels();
  }
  public void seek(Term term) throws IOException {
    TermInfo ti = parent.core.getTermsReader().get(term);
    seek(ti, term);
  }
  public void seek(TermEnum termEnum) throws IOException {
    TermInfo ti;
    Term term;
    // use comparison of fieldinfos to verify that termEnum belongs to the same segment as this SegmentTermDocs
    if (termEnum instanceof SegmentTermEnum && ((SegmentTermEnum) termEnum).fieldInfos == parent.core.fieldInfos) {        // optimized case
      SegmentTermEnum segmentTermEnum = ((SegmentTermEnum) termEnum);
      term = segmentTermEnum.term();
      ti = segmentTermEnum.termInfo();
    } else  {                                         // punt case
      term = termEnum.term();
      ti = parent.core.getTermsReader().get(term);
    }
    seek(ti, term);
  }
  // Position this iterator at the start of the given term's postings.
  // A null TermInfo means the term does not exist: df is set to 0 so
  // next() returns false immediately.
  void seek(TermInfo ti, Term term) throws IOException {
    count = 0;
    FieldInfo fi = parent.core.fieldInfos.fieldInfo(term.field);
    currentFieldOmitTermFreqAndPositions = (fi != null) ? fi.omitTermFreqAndPositions : false;
    currentFieldStoresPayloads = (fi != null) ? fi.storePayloads : false;
    if (ti == null) {
      df = 0;
    } else {
      df = ti.docFreq;
      doc = 0;
      freqBasePointer = ti.freqPointer;
      proxBasePointer = ti.proxPointer;
      skipPointer = freqBasePointer + ti.skipOffset;
      freqStream.seek(freqBasePointer);
      haveSkipped = false;
    }
  }
  public void close() throws IOException {
    freqStream.close();
    if (skipListReader != null)
      skipListReader.close();
  }
  public final int doc() { return doc; }
  public final int freq() { return freq; }
  // Hook for subclasses (e.g. to skip position data for a passed-over doc).
  protected void skippingDoc() throws IOException {
  }
  public boolean next() throws IOException {
    while (true) {
      if (count == df)
        return false;
      final int docCode = freqStream.readVInt();
      if (currentFieldOmitTermFreqAndPositions) {
        // Without term freqs, the stream holds plain doc deltas.
        doc += docCode;
        freq = 1;
      } else {
        doc += docCode >>> 1;       // shift off low bit
        if ((docCode & 1) != 0)       // if low bit is set
          freq = 1;         // freq is one
        else
          freq = freqStream.readVInt();     // else read freq
      }
      count++;
      if (deletedDocs == null || !deletedDocs.get(doc))
        break;
      skippingDoc();
    }
    return true;
  }
  /** Optimized implementation. Bulk-reads up to docs.length postings,
   *  filtering deleted docs; returns the number actually filled in. */
  public int read(final int[] docs, final int[] freqs)
          throws IOException {
    final int length = docs.length;
    if (currentFieldOmitTermFreqAndPositions) {
      return readNoTf(docs, freqs, length);
    } else {
      int i = 0;
      while (i < length && count < df) {
        // manually inlined call to next() for speed
        final int docCode = freqStream.readVInt();
        doc += docCode >>> 1;       // shift off low bit
        if ((docCode & 1) != 0)       // if low bit is set
          freq = 1;         // freq is one
        else
          freq = freqStream.readVInt();     // else read freq
        count++;
        if (deletedDocs == null || !deletedDocs.get(doc)) {
          docs[i] = doc;
          freqs[i] = freq;
          ++i;
        }
      }
      return i;
    }
  }
  // Bulk-read variant for fields that omit term freqs entirely.
  private final int readNoTf(final int[] docs, final int[] freqs, final int length) throws IOException {
    int i = 0;
    while (i < length && count < df) {
      // manually inlined call to next() for speed
      doc += freqStream.readVInt();
      count++;
      if (deletedDocs == null || !deletedDocs.get(doc)) {
        docs[i] = doc;
        // Hardware freq to 1 when term freqs were not
        // stored in the index
        freqs[i] = 1;
        ++i;
      }
    }
    return i;
  }
  /** Overridden by SegmentTermPositions to skip in prox stream. */
  protected void skipProx(long proxPointer, int payloadLength) throws IOException {}
  /** Optimized implementation. */
  public boolean skipTo(int target) throws IOException {
    // Only terms with df >= skipInterval have skip data (see the writer).
    if (df >= skipInterval) {                      // optimized case
      if (skipListReader == null)
        skipListReader = new DefaultSkipListReader((IndexInput) freqStream.clone(), maxSkipLevels, skipInterval); // lazily clone
      if (!haveSkipped) {                          // lazily initialize skip stream
        skipListReader.init(skipPointer, freqBasePointer, proxBasePointer, df, currentFieldStoresPayloads);
        haveSkipped = true;
      }
      int newCount = skipListReader.skipTo(target);
      // Only reposition if skipping actually moved us forward.
      if (newCount > count) {
        freqStream.seek(skipListReader.getFreqPointer());
        skipProx(skipListReader.getProxPointer(), skipListReader.getPayloadLength());
        doc = skipListReader.getDoc();
        count = newCount;
      }
    }
    // done skipping, now just scan
    do {
      if (!next())
        return false;
    } while (target > doc);
    return true;
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/SegmentTermDocs.java | Java | art | 6,755 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
/**
 * Per-thread tee for the doc-field chain: every lifecycle call is
 * forwarded to two downstream {@link DocFieldConsumerPerThread}
 * instances, and when both produce deferred writes they are paired in
 * a single per-doc container obtained from the shared parent.
 */
final class DocFieldConsumersPerThread extends DocFieldConsumerPerThread {

  final DocFieldConsumerPerThread one;
  final DocFieldConsumerPerThread two;
  final DocFieldConsumers parent;
  final DocumentsWriter.DocState docState;

  public DocFieldConsumersPerThread(DocFieldProcessorPerThread docFieldProcessorPerThread,
                                    DocFieldConsumers parent, DocFieldConsumerPerThread one, DocFieldConsumerPerThread two) {
    this.parent = parent;
    this.one = one;
    this.two = two;
    docState = docFieldProcessorPerThread.docState;
  }

  @Override
  public void startDocument() throws IOException {
    // Both children see every document start.
    one.startDocument();
    two.startDocument();
  }

  @Override
  public void abort() {
    // Always abort the second child, even if aborting the first throws.
    try {
      one.abort();
    } finally {
      two.abort();
    }
  }

  @Override
  public DocumentsWriter.DocWriter finishDocument() throws IOException {
    final DocumentsWriter.DocWriter first = one.finishDocument();
    final DocumentsWriter.DocWriter second = two.finishDocument();

    // If only one child produced output, pass it through unchanged.
    if (first == null)
      return second;
    if (second == null)
      return first;

    // Both produced output: bundle the pair into one recycled container.
    assert first.docID == docState.docID;
    assert second.docID == docState.docID;
    final DocFieldConsumers.PerDoc pair = parent.getPerDoc();
    pair.docID = docState.docID;
    pair.one = first;
    pair.two = second;
    return pair;
  }

  @Override
  public DocFieldConsumerPerField addField(FieldInfo fi) {
    return new DocFieldConsumersPerField(this, one.addField(fi), two.addField(fi));
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/DocFieldConsumersPerThread.java | Java | art | 2,417 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
/**
 * Abstract sink for the per-term position/payload stream produced while
 * writing postings.  Implementations receive each position (with optional
 * payload bytes) via {@link #addPosition}, then a single {@link #finish}
 * call once the current term's positions are exhausted.
 */
abstract class FormatPostingsPositionsConsumer {

  /** Add a new position &amp; payload.  If payloadLength &gt; 0
   *  you must read those bytes from the IndexInput. */
  abstract void addPosition(int position, byte[] payload, int payloadOffset, int payloadLength) throws IOException;

  /** Called when we are done adding positions &amp; payloads. */
  abstract void finish() throws IOException;
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/FormatPostingsPositionsConsumer.java | Java | art | 1,257 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.Collection;
import java.util.Map;
import java.util.HashMap;
/**
* This is a DocConsumer that gathers all fields under the
* same name, and calls per-field consumers to process field
 * by field. This class doesn't do any "real" work
* of its own: it just forwards the fields to a
* DocFieldConsumer.
*/
final class DocFieldProcessor extends DocConsumer {

  final DocumentsWriter docWriter;
  final FieldInfos fieldInfos = new FieldInfos();
  final DocFieldConsumer consumer;
  final StoredFieldsWriter fieldsWriter;

  public DocFieldProcessor(DocumentsWriter docWriter, DocFieldConsumer consumer) {
    this.docWriter = docWriter;
    this.consumer = consumer;
    consumer.setFieldInfos(fieldInfos);
    fieldsWriter = new StoredFieldsWriter(docWriter, fieldInfos);
  }

  /** Closes the doc stores of the downstream consumer and the stored-fields writer. */
  @Override
  public void closeDocStore(SegmentWriteState state) throws IOException {
    consumer.closeDocStore(state);
    fieldsWriter.closeDocStore(state);
  }

  /** Flushes all per-thread state to the new segment and writes the field infos. */
  @Override
  public void flush(Collection<DocConsumerPerThread> threads, SegmentWriteState state) throws IOException {

    // Gather, per consumer thread, the set of fields it touched, and give
    // each per-thread processor a chance to drop unused fields.
    final Map<DocFieldConsumerPerThread, Collection<DocFieldConsumerPerField>> fieldsByThread =
        new HashMap<DocFieldConsumerPerThread, Collection<DocFieldConsumerPerField>>();
    for (final DocConsumerPerThread t : threads) {
      final DocFieldProcessorPerThread docThread = (DocFieldProcessorPerThread) t;
      fieldsByThread.put(docThread.consumer, docThread.fields());
      docThread.trimFields(state);
    }

    fieldsWriter.flush(state);
    consumer.flush(fieldsByThread, state);

    // Important to save after asking consumer to flush so
    // consumer can alter the FieldInfo* if necessary.  EG,
    // FreqProxTermsWriter does this with
    // FieldInfo.storePayload.
    final String fieldInfosFile = state.segmentFileName(IndexFileNames.FIELD_INFOS_EXTENSION);
    fieldInfos.write(state.directory, fieldInfosFile);
    state.flushedFiles.add(fieldInfosFile);
  }

  /** Aborts the stored-fields writer first, then the downstream consumer. */
  @Override
  public void abort() {
    fieldsWriter.abort();
    consumer.abort();
  }

  /** Delegates RAM-freeing requests to the downstream consumer. */
  @Override
  public boolean freeRAM() {
    return consumer.freeRAM();
  }

  /** Creates the per-thread processor bound to the given writer thread state. */
  @Override
  public DocConsumerPerThread addThread(DocumentsWriterThreadState threadState) throws IOException {
    return new DocFieldProcessorPerThread(threadState, this);
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/DocFieldProcessor.java | Java | art | 3,186 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
/** A {@link MergeScheduler} that simply does each merge
* sequentially, using the current thread. */
public class SerialMergeScheduler extends MergeScheduler {

  /** Just do the merges in sequence.  We do this
   * "synchronized" so that even if the application is using
   * multiple threads, only one merge may run at a time. */
  @Override
  synchronized public void merge(IndexWriter writer)
    throws CorruptIndexException, IOException {

    // Drain the writer's pending merges one at a time on the calling thread.
    for (MergePolicy.OneMerge pending = writer.getNextMerge();
         pending != null;
         pending = writer.getNextMerge()) {
      writer.merge(pending);
    }
  }

  /** No resources to release. */
  @Override
  public void close() {}
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/SerialMergeScheduler.java | Java | art | 1,508 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.store.Directory;
import java.io.IOException;
import java.util.List;
import java.util.ArrayList;
import java.util.Set;
/**
* <p>Expert: a MergePolicy determines the sequence of
* primitive merge operations to be used for overall merge
* and optimize operations.</p>
*
* <p>Whenever the segments in an index have been altered by
* {@link IndexWriter}, either the addition of a newly
* flushed segment, addition of many segments from
* addIndexes* calls, or a previous merge that may now need
* to cascade, {@link IndexWriter} invokes {@link
* #findMerges} to give the MergePolicy a chance to pick
* merges that are now required. This method returns a
* {@link MergeSpecification} instance describing the set of
* merges that should be done, or null if no merges are
* necessary. When IndexWriter.optimize is called, it calls
* {@link #findMergesForOptimize} and the MergePolicy should
* then return the necessary merges.</p>
*
* <p>Note that the policy can return more than one merge at
* a time. In this case, if the writer is using {@link
* SerialMergeScheduler}, the merges will be run
* sequentially but if it is using {@link
* ConcurrentMergeScheduler} they will be run concurrently.</p>
*
* <p>The default MergePolicy is {@link
* LogByteSizeMergePolicy}.</p>
*
* <p><b>NOTE:</b> This API is new and still experimental
* (subject to change suddenly in the next release)</p>
*
* <p><b>NOTE</b>: This class typically requires access to
* package-private APIs (e.g. <code>SegmentInfos</code>) to do its job;
* if you implement your own MergePolicy, you'll need to put
* it in package org.apache.lucene.index in order to use
* these APIs.
*/
public abstract class MergePolicy implements java.io.Closeable {

  /** OneMerge provides the information necessary to perform
   * an individual primitive merge operation, resulting in
   * a single new segment.  The merge spec includes the
   * subset of segments to be merged as well as whether the
   * new segment should use the compound file format. */
  public static class OneMerge {

    // The fields below are package-private bookkeeping, read and written
    // by IndexWriter while the merge is registered, run, and committed.
    SegmentInfo info;               // resulting segment; set by IndexWriter
    boolean mergeDocStores;         // whether doc stores must be merged; used by IndexWriter
    boolean optimize;               // true if this merge is part of optimize(); used by IndexWriter
    boolean increfDone;             // source segment files have been incRef'd; used by IndexWriter
    boolean registerDone;           // merge has been registered with the writer; used by IndexWriter
    long mergeGen;                  // generation stamp; used by IndexWriter
    boolean isExternal;             // true if segments come from a different directory; used by IndexWriter
    int maxNumSegmentsOptimize;     // target segment count for optimize merges; used by IndexWriter
    SegmentReader[] readers;        // source readers; used by IndexWriter
    SegmentReader[] readersClone;   // clones of the source readers; used by IndexWriter
    List<String> mergeFiles;        // files owned by this merge; used by IndexWriter
    final SegmentInfos segments;    // the segments to be merged (never empty)
    final boolean useCompoundFile;  // whether the merged segment uses the compound format
    boolean aborted;                // set by abort(); checked via isAborted()/checkAborted()
    Throwable error;                // first exception hit while executing this merge, if any

    /**
     * @param segments the (non-empty) segments to merge
     * @param useCompoundFile true if the resulting segment should use the
     *        compound file format
     */
    public OneMerge(SegmentInfos segments, boolean useCompoundFile) {
      if (0 == segments.size())
        throw new RuntimeException("segments must include at least one segment");
      this.segments = segments;
      this.useCompoundFile = useCompoundFile;
    }

    /** Record that an exception occurred while executing
     *  this merge */
    synchronized void setException(Throwable error) {
      this.error = error;
    }

    /** Retrieve previous exception set by {@link
     *  #setException}. */
    synchronized Throwable getException() {
      return error;
    }

    /** Mark this merge as aborted.  If this is called
     *  before the merge is committed then the merge will
     *  not be committed. */
    synchronized void abort() {
      aborted = true;
    }

    /** Returns true if this merge was aborted. */
    synchronized boolean isAborted() {
      return aborted;
    }

    /** Throws {@link MergeAbortedException} if {@link #abort} was called. */
    synchronized void checkAborted(Directory dir) throws MergeAbortedException {
      if (aborted)
        throw new MergeAbortedException("merge is aborted: " + segString(dir));
    }

    /** Human-readable description of this merge: the source segments,
     *  the destination segment (if known), and any optimize/docStore flags. */
    String segString(Directory dir) {
      StringBuilder b = new StringBuilder();
      final int numSegments = segments.size();
      for(int i=0;i<numSegments;i++) {
        if (i > 0) b.append(' ');
        b.append(segments.info(i).segString(dir));
      }
      if (info != null)
        b.append(" into ").append(info.name);
      if (optimize)
        b.append(" [optimize]");
      if (mergeDocStores) {
        b.append(" [mergeDocStores]");
      }
      return b.toString();
    }
  }

  /**
   * A MergeSpecification instance provides the information
   * necessary to perform multiple merges.  It simply
   * contains a list of {@link OneMerge} instances.
   */
  public static class MergeSpecification {

    /**
     * The subset of segments to be included in the primitive merge.
     */
    public List<OneMerge> merges = new ArrayList<OneMerge>();

    /** Appends one primitive merge to this specification. */
    public void add(OneMerge merge) {
      merges.add(merge);
    }

    /** Human-readable description of every merge in this spec, numbered from 1. */
    public String segString(Directory dir) {
      StringBuilder b = new StringBuilder();
      b.append("MergeSpec:\n");
      final int count = merges.size();
      for(int i=0;i<count;i++)
        b.append("  ").append(1 + i).append(": ").append(merges.get(i).segString(dir));
      return b.toString();
    }
  }

  /** Exception thrown if there are any problems while
   *  executing a merge. */
  public static class MergeException extends RuntimeException {
    private Directory dir;

    public MergeException(String message, Directory dir) {
      super(message);
      this.dir = dir;
    }

    public MergeException(Throwable exc, Directory dir) {
      super(exc);
      this.dir = dir;
    }

    /** Returns the {@link Directory} of the index that hit
     *  the exception. */
    public Directory getDirectory() {
      return dir;
    }
  }

  /** Thrown when a merge is aborted via {@link OneMerge#abort}. */
  public static class MergeAbortedException extends IOException {
    public MergeAbortedException() {
      super("merge is aborted");
    }
    public MergeAbortedException(String message) {
      super(message);
    }
  }

  // Writer this policy makes merge decisions for; supplied at construction.
  final protected IndexWriter writer;

  public MergePolicy(IndexWriter writer) {
    this.writer = writer;
  }

  /**
   * Determine what set of merge operations are now necessary on the index.
   * {@link IndexWriter} calls this whenever there is a change to the segments.
   * This call is always synchronized on the {@link IndexWriter} instance so
   * only one thread at a time will call this method.
   *
   * @param segmentInfos
   *          the total set of segments in the index
   */
  public abstract MergeSpecification findMerges(SegmentInfos segmentInfos)
      throws CorruptIndexException, IOException;

  /**
   * Determine what set of merge operations is necessary in order to optimize
   * the index. {@link IndexWriter} calls this when its
   * {@link IndexWriter#optimize()} method is called. This call is always
   * synchronized on the {@link IndexWriter} instance so only one thread at a
   * time will call this method.
   *
   * @param segmentInfos
   *          the total set of segments in the index
   * @param maxSegmentCount
   *          requested maximum number of segments in the index (currently this
   *          is always 1)
   * @param segmentsToOptimize
   *          contains the specific SegmentInfo instances that must be merged
   *          away. This may be a subset of all SegmentInfos.
   */
  public abstract MergeSpecification findMergesForOptimize(
      SegmentInfos segmentInfos, int maxSegmentCount, Set<SegmentInfo> segmentsToOptimize)
      throws CorruptIndexException, IOException;

  /**
   * Determine what set of merge operations is necessary in order to expunge all
   * deletes from the index.
   *
   * @param segmentInfos
   *          the total set of segments in the index
   */
  public abstract MergeSpecification findMergesToExpungeDeletes(
      SegmentInfos segmentInfos) throws CorruptIndexException, IOException;

  /**
   * Release all resources for the policy.
   */
  public abstract void close();

  /**
   * Returns true if a newly flushed (not from merge)
   * segment should use the compound file format.
   */
  public abstract boolean useCompoundFile(SegmentInfos segments, SegmentInfo newSegment);

  /**
   * Returns true if the doc store files should use the
   * compound file format.
   */
  public abstract boolean useCompoundDocStore(SegmentInfos segments);
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/MergePolicy.java | Java | art | 9,186 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.store.Directory;
import java.io.IOException;
import java.util.Map;
/**
 * A {@link DirectoryReader} that can never modify the index: every
 * constructor delegates to the matching DirectoryReader constructor with
 * the read-only flag set to {@code true}, and any attempt to acquire the
 * write lock fails via {@link ReadOnlySegmentReader#noWrite}.
 */
class ReadOnlyDirectoryReader extends DirectoryReader {

  /** Opens a read-only reader over the given commit point. */
  ReadOnlyDirectoryReader(Directory directory, SegmentInfos sis, IndexDeletionPolicy deletionPolicy, int termInfosIndexDivisor) throws IOException {
    super(directory, sis, deletionPolicy, true, termInfosIndexDivisor);
  }

  /** Reopen path: reuses (or clones) existing segment readers and norms cache. */
  ReadOnlyDirectoryReader(Directory directory, SegmentInfos infos, SegmentReader[] oldReaders, int[] oldStarts, Map<String,byte[]> oldNormsCache, boolean doClone,
                          int termInfosIndexDivisor) throws IOException {
    super(directory, infos, oldReaders, oldStarts, oldNormsCache, true, doClone, termInfosIndexDivisor);
  }

  /** Near-real-time path: reads the writer's in-flight segments. */
  ReadOnlyDirectoryReader(IndexWriter writer, SegmentInfos infos, int termInfosIndexDivisor) throws IOException {
    super(writer, infos, termInfosIndexDivisor);
  }

  // Always throws: read-only readers never take the write lock.
  @Override
  protected void acquireWriteLock() {
    ReadOnlySegmentReader.noWrite();
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/ReadOnlyDirectoryReader.java | Java | art | 1,827 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import org.apache.lucene.util.PriorityQueue;
final class SegmentMergeQueue extends PriorityQueue<SegmentMergeInfo> {

  SegmentMergeQueue(int size) {
    initialize(size);
  }

  /** Orders entries primarily by term; ties are broken by the reader's doc base. */
  @Override
  protected final boolean lessThan(SegmentMergeInfo stiA, SegmentMergeInfo stiB) {
    final int byTerm = stiA.term.compareTo(stiB.term);
    if (byTerm != 0) {
      return byTerm < 0;
    }
    return stiA.base < stiB.base;
  }

  /** Pops and closes every entry still in the queue. */
  final void close() throws IOException {
    while (top() != null) {
      pop().close();
    }
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/SegmentMergeQueue.java | Java | art | 1,392 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.lucene.index;
import java.io.IOException;
/** Base class for enumerating all but deleted docs.
*
* <p>NOTE: this class is meant only to be used internally
* by Lucene; it's only public so it can be shared across
* packages. This means the API is freely subject to
* change, and, the class could be removed entirely, in any
* Lucene release. Use directly at your own risk! */
public abstract class AbstractAllTermDocs implements TermDocs {
protected int maxDoc;
protected int doc = -1;
protected AbstractAllTermDocs(int maxDoc) {
this.maxDoc = maxDoc;
}
public void seek(Term term) throws IOException {
if (term==null) {
doc = -1;
} else {
throw new UnsupportedOperationException();
}
}
public void seek(TermEnum termEnum) throws IOException {
throw new UnsupportedOperationException();
}
public int doc() {
return doc;
}
public int freq() {
return 1;
}
public boolean next() throws IOException {
return skipTo(doc+1);
}
public int read(int[] docs, int[] freqs) throws IOException {
final int length = docs.length;
int i = 0;
while (i < length && doc < maxDoc) {
if (!isDeleted(doc)) {
docs[i] = doc;
freqs[i] = 1;
++i;
}
doc++;
}
return i;
}
public boolean skipTo(int target) throws IOException {
doc = target;
while (doc < maxDoc) {
if (!isDeleted(doc)) {
return true;
}
doc++;
}
return false;
}
public void close() throws IOException {
}
public abstract boolean isDeleted(int doc);
} | zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/AbstractAllTermDocs.java | Java | art | 2,420 |
package org.apache.lucene.index;
/**
* Copyright 2007 The Apache Software Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.util.*;
/**
* Store a sorted collection of {@link org.apache.lucene.index.TermVectorEntry}s. Collects all term information
* into a single, SortedSet.
* <br/>
* NOTE: This Mapper ignores all Field information for the Document. This means that if you are using offset/positions you will not
* know what Fields they correlate with.
* <br/>
* This is not thread-safe
*/
public class SortedTermVectorMapper extends TermVectorMapper{

  // All entries seen so far, ordered by the comparator given at construction.
  private SortedSet<TermVectorEntry> currentSet;
  // Fast lookup from term text to its (mutable) entry so repeated terms are
  // merged instead of duplicated in currentSet.
  private Map<String,TermVectorEntry> termToTVE = new HashMap<String,TermVectorEntry>();
  // Set (per field) by setExpectations(); whether offsets/positions are kept.
  private boolean storeOffsets;
  private boolean storePositions;
  /**
   * Stand-in name for the field in {@link TermVectorEntry}.
   */
  public static final String ALL = "_ALL_";

  /**
   *
   * @param comparator A Comparator for sorting {@link TermVectorEntry}s
   */
  public SortedTermVectorMapper(Comparator<TermVectorEntry> comparator) {
    this(false, false, comparator);
  }

  /**
   * @param ignoringPositions true to tell the index to skip loading positions
   * @param ignoringOffsets true to tell the index to skip loading offsets
   * @param comparator A Comparator for sorting {@link TermVectorEntry}s
   */
  public SortedTermVectorMapper(boolean ignoringPositions, boolean ignoringOffsets, Comparator<TermVectorEntry> comparator) {
    super(ignoringPositions, ignoringOffsets);
    currentSet = new TreeSet<TermVectorEntry>(comparator);
  }

  /**
   * Maps one term, merging it with any previous mention of the same term
   * (frequencies are summed; offset/position arrays are concatenated).
   *
   * NOTE(review): an existing entry is mutated while it sits inside the
   * TreeSet; if the comparator depends on frequency, the set's ordering may
   * become stale after a merge — confirm callers use a comparator that is
   * stable under these mutations.
   *
   * @param term The term to map
   * @param frequency The frequency of the term
   * @param offsets Offset information, may be null
   * @param positions Position information, may be null
   */
  //We need to combine any previous mentions of the term
  @Override
  public void map(String term, int frequency, TermVectorOffsetInfo[] offsets, int[] positions) {
    TermVectorEntry entry = termToTVE.get(term);
    if (entry == null) {
      // First mention: create an entry under the ALL pseudo-field, keeping
      // offsets/positions only if setExpectations() asked for them.
      entry = new TermVectorEntry(ALL, term, frequency,
          storeOffsets == true ? offsets : null,
          storePositions == true ? positions : null);
      termToTVE.put(term, entry);
      currentSet.add(entry);
    } else {
      // Repeat mention (e.g. same term in another field): merge in place.
      entry.setFrequency(entry.getFrequency() + frequency);
      if (storeOffsets)
      {
        TermVectorOffsetInfo [] existingOffsets = entry.getOffsets();
        //A few diff. cases here:  offsets is null, existing offsets is null, both are null, same for positions
        if (existingOffsets != null && offsets != null && offsets.length > 0)
        {
          //copy over the existing offsets
          TermVectorOffsetInfo [] newOffsets = new TermVectorOffsetInfo[existingOffsets.length + offsets.length];
          System.arraycopy(existingOffsets, 0, newOffsets, 0, existingOffsets.length);
          System.arraycopy(offsets, 0, newOffsets, existingOffsets.length, offsets.length);
          entry.setOffsets(newOffsets);
        }
        else if (existingOffsets == null && offsets != null && offsets.length > 0)
        {
          entry.setOffsets(offsets);
        }
        //else leave it alone
      }
      if (storePositions)
      {
        int [] existingPositions = entry.getPositions();
        if (existingPositions != null && positions != null && positions.length > 0)
        {
          int [] newPositions = new int[existingPositions.length + positions.length];
          System.arraycopy(existingPositions, 0, newPositions, 0, existingPositions.length);
          System.arraycopy(positions, 0, newPositions, existingPositions.length, positions.length);
          entry.setPositions(newPositions);
        }
        else if (existingPositions == null && positions != null && positions.length > 0)
        {
          entry.setPositions(positions);
        }
      }
    }
  }

  // Called once per field; field name and term count are ignored here —
  // only the offset/position flags are recorded (they apply to the
  // subsequent map() calls for that field).
  @Override
  public void setExpectations(String field, int numTerms, boolean storeOffsets, boolean storePositions) {
    this.storeOffsets = storeOffsets;
    this.storePositions = storePositions;
  }
  /**
   * The TermVectorEntrySet.  A SortedSet of {@link TermVectorEntry} objects.  Sort is by the comparator passed into the constructor.
   *<br/>
   * This set will be empty until after the mapping process takes place.
   *
   * @return The SortedSet of {@link TermVectorEntry}.
   */
  public SortedSet<TermVectorEntry> getTermVectorEntrySet()
  {
    return currentSet;
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/SortedTermVectorMapper.java | Java | art | 4,764 |
package org.apache.lucene.search;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.io.Serializable;
/**
* Provides a {@link FieldComparator} for custom field sorting.
*
* <b>NOTE:</b> This API is experimental and might change in
* incompatible ways in the next release.
*
*/
public abstract class FieldComparatorSource implements Serializable {

  /**
   * Creates a comparator for the field in the given index.
   *
   * @param fieldname
   *          Name of the field to create comparator for.
   * @param numHits
   *          Number of hits the comparator must track (its slot count).
   * @param sortPos
   *          Position of this comparator within the overall sort
   *          (0 = primary sort criterion).
   * @param reversed
   *          True if the sort order for this field should be reversed.
   * @return FieldComparator.
   * @throws IOException
   *           If an error occurs reading the index.
   */
  public abstract FieldComparator newComparator(String fieldname, int numHits, int sortPos, boolean reversed)
      throws IOException;
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/search/FieldComparatorSource.java | Java | art | 1,553 |
package org.apache.lucene.search;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.HashSet;
import java.util.Set;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
/** The abstract base class for queries.
<p>Instantiable subclasses are:
<ul>
<li> {@link TermQuery}
<li> {@link MultiTermQuery}
<li> {@link BooleanQuery}
<li> {@link WildcardQuery}
<li> {@link PhraseQuery}
<li> {@link PrefixQuery}
<li> {@link MultiPhraseQuery}
<li> {@link FuzzyQuery}
<li> {@link TermRangeQuery}
<li> {@link NumericRangeQuery}
<li> {@link org.apache.lucene.search.spans.SpanQuery}
</ul>
<p>A parser for queries is contained in:
<ul>
<li>{@link org.apache.lucene.queryParser.QueryParser QueryParser}
</ul>
*/
public abstract class Query implements java.io.Serializable, Cloneable {
  private float boost = 1.0f;                     // query boost factor

  /** Sets the boost for this query clause to <code>b</code>.  Documents
   * matching this clause will (in addition to the normal weightings) have
   * their score multiplied by <code>b</code>.
   */
  public void setBoost(float b) { boost = b; }

  /** Gets the boost for this clause.  Documents matching
   * this clause will (in addition to the normal weightings) have their score
   * multiplied by <code>b</code>.  The boost is 1.0 by default.
   */
  public float getBoost() { return boost; }

  /** Prints a query to a string, with <code>field</code> assumed to be the
   * default field and omitted.
   * <p>The representation used is one that is supposed to be readable
   * by {@link org.apache.lucene.queryParser.QueryParser QueryParser}. However,
   * there are the following limitations:
   * <ul>
   *  <li>If the query was created by the parser, the printed
   *  representation may not be exactly what was parsed. For example,
   *  characters that need to be escaped will be represented without
   *  the required backslash.</li>
   *  <li>Some of the more complicated queries (e.g. span queries)
   *  don't have a representation that can be parsed by QueryParser.</li>
   * </ul>
   */
  public abstract String toString(String field);

  /** Prints a query to a string. */
  @Override
  public String toString() {
    return toString("");
  }

  /**
   * Expert: Constructs an appropriate Weight implementation for this query.
   *
   * <p>
   * Only implemented by primitive queries, which re-write to themselves.
   */
  public Weight createWeight(Searcher searcher) throws IOException {
    throw new UnsupportedOperationException();
  }

  /**
   * Expert: Constructs and initializes a Weight for a top-level query.
   * Rewrites this query, builds its Weight, and normalizes the weight by
   * the similarity's query norm (falling back to 1.0 if the norm is
   * infinite or NaN, e.g. when sumOfSquaredWeights is 0).
   */
  public Weight weight(Searcher searcher) throws IOException {
    Query query = searcher.rewrite(this);
    Weight weight = query.createWeight(searcher);
    float sum = weight.sumOfSquaredWeights();
    float norm = getSimilarity(searcher).queryNorm(sum);
    if (Float.isInfinite(norm) || Float.isNaN(norm))
      norm = 1.0f;
    weight.normalize(norm);
    return weight;
  }

  /** Expert: called to re-write queries into primitive queries. For example,
   * a PrefixQuery will be rewritten into a BooleanQuery that consists
   * of TermQuerys.
   */
  public Query rewrite(IndexReader reader) throws IOException {
    return this;
  }

  /** Expert: called when re-writing queries under MultiSearcher.
   *
   * Create a single query suitable for use by all subsearchers (in 1-1
   * correspondence with queries). This is an optimization of the OR of
   * all queries. We handle the common optimization cases of equal
   * queries and overlapping clauses of boolean OR queries (as generated
   * by MultiTermQuery.rewrite()).
   * Be careful overriding this method as queries[0] determines which
   * method will be called and is not necessarily of the same type as
   * the other queries.
   */
  public Query combine(Query[] queries) {
    HashSet<Query> uniques = new HashSet<Query>();
    for (int i = 0; i < queries.length; i++) {
      Query query = queries[i];
      BooleanClause[] clauses = null;
      // check if we can split the query into clauses: only a
      // coord-disabled BooleanQuery made purely of SHOULD clauses can be
      // flattened without changing scoring semantics.
      boolean splittable = (query instanceof BooleanQuery);
      if(splittable){
        BooleanQuery bq = (BooleanQuery) query;
        splittable = bq.isCoordDisabled();
        clauses = bq.getClauses();
        for (int j = 0; splittable && j < clauses.length; j++) {
          splittable = (clauses[j].getOccur() == BooleanClause.Occur.SHOULD);
        }
      }
      if(splittable){
        for (int j = 0; j < clauses.length; j++) {
          uniques.add(clauses[j].getQuery());
        }
      } else {
        uniques.add(query);
      }
    }
    // optimization: if we have just one query, just return it
    if(uniques.size() == 1){
      return uniques.iterator().next();
    }
    BooleanQuery result = new BooleanQuery(true);
    for (final Query query : uniques)
      result.add(query, BooleanClause.Occur.SHOULD);
    return result;
  }

  /**
   * Expert: adds all terms occurring in this query to the terms set. Only
   * works if this query is in its {@link #rewrite rewritten} form.
   *
   * @throws UnsupportedOperationException if this query is not yet rewritten
   */
  public void extractTerms(Set<Term> terms) {
    // needs to be implemented by query subclasses
    throw new UnsupportedOperationException();
  }

  /** Expert: merges the clauses of a set of BooleanQuery's into a single
   * BooleanQuery.
   *
   *<p>A utility for use by {@link #combine(Query[])} implementations.
   */
  public static Query mergeBooleanQueries(BooleanQuery... queries) {
    HashSet<BooleanClause> allClauses = new HashSet<BooleanClause>();
    for (BooleanQuery booleanQuery : queries) {
      for (BooleanClause clause : booleanQuery) {
        allClauses.add(clause);
      }
    }
    // coord setting is taken from the first query; false if there are none
    boolean coordDisabled =
      queries.length==0? false : queries[0].isCoordDisabled();
    BooleanQuery result = new BooleanQuery(coordDisabled);
    for(BooleanClause clause2 : allClauses) {
      result.add(clause2);
    }
    return result;
  }

  /** Expert: Returns the Similarity implementation to be used for this query.
   * Subclasses may override this method to specify their own Similarity
   * implementation, perhaps one that delegates through that of the Searcher.
   * By default the Searcher's Similarity implementation is returned.*/
  public Similarity getSimilarity(Searcher searcher) {
    return searcher.getSimilarity();
  }

  /** Returns a clone of this query. */
  @Override
  public Object clone() {
    try {
      return super.clone();
    } catch (CloneNotSupportedException e) {
      // Cannot normally happen since Query implements Cloneable; chain the
      // cause so the original stack trace is preserved if it ever does.
      throw new RuntimeException("Clone not supported: " + e.getMessage(), e);
    }
  }

  // hashCode/equals consider only the boost; subclasses extend both
  // (they must stay consistent: equal queries hash equally).
  @Override
  public int hashCode() {
    final int prime = 31;
    int result = 1;
    result = prime * result + Float.floatToIntBits(boost);
    return result;
  }

  @Override
  public boolean equals(Object obj) {
    if (this == obj)
      return true;
    if (obj == null)
      return false;
    if (getClass() != obj.getClass())
      return false;
    Query other = (Query) obj;
    if (Float.floatToIntBits(boost) != Float.floatToIntBits(other.boost))
      return false;
    return true;
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/search/Query.java | Java | art | 8,085 |
package org.apache.lucene.search;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.util.PriorityQueue;
final class PhraseQueue extends PriorityQueue<PhrasePositions> {
  PhraseQueue(int size) {
    initialize(size);
  }

  @Override
  protected final boolean lessThan(PhrasePositions pp1, PhrasePositions pp2) {
    // Order primarily by document id, then by phrase position.
    if (pp1.doc != pp2.doc) {
      return pp1.doc < pp2.doc;
    }
    if (pp1.position != pp2.position) {
      return pp1.position < pp2.position;
    }
    // Same doc and position: break the tie by actual term position,
    // relying on pp.position == tp.position - offset.
    return pp1.offset < pp2.offset;
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/search/PhraseQueue.java | Java | art | 1,437 |
package org.apache.lucene.search;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
/** A Scorer for queries with a required subscorer
 * and an excluding (prohibited) sub DocIdSetIterator.
 * <br>
 * This <code>Scorer</code> implements {@link Scorer#skipTo(int)},
 * and it uses the skipTo() on the given scorers.
 */
class ReqExclScorer extends Scorer {
  // Both fields are nulled out once their iterator is exhausted, so later
  // nextDoc()/advance() calls can short-circuit.
  private Scorer reqScorer;
  private DocIdSetIterator exclDisi;
  // Current matching doc; -1 before the first call to nextDoc()/advance().
  private int doc = -1;
  /** Construct a <code>ReqExclScorer</code>.
   * @param reqScorer The scorer that must match, except where
   * @param exclDisi indicates exclusion.
   */
  public ReqExclScorer(Scorer reqScorer, DocIdSetIterator exclDisi) {
    super(null); // No similarity used.
    this.reqScorer = reqScorer;
    this.exclDisi = exclDisi;
  }
  @Override
  public int nextDoc() throws IOException {
    if (reqScorer == null) {
      // Required side exhausted on an earlier call; doc is already NO_MORE_DOCS.
      return doc;
    }
    doc = reqScorer.nextDoc();
    if (doc == NO_MORE_DOCS) {
      reqScorer = null; // exhausted, nothing left
      return doc;
    }
    if (exclDisi == null) {
      // No exclusions remain, so every required doc matches.
      return doc;
    }
    return doc = toNonExcluded();
  }
  /** Advance to non excluded doc.
   * <br>On entry:
   * <ul>
   * <li>reqScorer != null,
   * <li>exclScorer != null,
   * <li>reqScorer was advanced once via next() or skipTo()
   *      and reqScorer.doc() may still be excluded.
   * </ul>
   * Advances reqScorer a non excluded required doc, if any.
   * @return true iff there is a non excluded required doc.
   */
  private int toNonExcluded() throws IOException {
    int exclDoc = exclDisi.docID();
    int reqDoc = reqScorer.docID(); // may be excluded
    do {
      if (reqDoc < exclDoc) {
        return reqDoc; // reqScorer advanced to before exclScorer, ie. not excluded
      } else if (reqDoc > exclDoc) {
        exclDoc = exclDisi.advance(reqDoc);
        if (exclDoc == NO_MORE_DOCS) {
          exclDisi = null; // exhausted, no more exclusions
          return reqDoc;
        }
        if (exclDoc > reqDoc) {
          return reqDoc; // not excluded
        }
        // exclDoc == reqDoc: excluded, fall through and advance the required side.
      }
    } while ((reqDoc = reqScorer.nextDoc()) != NO_MORE_DOCS);
    reqScorer = null; // exhausted, nothing left
    return NO_MORE_DOCS;
  }
  @Override
  public int docID() {
    return doc;
  }
  /** Returns the score of the current document matching the query.
   * Initially invalid, until {@link #next()} is called the first time.
   * @return The score of the required scorer.
   */
  @Override
  public float score() throws IOException {
    return reqScorer.score(); // reqScorer may be null when next() or skipTo() already return false
  }
  @Override
  public int advance(int target) throws IOException {
    if (reqScorer == null) {
      return doc = NO_MORE_DOCS;
    }
    if (exclDisi == null) {
      // No exclusions: delegate straight to the required scorer.
      return doc = reqScorer.advance(target);
    }
    if (reqScorer.advance(target) == NO_MORE_DOCS) {
      reqScorer = null;
      return doc = NO_MORE_DOCS;
    }
    return doc = toNonExcluded();
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/search/ReqExclScorer.java | Java | art | 3,772 |
package org.apache.lucene.search;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.util.PriorityQueue;
final class HitQueue extends PriorityQueue<ScoreDoc> {

  private boolean prePopulate;

  /**
   * Creates a new instance with <code>size</code> elements. If
   * <code>prePopulate</code> is true, the queue fills itself with sentinel
   * objects up front, so {@link #size()} immediately reports
   * <code>size</code>; callers must then track the number of real
   * insertions themselves.<br>
   * <b>NOTE:</b> when pre-populated, pop elements from the queue as follows:
   *
   * <pre>
   * PriorityQueue pq = new HitQueue(10, true); // pre-populate.
   * ScoreDoc top = pq.top();
   *
   * // Add/Update one element.
   * top.score = 1.0f;
   * top.doc = 0;
   * top = (ScoreDoc) pq.updateTop();
   * int totalHits = 1;
   *
   * // First discard the sentinels (there are pq.size() - totalHits of them).
   * for (int i = pq.size() - totalHits; i > 0; i--) pq.pop();
   *
   * // Then pop the truly added elements.
   * ScoreDoc[] results = new ScoreDoc[totalHits];
   * for (int i = totalHits - 1; i >= 0; i--) {
   *   results[i] = (ScoreDoc) pq.pop();
   * }
   * </pre>
   *
   * <p><b>NOTE</b>: This class pre-allocate a full array of
   * length <code>size</code>.
   *
   * @param size
   *          the requested size of this queue.
   * @param prePopulate
   *          specifies whether to pre-populate the queue with sentinel values.
   * @see #getSentinelObject()
   */
  HitQueue(int size, boolean prePopulate) {
    this.prePopulate = prePopulate;
    initialize(size);
  }

  /** Returns a fresh sentinel hit, or null when prePopulate is false. */
  @Override
  protected ScoreDoc getSentinelObject() {
    if (!prePopulate) {
      return null;
    }
    // doc = MAX_VALUE guarantees lessThan never favors a sentinel over a
    // real hit with the same (NEG_INF) score. This generally should not
    // happen since, if score is not NEG_INF, TopScoreDocCollector will
    // always add the object to the queue.
    return new ScoreDoc(Integer.MAX_VALUE, Float.NEGATIVE_INFINITY);
  }

  @Override
  protected final boolean lessThan(ScoreDoc hitA, ScoreDoc hitB) {
    // Ascending by score; score ties are broken so the larger doc id
    // ranks lower (i.e. is popped first).
    if (hitA.score != hitB.score) {
      return hitA.score < hitB.score;
    }
    return hitA.doc > hitB.doc;
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/search/HitQueue.java | Java | art | 3,212 |
package org.apache.lucene.search;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.io.PrintStream;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.WeakHashMap;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermDocs;
import org.apache.lucene.index.TermEnum;
import org.apache.lucene.util.StringHelper;
import org.apache.lucene.util.FieldCacheSanityChecker;
/**
 * Expert: The default cache implementation, storing all values in memory.
 * A WeakHashMap is used for storage.
 *
 * <p>Created: May 19, 2004 4:40:36 PM
 *
 * @since lucene 1.4
 */
class FieldCacheImpl implements FieldCache {

  // One inner Cache per cached value type (byte[], short[], ..., String[], StringIndex).
  private Map<Class<?>,Cache> caches;

  FieldCacheImpl() {
    init();
  }

  // (Re)creates all per-type caches; replacing the map discards every old entry.
  private synchronized void init() {
    caches = new HashMap<Class<?>,Cache>(7);
    caches.put(Byte.TYPE, new ByteCache(this));
    caches.put(Short.TYPE, new ShortCache(this));
    caches.put(Integer.TYPE, new IntCache(this));
    caches.put(Float.TYPE, new FloatCache(this));
    caches.put(Long.TYPE, new LongCache(this));
    caches.put(Double.TYPE, new DoubleCache(this));
    caches.put(String.class, new StringCache(this));
    caches.put(StringIndex.class, new StringIndexCache(this));
  }

  public void purgeAllCaches() {
    // Rebuilding the cache map from scratch drops everything at once.
    init();
  }

  public void purge(IndexReader r) {
    // Drop every cached value derived from this reader, across all types.
    for(Cache c : caches.values()) {
      c.purge(r);
    }
  }

  // Returns a snapshot of every cached entry across all per-type caches,
  // mainly for diagnostics (see FieldCacheSanityChecker).
  public CacheEntry[] getCacheEntries() {
    List<CacheEntry> result = new ArrayList<CacheEntry>(17);
    for(final Class<?> cacheType: caches.keySet()) {
      Cache cache = caches.get(cacheType);
      for (final Object readerKey : cache.readerCache.keySet()) {
        // we've now materialized a hard ref
        // innerKeys was backed by WeakHashMap, sanity check
        // that it wasn't GCed before we made hard ref
        if (null != readerKey && cache.readerCache.containsKey(readerKey)) {
          Map<Entry, Object> innerCache = cache.readerCache.get(readerKey);
          for (final Map.Entry<Entry, Object> mapEntry : innerCache.entrySet()) {
            Entry entry = mapEntry.getKey();
            result.add(new CacheEntryImpl(readerKey, entry.field,
                                          cacheType, entry.custom,
                                          mapEntry.getValue()));
          }
        }
      }
    }
    return result.toArray(new CacheEntry[result.size()]);
  }

  // Immutable value object describing one cached entry, for diagnostics.
  private static final class CacheEntryImpl extends CacheEntry {
    private final Object readerKey;
    private final String fieldName;
    private final Class<?> cacheType;
    private final Object custom;
    private final Object value;
    CacheEntryImpl(Object readerKey, String fieldName,
                   Class<?> cacheType,
                   Object custom,
                   Object value) {
      this.readerKey = readerKey;
      this.fieldName = fieldName;
      this.cacheType = cacheType;
      this.custom = custom;
      this.value = value;
      // :HACK: for testing.
      //         if (null != locale || SortField.CUSTOM != sortFieldType) {
      //           throw new RuntimeException("Locale/sortFieldType: " + this);
      //         }
    }
    @Override
    public Object getReaderKey() { return readerKey; }
    @Override
    public String getFieldName() { return fieldName; }
    @Override
    public Class<?> getCacheType() { return cacheType; }
    @Override
    public Object getCustom() { return custom; }
    @Override
    public Object getValue() { return value; }
  }

  /**
   * Hack: When thrown from a Parser (NUMERIC_UTILS_* ones), this stops
   * processing terms and returns the current FieldCache
   * array.
   */
  static final class StopFillCacheException extends RuntimeException {
  }

  /** Expert: Internal cache. */
  abstract static class Cache {
    Cache() {
      this.wrapper = null;
    }
    Cache(FieldCache wrapper) {
      this.wrapper = wrapper;
    }
    final FieldCache wrapper;
    // reader key -> (field/parser Entry -> cached value).  The outer map is
    // weak-keyed, so entries disappear once a reader key is no longer
    // strongly referenced elsewhere.
    final Map<Object,Map<Entry,Object>> readerCache = new WeakHashMap<Object,Map<Entry,Object>>();
    // Computes the value to cache for the given reader and key; called at
    // most once per (reader, key) under the placeholder protocol in get().
    protected abstract Object createValue(IndexReader reader, Entry key)
      throws IOException;
    /** Remove this reader from the cache, if present. */
    public void purge(IndexReader r) {
      Object readerKey = r.getFieldCacheKey();
      synchronized(readerCache) {
        readerCache.remove(readerKey);
      }
    }
    // Returns the cached value for (reader, key), computing it on a miss.
    // A CreationPlaceholder is installed under the readerCache lock first,
    // so concurrent callers for the same key serialize on the placeholder's
    // monitor instead of computing the value twice, and readerCache is never
    // held during the (possibly slow) createValue() call.
    public Object get(IndexReader reader, Entry key) throws IOException {
      Map<Entry,Object> innerCache;
      Object value;
      final Object readerKey = reader.getFieldCacheKey();
      synchronized (readerCache) {
        innerCache = readerCache.get(readerKey);
        if (innerCache == null) {
          innerCache = new HashMap<Entry,Object>();
          readerCache.put(readerKey, innerCache);
          value = null;
        } else {
          value = innerCache.get(key);
        }
        if (value == null) {
          value = new CreationPlaceholder();
          innerCache.put(key, value);
        }
      }
      if (value instanceof CreationPlaceholder) {
        synchronized (value) {
          CreationPlaceholder progress = (CreationPlaceholder) value;
          // Only the first thread to acquire the placeholder's monitor sees
          // a null value and performs the actual creation.
          if (progress.value == null) {
            progress.value = createValue(reader, key);
            synchronized (readerCache) {
              innerCache.put(key, progress.value);
            }
            // Only check if key.custom (the parser) is
            // non-null; else, we check twice for a single
            // call to FieldCache.getXXX
            if (key.custom != null && wrapper != null) {
              final PrintStream infoStream = wrapper.getInfoStream();
              if (infoStream != null) {
                printNewInsanity(infoStream, progress.value);
              }
            }
          }
          return progress.value;
        }
      }
      return value;
    }
    // Warns on the info stream when the freshly created value participates
    // in a cross-cache "insanity" found by FieldCacheSanityChecker.
    private void printNewInsanity(PrintStream infoStream, Object value) {
      final FieldCacheSanityChecker.Insanity[] insanities = FieldCacheSanityChecker.checkSanity(wrapper);
      for(int i=0;i<insanities.length;i++) {
        final FieldCacheSanityChecker.Insanity insanity = insanities[i];
        final CacheEntry[] entries = insanity.getCacheEntries();
        for(int j=0;j<entries.length;j++) {
          if (entries[j].getValue() == value) {
            // OK this insanity involves our entry
            infoStream.println("WARNING: new FieldCache insanity created\nDetails: " + insanity.toString());
            infoStream.println("\nStack:\n");
            new Throwable().printStackTrace(infoStream);
            break;
          }
        }
      }
    }
  }

  /** Expert: Every composite-key in the internal cache is of this type. */
  static class Entry {
    final String field;        // which Fieldable
    final Object custom;       // which custom comparator or parser
    /** Creates one of these objects for a custom comparator/parser. */
    Entry (String field, Object custom) {
      // Interning the field name lets the fill loops below compare field
      // names with == instead of equals().
      this.field = StringHelper.intern(field);
      this.custom = custom;
    }
    /** Two of these are equal iff they reference the same field and type. */
    @Override
    public boolean equals (Object o) {
      if (o instanceof Entry) {
        Entry other = (Entry) o;
        if (other.field == field) {
          if (other.custom == null) {
            if (custom == null) return true;
          } else if (other.custom.equals (custom)) {
            return true;
          }
        }
      }
      return false;
    }
    /** Composes a hashcode based on the field and type. */
    @Override
    public int hashCode() {
      return field.hashCode() ^ (custom==null ? 0 : custom.hashCode());
    }
  }

  // inherit javadocs
  public byte[] getBytes (IndexReader reader, String field) throws IOException {
    return getBytes(reader, field, null);
  }

  // inherit javadocs
  public byte[] getBytes(IndexReader reader, String field, ByteParser parser)
      throws IOException {
    return (byte[]) caches.get(Byte.TYPE).get(reader, new Entry(field, parser));
  }

  static final class ByteCache extends Cache {
    ByteCache(FieldCache wrapper) {
      super(wrapper);
    }
    @Override
    protected Object createValue(IndexReader reader, Entry entryKey)
        throws IOException {
      Entry entry = entryKey;
      String field = entry.field;
      ByteParser parser = (ByteParser) entry.custom;
      if (parser == null) {
        // No explicit parser: re-enter via the wrapper so the result is
        // cached under the default-parser key.
        return wrapper.getBytes(reader, field, FieldCache.DEFAULT_BYTE_PARSER);
      }
      final byte[] retArray = new byte[reader.maxDoc()];
      TermDocs termDocs = reader.termDocs();
      TermEnum termEnum = reader.terms (new Term (field));
      try {
        do {
          Term term = termEnum.term();
          // == comparison is safe: both strings are interned.
          if (term==null || term.field() != field) break;
          byte termval = parser.parseByte(term.text());
          termDocs.seek (termEnum);
          while (termDocs.next()) {
            retArray[termDocs.doc()] = termval;
          }
        } while (termEnum.next());
      } catch (StopFillCacheException stop) {
        // Parser asked us to stop; return whatever was filled so far.
      } finally {
        termDocs.close();
        termEnum.close();
      }
      return retArray;
    }
  };

  // inherit javadocs
  public short[] getShorts (IndexReader reader, String field) throws IOException {
    return getShorts(reader, field, null);
  }

  // inherit javadocs
  public short[] getShorts(IndexReader reader, String field, ShortParser parser)
      throws IOException {
    return (short[]) caches.get(Short.TYPE).get(reader, new Entry(field, parser));
  }

  static final class ShortCache extends Cache {
    ShortCache(FieldCache wrapper) {
      super(wrapper);
    }
    @Override
    protected Object createValue(IndexReader reader, Entry entryKey)
        throws IOException {
      Entry entry = entryKey;
      String field = entry.field;
      ShortParser parser = (ShortParser) entry.custom;
      if (parser == null) {
        // No explicit parser: cache under the default-parser key instead.
        return wrapper.getShorts(reader, field, FieldCache.DEFAULT_SHORT_PARSER);
      }
      final short[] retArray = new short[reader.maxDoc()];
      TermDocs termDocs = reader.termDocs();
      TermEnum termEnum = reader.terms (new Term (field));
      try {
        do {
          Term term = termEnum.term();
          if (term==null || term.field() != field) break;
          short termval = parser.parseShort(term.text());
          termDocs.seek (termEnum);
          while (termDocs.next()) {
            retArray[termDocs.doc()] = termval;
          }
        } while (termEnum.next());
      } catch (StopFillCacheException stop) {
        // Parser asked us to stop; return whatever was filled so far.
      } finally {
        termDocs.close();
        termEnum.close();
      }
      return retArray;
    }
  };

  // inherit javadocs
  public int[] getInts (IndexReader reader, String field) throws IOException {
    return getInts(reader, field, null);
  }

  // inherit javadocs
  public int[] getInts(IndexReader reader, String field, IntParser parser)
      throws IOException {
    return (int[]) caches.get(Integer.TYPE).get(reader, new Entry(field, parser));
  }

  static final class IntCache extends Cache {
    IntCache(FieldCache wrapper) {
      super(wrapper);
    }
    @Override
    protected Object createValue(IndexReader reader, Entry entryKey)
        throws IOException {
      Entry entry = entryKey;
      String field = entry.field;
      IntParser parser = (IntParser) entry.custom;
      if (parser == null) {
        // Try plain-text ints first; on failure assume NumericField encoding.
        try {
          return wrapper.getInts(reader, field, DEFAULT_INT_PARSER);
        } catch (NumberFormatException ne) {
          return wrapper.getInts(reader, field, NUMERIC_UTILS_INT_PARSER);
        }
      }
      int[] retArray = null;
      TermDocs termDocs = reader.termDocs();
      TermEnum termEnum = reader.terms (new Term (field));
      try {
        do {
          Term term = termEnum.term();
          if (term==null || term.field() != field) break;
          int termval = parser.parseInt(term.text());
          if (retArray == null) // late init
            retArray = new int[reader.maxDoc()];
          termDocs.seek (termEnum);
          while (termDocs.next()) {
            retArray[termDocs.doc()] = termval;
          }
        } while (termEnum.next());
      } catch (StopFillCacheException stop) {
        // Parser asked us to stop; return whatever was filled so far.
      } finally {
        termDocs.close();
        termEnum.close();
      }
      if (retArray == null) // no values
        retArray = new int[reader.maxDoc()];
      return retArray;
    }
  };

  // inherit javadocs
  public float[] getFloats (IndexReader reader, String field)
      throws IOException {
    return getFloats(reader, field, null);
  }

  // inherit javadocs
  public float[] getFloats(IndexReader reader, String field, FloatParser parser)
      throws IOException {
    return (float[]) caches.get(Float.TYPE).get(reader, new Entry(field, parser));
  }

  static final class FloatCache extends Cache {
    FloatCache(FieldCache wrapper) {
      super(wrapper);
    }
    @Override
    protected Object createValue(IndexReader reader, Entry entryKey)
        throws IOException {
      Entry entry = entryKey;
      String field = entry.field;
      FloatParser parser = (FloatParser) entry.custom;
      if (parser == null) {
        // Try plain-text floats first; on failure assume NumericField encoding.
        try {
          return wrapper.getFloats(reader, field, DEFAULT_FLOAT_PARSER);
        } catch (NumberFormatException ne) {
          return wrapper.getFloats(reader, field, NUMERIC_UTILS_FLOAT_PARSER);
        }
      }
      float[] retArray = null;
      TermDocs termDocs = reader.termDocs();
      TermEnum termEnum = reader.terms (new Term (field));
      try {
        do {
          Term term = termEnum.term();
          if (term==null || term.field() != field) break;
          float termval = parser.parseFloat(term.text());
          if (retArray == null) // late init
            retArray = new float[reader.maxDoc()];
          termDocs.seek (termEnum);
          while (termDocs.next()) {
            retArray[termDocs.doc()] = termval;
          }
        } while (termEnum.next());
      } catch (StopFillCacheException stop) {
        // Parser asked us to stop; return whatever was filled so far.
      } finally {
        termDocs.close();
        termEnum.close();
      }
      if (retArray == null) // no values
        retArray = new float[reader.maxDoc()];
      return retArray;
    }
  };

  public long[] getLongs(IndexReader reader, String field) throws IOException {
    return getLongs(reader, field, null);
  }

  // inherit javadocs
  public long[] getLongs(IndexReader reader, String field, FieldCache.LongParser parser)
      throws IOException {
    return (long[]) caches.get(Long.TYPE).get(reader, new Entry(field, parser));
  }

  static final class LongCache extends Cache {
    LongCache(FieldCache wrapper) {
      super(wrapper);
    }
    @Override
    protected Object createValue(IndexReader reader, Entry entry)
        throws IOException {
      String field = entry.field;
      FieldCache.LongParser parser = (FieldCache.LongParser) entry.custom;
      if (parser == null) {
        // Try plain-text longs first; on failure assume NumericField encoding.
        try {
          return wrapper.getLongs(reader, field, DEFAULT_LONG_PARSER);
        } catch (NumberFormatException ne) {
          return wrapper.getLongs(reader, field, NUMERIC_UTILS_LONG_PARSER);
        }
      }
      long[] retArray = null;
      TermDocs termDocs = reader.termDocs();
      TermEnum termEnum = reader.terms (new Term(field));
      try {
        do {
          Term term = termEnum.term();
          if (term==null || term.field() != field) break;
          long termval = parser.parseLong(term.text());
          if (retArray == null) // late init
            retArray = new long[reader.maxDoc()];
          termDocs.seek (termEnum);
          while (termDocs.next()) {
            retArray[termDocs.doc()] = termval;
          }
        } while (termEnum.next());
      } catch (StopFillCacheException stop) {
        // Parser asked us to stop; return whatever was filled so far.
      } finally {
        termDocs.close();
        termEnum.close();
      }
      if (retArray == null) // no values
        retArray = new long[reader.maxDoc()];
      return retArray;
    }
  };

  // inherit javadocs
  public double[] getDoubles(IndexReader reader, String field)
      throws IOException {
    return getDoubles(reader, field, null);
  }

  // inherit javadocs
  public double[] getDoubles(IndexReader reader, String field, FieldCache.DoubleParser parser)
      throws IOException {
    return (double[]) caches.get(Double.TYPE).get(reader, new Entry(field, parser));
  }

  static final class DoubleCache extends Cache {
    DoubleCache(FieldCache wrapper) {
      super(wrapper);
    }
    @Override
    protected Object createValue(IndexReader reader, Entry entryKey)
        throws IOException {
      Entry entry = entryKey;
      String field = entry.field;
      FieldCache.DoubleParser parser = (FieldCache.DoubleParser) entry.custom;
      if (parser == null) {
        // Try plain-text doubles first; on failure assume NumericField encoding.
        try {
          return wrapper.getDoubles(reader, field, DEFAULT_DOUBLE_PARSER);
        } catch (NumberFormatException ne) {
          return wrapper.getDoubles(reader, field, NUMERIC_UTILS_DOUBLE_PARSER);
        }
      }
      double[] retArray = null;
      TermDocs termDocs = reader.termDocs();
      TermEnum termEnum = reader.terms (new Term (field));
      try {
        do {
          Term term = termEnum.term();
          if (term==null || term.field() != field) break;
          double termval = parser.parseDouble(term.text());
          if (retArray == null) // late init
            retArray = new double[reader.maxDoc()];
          termDocs.seek (termEnum);
          while (termDocs.next()) {
            retArray[termDocs.doc()] = termval;
          }
        } while (termEnum.next());
      } catch (StopFillCacheException stop) {
        // Parser asked us to stop; return whatever was filled so far.
      } finally {
        termDocs.close();
        termEnum.close();
      }
      if (retArray == null) // no values
        retArray = new double[reader.maxDoc()];
      return retArray;
    }
  };

  // inherit javadocs
  public String[] getStrings(IndexReader reader, String field)
      throws IOException {
    return (String[]) caches.get(String.class).get(reader, new Entry(field, (Parser)null));
  }

  static final class StringCache extends Cache {
    StringCache(FieldCache wrapper) {
      super(wrapper);
    }
    @Override
    protected Object createValue(IndexReader reader, Entry entryKey)
        throws IOException {
      // Intern so the == field comparison in the loop below is valid.
      String field = StringHelper.intern(entryKey.field);
      final String[] retArray = new String[reader.maxDoc()];
      TermDocs termDocs = reader.termDocs();
      TermEnum termEnum = reader.terms (new Term (field));
      try {
        do {
          Term term = termEnum.term();
          if (term==null || term.field() != field) break;
          String termval = term.text();
          termDocs.seek (termEnum);
          while (termDocs.next()) {
            retArray[termDocs.doc()] = termval;
          }
        } while (termEnum.next());
      } finally {
        termDocs.close();
        termEnum.close();
      }
      return retArray;
    }
  };

  // inherit javadocs
  public StringIndex getStringIndex(IndexReader reader, String field)
      throws IOException {
    return (StringIndex) caches.get(StringIndex.class).get(reader, new Entry(field, (Parser)null));
  }

  static final class StringIndexCache extends Cache {
    StringIndexCache(FieldCache wrapper) {
      super(wrapper);
    }
    @Override
    protected Object createValue(IndexReader reader, Entry entryKey)
        throws IOException {
      // Intern so the == field comparison in the loop below is valid.
      String field = StringHelper.intern(entryKey.field);
      final int[] retArray = new int[reader.maxDoc()];
      String[] mterms = new String[reader.maxDoc()+1];
      TermDocs termDocs = reader.termDocs();
      TermEnum termEnum = reader.terms (new Term (field));
      int t = 0;  // current term number

      // an entry for documents that have no terms in this field
      // should a document with no terms be at top or bottom?
      // this puts them at the top - if it is changed, FieldDocSortedHitQueue
      // needs to change as well.
      mterms[t++] = null;

      try {
        do {
          Term term = termEnum.term();
          if (term==null || term.field() != field) break;

          // store term text
          mterms[t] = term.text();

          termDocs.seek (termEnum);
          while (termDocs.next()) {
            retArray[termDocs.doc()] = t;
          }

          t++;
        } while (termEnum.next());
      } finally {
        termDocs.close();
        termEnum.close();
      }

      if (t == 0) {
        // if there are no terms, make the term array
        // have a single null entry
        mterms = new String[1];
      } else if (t < mterms.length) {
        // if there are less terms than documents,
        // trim off the dead array space
        String[] terms = new String[t];
        System.arraycopy (mterms, 0, terms, 0, t);
        mterms = terms;
      }

      StringIndex value = new StringIndex (retArray, mterms);
      return value;
    }
  };

  // Diagnostics output target; volatile so setInfoStream is visible to all
  // threads without further synchronization.
  private volatile PrintStream infoStream;

  public void setInfoStream(PrintStream stream) {
    infoStream = stream;
  }

  public PrintStream getInfoStream() {
    return infoStream;
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/search/FieldCacheImpl.java | Java | art | 22,021 |
package org.apache.lucene.search;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
/* See the description in BooleanScorer.java, comparing
* BooleanScorer & BooleanScorer2 */
/** An alternative to BooleanScorer that also allows a minimum number
* of optional scorers that should match.
* <br>Implements skipTo(), and has no limitations on the numbers of added scorers.
* <br>Uses ConjunctionScorer, DisjunctionScorer, ReqOptScorer and ReqExclScorer.
*/
class BooleanScorer2 extends Scorer {
private final List<Scorer> requiredScorers;
private final List<Scorer> optionalScorers;
private final List<Scorer> prohibitedScorers;
private class Coordinator {
  float[] coordFactors = null;
  int maxCoord = 0;    // to be increased for each non prohibited scorer
  int nrMatchers;      // to be increased by score() of match counting scorers.

  void init() { // use after all scorers have been added.
    // Precompute coord(n, maxCoord) for every possible matcher count n.
    Similarity sim = getSimilarity();
    coordFactors = new float[maxCoord + 1];
    for (int matchers = 0; matchers <= maxCoord; matchers++) {
      coordFactors[matchers] = sim.coord(matchers, maxCoord);
    }
  }
}
private final Coordinator coordinator;
/** The scorer to which all scoring will be delegated,
* except for computing and using the coordination factor.
*/
private final Scorer countingSumScorer;
/** The number of optionalScorers that need to match (if there are any) */
private final int minNrShouldMatch;
private int doc = -1;
/**
 * Creates a {@link Scorer} with the given similarity and lists of required,
 * prohibited and optional scorers. If no required scorers are added, at least
 * one of the optional scorers will have to match during the search.
 *
 * @param similarity
 *          The similarity to be used.
 * @param minNrShouldMatch
 *          The minimum number of optional added scorers that should match
 *          during the search. In case no required scorers are added, at least
 *          one of the optional scorers will have to match during the search.
 * @param required
 *          the list of required scorers.
 * @param prohibited
 *          the list of prohibited scorers.
 * @param optional
 *          the list of optional scorers.
 */
public BooleanScorer2(Similarity similarity, int minNrShouldMatch,
    List<Scorer> required, List<Scorer> prohibited, List<Scorer> optional) throws IOException {
  super(similarity);
  if (minNrShouldMatch < 0) {
    throw new IllegalArgumentException("Minimum number of optional scorers should not be negative");
  }
  coordinator = new Coordinator();
  this.minNrShouldMatch = minNrShouldMatch;
  optionalScorers = optional;
  // Every non-prohibited scorer counts towards the maximum coord factor.
  coordinator.maxCoord += optional.size();
  requiredScorers = required;
  coordinator.maxCoord += required.size();
  prohibitedScorers = prohibited;
  // maxCoord is final at this point, so the coord factor table can be built,
  // and only then can the delegating sum scorer be constructed.
  coordinator.init();
  countingSumScorer = makeCountingSumScorer();
}
/** Count a scorer as a single match. */
private class SingleMatchScorer extends Scorer {
  private Scorer scorer;
  private int lastScoredDoc = -1;
  // Cache the most recent score so repeated score() calls on the same doc
  // do not recompute it.
  private float lastDocScore = Float.NaN;

  SingleMatchScorer(Scorer scorer) {
    super(scorer.getSimilarity());
    this.scorer = scorer;
  }

  @Override
  public float score() throws IOException {
    int current = docID();
    if (current < lastScoredDoc) {
      // Already scored a later doc; just return the cached value.
      return lastDocScore;
    }
    if (current > lastScoredDoc) {
      lastDocScore = scorer.score();
      lastScoredDoc = current;
    }
    // Each score() call on the current doc registers exactly one matcher.
    coordinator.nrMatchers++;
    return lastDocScore;
  }

  @Override
  public int docID() {
    return scorer.docID();
  }

  @Override
  public int nextDoc() throws IOException {
    return scorer.nextDoc();
  }

  @Override
  public int advance(int target) throws IOException {
    return scorer.advance(target);
  }
}
// Wraps a DisjunctionSumScorer so that score() also reports the number of
// matching disjuncts for the current doc to the coordinator.
private Scorer countingDisjunctionSumScorer(final List<Scorer> scorers,
    int minNrShouldMatch) throws IOException {
  // each scorer from the list counted as a single matcher
  return new DisjunctionSumScorer(scorers, minNrShouldMatch) {
    private int lastScoredDoc = -1;
    // Save the score of lastScoredDoc, so that we don't compute it more than
    // once in score().
    private float lastDocScore = Float.NaN;
    @Override public float score() throws IOException {
      int doc = docID();
      if (doc >= lastScoredDoc) {
        if (doc > lastScoredDoc) {
          lastDocScore = super.score();
          lastScoredDoc = doc;
        }
        // Register the matcher count on every score() call for this doc.
        coordinator.nrMatchers += super.nrMatchers;
      }
      return lastDocScore;
    }
  };
}
private static final Similarity defaultSimilarity = Similarity.getDefault();
// Wraps a ConjunctionScorer so that score() also reports all required
// scorers as matchers to the coordinator (in a conjunction, all of them
// match by definition).
private Scorer countingConjunctionSumScorer(List<Scorer> requiredScorers) throws IOException {
  // each scorer from the list counted as a single matcher
  final int requiredNrMatchers = requiredScorers.size();
  return new ConjunctionScorer(defaultSimilarity, requiredScorers) {
    private int lastScoredDoc = -1;
    // Save the score of lastScoredDoc, so that we don't compute it more than
    // once in score().
    private float lastDocScore = Float.NaN;
    @Override public float score() throws IOException {
      int doc = docID();
      if (doc >= lastScoredDoc) {
        if (doc > lastScoredDoc) {
          lastDocScore = super.score();
          lastScoredDoc = doc;
        }
        coordinator.nrMatchers += requiredNrMatchers;
      }
      // All scorers match, so defaultSimilarity super.score() always has 1 as
      // the coordination factor.
      // Therefore the sum of the scores of the requiredScorers
      // is used as score.
      return lastDocScore;
    }
  };
}
  /** Combines two required scorers into a conjunction that does not add to
   * the coordinator itself; any counting is done by the wrapped scorers.
   */
  private Scorer dualConjunctionSumScorer(Scorer req1, Scorer req2) throws IOException { // non counting.
    return new ConjunctionScorer(defaultSimilarity, new Scorer[]{req1, req2});
    // All scorers match, so defaultSimilarity always has 1 as
    // the coordination factor.
    // Therefore the sum of the scores of two scorers
    // is used as score.
  }
/** Returns the scorer to be used for match counting and score summing.
* Uses requiredScorers, optionalScorers and prohibitedScorers.
*/
private Scorer makeCountingSumScorer() throws IOException { // each scorer counted as a single matcher
return (requiredScorers.size() == 0)
? makeCountingSumScorerNoReq()
: makeCountingSumScorerSomeReq();
}
  /** Builds the counting sum scorer for the case of no required scorers:
   * at least one (or minNrShouldMatch, if larger) of the optional scorers
   * must match, and the prohibited scorers are wrapped around the result.
   */
  private Scorer makeCountingSumScorerNoReq() throws IOException { // No required scorers
    // minNrShouldMatch optional scorers are required, but at least 1
    int nrOptRequired = (minNrShouldMatch < 1) ? 1 : minNrShouldMatch;
    Scorer requiredCountingSumScorer;
    if (optionalScorers.size() > nrOptRequired)
      requiredCountingSumScorer = countingDisjunctionSumScorer(optionalScorers, nrOptRequired);
    else if (optionalScorers.size() == 1)
      // Exactly one optional scorer and it is required: no combination needed.
      requiredCountingSumScorer = new SingleMatchScorer(optionalScorers.get(0));
    else
      // Remaining case: every optional scorer is required for a hit, so a
      // conjunction over all of them is used.
      requiredCountingSumScorer = countingConjunctionSumScorer(optionalScorers);
    return addProhibitedScorers(requiredCountingSumScorer);
  }
  /** Builds the counting sum scorer for the case of at least one required
   * scorer, combining required, optional and prohibited scorers.
   */
  private Scorer makeCountingSumScorerSomeReq() throws IOException { // At least one required scorer.
    if (optionalScorers.size() == minNrShouldMatch) { // all optional scorers also required.
      ArrayList<Scorer> allReq = new ArrayList<Scorer>(requiredScorers);
      allReq.addAll(optionalScorers);
      return addProhibitedScorers(countingConjunctionSumScorer(allReq));
    } else { // optionalScorers.size() > minNrShouldMatch, and at least one required scorer
      Scorer requiredCountingSumScorer =
            requiredScorers.size() == 1
            ? new SingleMatchScorer(requiredScorers.get(0))
            : countingConjunctionSumScorer(requiredScorers);
      if (minNrShouldMatch > 0) { // use a required disjunction scorer over the optional scorers
        // Both the required conjunction and the optional disjunction must
        // match; the outer dual conjunction does not count matchers itself.
        return addProhibitedScorers(
                      dualConjunctionSumScorer( // non counting
                              requiredCountingSumScorer,
                              countingDisjunctionSumScorer(
                                      optionalScorers,
                                      minNrShouldMatch)));
      } else { // minNrShouldMatch == 0
        // Optional scorers may match but are not required to.
        return new ReqOptSumScorer(
                      addProhibitedScorers(requiredCountingSumScorer),
                      optionalScorers.size() == 1
                        ? new SingleMatchScorer(optionalScorers.get(0))
                        // require 1 in combined, optional scorer.
                        : countingDisjunctionSumScorer(optionalScorers, 1));
      }
    }
  }
/** Returns the scorer to be used for match counting and score summing.
* Uses the given required scorer and the prohibitedScorers.
* @param requiredCountingSumScorer A required scorer already built.
*/
private Scorer addProhibitedScorers(Scorer requiredCountingSumScorer) throws IOException
{
return (prohibitedScorers.size() == 0)
? requiredCountingSumScorer // no prohibited
: new ReqExclScorer(requiredCountingSumScorer,
((prohibitedScorers.size() == 1)
? prohibitedScorers.get(0)
: new DisjunctionSumScorer(prohibitedScorers)));
}
  /** Scores and collects all matching documents.
   * @param collector The collector to which all matching documents are passed through.
   */
  @Override
  public void score(Collector collector) throws IOException {
    // The collector is handed this scorer (not countingSumScorer) so that
    // this scorer's score() applies the coordination factor.
    collector.setScorer(this);
    while ((doc = countingSumScorer.nextDoc()) != NO_MORE_DOCS) {
      collector.collect(doc);
    }
  }
  /** Expert: collects matching documents in the range [firstDocID, max).
   * NOTE(review): firstDocID is collected without being re-checked against
   * the sub-scorers; this assumes the caller passes the scorer's current
   * matching doc, per the score(Collector, int, int) contract -- confirm.
   * @return true if the scorer is not yet exhausted (more matches may remain).
   */
  @Override
  protected boolean score(Collector collector, int max, int firstDocID) throws IOException {
    doc = firstDocID;
    collector.setScorer(this);
    while (doc < max) {
      collector.collect(doc);
      doc = countingSumScorer.nextDoc();
    }
    return doc != NO_MORE_DOCS;
  }
  /** Returns the doc id of the current match. */
  @Override
  public int docID() {
    return doc;
  }
  /** Advances to the next matching document and returns its doc id. */
  @Override
  public int nextDoc() throws IOException {
    return doc = countingSumScorer.nextDoc();
  }
@Override
public float score() throws IOException {
coordinator.nrMatchers = 0;
float sum = countingSumScorer.score();
return sum * coordinator.coordFactors[coordinator.nrMatchers];
}
  /** Advances to the first match at or after target and returns its doc id. */
  @Override
  public int advance(int target) throws IOException {
    return doc = countingSumScorer.advance(target);
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/search/BooleanScorer2.java | Java | art | 11,526 |
package org.apache.lucene.search;
/**
* Copyright 2004 The Apache Software Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Iterator;
import java.util.Set;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
/**
* A query that generates the union of documents produced by its subqueries, and that scores each document with the maximum
* score for that document as produced by any subquery, plus a tie breaking increment for any additional matching subqueries.
* This is useful when searching for a word in multiple fields with different boost factors (so that the fields cannot be
* combined equivalently into a single search field). We want the primary score to be the one associated with the highest boost,
* not the sum of the field scores (as BooleanQuery would give).
* If the query is "albino elephant" this ensures that "albino" matching one field and "elephant" matching
* another gets a higher score than "albino" matching both fields.
* To get this result, use both BooleanQuery and DisjunctionMaxQuery: for each term a DisjunctionMaxQuery searches for it in
* each field, while the set of these DisjunctionMaxQuery's is combined into a BooleanQuery.
* The tie breaker capability allows results that include the same term in multiple fields to be judged better than results that
* include this term in only the best of those multiple fields, without confusing this with the better case of two different terms
* in the multiple fields.
*/
public class DisjunctionMaxQuery extends Query implements Iterable<Query> {
  /* The subqueries */
  private ArrayList<Query> disjuncts = new ArrayList<Query>();
  /* Multiple of the non-max disjunct scores added into our final score. Non-zero values support tie-breaking. */
  private float tieBreakerMultiplier = 0.0f;
  /** Creates a new empty DisjunctionMaxQuery. Use add() to add the subqueries.
   * @param tieBreakerMultiplier the score of each non-maximum disjunct for a document is multiplied by this weight
   * and added into the final score. If non-zero, the value should be small, on the order of 0.1, which says that
   * 10 occurrences of word in a lower-scored field that is also in a higher scored field is just as good as a unique
   * word in the lower scored field (i.e., one that is not in any higher scored field).
   */
  public DisjunctionMaxQuery(float tieBreakerMultiplier) {
    this.tieBreakerMultiplier = tieBreakerMultiplier;
  }
  /**
   * Creates a new DisjunctionMaxQuery
   * @param disjuncts a Collection&lt;Query&gt; of all the disjuncts to add
   * @param tieBreakerMultiplier the weight to give to each matching non-maximum disjunct
   */
  public DisjunctionMaxQuery(Collection<Query> disjuncts, float tieBreakerMultiplier) {
    this.tieBreakerMultiplier = tieBreakerMultiplier;
    add(disjuncts);
  }
  /** Add a subquery to this disjunction
   * @param query the disjunct added
   */
  public void add(Query query) {
    disjuncts.add(query);
  }
  /** Add a collection of disjuncts to this disjunction
   * via Iterable&lt;Query&gt;
   */
  public void add(Collection<Query> disjuncts) {
    this.disjuncts.addAll(disjuncts);
  }
  /** An Iterator&lt;Query&gt; over the disjuncts */
  public Iterator<Query> iterator() {
    return disjuncts.iterator();
  }
  /**
   * Expert: the Weight for DisjunctionMaxQuery, used to
   * normalize, score and explain these queries.
   *
   * <p>NOTE: this API and implementation is subject to
   * change suddenly in the next release.</p>
   */
  protected class DisjunctionMaxWeight extends Weight {
    /** The Similarity implementation. */
    protected Similarity similarity;
    /** The Weights for our subqueries, in 1-1 correspondence with disjuncts */
    protected ArrayList<Weight> weights = new ArrayList<Weight>();  // The Weight's for our subqueries, in 1-1 correspondence with disjuncts
    /* Construct the Weight for this Query searched by searcher. Recursively construct subquery weights. */
    public DisjunctionMaxWeight(Searcher searcher) throws IOException {
      this.similarity = searcher.getSimilarity();
      for (Query disjunctQuery : disjuncts) {
        weights.add(disjunctQuery.createWeight(searcher));
      }
    }
    /* Return our associated DisjunctionMaxQuery */
    @Override
    public Query getQuery() { return DisjunctionMaxQuery.this; }
    /* Return our boost */
    @Override
    public float getValue() { return getBoost(); }
    /* Compute the sum of squared weights of us applied to our subqueries. Used for normalization. */
    @Override
    public float sumOfSquaredWeights() throws IOException {
      float max = 0.0f, sum = 0.0f;
      for (Weight currentWeight : weights) {
        float sub = currentWeight.sumOfSquaredWeights();
        sum += sub;
        max = Math.max(max, sub);
      }
      // Non-max contributions are scaled by tieBreakerMultiplier^2, mirroring
      // how scores are combined at search time.
      float boost = getBoost();
      return (((sum - max) * tieBreakerMultiplier * tieBreakerMultiplier) + max) * boost * boost;
    }
    /* Apply the computed normalization factor to our subqueries */
    @Override
    public void normalize(float norm) {
      norm *= getBoost();  // Incorporate our boost
      for (Weight wt : weights) {
        wt.normalize(norm);
      }
    }
    /* Create the scorer used to score our associated DisjunctionMaxQuery */
    @Override
    public Scorer scorer(IndexReader reader, boolean scoreDocsInOrder,
        boolean topScorer) throws IOException {
      Scorer[] scorers = new Scorer[weights.size()];
      int idx = 0;
      for (Weight w : weights) {
        // Each sub-scorer is advanced to its first document here; sub-queries
        // matching no documents are dropped from the array.
        Scorer subScorer = w.scorer(reader, true, false);
        if (subScorer != null && subScorer.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
          scorers[idx++] = subScorer;
        }
      }
      if (idx == 0) return null; // all scorers did not have documents
      DisjunctionMaxScorer result = new DisjunctionMaxScorer(tieBreakerMultiplier, similarity, scorers, idx);
      return result;
    }
    /* Explain the score we computed for doc */
    @Override
    public Explanation explain(IndexReader reader, int doc) throws IOException {
      if (disjuncts.size() == 1) return weights.get(0).explain(reader,doc);
      ComplexExplanation result = new ComplexExplanation();
      float max = 0.0f, sum = 0.0f;
      result.setDescription(tieBreakerMultiplier == 0.0f ? "max of:" : "max plus " + tieBreakerMultiplier + " times others of:");
      for (Weight wt : weights) {
        Explanation e = wt.explain(reader, doc);
        if (e.isMatch()) {
          result.setMatch(Boolean.TRUE);
          result.addDetail(e);
          sum += e.getValue();
          max = Math.max(max, e.getValue());
        }
      }
      result.setValue(max + (sum - max) * tieBreakerMultiplier);
      return result;
    }
  }  // end of DisjunctionMaxWeight inner class
  /* Create the Weight used to score us */
  @Override
  public Weight createWeight(Searcher searcher) throws IOException {
    return new DisjunctionMaxWeight(searcher);
  }
  /** Optimize our representation and our subqueries representations
   * @param reader the IndexReader we query
   * @return an optimized copy of us (which may not be a copy if there is nothing to optimize) */
  @Override
  public Query rewrite(IndexReader reader) throws IOException {
    int numDisjunctions = disjuncts.size();
    if (numDisjunctions == 1) {
      // A single disjunct: the "max" is trivially that disjunct, so rewrite
      // to it directly, folding our boost into the rewritten query's boost.
      Query singleton = disjuncts.get(0);
      Query result = singleton.rewrite(reader);
      if (getBoost() != 1.0f) {
        if (result == singleton) result = (Query)result.clone();
        result.setBoost(getBoost() * result.getBoost());
      }
      return result;
    }
    // Clone lazily: only if at least one disjunct actually rewrites.
    DisjunctionMaxQuery clone = null;
    for (int i = 0 ; i < numDisjunctions; i++) {
      Query clause = disjuncts.get(i);
      Query rewrite = clause.rewrite(reader);
      if (rewrite != clause) {
        if (clone == null) clone = (DisjunctionMaxQuery)this.clone();
        clone.disjuncts.set(i, rewrite);
      }
    }
    if (clone != null) return clone;
    else return this;
  }
  /** Create a shallow copy of us -- used in rewriting if necessary
   * @return a copy of us (but reuse, don't copy, our subqueries) */
  @Override @SuppressWarnings("unchecked")
  public Object clone() {
    DisjunctionMaxQuery clone = (DisjunctionMaxQuery)super.clone();
    clone.disjuncts = (ArrayList<Query>) this.disjuncts.clone();
    return clone;
  }
  // inherit javadoc
  @Override
  public void extractTerms(Set<Term> terms) {
    for (Query query : disjuncts) {
      query.extractTerms(terms);
    }
  }
  /** Prettyprint us.
   * @param field the field to which we are applied
   * @return a string that shows what we do, of the form "(disjunct1 | disjunct2 | ... | disjunctn)^boost"
   */
  @Override
  public String toString(String field) {
    StringBuilder buffer = new StringBuilder();
    buffer.append("(");
    int numDisjunctions = disjuncts.size();
    for (int i = 0 ; i < numDisjunctions; i++) {
      Query subquery = disjuncts.get(i);
      if (subquery instanceof BooleanQuery) {   // wrap sub-bools in parens
        buffer.append("(");
        buffer.append(subquery.toString(field));
        buffer.append(")");
      }
      else buffer.append(subquery.toString(field));
      if (i != numDisjunctions-1) buffer.append(" | ");
    }
    buffer.append(")");
    if (tieBreakerMultiplier != 0.0f) {
      buffer.append("~");
      buffer.append(tieBreakerMultiplier);
    }
    if (getBoost() != 1.0) {
      buffer.append("^");
      buffer.append(getBoost());
    }
    return buffer.toString();
  }
  /** Return true iff we represent the same query as o
   * @param o another object
   * @return true iff o is a DisjunctionMaxQuery with the same boost and the same subqueries, in the same order, as us
   */
  @Override
  public boolean equals(Object o) {
    if (! (o instanceof DisjunctionMaxQuery) ) return false;
    DisjunctionMaxQuery other = (DisjunctionMaxQuery)o;
    return this.getBoost() == other.getBoost()
            && this.tieBreakerMultiplier == other.tieBreakerMultiplier
            && this.disjuncts.equals(other.disjuncts);
  }
  /** Compute a hash code for hashing us
   * @return the hash code
   */
  @Override
  public int hashCode() {
    return Float.floatToIntBits(getBoost())
            + Float.floatToIntBits(tieBreakerMultiplier)
            + disjuncts.hashCode();
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/search/DisjunctionMaxQuery.java | Java | art | 11,023 |
package org.apache.lucene.search;
/**
* Copyright 2007 The Apache Software Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.index.IndexReader;
import java.io.IOException;
/** Abstract base class providing a mechanism to restrict searches to a subset
of an index and also maintains and returns position information.
This is useful if you want to compare the positions from a SpanQuery with the positions of items in
a filter. For instance, if you had a SpanFilter that marked all the occurrences of the word "foo" in documents,
and then you entered a new SpanQuery containing bar, you could not only filter by the word foo, but you could
then compare position information for post processing.
*/
public abstract class SpanFilter extends Filter{
  /** Returns a SpanFilterResult with true for documents which should be permitted in
  search results, and false for those that should not and Spans for where the true docs match.
  * @param reader The {@link org.apache.lucene.index.IndexReader} to load position and DocIdSet information from
  * @return A {@link SpanFilterResult} holding both the matching doc ids and their Spans
  * @throws java.io.IOException if there was an issue accessing the necessary information
  * */
  public abstract SpanFilterResult bitSpans(IndexReader reader) throws IOException;
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/search/SpanFilter.java | Java | art | 1,827 |
package org.apache.lucene.search;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.FieldSelector;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.ReaderUtil;
/** Implements search over a single IndexReader.
*
* <p>Applications usually need only call the inherited
* {@link #search(Query,int)}
* or {@link #search(Query,Filter,int)} methods. For performance reasons it is
* recommended to open only one IndexSearcher and use it for all of your searches.
*
 * <a name="thread-safety"></a><p><b>NOTE</b>:
 * <code>IndexSearcher</code> instances are completely
* thread safe, meaning multiple threads can call any of its
* methods, concurrently. If your application requires
* external synchronization, you should <b>not</b>
* synchronize on the <code>IndexSearcher</code> instance;
* use your own (non-Lucene) objects instead.</p>
*/
public class IndexSearcher extends Searcher {
  IndexReader reader;
  private boolean closeReader;
  // NOTE: these members might change in incompatible ways
  // in the next release
  protected IndexReader[] subReaders;
  // docStarts[i] is the docID base of subReaders[i]: the total number of
  // documents in all earlier sub-readers (see the private constructor).
  protected int[] docStarts;
  /** Creates a searcher searching the index in the named
   *  directory, with readOnly=true
   * @throws CorruptIndexException if the index is corrupt
   * @throws IOException if there is a low-level IO error
   * @param path directory where IndexReader will be opened
   */
  public IndexSearcher(Directory path) throws CorruptIndexException, IOException {
    this(IndexReader.open(path, true), true);
  }
  /** Creates a searcher searching the index in the named
   *  directory. You should pass readOnly=true, since it
   *  gives much better concurrent performance, unless you
   *  intend to do write operations (delete documents or
   *  change norms) with the underlying IndexReader.
   * @throws CorruptIndexException if the index is corrupt
   * @throws IOException if there is a low-level IO error
   * @param path directory where IndexReader will be opened
   * @param readOnly if true, the underlying IndexReader
   * will be opened readOnly
   */
  public IndexSearcher(Directory path, boolean readOnly) throws CorruptIndexException, IOException {
    this(IndexReader.open(path, readOnly), true);
  }
  /** Creates a searcher searching the provided index. */
  public IndexSearcher(IndexReader r) {
    this(r, false);
  }
  /** Expert: directly specify the reader, subReaders and
   *  their docID starts.
   *
   * <p><b>NOTE:</b> This API is experimental and
   * might change in incompatible ways in the next
   * release.</p> */
  public IndexSearcher(IndexReader reader, IndexReader[] subReaders, int[] docStarts) {
    this.reader = reader;
    this.subReaders = subReaders;
    this.docStarts = docStarts;
    closeReader = false;
  }
  private IndexSearcher(IndexReader r, boolean closeReader) {
    reader = r;
    this.closeReader = closeReader;
    List<IndexReader> subReadersList = new ArrayList<IndexReader>();
    gatherSubReaders(subReadersList, reader);
    subReaders = subReadersList.toArray(new IndexReader[subReadersList.size()]);
    docStarts = new int[subReaders.length];
    int maxDoc = 0;
    for (int i = 0; i < subReaders.length; i++) {
      // Record the cumulative doc count as each sub-reader's docID base.
      docStarts[i] = maxDoc;
      maxDoc += subReaders[i].maxDoc();
    }
  }
  /** Collects the leaf sub-readers of r into allSubReaders. */
  protected void gatherSubReaders(List<IndexReader> allSubReaders, IndexReader r) {
    ReaderUtil.gatherSubReaders(allSubReaders, r);
  }
  /** Return the {@link IndexReader} this searches. */
  public IndexReader getIndexReader() {
    return reader;
  }
  /**
   * Note that the underlying IndexReader is not closed, if
   * IndexSearcher was constructed with IndexSearcher(IndexReader r).
   * If the IndexReader was supplied implicitly by specifying a directory, then
   * the IndexReader gets closed.
   */
  @Override
  public void close() throws IOException {
    if(closeReader)
      reader.close();
  }
  // inherit javadoc
  @Override
  public int docFreq(Term term) throws IOException {
    return reader.docFreq(term);
  }
  // inherit javadoc
  @Override
  public Document doc(int i) throws CorruptIndexException, IOException {
    return reader.document(i);
  }
  // inherit javadoc
  @Override
  public Document doc(int i, FieldSelector fieldSelector) throws CorruptIndexException, IOException {
    return reader.document(i, fieldSelector);
  }
  // inherit javadoc
  @Override
  public int maxDoc() throws IOException {
    return reader.maxDoc();
  }
  // inherit javadoc
  @Override
  public TopDocs search(Weight weight, Filter filter, int nDocs) throws IOException {
    if (nDocs <= 0) {
      throw new IllegalArgumentException("nDocs must be > 0");
    }
    // Never ask for more hits than the index can hold.
    nDocs = Math.min(nDocs, reader.maxDoc());
    TopScoreDocCollector collector = TopScoreDocCollector.create(nDocs, !weight.scoresDocsOutOfOrder());
    search(weight, filter, collector);
    return collector.topDocs();
  }
  @Override
  public TopFieldDocs search(Weight weight, Filter filter,
      final int nDocs, Sort sort) throws IOException {
    return search(weight, filter, nDocs, sort, true);
  }
  /**
   * Just like {@link #search(Weight, Filter, int, Sort)}, but you choose
   * whether or not the fields in the returned {@link FieldDoc} instances should
   * be set by specifying fillFields.
   *
   * <p>NOTE: this does not compute scores by default. If you
   * need scores, create a {@link TopFieldCollector}
   * instance by calling {@link TopFieldCollector#create} and
   * then pass that to {@link #search(Weight, Filter,
   * Collector)}.</p>
   */
  public TopFieldDocs search(Weight weight, Filter filter, int nDocs,
                             Sort sort, boolean fillFields)
      throws IOException {
    nDocs = Math.min(nDocs, reader.maxDoc());
    TopFieldCollector collector = TopFieldCollector.create(sort, nDocs,
        fillFields, fieldSortDoTrackScores, fieldSortDoMaxScore, !weight.scoresDocsOutOfOrder());
    search(weight, filter, collector);
    return (TopFieldDocs) collector.topDocs();
  }
  @Override
  public void search(Weight weight, Filter filter, Collector collector)
      throws IOException {
    if (filter == null) {
      for (int i = 0; i < subReaders.length; i++) { // search each subreader
        collector.setNextReader(subReaders[i], docStarts[i]);
        Scorer scorer = weight.scorer(subReaders[i], !collector.acceptsDocsOutOfOrder(), true);
        if (scorer != null) {
          scorer.score(collector);
        }
      }
    } else {
      for (int i = 0; i < subReaders.length; i++) { // search each subreader
        collector.setNextReader(subReaders[i], docStarts[i]);
        searchWithFilter(subReaders[i], weight, filter, collector);
      }
    }
  }
  /** Collects, for one sub-reader, only those matches of weight's scorer
   *  that are also accepted by filter. */
  private void searchWithFilter(IndexReader reader, Weight weight,
      final Filter filter, final Collector collector) throws IOException {
    assert filter != null;
    Scorer scorer = weight.scorer(reader, true, false);
    if (scorer == null) {
      return;
    }
    int docID = scorer.docID();
    assert docID == -1 || docID == DocIdSetIterator.NO_MORE_DOCS;
    // CHECKME: use ConjunctionScorer here?
    DocIdSet filterDocIdSet = filter.getDocIdSet(reader);
    if (filterDocIdSet == null) {
      // this means the filter does not accept any documents.
      return;
    }
    DocIdSetIterator filterIter = filterDocIdSet.iterator();
    if (filterIter == null) {
      // this means the filter does not accept any documents.
      return;
    }
    int filterDoc = filterIter.nextDoc();
    int scorerDoc = scorer.advance(filterDoc);
    collector.setScorer(scorer);
    // Leapfrog: advance whichever iterator is behind until both agree on the
    // same doc, which is then collected.
    while (true) {
      if (scorerDoc == filterDoc) {
        // Check if scorer has exhausted, only before collecting.
        if (scorerDoc == DocIdSetIterator.NO_MORE_DOCS) {
          break;
        }
        collector.collect(scorerDoc);
        filterDoc = filterIter.nextDoc();
        scorerDoc = scorer.advance(filterDoc);
      } else if (scorerDoc > filterDoc) {
        filterDoc = filterIter.advance(scorerDoc);
      } else {
        scorerDoc = scorer.advance(filterDoc);
      }
    }
  }
  @Override
  public Query rewrite(Query original) throws IOException {
    // Rewrite repeatedly until a fixed point: a query that rewrites to
    // itself is in its final (primitive) form.
    Query query = original;
    for (Query rewrittenQuery = query.rewrite(reader); rewrittenQuery != query;
         rewrittenQuery = query.rewrite(reader)) {
      query = rewrittenQuery;
    }
    return query;
  }
  @Override
  public Explanation explain(Weight weight, int doc) throws IOException {
    // Locate the sub-reader containing doc and translate to its local id.
    int n = ReaderUtil.subIndex(doc, docStarts);
    int deBasedDoc = doc - docStarts[n];
    return weight.explain(subReaders[n], deBasedDoc);
  }
  private boolean fieldSortDoTrackScores;
  private boolean fieldSortDoMaxScore;
  /** By default, no scores are computed when sorting by
   *  field (using {@link #search(Query,Filter,int,Sort)}).
   *  You can change that, per IndexSearcher instance, by
   *  calling this method. Note that this will incur a CPU
   *  cost.
   *
   *  @param doTrackScores If true, then scores are
   *  returned for every matching document in {@link
   *  TopFieldDocs}.
   *
   *  @param doMaxScore If true, then the max score for all
   *  matching docs is computed. */
  public void setDefaultFieldSortScoring(boolean doTrackScores, boolean doMaxScore) {
    fieldSortDoTrackScores = doTrackScores;
    fieldSortDoMaxScore = doMaxScore;
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/search/IndexSearcher.java | Java | art | 10,472 |
package org.apache.lucene.search;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.io.Serializable;
import org.apache.lucene.index.IndexReader;
/**
* Expert: Calculate query weights and build query scorers.
* <p>
* The purpose of {@link Weight} is to ensure searching does not
* modify a {@link Query}, so that a {@link Query} instance can be reused. <br>
* {@link Searcher} dependent state of the query should reside in the
* {@link Weight}. <br>
* {@link IndexReader} dependent state should reside in the {@link Scorer}.
* <p>
* A <code>Weight</code> is used in the following way:
* <ol>
* <li>A <code>Weight</code> is constructed by a top-level query, given a
* <code>Searcher</code> ({@link Query#createWeight(Searcher)}).
* <li>The {@link #sumOfSquaredWeights()} method is called on the
* <code>Weight</code> to compute the query normalization factor
* {@link Similarity#queryNorm(float)} of the query clauses contained in the
* query.
* <li>The query normalization factor is passed to {@link #normalize(float)}. At
* this point the weighting is complete.
* <li>A <code>Scorer</code> is constructed by {@link #scorer(IndexReader,boolean,boolean)}.
* </ol>
*
* @since 2.9
*/
public abstract class Weight implements Serializable {
  /**
   * An explanation of the score computation for the named document.
   *
   * @param reader sub-reader containing the given doc
   * @param doc the document's id, relative to the given reader
   * @return an Explanation for the score
   * @throws IOException if an error occurs reading the index
   */
  public abstract Explanation explain(IndexReader reader, int doc) throws IOException;
  /** The query that this concerns. */
  public abstract Query getQuery();
  /** The weight for this query. */
  public abstract float getValue();
  /** Assigns the query normalization factor to this. */
  public abstract void normalize(float norm);
  /**
   * Returns a {@link Scorer} which scores documents in/out-of order according
   * to <code>scoreDocsInOrder</code>.
   * <p>
   * <b>NOTE:</b> even if <code>scoreDocsInOrder</code> is false, it is
   * recommended to check whether the returned <code>Scorer</code> indeed scores
   * documents out of order (i.e., call {@link #scoresDocsOutOfOrder()}), as
   * some <code>Scorer</code> implementations will always return documents
   * in-order.<br>
   * <b>NOTE:</b> null can be returned if no documents will be scored by this
   * query.
   *
   * @param reader
   *          the {@link IndexReader} for which to return the {@link Scorer}.
   * @param scoreDocsInOrder
   *          specifies whether in-order scoring of documents is required. Note
   *          that if set to false (i.e., out-of-order scoring is required),
   *          this method can return whatever scoring mode it supports, as every
   *          in-order scorer is also an out-of-order one. However, an
   *          out-of-order scorer may not support {@link Scorer#nextDoc()}
   *          and/or {@link Scorer#advance(int)}, therefore it is recommended to
   *          request an in-order scorer if use of these methods is required.
   * @param topScorer
   *          if true, {@link Scorer#score(Collector)} will be called; if false,
   *          {@link Scorer#nextDoc()} and/or {@link Scorer#advance(int)} will
   *          be called.
   * @return a {@link Scorer} which scores documents in/out-of order.
   * @throws IOException if an error occurs reading the index
   */
  public abstract Scorer scorer(IndexReader reader, boolean scoreDocsInOrder,
      boolean topScorer) throws IOException;
  /** The sum of squared weights of contained query clauses. */
  public abstract float sumOfSquaredWeights() throws IOException;
  /**
   * Returns true iff this implementation scores docs only out of order. This
   * method is used in conjunction with {@link Collector}'s
   * {@link Collector#acceptsDocsOutOfOrder() acceptsDocsOutOfOrder} and
   * {@link #scorer(org.apache.lucene.index.IndexReader, boolean, boolean)} to
   * create a matching {@link Scorer} instance for a given {@link Collector}, or
   * vice versa.
   * <p>
   * <b>NOTE:</b> the default implementation returns <code>false</code>, i.e.
   * the <code>Scorer</code> scores documents in-order.
   */
  public boolean scoresDocsOutOfOrder() { return false; }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/search/Weight.java | Java | art | 5,013 |
package org.apache.lucene.search;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.util.ToStringUtils;
import java.io.IOException;
import java.util.Set;
/**
* A query that applies a filter to the results of another query.
*
* <p>Note: the bits are retrieved from the filter each time this
* query is used in a search - use a CachingWrapperFilter to avoid
* regenerating the bits every time.
*
* <p>Created: Apr 20, 2004 8:58:29 AM
*
* @since 1.4
* @see CachingWrapperFilter
*/
public class FilteredQuery
extends Query {
  // Wrapped query; its scorer is intersected with the filter's doc id set.
  Query query;
  // Filter consulted on every search (no caching here; see class javadoc).
  Filter filter;
  /**
   * Constructs a new query which applies a filter to the results of the original query.
   * Filter.getDocIdSet() will be called every time this query is used in a search.
   * @param query Query to be filtered, cannot be <code>null</code>.
   * @param filter Filter to apply to query results, cannot be <code>null</code>.
   */
  public FilteredQuery (Query query, Filter filter) {
    this.query = query;
    this.filter = filter;
  }
  /**
   * Returns a Weight that applies the filter to the enclosed query's Weight.
   * This is accomplished by overriding the Scorer returned by the Weight.
   */
  @Override
  public Weight createWeight(final Searcher searcher) throws IOException {
    final Weight weight = query.createWeight (searcher);
    final Similarity similarity = query.getSimilarity(searcher);
    return new Weight() {
      private float value;
      // pass these methods through to enclosed query's weight
      @Override
      public float getValue() { return value; }
      @Override
      public float sumOfSquaredWeights() throws IOException {
        // Boost is applied twice because these are *squared* weights.
        return weight.sumOfSquaredWeights() * getBoost() * getBoost();
      }
      @Override
      public void normalize (float v) {
        weight.normalize(v);
        // Cache the boosted value so getValue() reflects this query's boost.
        value = weight.getValue() * getBoost();
      }
      @Override
      public Explanation explain (IndexReader ir, int i) throws IOException {
        Explanation inner = weight.explain (ir, i);
        if (getBoost()!=1) {
          // Wrap the inner explanation so the boost factor is visible.
          Explanation preBoost = inner;
          inner = new Explanation(inner.getValue()*getBoost(),"product of:");
          inner.addDetail(new Explanation(getBoost(),"boost"));
          inner.addDetail(preBoost);
        }
        Filter f = FilteredQuery.this.filter;
        // A filter may return a null DocIdSet (or a null iterator) to signal
        // "no documents"; both cases fall back to the empty set.
        DocIdSet docIdSet = f.getDocIdSet(ir);
        DocIdSetIterator docIdSetIterator = docIdSet == null ? DocIdSet.EMPTY_DOCIDSET.iterator() : docIdSet.iterator();
        if (docIdSetIterator == null) {
          docIdSetIterator = DocIdSet.EMPTY_DOCIDSET.iterator();
        }
        // The doc matches the filter iff advancing lands exactly on it.
        if (docIdSetIterator.advance(i) == i) {
          return inner;
        } else {
          Explanation result = new Explanation
            (0.0f, "failure to match filter: " + f.toString());
          result.addDetail(inner);
          return result;
        }
      }
      // return this query
      @Override
      public Query getQuery() { return FilteredQuery.this; }
      // return a filtering scorer
      @Override
      public Scorer scorer(IndexReader indexReader, boolean scoreDocsInOrder, boolean topScorer)
          throws IOException {
        // Always request an in-order, non-top scorer from the wrapped weight:
        // the leapfrog intersection below relies on in-order iteration.
        final Scorer scorer = weight.scorer(indexReader, true, false);
        if (scorer == null) {
          return null;
        }
        DocIdSet docIdSet = filter.getDocIdSet(indexReader);
        if (docIdSet == null) {
          return null;
        }
        final DocIdSetIterator docIdSetIterator = docIdSet.iterator();
        if (docIdSetIterator == null) {
          return null;
        }
        return new Scorer(similarity) {
          private int doc = -1;
          // Leapfrog: repeatedly advance whichever iterator is behind until
          // both sit on the same doc, or one is exhausted (NO_MORE_DOCS,
          // which is larger than any doc id, terminates the loop).
          private int advanceToCommon(int scorerDoc, int disiDoc) throws IOException {
            while (scorerDoc != disiDoc) {
              if (scorerDoc < disiDoc) {
                scorerDoc = scorer.advance(disiDoc);
              } else {
                disiDoc = docIdSetIterator.advance(scorerDoc);
              }
            }
            return scorerDoc;
          }
          @Override
          public int nextDoc() throws IOException {
            // Step both iterators once, then leapfrog to a common doc.
            int scorerDoc, disiDoc;
            return doc = (disiDoc = docIdSetIterator.nextDoc()) != NO_MORE_DOCS
                && (scorerDoc = scorer.nextDoc()) != NO_MORE_DOCS
                && advanceToCommon(scorerDoc, disiDoc) != NO_MORE_DOCS ? scorer.docID() : NO_MORE_DOCS;
          }
          @Override
          public int docID() { return doc; }
          @Override
          public int advance(int target) throws IOException {
            int disiDoc, scorerDoc;
            return doc = (disiDoc = docIdSetIterator.advance(target)) != NO_MORE_DOCS
                && (scorerDoc = scorer.advance(disiDoc)) != NO_MORE_DOCS
                && advanceToCommon(scorerDoc, disiDoc) != NO_MORE_DOCS ? scorer.docID() : NO_MORE_DOCS;
          }
          @Override
          public float score() throws IOException { return getBoost() * scorer.score(); }
        };
      }
    };
  }
  /** Rewrites the wrapped query. */
  @Override
  public Query rewrite(IndexReader reader) throws IOException {
    Query rewritten = query.rewrite(reader);
    if (rewritten != query) {
      // Clone so the original (un-rewritten) query instance stays unchanged.
      FilteredQuery clone = (FilteredQuery)this.clone();
      clone.query = rewritten;
      return clone;
    } else {
      return this;
    }
  }
  public Query getQuery() {
    return query;
  }
  public Filter getFilter() {
    return filter;
  }
  // inherit javadoc
  @Override
  public void extractTerms(Set<Term> terms) {
    getQuery().extractTerms(terms);
  }
  /** Prints a user-readable version of this query. */
  @Override
  public String toString (String s) {
    StringBuilder buffer = new StringBuilder();
    buffer.append("filtered(");
    buffer.append(query.toString(s));
    buffer.append(")->");
    buffer.append(filter);
    buffer.append(ToStringUtils.boost(getBoost()));
    return buffer.toString();
  }
  /** Returns true iff <code>o</code> is equal to this. */
  @Override
  public boolean equals(Object o) {
    if (o instanceof FilteredQuery) {
      FilteredQuery fq = (FilteredQuery) o;
      return (query.equals(fq.query) && filter.equals(fq.filter) && getBoost()==fq.getBoost());
    }
    return false;
  }
  /** Returns a hash code value for this object. */
  @Override
  public int hashCode() {
    // NOTE: '+' binds tighter than '^', so this evaluates as
    // query ^ (filter + boostBits). Still consistent with equals(); kept
    // as-is for hash stability.
    return query.hashCode() ^ filter.hashCode() + Float.floatToRawIntBits(getBoost());
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/search/FilteredQuery.java | Java | art | 7,342 |
package org.apache.lucene.search;
/**
* Copyright 2005 The Apache Software Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.index.IndexReader;
import java.io.IOException;
/**
* Wraps another SpanFilter's result and caches it. The purpose is to allow
* filters to simply filter, and then wrap with this class to add caching.
*/
public class CachingSpanFilter extends SpanFilter {
  // The wrapped filter whose span results are cached per reader.
  private SpanFilter filter;
  // Transient cache of span results, keyed on the reader's cache key(s).
  private final CachingWrapperFilter.FilterCache<SpanFilterResult> cache;
  /**
   * Creates a caching wrapper that treats new deletions as a cache miss,
   * i.e. {@link CachingWrapperFilter.DeletesMode#RECACHE}.
   * @param filter Filter to cache results of
   */
  public CachingSpanFilter(SpanFilter filter) {
    this(filter, CachingWrapperFilter.DeletesMode.RECACHE);
  }
  /**
   * @param filter Filter to cache results of
   * @param deletesMode See {@link CachingWrapperFilter.DeletesMode}
   */
  public CachingSpanFilter(SpanFilter filter, CachingWrapperFilter.DeletesMode deletesMode) {
    this.filter = filter;
    if (CachingWrapperFilter.DeletesMode.DYNAMIC == deletesMode) {
      throw new IllegalArgumentException("DeletesMode.DYNAMIC is not supported");
    }
    this.cache = new CachingWrapperFilter.FilterCache<SpanFilterResult>(deletesMode) {
      @Override
      protected SpanFilterResult mergeDeletes(final IndexReader r, final SpanFilterResult value) {
        // Unreachable: DYNAMIC mode was rejected in the constructor above.
        throw new IllegalStateException("DeletesMode.DYNAMIC is not supported");
      }
    };
  }
  @Override
  public DocIdSet getDocIdSet(IndexReader reader) throws IOException {
    final SpanFilterResult spans = getCachedResult(reader);
    if (spans == null) {
      return null;
    }
    return spans.getDocIdSet();
  }
  // Cache statistics, package-visible so tests can observe them.
  int hitCount, missCount;
  // Returns the cached result for this reader, computing and caching it on a
  // miss.
  private SpanFilterResult getCachedResult(IndexReader reader) throws IOException {
    final Object coreKey = reader.getFieldCacheKey();
    final Object delKey = reader.hasDeletions() ? reader.getDeletesCacheKey() : coreKey;
    SpanFilterResult cached = cache.get(reader, coreKey, delKey);
    if (cached == null) {
      missCount++;
      cached = filter.bitSpans(reader);
      cache.put(coreKey, delKey, cached);
    } else {
      hitCount++;
    }
    return cached;
  }
  @Override
  public SpanFilterResult bitSpans(IndexReader reader) throws IOException {
    return getCachedResult(reader);
  }
  @Override
  public String toString() {
    return "CachingSpanFilter(" + filter + ")";
  }
  @Override
  public boolean equals(Object o) {
    if (o instanceof CachingSpanFilter) {
      return filter.equals(((CachingSpanFilter) o).filter);
    }
    return false;
  }
  @Override
  public int hashCode() {
    return filter.hashCode() ^ 0x1117BF25;
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/search/CachingSpanFilter.java | Java | art | 3,336 |
package org.apache.lucene.search;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.FieldValueHitQueue.Entry;
import org.apache.lucene.util.PriorityQueue;
/**
* A {@link Collector} that sorts by {@link SortField} using
* {@link FieldComparator}s.
* <p/>
* See the {@link #create(org.apache.lucene.search.Sort, int, boolean, boolean, boolean, boolean)} method
* for instantiating a TopFieldCollector.
*
* <p><b>NOTE:</b> This API is experimental and might change in
* incompatible ways in the next release.</p>
*/
public abstract class TopFieldCollector extends TopDocsCollector<Entry> {
// TODO: one optimization we could do is to pre-fill
// the queue with sentinel value that guaranteed to
// always compare lower than a real hit; this would
// save having to check queueFull on each insert
/*
* Implements a TopFieldCollector over one SortField criteria, without
* tracking document scores and maxScore.
*/
private static class OneComparatorNonScoringCollector extends
TopFieldCollector {
final FieldComparator comparator;
final int reverseMul;
public OneComparatorNonScoringCollector(FieldValueHitQueue queue,
int numHits, boolean fillFields) throws IOException {
super(queue, numHits, fillFields);
comparator = queue.getComparators()[0];
reverseMul = queue.getReverseMul()[0];
}
final void updateBottom(int doc) {
// bottom.score is already set to Float.NaN in add().
bottom.doc = docBase + doc;
bottom = pq.updateTop();
}
@Override
public void collect(int doc) throws IOException {
++totalHits;
if (queueFull) {
if ((reverseMul * comparator.compareBottom(doc)) <= 0) {
// since docs are visited in doc Id order, if compare is 0, it means
// this document is largest than anything else in the queue, and
// therefore not competitive.
return;
}
// This hit is competitive - replace bottom element in queue & adjustTop
comparator.copy(bottom.slot, doc);
updateBottom(doc);
comparator.setBottom(bottom.slot);
} else {
// Startup transient: queue hasn't gathered numHits yet
final int slot = totalHits - 1;
// Copy hit into queue
comparator.copy(slot, doc);
add(slot, doc, Float.NaN);
if (queueFull) {
comparator.setBottom(bottom.slot);
}
}
}
@Override
public void setNextReader(IndexReader reader, int docBase) throws IOException {
this.docBase = docBase;
comparator.setNextReader(reader, docBase);
}
@Override
public void setScorer(Scorer scorer) throws IOException {
comparator.setScorer(scorer);
}
}
/*
* Implements a TopFieldCollector over one SortField criteria, without
* tracking document scores and maxScore, and assumes out of orderness in doc
* Ids collection.
*/
private static class OutOfOrderOneComparatorNonScoringCollector extends
OneComparatorNonScoringCollector {
public OutOfOrderOneComparatorNonScoringCollector(FieldValueHitQueue queue,
int numHits, boolean fillFields) throws IOException {
super(queue, numHits, fillFields);
}
@Override
public void collect(int doc) throws IOException {
++totalHits;
if (queueFull) {
// Fastmatch: return if this hit is not competitive
final int cmp = reverseMul * comparator.compareBottom(doc);
if (cmp < 0 || (cmp == 0 && doc + docBase > bottom.doc)) {
return;
}
// This hit is competitive - replace bottom element in queue & adjustTop
comparator.copy(bottom.slot, doc);
updateBottom(doc);
comparator.setBottom(bottom.slot);
} else {
// Startup transient: queue hasn't gathered numHits yet
final int slot = totalHits - 1;
// Copy hit into queue
comparator.copy(slot, doc);
add(slot, doc, Float.NaN);
if (queueFull) {
comparator.setBottom(bottom.slot);
}
}
}
@Override
public boolean acceptsDocsOutOfOrder() {
return true;
}
}
/*
* Implements a TopFieldCollector over one SortField criteria, while tracking
* document scores but no maxScore.
*/
  private static class OneComparatorScoringNoMaxScoreCollector extends
      OneComparatorNonScoringCollector {
    // Current scorer, stashed by setScorer() so collect() can score lazily.
    Scorer scorer;
    public OneComparatorScoringNoMaxScoreCollector(FieldValueHitQueue queue,
        int numHits, boolean fillFields) throws IOException {
      super(queue, numHits, fillFields);
    }
    // Overwrites the (just replaced) bottom entry and restores heap order.
    final void updateBottom(int doc, float score) {
      bottom.doc = docBase + doc;
      bottom.score = score;
      bottom = pq.updateTop();
    }
    @Override
    public void collect(int doc) throws IOException {
      ++totalHits;
      if (queueFull) {
        if ((reverseMul * comparator.compareBottom(doc)) <= 0) {
          // since docs are visited in doc Id order, if compare is 0, it means
          // this document cannot beat anything already in the queue, and is
          // therefore not competitive.
          return;
        }
        // Compute the score only if the hit is competitive.
        final float score = scorer.score();
        // This hit is competitive - replace bottom element in queue & adjustTop
        comparator.copy(bottom.slot, doc);
        updateBottom(doc, score);
        comparator.setBottom(bottom.slot);
      } else {
        // Compute the score only if the hit is competitive.
        final float score = scorer.score();
        // Startup transient: queue hasn't gathered numHits yet
        final int slot = totalHits - 1;
        // Copy hit into queue
        comparator.copy(slot, doc);
        add(slot, doc, score);
        if (queueFull) {
          comparator.setBottom(bottom.slot);
        }
      }
    }
    @Override
    public void setScorer(Scorer scorer) throws IOException {
      // Keep a local reference (for score()) and forward to the comparator.
      this.scorer = scorer;
      comparator.setScorer(scorer);
    }
  }
/*
* Implements a TopFieldCollector over one SortField criteria, while tracking
* document scores but no maxScore, and assumes out of orderness in doc Ids
* collection.
*/
  private static class OutOfOrderOneComparatorScoringNoMaxScoreCollector extends
      OneComparatorScoringNoMaxScoreCollector {
    public OutOfOrderOneComparatorScoringNoMaxScoreCollector(
        FieldValueHitQueue queue, int numHits, boolean fillFields)
        throws IOException {
      super(queue, numHits, fillFields);
    }
    @Override
    public void collect(int doc) throws IOException {
      ++totalHits;
      if (queueFull) {
        // Fastmatch: return if this hit is not competitive.
        // Docs may arrive out of order, so a sort tie (cmp == 0) is broken
        // explicitly by global doc id: the smaller id wins.
        final int cmp = reverseMul * comparator.compareBottom(doc);
        if (cmp < 0 || (cmp == 0 && doc + docBase > bottom.doc)) {
          return;
        }
        // Compute the score only if the hit is competitive.
        final float score = scorer.score();
        // This hit is competitive - replace bottom element in queue & adjustTop
        comparator.copy(bottom.slot, doc);
        updateBottom(doc, score);
        comparator.setBottom(bottom.slot);
      } else {
        // Compute the score only if the hit is competitive.
        final float score = scorer.score();
        // Startup transient: queue hasn't gathered numHits yet
        final int slot = totalHits - 1;
        // Copy hit into queue
        comparator.copy(slot, doc);
        add(slot, doc, score);
        if (queueFull) {
          comparator.setBottom(bottom.slot);
        }
      }
    }
    @Override
    public boolean acceptsDocsOutOfOrder() {
      return true;
    }
  }
/*
* Implements a TopFieldCollector over one SortField criteria, with tracking
* document scores and maxScore.
*/
  private static class OneComparatorScoringMaxScoreCollector extends
      OneComparatorNonScoringCollector {
    // Current scorer, stashed by setScorer(); every hit is scored because
    // maxScore must reflect all collected documents.
    Scorer scorer;
    public OneComparatorScoringMaxScoreCollector(FieldValueHitQueue queue,
        int numHits, boolean fillFields) throws IOException {
      super(queue, numHits, fillFields);
      // Must set maxScore to NEG_INF, or otherwise Math.max always returns NaN.
      maxScore = Float.NEGATIVE_INFINITY;
    }
    // Overwrites the (just replaced) bottom entry and restores heap order.
    final void updateBottom(int doc, float score) {
      bottom.doc = docBase + doc;
      bottom.score = score;
      bottom = pq.updateTop();
    }
    @Override
    public void collect(int doc) throws IOException {
      // Score every hit up front (not only competitive ones); maxScore
      // tracking requires it.
      final float score = scorer.score();
      if (score > maxScore) {
        maxScore = score;
      }
      ++totalHits;
      if (queueFull) {
        if ((reverseMul * comparator.compareBottom(doc)) <= 0) {
          // since docs are visited in doc Id order, if compare is 0, it means
          // this document cannot beat anything already in the queue, and is
          // therefore not competitive.
          return;
        }
        // This hit is competitive - replace bottom element in queue & adjustTop
        comparator.copy(bottom.slot, doc);
        updateBottom(doc, score);
        comparator.setBottom(bottom.slot);
      } else {
        // Startup transient: queue hasn't gathered numHits yet
        final int slot = totalHits - 1;
        // Copy hit into queue
        comparator.copy(slot, doc);
        add(slot, doc, score);
        if (queueFull) {
          comparator.setBottom(bottom.slot);
        }
      }
    }
    @Override
    public void setScorer(Scorer scorer) throws IOException {
      this.scorer = scorer;
      super.setScorer(scorer);
    }
  }
/*
* Implements a TopFieldCollector over one SortField criteria, with tracking
* document scores and maxScore, and assumes out of orderness in doc Ids
* collection.
*/
  private static class OutOfOrderOneComparatorScoringMaxScoreCollector extends
      OneComparatorScoringMaxScoreCollector {
    public OutOfOrderOneComparatorScoringMaxScoreCollector(FieldValueHitQueue queue,
        int numHits, boolean fillFields) throws IOException {
      super(queue, numHits, fillFields);
    }
    @Override
    public void collect(int doc) throws IOException {
      // Score every hit up front; maxScore tracking requires it.
      final float score = scorer.score();
      if (score > maxScore) {
        maxScore = score;
      }
      ++totalHits;
      if (queueFull) {
        // Fastmatch: return if this hit is not competitive.
        // Docs may arrive out of order, so a sort tie (cmp == 0) is broken
        // explicitly by global doc id: the smaller id wins.
        final int cmp = reverseMul * comparator.compareBottom(doc);
        if (cmp < 0 || (cmp == 0 && doc + docBase > bottom.doc)) {
          return;
        }
        // This hit is competitive - replace bottom element in queue & adjustTop
        comparator.copy(bottom.slot, doc);
        updateBottom(doc, score);
        comparator.setBottom(bottom.slot);
      } else {
        // Startup transient: queue hasn't gathered numHits yet
        final int slot = totalHits - 1;
        // Copy hit into queue
        comparator.copy(slot, doc);
        add(slot, doc, score);
        if (queueFull) {
          comparator.setBottom(bottom.slot);
        }
      }
    }
    @Override
    public boolean acceptsDocsOutOfOrder() {
      return true;
    }
  }
/*
* Implements a TopFieldCollector over multiple SortField criteria, without
* tracking document scores and maxScore.
*/
private static class MultiComparatorNonScoringCollector extends TopFieldCollector {
final FieldComparator[] comparators;
final int[] reverseMul;
public MultiComparatorNonScoringCollector(FieldValueHitQueue queue,
int numHits, boolean fillFields) throws IOException {
super(queue, numHits, fillFields);
comparators = queue.getComparators();
reverseMul = queue.getReverseMul();
}
final void updateBottom(int doc) {
// bottom.score is already set to Float.NaN in add().
bottom.doc = docBase + doc;
bottom = pq.updateTop();
}
@Override
public void collect(int doc) throws IOException {
++totalHits;
if (queueFull) {
// Fastmatch: return if this hit is not competitive
for (int i = 0;; i++) {
final int c = reverseMul[i] * comparators[i].compareBottom(doc);
if (c < 0) {
// Definitely not competitive.
return;
} else if (c > 0) {
// Definitely competitive.
break;
} else if (i == comparators.length - 1) {
// Here c=0. If we're at the last comparator, this doc is not
// competitive, since docs are visited in doc Id order, which means
// this doc cannot compete with any other document in the queue.
return;
}
}
// This hit is competitive - replace bottom element in queue & adjustTop
for (int i = 0; i < comparators.length; i++) {
comparators[i].copy(bottom.slot, doc);
}
updateBottom(doc);
for (int i = 0; i < comparators.length; i++) {
comparators[i].setBottom(bottom.slot);
}
} else {
// Startup transient: queue hasn't gathered numHits yet
final int slot = totalHits - 1;
// Copy hit into queue
for (int i = 0; i < comparators.length; i++) {
comparators[i].copy(slot, doc);
}
add(slot, doc, Float.NaN);
if (queueFull) {
for (int i = 0; i < comparators.length; i++) {
comparators[i].setBottom(bottom.slot);
}
}
}
}
@Override
public void setNextReader(IndexReader reader, int docBase) throws IOException {
this.docBase = docBase;
for (int i = 0; i < comparators.length; i++) {
comparators[i].setNextReader(reader, docBase);
}
}
@Override
public void setScorer(Scorer scorer) throws IOException {
// set the scorer on all comparators
for (int i = 0; i < comparators.length; i++) {
comparators[i].setScorer(scorer);
}
}
}
/*
* Implements a TopFieldCollector over multiple SortField criteria, without
* tracking document scores and maxScore, and assumes out of orderness in doc
* Ids collection.
*/
  private static class OutOfOrderMultiComparatorNonScoringCollector extends
      MultiComparatorNonScoringCollector {
    public OutOfOrderMultiComparatorNonScoringCollector(FieldValueHitQueue queue,
        int numHits, boolean fillFields) throws IOException {
      super(queue, numHits, fillFields);
    }
    @Override
    public void collect(int doc) throws IOException {
      ++totalHits;
      if (queueFull) {
        // Fastmatch: return if this hit is not competitive
        for (int i = 0;; i++) {
          final int c = reverseMul[i] * comparators[i].compareBottom(doc);
          if (c < 0) {
            // Definitely not competitive.
            return;
          } else if (c > 0) {
            // Definitely competitive.
            break;
          } else if (i == comparators.length - 1) {
            // This is the equals case.
            // Docs may arrive out of order, so break the tie explicitly by
            // global doc id: the smaller id wins.
            if (doc + docBase > bottom.doc) {
              // Definitely not competitive
              return;
            }
            break;
          }
        }
        // This hit is competitive - replace bottom element in queue & adjustTop
        for (int i = 0; i < comparators.length; i++) {
          comparators[i].copy(bottom.slot, doc);
        }
        updateBottom(doc);
        for (int i = 0; i < comparators.length; i++) {
          comparators[i].setBottom(bottom.slot);
        }
      } else {
        // Startup transient: queue hasn't gathered numHits yet
        final int slot = totalHits - 1;
        // Copy hit into queue
        for (int i = 0; i < comparators.length; i++) {
          comparators[i].copy(slot, doc);
        }
        add(slot, doc, Float.NaN);
        if (queueFull) {
          for (int i = 0; i < comparators.length; i++) {
            comparators[i].setBottom(bottom.slot);
          }
        }
      }
    }
    @Override
    public boolean acceptsDocsOutOfOrder() {
      return true;
    }
  }
/*
* Implements a TopFieldCollector over multiple SortField criteria, with
* tracking document scores and maxScore.
*/
  private static class MultiComparatorScoringMaxScoreCollector extends MultiComparatorNonScoringCollector {
    // Current scorer, stashed by setScorer(); every hit is scored because
    // maxScore must reflect all collected documents.
    Scorer scorer;
    public MultiComparatorScoringMaxScoreCollector(FieldValueHitQueue queue,
        int numHits, boolean fillFields) throws IOException {
      super(queue, numHits, fillFields);
      // Must set maxScore to NEG_INF, or otherwise Math.max always returns NaN.
      maxScore = Float.NEGATIVE_INFINITY;
    }
    // Overwrites the (just replaced) bottom entry and restores heap order.
    final void updateBottom(int doc, float score) {
      bottom.doc = docBase + doc;
      bottom.score = score;
      bottom = pq.updateTop();
    }
    @Override
    public void collect(int doc) throws IOException {
      // Score every hit up front; maxScore tracking requires it.
      final float score = scorer.score();
      if (score > maxScore) {
        maxScore = score;
      }
      ++totalHits;
      if (queueFull) {
        // Fastmatch: return if this hit is not competitive
        for (int i = 0;; i++) {
          final int c = reverseMul[i] * comparators[i].compareBottom(doc);
          if (c < 0) {
            // Definitely not competitive.
            return;
          } else if (c > 0) {
            // Definitely competitive.
            break;
          } else if (i == comparators.length - 1) {
            // Here c=0. If we're at the last comparator, this doc is not
            // competitive, since docs are visited in doc Id order, which means
            // this doc cannot compete with any other document in the queue.
            return;
          }
        }
        // This hit is competitive - replace bottom element in queue & adjustTop
        for (int i = 0; i < comparators.length; i++) {
          comparators[i].copy(bottom.slot, doc);
        }
        updateBottom(doc, score);
        for (int i = 0; i < comparators.length; i++) {
          comparators[i].setBottom(bottom.slot);
        }
      } else {
        // Startup transient: queue hasn't gathered numHits yet
        final int slot = totalHits - 1;
        // Copy hit into queue
        for (int i = 0; i < comparators.length; i++) {
          comparators[i].copy(slot, doc);
        }
        add(slot, doc, score);
        if (queueFull) {
          for (int i = 0; i < comparators.length; i++) {
            comparators[i].setBottom(bottom.slot);
          }
        }
      }
    }
    @Override
    public void setScorer(Scorer scorer) throws IOException {
      this.scorer = scorer;
      super.setScorer(scorer);
    }
  }
/*
* Implements a TopFieldCollector over multiple SortField criteria, with
* tracking document scores and maxScore, and assumes out of orderness in doc
* Ids collection.
*/
  private final static class OutOfOrderMultiComparatorScoringMaxScoreCollector
      extends MultiComparatorScoringMaxScoreCollector {
    public OutOfOrderMultiComparatorScoringMaxScoreCollector(FieldValueHitQueue queue,
        int numHits, boolean fillFields) throws IOException {
      super(queue, numHits, fillFields);
    }
    @Override
    public void collect(int doc) throws IOException {
      // Score every hit up front; maxScore tracking requires it.
      final float score = scorer.score();
      if (score > maxScore) {
        maxScore = score;
      }
      ++totalHits;
      if (queueFull) {
        // Fastmatch: return if this hit is not competitive
        for (int i = 0;; i++) {
          final int c = reverseMul[i] * comparators[i].compareBottom(doc);
          if (c < 0) {
            // Definitely not competitive.
            return;
          } else if (c > 0) {
            // Definitely competitive.
            break;
          } else if (i == comparators.length - 1) {
            // This is the equals case.
            // Docs may arrive out of order, so break the tie explicitly by
            // global doc id: the smaller id wins.
            if (doc + docBase > bottom.doc) {
              // Definitely not competitive
              return;
            }
            break;
          }
        }
        // This hit is competitive - replace bottom element in queue & adjustTop
        for (int i = 0; i < comparators.length; i++) {
          comparators[i].copy(bottom.slot, doc);
        }
        updateBottom(doc, score);
        for (int i = 0; i < comparators.length; i++) {
          comparators[i].setBottom(bottom.slot);
        }
      } else {
        // Startup transient: queue hasn't gathered numHits yet
        final int slot = totalHits - 1;
        // Copy hit into queue
        for (int i = 0; i < comparators.length; i++) {
          comparators[i].copy(slot, doc);
        }
        add(slot, doc, score);
        if (queueFull) {
          for (int i = 0; i < comparators.length; i++) {
            comparators[i].setBottom(bottom.slot);
          }
        }
      }
    }
    @Override
    public boolean acceptsDocsOutOfOrder() {
      return true;
    }
  }
  /*
   * Implements a TopFieldCollector over multiple SortField criteria, while
   * tracking document scores but no maxScore.
   */
  private static class MultiComparatorScoringNoMaxScoreCollector extends MultiComparatorNonScoringCollector {
    // Current scorer, stashed by setScorer(); scores are computed lazily,
    // only for hits that are actually inserted into the queue.
    Scorer scorer;
    public MultiComparatorScoringNoMaxScoreCollector(FieldValueHitQueue queue,
        int numHits, boolean fillFields) throws IOException {
      super(queue, numHits, fillFields);
    }
    // Overwrites the (just replaced) bottom entry and restores heap order.
    final void updateBottom(int doc, float score) {
      bottom.doc = docBase + doc;
      bottom.score = score;
      bottom = pq.updateTop();
    }
    @Override
    public void collect(int doc) throws IOException {
      ++totalHits;
      if (queueFull) {
        // Fastmatch: return if this hit is not competitive
        for (int i = 0;; i++) {
          final int c = reverseMul[i] * comparators[i].compareBottom(doc);
          if (c < 0) {
            // Definitely not competitive.
            return;
          } else if (c > 0) {
            // Definitely competitive.
            break;
          } else if (i == comparators.length - 1) {
            // Here c=0. If we're at the last comparator, this doc is not
            // competitive, since docs are visited in doc Id order, which means
            // this doc cannot compete with any other document in the queue.
            return;
          }
        }
        // This hit is competitive - replace bottom element in queue & adjustTop
        for (int i = 0; i < comparators.length; i++) {
          comparators[i].copy(bottom.slot, doc);
        }
        // Compute score only if it is competitive.
        final float score = scorer.score();
        updateBottom(doc, score);
        for (int i = 0; i < comparators.length; i++) {
          comparators[i].setBottom(bottom.slot);
        }
      } else {
        // Startup transient: queue hasn't gathered numHits yet
        final int slot = totalHits - 1;
        // Copy hit into queue
        for (int i = 0; i < comparators.length; i++) {
          comparators[i].copy(slot, doc);
        }
        // Compute score only if it is competitive.
        final float score = scorer.score();
        add(slot, doc, score);
        if (queueFull) {
          for (int i = 0; i < comparators.length; i++) {
            comparators[i].setBottom(bottom.slot);
          }
        }
      }
    }
    @Override
    public void setScorer(Scorer scorer) throws IOException {
      this.scorer = scorer;
      super.setScorer(scorer);
    }
  }
  /*
   * Implements a TopFieldCollector over multiple SortField criteria, while
   * tracking document scores but no maxScore, and assumes out of orderness in
   * doc Ids collection.
   */
private final static class OutOfOrderMultiComparatorScoringNoMaxScoreCollector
extends MultiComparatorScoringNoMaxScoreCollector {
public OutOfOrderMultiComparatorScoringNoMaxScoreCollector(
FieldValueHitQueue queue, int numHits, boolean fillFields)
throws IOException {
super(queue, numHits, fillFields);
}
@Override
public void collect(int doc) throws IOException {
++totalHits;
if (queueFull) {
// Fastmatch: return if this hit is not competitive
for (int i = 0;; i++) {
final int c = reverseMul[i] * comparators[i].compareBottom(doc);
if (c < 0) {
// Definitely not competitive.
return;
} else if (c > 0) {
// Definitely competitive.
break;
} else if (i == comparators.length - 1) {
// This is the equals case.
if (doc + docBase > bottom.doc) {
// Definitely not competitive
return;
}
break;
}
}
// This hit is competitive - replace bottom element in queue & adjustTop
for (int i = 0; i < comparators.length; i++) {
comparators[i].copy(bottom.slot, doc);
}
// Compute score only if it is competitive.
final float score = scorer.score();
updateBottom(doc, score);
for (int i = 0; i < comparators.length; i++) {
comparators[i].setBottom(bottom.slot);
}
} else {
// Startup transient: queue hasn't gathered numHits yet
final int slot = totalHits - 1;
// Copy hit into queue
for (int i = 0; i < comparators.length; i++) {
comparators[i].copy(slot, doc);
}
// Compute score only if it is competitive.
final float score = scorer.score();
add(slot, doc, score);
if (queueFull) {
for (int i = 0; i < comparators.length; i++) {
comparators[i].setBottom(bottom.slot);
}
}
}
}
@Override
public void setScorer(Scorer scorer) throws IOException {
this.scorer = scorer;
super.setScorer(scorer);
}
@Override
public boolean acceptsDocsOutOfOrder() {
return true;
}
}
  // Shared empty result, used by topDocs() when there are no hits.
  private static final ScoreDoc[] EMPTY_SCOREDOCS = new ScoreDoc[0];
  // Whether the actual sort-field values are filled into the returned FieldDocs.
  private final boolean fillFields;
  /*
   * Stores the maximum score value encountered, needed for normalizing. If
   * document scores are not tracked, this value is initialized to NaN.
   */
  float maxScore = Float.NaN;
  // Number of hits to retain in the priority queue.
  final int numHits;
  // Least competitive entry of the queue; maintained by add()/updateBottom().
  FieldValueHitQueue.Entry bottom = null;
  // Becomes true once totalHits reaches numHits (set in add()).
  boolean queueFull;
  // Doc id base of the current reader, set in setNextReader().
  int docBase;
  // Declaring the constructor private prevents extending this class by anyone
  // else. Note that the class cannot be final since it's extended by the
  // internal versions. If someone will define a constructor with any other
  // visibility, then anyone will be able to extend the class, which is not what
  // we want.
  private TopFieldCollector(PriorityQueue<Entry> pq, int numHits, boolean fillFields) {
    super(pq);
    this.numHits = numHits;
    this.fillFields = fillFields;
  }
/**
 * Factory for {@link TopFieldCollector} instances, choosing the concrete
 * collector implementation that matches the given tracking options.
 *
 * <p><b>NOTE</b>: the returned collector pre-allocates a full array of
 * <code>numHits</code> entries up front.
 *
 * @param sort
 *          the sort criteria; must contain at least one SortField.
 * @param numHits
 *          how many top results to collect.
 * @param fillFields
 *          if true, the actual field (comparator) values are materialized on
 *          the returned results (FieldDoc).
 * @param trackDocScores
 *          if true, per-document scores are computed and set on the results;
 *          if false they are Float.NaN. Tracking scores incurs a score
 *          computation on each competitive result, so leave it off when the
 *          application does not need scores.
 * @param trackMaxScore
 *          if true, the query's maximum score is tracked and set on the
 *          resulting {@link TopDocs}; this implies per-result score
 *          computation. If false, {@link TopDocs#getMaxScore()} returns
 *          Float.NaN.
 * @param docsScoredInOrder
 *          whether the {@link Scorer} given to {@link #setScorer(Scorer)}
 *          delivers documents in increasing doc id order.
 * @return a {@link TopFieldCollector} which will sort the results by the
 *         given sort criteria.
 * @throws IOException
 */
public static TopFieldCollector create(Sort sort, int numHits,
    boolean fillFields, boolean trackDocScores, boolean trackMaxScore,
    boolean docsScoredInOrder)
    throws IOException {
  if (sort.fields.length == 0) {
    throw new IllegalArgumentException("Sort must contain at least one field");
  }

  FieldValueHitQueue queue = FieldValueHitQueue.create(sort.fields, numHits);
  final boolean oneComparator = queue.getComparators().length == 1;

  if (oneComparator) {
    // Single-comparator specializations.
    if (docsScoredInOrder) {
      if (trackMaxScore) {
        return new OneComparatorScoringMaxScoreCollector(queue, numHits, fillFields);
      }
      if (trackDocScores) {
        return new OneComparatorScoringNoMaxScoreCollector(queue, numHits, fillFields);
      }
      return new OneComparatorNonScoringCollector(queue, numHits, fillFields);
    }
    if (trackMaxScore) {
      return new OutOfOrderOneComparatorScoringMaxScoreCollector(queue, numHits, fillFields);
    }
    if (trackDocScores) {
      return new OutOfOrderOneComparatorScoringNoMaxScoreCollector(queue, numHits, fillFields);
    }
    return new OutOfOrderOneComparatorNonScoringCollector(queue, numHits, fillFields);
  }

  // Multiple comparators.
  if (docsScoredInOrder) {
    if (trackMaxScore) {
      return new MultiComparatorScoringMaxScoreCollector(queue, numHits, fillFields);
    }
    if (trackDocScores) {
      return new MultiComparatorScoringNoMaxScoreCollector(queue, numHits, fillFields);
    }
    return new MultiComparatorNonScoringCollector(queue, numHits, fillFields);
  }
  if (trackMaxScore) {
    return new OutOfOrderMultiComparatorScoringMaxScoreCollector(queue, numHits, fillFields);
  }
  if (trackDocScores) {
    return new OutOfOrderMultiComparatorScoringNoMaxScoreCollector(queue, numHits, fillFields);
  }
  return new OutOfOrderMultiComparatorNonScoringCollector(queue, numHits, fillFields);
}
// Inserts a hit into the queue, rebasing the segment-relative doc id with
// docBase, and refreshes the cached bottom entry and queue-full flag.
final void add(int slot, int doc, float score) {
  final Entry entry = new Entry(slot, docBase + doc, score);
  bottom = pq.add(entry);
  queueFull = (totalHits == numHits);
}
/*
 * Only the following callback methods need to be overridden since
 * topDocs(int, int) calls them to return the results.
 */
// Pops the top 'howMany' entries off the queue into 'results', in reverse
// (queue pops least-competitive first, so we fill from the back).
@Override
protected void populateResults(ScoreDoc[] results, int howMany) {
  if (!fillFields) {
    // Cheap path: doc id + score only, no comparator values.
    for (int slot = howMany - 1; slot >= 0; slot--) {
      final Entry entry = pq.pop();
      results[slot] = new FieldDoc(entry.doc, entry.score);
    }
    return;
  }
  // Cast once up front to avoid casting per entry.
  final FieldValueHitQueue fieldQueue = (FieldValueHitQueue) pq;
  for (int slot = howMany - 1; slot >= 0; slot--) {
    results[slot] = fieldQueue.fillFields(fieldQueue.pop());
  }
}
// Packages the collected hits into a TopFieldDocs, carrying the sort fields
// from the queue. When there are no results, reports an empty array and a
// NaN max score (relevant for the maxScore-tracking variants).
@Override
protected TopDocs newTopDocs(ScoreDoc[] results, int start) {
  if (results == null) {
    results = EMPTY_SCOREDOCS;
    maxScore = Float.NaN;
  }
  final SortField[] sortFields = ((FieldValueHitQueue) pq).getFields();
  return new TopFieldDocs(totalHits, results, sortFields, maxScore);
}
@Override
public boolean acceptsDocsOutOfOrder() {
// In-order base collector. The OutOfOrder* variants returned by create()
// presumably override this to return true — confirm in their definitions.
return false;
}
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/search/TopFieldCollector.java | Java | art | 32,865 |
package org.apache.lucene.search;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** Represents hits returned by {@link
* Searcher#search(Query,Filter,int,Sort)}.
*/
public class TopFieldDocs
extends TopDocs {
/** The fields which were used to sort results by. */
public SortField[] fields;
/** Creates one of these objects.
* @param totalHits Total number of hits for the query.
* @param scoreDocs The top hits for the query.
* @param fields The sort criteria used to find the top hits.
* @param maxScore The maximum score encountered.
*/
public TopFieldDocs (int totalHits, ScoreDoc[] scoreDocs, SortField[] fields, float maxScore) {
super (totalHits, scoreDocs, maxScore);
this.fields = fields;
}
} | zzh-simple-hr | Zlucene/src/java/org/apache/lucene/search/TopFieldDocs.java | Java | art | 1,508 |
<!doctype html public "-//w3c//dtd html 4.0 transitional//en">
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
</head>
<body>
Code to search indices.
<h2>Table Of Contents</h2>
<p>
<ol>
<li><a href="#search">Search Basics</a></li>
<li><a href="#query">The Query Classes</a></li>
<li><a href="#scoring">Changing the Scoring</a></li>
</ol>
</p>
<a name="search"></a>
<h2>Search</h2>
<p>
Search over indices.
Applications usually call {@link
org.apache.lucene.search.Searcher#search(Query,int)} or {@link
org.apache.lucene.search.Searcher#search(Query,Filter,int)}.
<!-- FILL IN MORE HERE -->
</p>
<a name="query"></a>
<h2>Query Classes</h2>
<h4>
<a href="TermQuery.html">TermQuery</a>
</h4>
<p>Of the various implementations of
<a href="Query.html">Query</a>, the
<a href="TermQuery.html">TermQuery</a>
is the easiest to understand and the most often used in applications. A <a
href="TermQuery.html">TermQuery</a> matches all the documents that contain the
specified
<a href="../index/Term.html">Term</a>,
which is a word that occurs in a certain
<a href="../document/Field.html">Field</a>.
Thus, a <a href="TermQuery.html">TermQuery</a> identifies and scores all
<a href="../document/Document.html">Document</a>s that have a <a
href="../document/Field.html">Field</a> with the specified string in it.
Constructing a <a
href="TermQuery.html">TermQuery</a>
is as simple as:
<pre>
TermQuery tq = new TermQuery(new Term("fieldName", "term"));
</pre>In this example, the <a href="Query.html">Query</a> identifies all <a
href="../document/Document.html">Document</a>s that have the <a
href="../document/Field.html">Field</a> named <tt>"fieldName"</tt>
containing the word <tt>"term"</tt>.
</p>
<h4>
<a href="BooleanQuery.html">BooleanQuery</a>
</h4>
<p>Things start to get interesting when one combines multiple
<a href="TermQuery.html">TermQuery</a> instances into a <a
href="BooleanQuery.html">BooleanQuery</a>.
A <a href="BooleanQuery.html">BooleanQuery</a> contains multiple
<a href="BooleanClause.html">BooleanClause</a>s,
where each clause contains a sub-query (<a href="Query.html">Query</a>
instance) and an operator (from <a
href="BooleanClause.Occur.html">BooleanClause.Occur</a>)
describing how that sub-query is combined with the other clauses:
<ol>
<li><p>SHOULD — Use this operator when a clause can occur in the result set, but is not required.
If a query is made up of all SHOULD clauses, then every document in the result
set matches at least one of these clauses.</p></li>
<li><p>MUST — Use this operator when a clause is required to occur in the result set. Every
document in the result set will match
all such clauses.</p></li>
<li><p>MUST NOT — Use this operator when a
clause must not occur in the result set. No
document in the result set will match
any such clauses.</p></li>
</ol>
Boolean queries are constructed by adding two or more
<a href="BooleanClause.html">BooleanClause</a>
instances. If too many clauses are added, a <a href="BooleanQuery.TooManyClauses.html">TooManyClauses</a>
exception will be thrown during searching. This most often occurs
when a <a href="Query.html">Query</a>
is rewritten into a <a href="BooleanQuery.html">BooleanQuery</a> with many
<a href="TermQuery.html">TermQuery</a> clauses,
for example by <a href="WildcardQuery.html">WildcardQuery</a>.
The default setting for the maximum number
of clauses is 1024, but this can be changed via the
static method <a href="BooleanQuery.html#setMaxClauseCount(int)">setMaxClauseCount</a>
in <a href="BooleanQuery.html">BooleanQuery</a>.
</p>
<h4>Phrases</h4>
<p>Another common search is to find documents containing certain phrases. This
is handled two different ways:
<ol>
<li>
<p><a href="PhraseQuery.html">PhraseQuery</a>
— Matches a sequence of
<a href="../index/Term.html">Terms</a>.
<a href="PhraseQuery.html">PhraseQuery</a> uses a slop factor to determine
how many positions may occur between any two terms in the phrase and still be considered a match.</p>
</li>
<li>
<p><a href="spans/SpanNearQuery.html">SpanNearQuery</a>
— Matches a sequence of other
<a href="spans/SpanQuery.html">SpanQuery</a>
instances. <a href="spans/SpanNearQuery.html">SpanNearQuery</a> allows for
much more
complicated phrase queries since it is constructed from other <a
href="spans/SpanQuery.html">SpanQuery</a>
instances, instead of only <a href="TermQuery.html">TermQuery</a>
instances.</p>
</li>
</ol>
</p>
<h4>
<a href="TermRangeQuery.html">TermRangeQuery</a>
</h4>
<p>The
<a href="TermRangeQuery.html">TermRangeQuery</a>
matches all documents that occur in the
exclusive range of a lower
<a href="../index/Term.html">Term</a>
and an upper
<a href="../index/Term.html">Term</a>,
according to {@link java.lang.String#compareTo(String)}. It is not intended
for numerical ranges, use <a href="NumericRangeQuery.html">NumericRangeQuery</a> instead.
For example, one could find all documents
that have terms beginning with the letters <tt>a</tt> through <tt>c</tt>. This type of <a
href="Query.html">Query</a> is frequently used to
find
documents that occur in a specific date range.
</p>
<h4>
<a href="NumericRangeQuery.html">NumericRangeQuery</a>
</h4>
<p>The
<a href="NumericRangeQuery.html">NumericRangeQuery</a>
matches all documents that occur in a numeric range.
For NumericRangeQuery to work, you must index the values
using a special <a href="../document/NumericField.html">
NumericField</a>.
</p>
<h4>
<a href="PrefixQuery.html">PrefixQuery</a>,
<a href="WildcardQuery.html">WildcardQuery</a>
</h4>
<p>While the
<a href="PrefixQuery.html">PrefixQuery</a>
has a different implementation, it is essentially a special case of the
<a href="WildcardQuery.html">WildcardQuery</a>.
The <a href="PrefixQuery.html">PrefixQuery</a> allows an application
to identify all documents with terms that begin with a certain string. The <a
href="WildcardQuery.html">WildcardQuery</a> generalizes this by allowing
for the use of <tt>*</tt> (matches 0 or more characters) and <tt>?</tt> (matches exactly one character) wildcards.
Note that the <a href="WildcardQuery.html">WildcardQuery</a> can be quite slow. Also
note that
<a href="WildcardQuery.html">WildcardQuery</a> should
not start with <tt>*</tt> or <tt>?</tt>, as these are extremely slow.
To remove this protection and allow a wildcard at the beginning of a term, see method
<a href="../queryParser/QueryParser.html#setAllowLeadingWildcard(boolean)">setAllowLeadingWildcard</a> in
<a href="../queryParser/QueryParser.html">QueryParser</a>.
</p>
<h4>
<a href="FuzzyQuery.html">FuzzyQuery</a>
</h4>
<p>A
<a href="FuzzyQuery.html">FuzzyQuery</a>
matches documents that contain terms similar to the specified term. Similarity is
determined using
<a href="http://en.wikipedia.org/wiki/Levenshtein">Levenshtein (edit) distance</a>.
This type of query can be useful when accounting for spelling variations in the collection.
</p>
<a name="changingSimilarity"></a>
<h2>Changing Similarity</h2>
<p>Chances are <a href="DefaultSimilarity.html">DefaultSimilarity</a> is sufficient for all
your searching needs.
However, in some applications it may be necessary to customize your <a
href="Similarity.html">Similarity</a> implementation. For instance, some
applications do not need to
distinguish between shorter and longer documents (see <a
href="http://www.gossamer-threads.com/lists/lucene/java-user/38967#38967">a "fair" similarity</a>).</p>
<p>To change <a href="Similarity.html">Similarity</a>, one must do so for both indexing and
searching, and the changes must happen before
either of these actions take place. Although in theory there is nothing stopping you from changing mid-stream, it
just isn't well-defined what is going to happen.
</p>
<p>To make this change, implement your own <a href="Similarity.html">Similarity</a> (likely
you'll want to simply subclass
<a href="DefaultSimilarity.html">DefaultSimilarity</a>) and then use the new
class by calling
<a href="../index/IndexWriter.html#setSimilarity(org.apache.lucene.search.Similarity)">IndexWriter.setSimilarity</a>
before indexing and
<a href="Searcher.html#setSimilarity(org.apache.lucene.search.Similarity)">Searcher.setSimilarity</a>
before searching.
</p>
<p>
If you are interested in use cases for changing your similarity, see the Lucene users's mailing list at <a
href="http://www.nabble.com/Overriding-Similarity-tf2128934.html">Overriding Similarity</a>.
In summary, here are a few use cases:
<ol>
<li><p><a href="api/org/apache/lucene/misc/SweetSpotSimilarity.html">SweetSpotSimilarity</a> — <a
href="api/org/apache/lucene/misc/SweetSpotSimilarity.html">SweetSpotSimilarity</a> gives small increases
as the frequency increases a small amount
and then greater increases when you hit the "sweet spot", i.e. where you think the frequency of terms is
more significant.</p></li>
<li><p>Overriding tf — In some applications, it doesn't matter what the score of a document is as long as a
matching term occurs. In these
cases people have overridden Similarity to return 1 from the tf() method.</p></li>
<li><p>Changing Length Normalization — By overriding <a
href="Similarity.html#lengthNorm(java.lang.String,%20int)">lengthNorm</a>,
it is possible to discount how the length of a field contributes
to a score. In <a href="DefaultSimilarity.html">DefaultSimilarity</a>,
lengthNorm = 1 / (numTerms in field)^0.5, but if one changes this to be
1 / (numTerms in field), all fields will be treated
<a href="http://www.gossamer-threads.com/lists/lucene/java-user/38967#38967">"fairly"</a>.</p></li>
</ol>
In general, Chris Hostetter sums it up best in saying (from <a
href="http://www.gossamer-threads.com/lists/lucene/java-user/39125#39125">the Lucene users's mailing list</a>):
<blockquote>[One would override the Similarity in] ... any situation where you know more about your data than just
that
it's "text" is a situation where it *might* make sense to override your
Similarity method.</blockquote>
</p>
<a name="scoring"></a>
<h2>Changing Scoring — Expert Level</h2>
<p>Changing scoring is an expert level task, so tread carefully and be prepared to share your code if
you want help.
</p>
<p>With the warning out of the way, it is possible to change a lot more than just the Similarity
when it comes to scoring in Lucene. Lucene's scoring is a complex mechanism that is grounded by
<span >three main classes</span>:
<ol>
<li>
<a href="Query.html">Query</a> — The abstract object representation of the
user's information need.</li>
<li>
<a href="Weight.html">Weight</a> — The internal interface representation of
the user's Query, so that Query objects may be reused.</li>
<li>
<a href="Scorer.html">Scorer</a> — An abstract class containing common
functionality for scoring. Provides both scoring and explanation capabilities.</li>
</ol>
Details on each of these classes, and their children, can be found in the subsections below.
</p>
<h4>The Query Class</h4>
<p>In some sense, the
<a href="Query.html">Query</a>
class is where it all begins. Without a Query, there would be
nothing to score. Furthermore, the Query class is the catalyst for the other scoring classes as it
is often responsible
for creating them or coordinating the functionality between them. The
<a href="Query.html">Query</a> class has several methods that are important for
derived classes:
<ol>
<li>createWeight(Searcher searcher) — A
<a href="Weight.html">Weight</a> is the internal representation of the
Query, so each Query implementation must
provide an implementation of Weight. See the subsection on <a
href="#The Weight Interface">The Weight Interface</a> below for details on implementing the Weight
interface.</li>
<li>rewrite(IndexReader reader) — Rewrites queries into primitive queries. Primitive queries are:
<a href="TermQuery.html">TermQuery</a>,
<a href="BooleanQuery.html">BooleanQuery</a>, <span
>and other queries that implement Query.html#createWeight(Searcher searcher)</span></li>
</ol>
</p>
<h4>The Weight Interface</h4>
<p>The
<a href="Weight.html">Weight</a>
interface provides an internal representation of the Query so that it can be reused. Any
<a href="Searcher.html">Searcher</a>
dependent state should be stored in the Weight implementation,
not in the Query class. The interface defines six methods that must be implemented:
<ol>
<li>
<a href="Weight.html#getQuery()">Weight#getQuery()</a> — Pointer to the
Query that this Weight represents.</li>
<li>
<a href="Weight.html#getValue()">Weight#getValue()</a> — The weight for
this Query. For example, the TermQuery.TermWeight value is
equal to the idf^2 * boost * queryNorm <!-- DOUBLE CHECK THIS --></li>
<li>
<a href="Weight.html#sumOfSquaredWeights()">
Weight#sumOfSquaredWeights()</a> — The sum of squared weights. For TermQuery, this is (idf *
boost)^2</li>
<li>
<a href="Weight.html#normalize(float)">
Weight#normalize(float)</a> — Determine the query normalization factor. The query normalization may
allow for comparing scores between queries.</li>
<li>
<a href="Weight.html#scorer(org.apache.lucene.index.IndexReader, boolean, boolean)">
Weight#scorer(IndexReader, boolean, boolean)</a> — Construct a new
<a href="Scorer.html">Scorer</a>
for this Weight. See
<a href="#The Scorer Class">The Scorer Class</a>
below for help defining a Scorer. As the name implies, the
Scorer is responsible for doing the actual scoring of documents given the Query.
</li>
<li>
<a href="Weight.html#explain(org.apache.lucene.search.Searcher, org.apache.lucene.index.IndexReader, int)">
Weight#explain(Searcher, IndexReader, int)</a> — Provide a means for explaining why a given document was
scored
the way it was.</li>
</ol>
</p>
<h4>The Scorer Class</h4>
<p>The
<a href="Scorer.html">Scorer</a>
abstract class provides common scoring functionality for all Scorer implementations and
is the heart of the Lucene scoring process. The Scorer defines the following abstract (they are not
yet abstract, but will be in Lucene 3.0 and should be considered as such now) methods which
must be implemented (some of them inherited from <a href="DocIdSetIterator.html">DocIdSetIterator</a> ):
<ol>
<li>
<a href="DocIdSetIterator.html#nextDoc()">DocIdSetIterator#nextDoc()</a> — Advances to the next
document that matches this Query, returning true if and only
if there is another document that matches.</li>
<li>
<a href="DocIdSetIterator.html#docID()">DocIdSetIterator#docID()</a> — Returns the id of the
<a href="../document/Document.html">Document</a>
that contains the match. It is not valid until nextDoc() has been called at least once.
</li>
<li>
<a href="Scorer.html#score(org.apache.lucene.search.Collector)">Scorer#score(Collector)</a> —
Scores and collects all matching documents using the given Collector.
</li>
<li>
<a href="Scorer.html#score()">Scorer#score()</a> — Return the score of the
current document. This value can be determined in any
appropriate way for an application. For instance, the
<a href="http://svn.apache.org/viewvc/lucene/java/trunk/src/java/org/apache/lucene/search/TermScorer.java?view=log">TermScorer</a>
returns the tf * Weight.getValue() * fieldNorm.
</li>
<li>
<a href="DocIdSetIterator.html#advance(int)">DocIdSetIterator#advance(int)</a> — Skip ahead in
the document matches to the document whose id is greater than
or equal to the passed in value. In many instances, advance can be
implemented more efficiently than simply looping through all the matching documents until
the target document is identified.</li>
</ol>
</p>
<h4>Why would I want to add my own Query?</h4>
<p>In a nutshell, you want to add your own custom Query implementation when you think that Lucene's
aren't appropriate for the
task that you want to do. You might be doing some cutting edge research or you need more information
back
out of Lucene (similar to Doug adding SpanQuery functionality).</p>
</body>
</html>
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/search/package.html | HTML | art | 19,203 |
package org.apache.lucene.search;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.io.StringReader;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.index.TermFreqVector;
/**
 * A {@link TermFreqVector} built from the terms of a query rather than a
 * document: holds each unique term (in ascending order) together with the
 * number of times it occurred in the query.
 */
public class QueryTermVector implements TermFreqVector {
  // Unique terms in ascending binary order — required by the
  // Arrays.binarySearch in indexOf().
  private String [] terms = new String[0];
  // termFreqs[i] is the number of occurrences of terms[i] in the query.
  private int [] termFreqs = new int[0];

  /** Query term vectors are not associated with any field. */
  public String getField() { return null; }

  /**
   *
   * @param queryTerms The original list of terms from the query, can contain duplicates
   */
  public QueryTermVector(String [] queryTerms) {
    processTerms(queryTerms);
  }

  /**
   * Tokenizes queryString with the given analyzer and builds the vector from
   * the produced tokens. If the analyzer is null or yields no stream, the
   * vector is left empty.
   *
   * @param queryString the raw query text to analyze
   * @param analyzer    the analyzer used to tokenize it (may be null)
   */
  public QueryTermVector(String queryString, Analyzer analyzer) {
    if (analyzer != null)
    {
      TokenStream stream = analyzer.tokenStream("", new StringReader(queryString));
      if (stream != null)
      {
        List<String> terms = new ArrayList<String>();
        try {
          stream.reset();
          TermAttribute termAtt = stream.addAttribute(TermAttribute.class);
          while (stream.incrementToken()) {
            terms.add(termAtt.term());
          }
          processTerms(terms.toArray(new String[terms.size()]));
        } catch (IOException e) {
          // Best effort: an analysis failure leaves the vector empty, which
          // matches the historical behavior of this class.
        } finally {
          // Always release analyzer resources (the stream was previously
          // never closed).
          try {
            stream.close();
          } catch (IOException e) {
            // ignore failure to close
          }
        }
      }
    }
  }

  // Sorts, de-duplicates and counts the given terms into the parallel
  // terms/termFreqs arrays.
  private void processTerms(String[] queryTerms) {
    if (queryTerms != null) {
      // Sort a private copy so the caller's array is not reordered as a
      // side effect (the original implementation mutated the argument).
      queryTerms = queryTerms.clone();
      Arrays.sort(queryTerms);
      Map<String,Integer> tmpSet = new HashMap<String,Integer>(queryTerms.length);
      //filter out duplicates
      List<String> tmpList = new ArrayList<String>(queryTerms.length);
      List<Integer> tmpFreqs = new ArrayList<Integer>(queryTerms.length);
      int j = 0;
      for (int i = 0; i < queryTerms.length; i++) {
        String term = queryTerms[i];
        Integer position = tmpSet.get(term);
        if (position == null) {
          // First sighting: assign the next slot and a frequency of 1.
          tmpSet.put(term, Integer.valueOf(j++));
          tmpList.add(term);
          tmpFreqs.add(Integer.valueOf(1));
        }
        else {
          // Duplicate: bump the stored frequency for its slot.
          Integer integer = tmpFreqs.get(position.intValue());
          tmpFreqs.set(position.intValue(), Integer.valueOf(integer.intValue() + 1));
        }
      }
      terms = tmpList.toArray(terms);
      // Unbox the accumulated frequencies into the parallel int[].
      termFreqs = new int[tmpFreqs.size()];
      int i = 0;
      for (final Integer integer : tmpFreqs) {
        termFreqs[i++] = integer.intValue();
      }
    }
  }

  @Override
  public final String toString() {
    StringBuilder sb = new StringBuilder();
    sb.append('{');
    for (int i=0; i<terms.length; i++) {
      if (i>0) sb.append(", ");
      sb.append(terms[i]).append('/').append(termFreqs[i]);
    }
    sb.append('}');
    return sb.toString();
  }

  /** Number of unique terms in the query. */
  public int size() {
    return terms.length;
  }

  /** The unique query terms, in ascending order. */
  public String[] getTerms() {
    return terms;
  }

  /** Frequencies parallel to {@link #getTerms()}. */
  public int[] getTermFrequencies() {
    return termFreqs;
  }

  /** Returns the index of term in this vector, or -1 if absent. */
  public int indexOf(String term) {
    int res = Arrays.binarySearch(terms, term);
    return res >= 0 ? res : -1;
  }

  /**
   * Looks up the vector index of each of the len terms beginning at
   * terms[start].
   *
   * @return an array of len indexes; -1 for terms not in the vector
   */
  public int[] indexesOf(String[] terms, int start, int len) {
    int res[] = new int[len];
    for (int i=0; i < len; i++) {
      // Fix: honor the start offset. It was previously ignored, which
      // produced wrong results whenever start != 0.
      res[i] = indexOf(terms[start + i]);
    }
    return res;
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/search/QueryTermVector.java | Java | art | 4,449 |
package org.apache.lucene.search;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Collection;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.queryParser.QueryParser; // for javadoc
/**
* An abstract {@link Query} that matches documents
* containing a subset of terms provided by a {@link
* FilteredTermEnum} enumeration.
*
* <p>This query cannot be used directly; you must subclass
* it and define {@link #getEnum} to provide a {@link
* FilteredTermEnum} that iterates through the terms to be
* matched.
*
* <p><b>NOTE</b>: if {@link #setRewriteMethod} is either
* {@link #CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE} or {@link
* #SCORING_BOOLEAN_QUERY_REWRITE}, you may encounter a
* {@link BooleanQuery.TooManyClauses} exception during
* searching, which happens when the number of terms to be
* searched exceeds {@link
* BooleanQuery#getMaxClauseCount()}. Setting {@link
* #setRewriteMethod} to {@link #CONSTANT_SCORE_FILTER_REWRITE}
* prevents this.
*
* <p>The recommended rewrite method is {@link
* #CONSTANT_SCORE_AUTO_REWRITE_DEFAULT}: it doesn't spend CPU
* computing unhelpful scores, and it tries to pick the most
* performant rewrite method given the query.
*
* Note that {@link QueryParser} produces
* MultiTermQueries using {@link
* #CONSTANT_SCORE_AUTO_REWRITE_DEFAULT} by default.
*/
public abstract class MultiTermQuery extends Query {
// Strategy used to turn this multi-term query into a primitive query;
// defaults to the constant-score auto mode.
protected RewriteMethod rewriteMethod = CONSTANT_SCORE_AUTO_REWRITE_DEFAULT;
// Number of terms visited by the most recent rewrite — presumably updated
// via incTotalNumberOfTerms() (called from the rewrite implementations
// below); transient because it is per-use statistics, not query state.
transient int numberOfTerms = 0;
/** Abstract class that defines how the query is rewritten. */
public static abstract class RewriteMethod implements Serializable {
public abstract Query rewrite(IndexReader reader, MultiTermQuery query) throws IOException;
}
private static final class ConstantScoreFilterRewrite extends RewriteMethod implements Serializable {
  @Override
  public Query rewrite(IndexReader reader, MultiTermQuery query) {
    // Wrap the multi-term query in a filter; every matching document is
    // given a constant score equal to the original query's boost.
    final Query rewritten =
        new ConstantScoreQuery(new MultiTermQueryWrapperFilter<MultiTermQuery>(query));
    rewritten.setBoost(query.getBoost());
    return rewritten;
  }

  // Preserve the singleton property across serialization round-trips.
  protected Object readResolve() {
    return CONSTANT_SCORE_FILTER_REWRITE;
  }
}

/** A rewrite method that builds a private Filter by visiting each term in
 * sequence and marking all of its documents. Every matching document is
 * assigned a constant score equal to the query's boost.
 *
 * <p>Faster than the BooleanQuery rewrite methods when the number of matched
 * terms or matched documents is non-trivial, and it can never throw a
 * {@link BooleanQuery.TooManyClauses} exception.
 *
 * @see #setRewriteMethod */
public final static RewriteMethod CONSTANT_SCORE_FILTER_REWRITE = new ConstantScoreFilterRewrite();
private static class ScoringBooleanQueryRewrite extends RewriteMethod implements Serializable {
  @Override
  public Query rewrite(IndexReader reader, MultiTermQuery query) throws IOException {
    final FilteredTermEnum termEnum = query.getEnum(reader);
    final BooleanQuery rewritten = new BooleanQuery(true);
    int visited = 0;
    try {
      for (;;) {
        final Term term = termEnum.term();
        if (term != null) {
          // One SHOULD clause per matching term, boosted by the enum's
          // similarity measure (difference) for that term.
          final TermQuery clause = new TermQuery(term);
          clause.setBoost(query.getBoost() * termEnum.difference());
          rewritten.add(clause, BooleanClause.Occur.SHOULD);
          visited++;
        }
        if (!termEnum.next()) {
          break;
        }
      }
    } finally {
      termEnum.close();
    }
    query.incTotalNumberOfTerms(visited);
    return rewritten;
  }

  // Preserve the singleton property across serialization round-trips.
  protected Object readResolve() {
    return SCORING_BOOLEAN_QUERY_REWRITE;
  }
}

/** A rewrite method that translates each matching term into a
 * {@link BooleanClause.Occur#SHOULD} clause of a BooleanQuery and keeps the
 * scores as computed by that query. Such scores are typically meaningless
 * to the user and non-trivial to compute, so it is almost always better to
 * use {@link #CONSTANT_SCORE_AUTO_REWRITE_DEFAULT} instead.
 *
 * <p><b>NOTE</b>: throws {@link BooleanQuery.TooManyClauses} if the number
 * of terms exceeds {@link BooleanQuery#getMaxClauseCount}.
 *
 * @see #setRewriteMethod */
public final static RewriteMethod SCORING_BOOLEAN_QUERY_REWRITE = new ScoringBooleanQueryRewrite();
private static class ConstantScoreBooleanQueryRewrite extends ScoringBooleanQueryRewrite implements Serializable {
  @Override
  public Query rewrite(IndexReader reader, MultiTermQuery query) throws IOException {
    // Delegate to the scoring rewrite, then strip the scores off by
    // wrapping the result in a constant-score filter carrying the boost.
    final Query scoring = super.rewrite(reader, query);
    final Query constant = new ConstantScoreQuery(new QueryWrapperFilter(scoring));
    constant.setBoost(query.getBoost());
    return constant;
  }

  // Preserve the singleton property across serialization round-trips.
  @Override
  protected Object readResolve() {
    return CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE;
  }
}

/** Like {@link #SCORING_BOOLEAN_QUERY_REWRITE} except that scores are not
 * computed: each matching document receives a constant score equal to the
 * query's boost.
 *
 * <p><b>NOTE</b>: throws {@link BooleanQuery.TooManyClauses} if the number
 * of terms exceeds {@link BooleanQuery#getMaxClauseCount}.
 *
 * @see #setRewriteMethod */
public final static RewriteMethod CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE = new ConstantScoreBooleanQueryRewrite();
/** A rewrite method that tries to pick the best
* constant-score rewrite method based on term and
* document counts from the query. If both the number of
* terms and documents is small enough, then {@link
* #CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE} is used.
* Otherwise, {@link #CONSTANT_SCORE_FILTER_REWRITE} is
* used.
*/
public static class ConstantScoreAutoRewrite extends RewriteMethod implements Serializable {
// Defaults derived from rough tests with a 20.0 million
// doc Wikipedia index. With more than 350 terms in the
// query, the filter method is fastest:
public static int DEFAULT_TERM_COUNT_CUTOFF = 350;
// If the query will hit more than 1 in 1000 of the docs
// in the index (0.1%), the filter method is fastest:
public static double DEFAULT_DOC_COUNT_PERCENT = 0.1;
// Per-instance cutoffs, initialized from the mutable defaults above and
// adjustable through the setters below.
private int termCountCutoff = DEFAULT_TERM_COUNT_CUTOFF;
private double docCountPercent = DEFAULT_DOC_COUNT_PERCENT;
/** If the number of terms in this query is equal to or
 * larger than this setting then {@link
 * #CONSTANT_SCORE_FILTER_REWRITE} is used. */
public void setTermCountCutoff(int count) {
termCountCutoff = count;
}
/** @see #setTermCountCutoff */
public int getTermCountCutoff() {
return termCountCutoff;
}
/** If the number of documents to be visited in the
 * postings exceeds this specified percentage of the
 * maxDoc() for the index, then {@link
 * #CONSTANT_SCORE_FILTER_REWRITE} is used.
 * @param percent 0.0 to 100.0 */
public void setDocCountPercent(double percent) {
docCountPercent = percent;
}
/** @see #setDocCountPercent */
public double getDocCountPercent() {
return docCountPercent;
}
@Override
public Query rewrite(IndexReader reader, MultiTermQuery query) throws IOException {
// Get the enum and start visiting terms. If we
// exhaust the enum before hitting either of the
// cutoffs, we use ConstantBooleanQueryRewrite; else,
// ConstantFilterRewrite:
final Collection<Term> pendingTerms = new ArrayList<Term>();
final int docCountCutoff = (int) ((docCountPercent / 100.) * reader.maxDoc());
final int termCountLimit = Math.min(BooleanQuery.getMaxClauseCount(), termCountCutoff);
int docVisitCount = 0;
FilteredTermEnum enumerator = query.getEnum(reader);
try {
while(true) {
Term t = enumerator.term();
if (t != null) {
pendingTerms.add(t);
// Loading the TermInfo from the terms dict here
// should not be costly, because 1) the
// query/filter will load the TermInfo when it
// runs, and 2) the terms dict has a cache:
docVisitCount += reader.docFreq(t);
}
if (pendingTerms.size() >= termCountLimit || docVisitCount >= docCountCutoff) {
// Too many terms -- make a filter.
Query result = new ConstantScoreQuery(new MultiTermQueryWrapperFilter<MultiTermQuery>(query));
result.setBoost(query.getBoost());
return result;
} else if (!enumerator.next()) {
// Enumeration is done, and we hit a small
// enough number of terms & docs -- just make a
// BooleanQuery, now
BooleanQuery bq = new BooleanQuery(true);
for (final Term term: pendingTerms) {
TermQuery tq = new TermQuery(term);
bq.add(tq, BooleanClause.Occur.SHOULD);
}
// Strip scores
Query result = new ConstantScoreQuery(new QueryWrapperFilter(bq));
result.setBoost(query.getBoost());
query.incTotalNumberOfTerms(pendingTerms.size());
return result;
}
}
} finally {
enumerator.close();
}
}
@Override
public int hashCode() {
final int prime = 1279;
return (int) (prime * termCountCutoff + Double.doubleToLongBits(docCountPercent));
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (getClass() != obj.getClass())
return false;
ConstantScoreAutoRewrite other = (ConstantScoreAutoRewrite) obj;
if (other.termCountCutoff != termCountCutoff) {
return false;
}
if (Double.doubleToLongBits(other.docCountPercent) != Double.doubleToLongBits(docCountPercent)) {
return false;
}
return true;
}
}
/** Read-only default instance of {@link
 *  ConstantScoreAutoRewrite}, with {@link
 *  ConstantScoreAutoRewrite#setTermCountCutoff} set to
 *  {@link
 *  ConstantScoreAutoRewrite#DEFAULT_TERM_COUNT_CUTOFF}
 *  and {@link
 *  ConstantScoreAutoRewrite#setDocCountPercent} set to
 *  {@link
 *  ConstantScoreAutoRewrite#DEFAULT_DOC_COUNT_PERCENT}.
 *  Note that you cannot alter the configuration of this
 *  instance; you'll need to create a private instance
 *  instead. */
public final static RewriteMethod CONSTANT_SCORE_AUTO_REWRITE_DEFAULT = new ConstantScoreAutoRewrite() {
  // Both setters are disabled so this shared default stays effectively immutable.
  @Override
  public void setTermCountCutoff(int count) {
    throw new UnsupportedOperationException("Please create a private instance");
  }

  @Override
  public void setDocCountPercent(double percent) {
    throw new UnsupportedOperationException("Please create a private instance");
  }

  // Make sure we are still a singleton even after deserializing
  protected Object readResolve() {
    return CONSTANT_SCORE_AUTO_REWRITE_DEFAULT;
  }
};
/**
 * Constructs a query matching terms that cannot be represented with a single
 * Term.
 * <p>The actual rewrite behavior is controlled by the rewrite method
 * configured via {@link #setRewriteMethod}.
 */
public MultiTermQuery() {
}
/** Construct the enumeration to be used, expanding the pattern term.
 *  The returned enum is expected to already be positioned on its first
 *  matching term (the rewrite loop reads {@code term()} before calling
 *  {@code next()}). */
protected abstract FilteredTermEnum getEnum(IndexReader reader)
    throws IOException;
/**
 * Expert: Return the number of unique terms visited during execution of the query.
 * If there are many of them, you may consider using another query type
 * or optimize your total term count in index.
 * <p>This method is not thread safe, be sure to only call it when no query is running!
 * If you re-use the same query instance for another
 * search, be sure to first reset the term counter
 * with {@link #clearTotalNumberOfTerms}.
 * <p>On optimized indexes / no MultiReaders, you get the correct number of
 * unique terms for the whole index. Use this number to compare different queries.
 * For non-optimized indexes this number can also be achieved in
 * non-constant-score mode. In constant-score mode you get the total number of
 * terms visited for all segments / sub-readers.
 * @return the accumulated term count since the last reset
 * @see #clearTotalNumberOfTerms
 */
public int getTotalNumberOfTerms() {
  return numberOfTerms;
}
/**
 * Expert: Resets the counting of unique terms.
 * Do this before executing the query/filter.
 * @see #getTotalNumberOfTerms
 */
public void clearTotalNumberOfTerms() {
  numberOfTerms = 0;
}
/** Adds {@code inc} to the visited-term counter; invoked by rewrite
 *  methods after they have expanded terms.
 *  @see #getTotalNumberOfTerms */
protected void incTotalNumberOfTerms(int inc) {
  numberOfTerms += inc;
}
/** Delegates rewriting to the {@link RewriteMethod} configured via
 *  {@link #setRewriteMethod}. */
@Override
public Query rewrite(IndexReader reader) throws IOException {
  return rewriteMethod.rewrite(reader, this);
}
/**
 * @return the currently configured rewrite method
 * @see #setRewriteMethod
 */
public RewriteMethod getRewriteMethod() {
  return rewriteMethod;
}
/**
 * Sets the rewrite method to be used when executing the
 * query. You can use one of the four core methods, or
 * implement your own subclass of {@link RewriteMethod}. */
public void setRewriteMethod(RewriteMethod method) {
  rewriteMethod = method;
}
/** Hash is derived from the boost and the rewrite method, mirroring
 *  {@link #equals}. */
@Override
public int hashCode() {
  final int prime = 31;
  int h = 1;
  h = prime * h + Float.floatToIntBits(getBoost());
  h = prime * h + rewriteMethod.hashCode();
  return h;
}
/** Two MultiTermQueries are equal when they are the same concrete class
 *  and share both boost (compared bitwise) and rewrite method. */
@Override
public boolean equals(Object obj) {
  if (this == obj) {
    return true;
  }
  if (obj == null || getClass() != obj.getClass()) {
    return false;
  }
  final MultiTermQuery that = (MultiTermQuery) obj;
  return Float.floatToIntBits(getBoost()) == Float.floatToIntBits(that.getBoost())
      && rewriteMethod.equals(that.rewriteMethod);
}
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/search/MultiTermQuery.java | Java | art | 15,027 |
package org.apache.lucene.search;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.Term;
import org.apache.lucene.document.FieldSelector;
/**
* An abstract base class for search implementations. Implements the main search
* methods.
*
* <p>
* Note that you can only access hits from a Searcher as long as it is not yet
* closed, otherwise an IOException will be thrown.
*/
public abstract class Searcher implements Searchable {

  /** Search implementation with arbitrary sorting.  Finds
   * the top <code>n</code> hits for <code>query</code>, applying
   * <code>filter</code> if non-null, and sorting the hits by the criteria in
   * <code>sort</code>.
   *
   * <p>NOTE: this does not compute scores by default; use
   * {@link IndexSearcher#setDefaultFieldSortScoring} to
   * enable scoring.
   *
   * @throws BooleanQuery.TooManyClauses
   */
  public TopFieldDocs search(Query query, Filter filter, int n,
                             Sort sort) throws IOException {
    return search(createWeight(query), filter, n, sort);
  }

  /** Lower-level search API.
   *
   * <p>{@link Collector#collect(int)} is called for every matching document.
   *
   * <p>Applications should only use this if they need <i>all</i> of the
   * matching documents. The high-level search API ({@link
   * Searcher#search(Query, int)}) is usually more efficient, as it skips
   * non-high-scoring hits.
   * <p>Note: The <code>score</code> passed to this method is a raw score.
   * In other words, the score will not necessarily be a float whose value is
   * between 0 and 1.
   * @throws BooleanQuery.TooManyClauses
   */
  public void search(Query query, Collector results)
      throws IOException {
    search(createWeight(query), null, results);
  }

  /** Lower-level search API.
   *
   * <p>{@link Collector#collect(int)} is called for every matching
   * document.
   * <br>Collector-based access to remote indexes is discouraged.
   *
   * <p>Applications should only use this if they need <i>all</i> of the
   * matching documents. The high-level search API ({@link
   * Searcher#search(Query, Filter, int)}) is usually more efficient, as it skips
   * non-high-scoring hits.
   *
   * @param query to match documents
   * @param filter if non-null, used to permit documents to be collected.
   * @param results to receive hits
   * @throws BooleanQuery.TooManyClauses
   */
  public void search(Query query, Filter filter, Collector results)
      throws IOException {
    search(createWeight(query), filter, results);
  }

  /** Finds the top <code>n</code>
   * hits for <code>query</code>, applying <code>filter</code> if non-null.
   *
   * @throws BooleanQuery.TooManyClauses
   */
  public TopDocs search(Query query, Filter filter, int n)
      throws IOException {
    return search(createWeight(query), filter, n);
  }

  /** Finds the top <code>n</code>
   * hits for <code>query</code>.
   *
   * @throws BooleanQuery.TooManyClauses
   */
  public TopDocs search(Query query, int n)
      throws IOException {
    return search(query, null, n);
  }

  /** Returns an Explanation that describes how <code>doc</code> scored against
   * <code>query</code>.
   *
   * <p>This is intended to be used in developing Similarity implementations,
   * and, for good performance, should not be displayed with every hit.
   * Computing an explanation is as expensive as executing the query over the
   * entire index.
   */
  public Explanation explain(Query query, int doc) throws IOException {
    return explain(createWeight(query), doc);
  }

  /** The Similarity implementation used by this searcher. */
  private Similarity similarity = Similarity.getDefault();

  /** Expert: Set the Similarity implementation used by this Searcher.
   *
   * @see Similarity#setDefault(Similarity)
   */
  public void setSimilarity(Similarity similarity) {
    this.similarity = similarity;
  }

  /** Expert: Return the Similarity implementation used by this Searcher.
   *
   * <p>This defaults to the current value of {@link Similarity#getDefault()}.
   */
  public Similarity getSimilarity() {
    return this.similarity;
  }

  /**
   * creates a weight for <code>query</code>
   * @return new weight
   */
  protected Weight createWeight(Query query) throws IOException {
    return query.weight(this);
  }

  /** Returns the document frequency of each of <code>terms</code>,
   *  by delegating to {@link #docFreq} once per term.
   *  @param terms the terms to look up
   *  @return an array parallel to <code>terms</code> */
  public int[] docFreqs(Term[] terms) throws IOException {
    int[] result = new int[terms.length];
    for (int i = 0; i < terms.length; i++) {
      result[i] = docFreq(terms[i]);
    }
    return result;
  }

  /* Primitive operations that concrete searchers must supply; the
   * convenience search methods above are all expressed in terms of
   * these. */
  abstract public void search(Weight weight, Filter filter, Collector results) throws IOException;
  abstract public void close() throws IOException;
  abstract public int docFreq(Term term) throws IOException;
  abstract public int maxDoc() throws IOException;
  abstract public TopDocs search(Weight weight, Filter filter, int n) throws IOException;
  abstract public Document doc(int i) throws CorruptIndexException, IOException;
  abstract public Document doc(int docid, FieldSelector fieldSelector) throws CorruptIndexException, IOException;
  abstract public Query rewrite(Query query) throws IOException;
  abstract public Explanation explain(Weight weight, int doc) throws IOException;
  abstract public TopFieldDocs search(Weight weight, Filter filter, int n, Sort sort) throws IOException;
  /* End patch for GCJ bug #15411. */
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/search/Searcher.java | Java | art | 6,278 |
package org.apache.lucene.search;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.text.Collator;
import java.util.Locale;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.FieldCache.DoubleParser;
import org.apache.lucene.search.FieldCache.LongParser;
import org.apache.lucene.search.FieldCache.ByteParser;
import org.apache.lucene.search.FieldCache.FloatParser;
import org.apache.lucene.search.FieldCache.IntParser;
import org.apache.lucene.search.FieldCache.ShortParser;
import org.apache.lucene.search.FieldCache.StringIndex;
/**
* Expert: a FieldComparator compares hits so as to determine their
* sort order when collecting the top results with {@link
* TopFieldCollector}. The concrete public FieldComparator
* classes here correspond to the SortField types.
*
* <p>This API is designed to achieve high performance
* sorting, by exposing a tight interaction with {@link
* FieldValueHitQueue} as it visits hits. Whenever a hit is
* competitive, it's enrolled into a virtual slot, which is
* an int ranging from 0 to numHits-1. The {@link
* FieldComparator} is made aware of segment transitions
* during searching in case any internal state it's tracking
* needs to be recomputed during these transitions.</p>
*
* <p>A comparator must define these functions:</p>
*
* <ul>
*
* <li> {@link #compare} Compare a hit at 'slot a'
* with hit 'slot b'.
*
* <li> {@link #setBottom} This method is called by
* {@link FieldValueHitQueue} to notify the
* FieldComparator of the current weakest ("bottom")
* slot. Note that this slot may not hold the weakest
* value according to your comparator, in cases where
* your comparator is not the primary one (ie, is only
* used to break ties from the comparators before it).
*
* <li> {@link #compareBottom} Compare a new hit (docID)
* against the "weakest" (bottom) entry in the queue.
*
* <li> {@link #copy} Installs a new hit into the
* priority queue. The {@link FieldValueHitQueue}
* calls this method when a new hit is competitive.
*
* <li> {@link #setNextReader} Invoked
* when the search is switching to the next segment.
* You may need to update internal state of the
* comparator, for example retrieving new values from
* the {@link FieldCache}.
*
* <li> {@link #value} Return the sort value stored in
* the specified slot. This is only called at the end
* of the search, in order to populate {@link
* FieldDoc#fields} when returning the top results.
* </ul>
*
* <b>NOTE:</b> This API is experimental and might change in
* incompatible ways in the next release.
*/
public abstract class FieldComparator {
/**
 * Compare hit at slot1 with hit at slot2.
 *
 * @param slot1 first slot to compare
 * @param slot2 second slot to compare
 * @return any {@code N < 0} if slot2's value is sorted after
 * slot1, any {@code N > 0} if the slot2's value is sorted before
 * slot1 and 0 if they are equal
 */
public abstract int compare(int slot1, int slot2);
/**
 * Set the bottom slot, ie the "weakest" (sorted last)
 * entry in the queue.  When {@link #compareBottom} is
 * called, you should compare against this slot.  This
 * will always be called before {@link #compareBottom}.
 *
 * @param slot the currently weakest (sorted last) slot in the queue
 */
public abstract void setBottom(final int slot);
/**
 * Compare the bottom of the queue with doc.  This will
 * only be invoked after setBottom has been called.  This
 * should return the same result as {@link
 * #compare(int,int)} as if bottom were slot1 and the new
 * document were slot 2.
 *
 * <p>For a search that hits many results, this method
 * will be the hotspot (invoked by far the most
 * frequently).</p>
 *
 * @param doc that was hit
 * @return any {@code N < 0} if the doc's value is sorted after
 * the bottom entry (not competitive), any {@code N > 0} if the
 * doc's value is sorted before the bottom entry and 0 if
 * they are equal.
 */
public abstract int compareBottom(int doc) throws IOException;
/**
 * This method is called when a new hit is competitive.
 * You should copy any state associated with this document
 * that will be required for future comparisons, into the
 * specified slot.
 *
 * @param slot which slot to copy the hit to
 * @param doc docID relative to current reader
 */
public abstract void copy(int slot, int doc) throws IOException;
/**
 * Set a new Reader. All docs passed to subsequent calls
 * correspond to the current Reader.
 *
 * @param reader current reader
 * @param docBase docBase of this reader
 * @throws IOException if reading field values fails
 */
public abstract void setNextReader(IndexReader reader, int docBase) throws IOException;
/** Sets the Scorer to use in case a document's score is
 *  needed.
 *
 * @param scorer Scorer instance that you should use to
 * obtain the current hit's score, if necessary. */
public void setScorer(Scorer scorer) {
  // Empty implementation since most comparators don't need the score. This
  // can be overridden by those that need it.
}
/**
 * Return the actual value in the slot.
 *
 * @param slot the slot whose value to return
 * @return value in this slot upgraded to Comparable
 */
public abstract Comparable value(int slot);
/** Parses field's values as byte (using {@link
 *  FieldCache#getBytes} and sorts by ascending value */
public static final class ByteComparator extends FieldComparator {
  private final byte[] values;          // per-slot values of hits currently in the queue
  private byte[] currentReaderValues;   // per-doc values for the current segment
  private final String field;
  private ByteParser parser;
  private byte bottom;                  // cached value of the current bottom (weakest) slot

  ByteComparator(int numHits, String field, FieldCache.Parser parser) {
    values = new byte[numHits];
    this.field = field;
    this.parser = (ByteParser) parser;
  }

  @Override
  public int compare(int slot1, int slot2) {
    // Subtraction cannot overflow int because both operands are bytes.
    return values[slot1] - values[slot2];
  }

  @Override
  public int compareBottom(int doc) {
    return bottom - currentReaderValues[doc];
  }

  @Override
  public void copy(int slot, int doc) {
    values[slot] = currentReaderValues[doc];
  }

  @Override
  public void setNextReader(IndexReader reader, int docBase) throws IOException {
    // Refresh per-doc values from the FieldCache for the new segment.
    currentReaderValues = FieldCache.DEFAULT.getBytes(reader, field, parser);
  }

  @Override
  public void setBottom(final int bottom) {
    this.bottom = values[bottom];
  }

  @Override
  public Comparable value(int slot) {
    return Byte.valueOf(values[slot]);
  }
}
/** Sorts by ascending docID */
public static final class DocComparator extends FieldComparator {
  // Global (docBase-adjusted) docIDs stored per queue slot.
  private final int[] slotDocs;
  private int segmentBase;
  private int bottomDoc;

  DocComparator(int numHits) {
    slotDocs = new int[numHits];
  }

  @Override
  public int compare(int slot1, int slot2) {
    // docIDs are non-negative, so the difference cannot overflow.
    final int a = slotDocs[slot1];
    final int b = slotDocs[slot2];
    return a - b;
  }

  @Override
  public int compareBottom(int doc) {
    // Globalize the segment-relative doc before comparing; no overflow
    // since docIDs are non-negative.
    return bottomDoc - (doc + segmentBase);
  }

  @Override
  public void copy(int slot, int doc) {
    slotDocs[slot] = doc + segmentBase;
  }

  @Override
  public void setNextReader(IndexReader reader, int docBase) {
    // TODO: can we "map" our docIDs to the current
    // reader? saves having to then subtract on every
    // compare call
    this.segmentBase = docBase;
  }

  @Override
  public void setBottom(final int bottom) {
    bottomDoc = slotDocs[bottom];
  }

  @Override
  public Comparable value(int slot) {
    return Integer.valueOf(slotDocs[slot]);
  }
}
/** Parses field's values as double (using {@link
 *  FieldCache#getDoubles} and sorts by ascending value.
 *  <p>Note: comparisons use the primitive {@code >}/{@code <}
 *  operators, so NaN compares as equal to every value here. */
public static final class DoubleComparator extends FieldComparator {
  private final double[] values;          // per-slot values of hits currently in the queue
  private double[] currentReaderValues;   // per-doc values for the current segment
  private final String field;
  private DoubleParser parser;
  private double bottom;                  // cached value of the current bottom (weakest) slot

  DoubleComparator(int numHits, String field, FieldCache.Parser parser) {
    values = new double[numHits];
    this.field = field;
    this.parser = (DoubleParser) parser;
  }

  @Override
  public int compare(int slot1, int slot2) {
    final double v1 = values[slot1];
    final double v2 = values[slot2];
    if (v1 > v2) {
      return 1;
    } else if (v1 < v2) {
      return -1;
    } else {
      return 0;
    }
  }

  @Override
  public int compareBottom(int doc) {
    final double v2 = currentReaderValues[doc];
    if (bottom > v2) {
      return 1;
    } else if (bottom < v2) {
      return -1;
    } else {
      return 0;
    }
  }

  @Override
  public void copy(int slot, int doc) {
    values[slot] = currentReaderValues[doc];
  }

  @Override
  public void setNextReader(IndexReader reader, int docBase) throws IOException {
    // Refresh per-doc values from the FieldCache for the new segment.
    currentReaderValues = FieldCache.DEFAULT.getDoubles(reader, field, parser);
  }

  @Override
  public void setBottom(final int bottom) {
    this.bottom = values[bottom];
  }

  @Override
  public Comparable value(int slot) {
    return Double.valueOf(values[slot]);
  }
}
/** Parses field's values as float (using {@link
 *  FieldCache#getFloats} and sorts by ascending value.
 *  <p>Note: comparisons use the primitive {@code >}/{@code <}
 *  operators, so NaN compares as equal to every value here. */
public static final class FloatComparator extends FieldComparator {
  private final float[] values;          // per-slot values of hits currently in the queue
  private float[] currentReaderValues;   // per-doc values for the current segment
  private final String field;
  private FloatParser parser;
  private float bottom;                  // cached value of the current bottom (weakest) slot

  FloatComparator(int numHits, String field, FieldCache.Parser parser) {
    values = new float[numHits];
    this.field = field;
    this.parser = (FloatParser) parser;
  }

  @Override
  public int compare(int slot1, int slot2) {
    // TODO: are there sneaky non-branch ways to compute
    // sign of float?
    final float v1 = values[slot1];
    final float v2 = values[slot2];
    if (v1 > v2) {
      return 1;
    } else if (v1 < v2) {
      return -1;
    } else {
      return 0;
    }
  }

  @Override
  public int compareBottom(int doc) {
    // TODO: are there sneaky non-branch ways to compute
    // sign of float?
    final float v2 = currentReaderValues[doc];
    if (bottom > v2) {
      return 1;
    } else if (bottom < v2) {
      return -1;
    } else {
      return 0;
    }
  }

  @Override
  public void copy(int slot, int doc) {
    values[slot] = currentReaderValues[doc];
  }

  @Override
  public void setNextReader(IndexReader reader, int docBase) throws IOException {
    // Refresh per-doc values from the FieldCache for the new segment.
    currentReaderValues = FieldCache.DEFAULT.getFloats(reader, field, parser);
  }

  @Override
  public void setBottom(final int bottom) {
    this.bottom = values[bottom];
  }

  @Override
  public Comparable value(int slot) {
    return Float.valueOf(values[slot]);
  }
}
/** Parses field's values as int (using {@link
 *  FieldCache#getInts} and sorts by ascending value */
public static final class IntComparator extends FieldComparator {
  private final int[] values;          // per-slot values of hits currently in the queue
  private int[] currentReaderValues;   // per-doc values for the current segment
  private final String field;
  private IntParser parser;
  private int bottom;                  // Value of bottom of queue

  IntComparator(int numHits, String field, FieldCache.Parser parser) {
    values = new int[numHits];
    this.field = field;
    this.parser = (IntParser) parser;
  }

  @Override
  public int compare(int slot1, int slot2) {
    // TODO: there are sneaky non-branch ways to compute
    // -1/+1/0 sign
    // Cannot return values[slot1] - values[slot2] because that
    // may overflow
    final int v1 = values[slot1];
    final int v2 = values[slot2];
    if (v1 > v2) {
      return 1;
    } else if (v1 < v2) {
      return -1;
    } else {
      return 0;
    }
  }

  @Override
  public int compareBottom(int doc) {
    // TODO: there are sneaky non-branch ways to compute
    // -1/+1/0 sign
    // Cannot return bottom - values[slot2] because that
    // may overflow
    final int v2 = currentReaderValues[doc];
    if (bottom > v2) {
      return 1;
    } else if (bottom < v2) {
      return -1;
    } else {
      return 0;
    }
  }

  @Override
  public void copy(int slot, int doc) {
    values[slot] = currentReaderValues[doc];
  }

  @Override
  public void setNextReader(IndexReader reader, int docBase) throws IOException {
    // Refresh per-doc values from the FieldCache for the new segment.
    currentReaderValues = FieldCache.DEFAULT.getInts(reader, field, parser);
  }

  @Override
  public void setBottom(final int bottom) {
    this.bottom = values[bottom];
  }

  @Override
  public Comparable value(int slot) {
    return Integer.valueOf(values[slot]);
  }
}
/** Parses field's values as long (using {@link
 *  FieldCache#getLongs} and sorts by ascending value */
public static final class LongComparator extends FieldComparator {
  private final long[] values;          // per-slot values of hits currently in the queue
  private long[] currentReaderValues;   // per-doc values for the current segment
  private final String field;
  private LongParser parser;
  private long bottom;                  // cached value of the current bottom (weakest) slot

  LongComparator(int numHits, String field, FieldCache.Parser parser) {
    values = new long[numHits];
    this.field = field;
    this.parser = (LongParser) parser;
  }

  @Override
  public int compare(int slot1, int slot2) {
    // TODO: there are sneaky non-branch ways to compute
    // -1/+1/0 sign
    // Explicit comparisons avoid overflow that subtraction could cause.
    final long v1 = values[slot1];
    final long v2 = values[slot2];
    if (v1 > v2) {
      return 1;
    } else if (v1 < v2) {
      return -1;
    } else {
      return 0;
    }
  }

  @Override
  public int compareBottom(int doc) {
    // TODO: there are sneaky non-branch ways to compute
    // -1/+1/0 sign
    final long v2 = currentReaderValues[doc];
    if (bottom > v2) {
      return 1;
    } else if (bottom < v2) {
      return -1;
    } else {
      return 0;
    }
  }

  @Override
  public void copy(int slot, int doc) {
    values[slot] = currentReaderValues[doc];
  }

  @Override
  public void setNextReader(IndexReader reader, int docBase) throws IOException {
    // Refresh per-doc values from the FieldCache for the new segment.
    currentReaderValues = FieldCache.DEFAULT.getLongs(reader, field, parser);
  }

  @Override
  public void setBottom(final int bottom) {
    this.bottom = values[bottom];
  }

  @Override
  public Comparable value(int slot) {
    return Long.valueOf(values[slot]);
  }
}
/** Sorts by descending relevance.  NOTE: if you are
 *  sorting only by descending relevance and then
 *  secondarily by ascending docID, performance is faster
 *  using {@link TopScoreDocCollector} directly (which {@link
 *  IndexSearcher#search} uses when no {@link Sort} is
 *  specified). */
public static final class RelevanceComparator extends FieldComparator {
  private final float[] scores;   // per-slot scores of hits currently in the queue
  private float bottom;           // score of the current bottom (weakest) slot
  private Scorer scorer;          // supplies the current hit's score

  RelevanceComparator(int numHits) {
    scores = new float[numHits];
  }

  @Override
  public int compare(int slot1, int slot2) {
    // Sign is inverted relative to the other comparators so that
    // higher scores sort first (descending relevance).
    final float score1 = scores[slot1];
    final float score2 = scores[slot2];
    return score1 > score2 ? -1 : (score1 < score2 ? 1 : 0);
  }

  @Override
  public int compareBottom(int doc) throws IOException {
    // Same inverted sign as compare(): higher score is more competitive.
    float score = scorer.score();
    return bottom > score ? -1 : (bottom < score ? 1 : 0);
  }

  @Override
  public void copy(int slot, int doc) throws IOException {
    scores[slot] = scorer.score();
  }

  @Override
  public void setNextReader(IndexReader reader, int docBase) {
    // No per-segment state: scores come from the scorer set below.
  }

  @Override
  public void setBottom(final int bottom) {
    this.bottom = scores[bottom];
  }

  @Override
  public void setScorer(Scorer scorer) {
    // wrap with a ScoreCachingWrappingScorer so that successive calls to
    // score() will not incur score computation over and over again.
    this.scorer = new ScoreCachingWrappingScorer(scorer);
  }

  @Override
  public Comparable value(int slot) {
    return Float.valueOf(scores[slot]);
  }
}
/** Parses field's values as short (using {@link
 *  FieldCache#getShorts} and sorts by ascending value */
public static final class ShortComparator extends FieldComparator {
  private final short[] values;          // per-slot values of hits currently in the queue
  private short[] currentReaderValues;   // per-doc values for the current segment
  private final String field;
  private ShortParser parser;
  private short bottom;                  // cached value of the current bottom (weakest) slot

  ShortComparator(int numHits, String field, FieldCache.Parser parser) {
    values = new short[numHits];
    this.field = field;
    this.parser = (ShortParser) parser;
  }

  @Override
  public int compare(int slot1, int slot2) {
    // Subtraction cannot overflow int because both operands are shorts.
    return values[slot1] - values[slot2];
  }

  @Override
  public int compareBottom(int doc) {
    return bottom - currentReaderValues[doc];
  }

  @Override
  public void copy(int slot, int doc) {
    values[slot] = currentReaderValues[doc];
  }

  @Override
  public void setNextReader(IndexReader reader, int docBase) throws IOException {
    // Refresh per-doc values from the FieldCache for the new segment.
    currentReaderValues = FieldCache.DEFAULT.getShorts(reader, field, parser);
  }

  @Override
  public void setBottom(final int bottom) {
    this.bottom = values[bottom];
  }

  @Override
  public Comparable value(int slot) {
    return Short.valueOf(values[slot]);
  }
}
/** Sorts by a field's value using the Collator for a
 *  given Locale.
 *  <p>Null values sort before all non-null values. */
public static final class StringComparatorLocale extends FieldComparator {
  private final String[] values;          // per-slot values of hits currently in the queue
  private String[] currentReaderValues;   // per-doc values for the current segment
  private final String field;
  final Collator collator;                // locale-aware ordering for non-null values
  private String bottom;                  // cached value of the current bottom (weakest) slot

  StringComparatorLocale(int numHits, String field, Locale locale) {
    values = new String[numHits];
    this.field = field;
    collator = Collator.getInstance(locale);
  }

  @Override
  public int compare(int slot1, int slot2) {
    final String val1 = values[slot1];
    final String val2 = values[slot2];
    // nulls sort first; the collator only sees non-null values.
    if (val1 == null) {
      if (val2 == null) {
        return 0;
      }
      return -1;
    } else if (val2 == null) {
      return 1;
    }
    return collator.compare(val1, val2);
  }

  @Override
  public int compareBottom(int doc) {
    final String val2 = currentReaderValues[doc];
    // Same null-first ordering as compare().
    if (bottom == null) {
      if (val2 == null) {
        return 0;
      }
      return -1;
    } else if (val2 == null) {
      return 1;
    }
    return collator.compare(bottom, val2);
  }

  @Override
  public void copy(int slot, int doc) {
    values[slot] = currentReaderValues[doc];
  }

  @Override
  public void setNextReader(IndexReader reader, int docBase) throws IOException {
    // Refresh per-doc values from the FieldCache for the new segment.
    currentReaderValues = FieldCache.DEFAULT.getStrings(reader, field);
  }

  @Override
  public void setBottom(final int bottom) {
    this.bottom = values[bottom];
  }

  @Override
  public Comparable value(int slot) {
    return values[slot];
  }
}
/** Sorts by field's natural String sort order, using
 *  ordinals.  This is functionally equivalent to {@link
 *  StringValComparator}, but it first resolves the string
 *  to their relative ordinal positions (using the index
 *  returned by {@link FieldCache#getStringIndex}), and
 *  does most comparisons using the ordinals.  For medium
 *  to large results, this comparator will be much faster
 *  than {@link StringValComparator}.  For very small
 *  result sets it may be slower. */
public static final class StringOrdValComparator extends FieldComparator {

  private final int[] ords;         // per-slot ordinal, valid for generation readerGen[slot]
  private final String[] values;    // per-slot resolved string values (segment-independent)
  private final int[] readerGen;    // which reader generation each slot's ord belongs to

  private int currentReaderGen = -1;
  private String[] lookup;          // current segment: ord -> string
  private int[] order;              // current segment: doc -> ord
  private final String field;

  private int bottomSlot = -1;      // -1 until setBottom is first called
  private int bottomOrd;
  private String bottomValue;
  private final boolean reversed;
  private final int sortPos;        // 0 if this is the primary sort criterion

  public StringOrdValComparator(int numHits, String field, int sortPos, boolean reversed) {
    ords = new int[numHits];
    values = new String[numHits];
    readerGen = new int[numHits];
    this.sortPos = sortPos;
    this.reversed = reversed;
    this.field = field;
  }

  @Override
  public int compare(int slot1, int slot2) {
    // Fast path: ords are directly comparable only within the same
    // reader generation.  Ords are non-negative indexes, so the
    // subtraction cannot overflow.
    if (readerGen[slot1] == readerGen[slot2]) {
      int cmp = ords[slot1] - ords[slot2];
      if (cmp != 0) {
        return cmp;
      }
    }

    // Slow path: fall back to comparing the actual values (nulls first).
    final String val1 = values[slot1];
    final String val2 = values[slot2];
    if (val1 == null) {
      if (val2 == null) {
        return 0;
      }
      return -1;
    } else if (val2 == null) {
      return 1;
    }
    return val1.compareTo(val2);
  }

  @Override
  public int compareBottom(int doc) {
    assert bottomSlot != -1;
    // bottomOrd has been converted to the current segment's ord space
    // (see setNextReader/convert), so a direct ord comparison is valid.
    int order = this.order[doc];
    final int cmp = bottomOrd - order;
    if (cmp != 0) {
      return cmp;
    }
    final String val2 = lookup[order];
    if (bottomValue == null) {
      if (val2 == null) {
        return 0;
      }
      // bottom wins
      return -1;
    } else if (val2 == null) {
      // doc wins
      return 1;
    }
    return bottomValue.compareTo(val2);
  }

  /** Re-expresses a slot's stored value as an ordinal in the current
   *  segment's lookup table, stamping the slot with the current
   *  generation.  Uses binarySearch (helper defined elsewhere in this
   *  class) over the sorted lookup array. */
  private void convert(int slot) {
    readerGen[slot] = currentReaderGen;
    int index = 0;
    String value = values[slot];
    if (value == null) {
      // null sorts first; FieldCache's StringIndex reserves ord 0 for it.
      ords[slot] = 0;
      return;
    }

    if (sortPos == 0 && bottomSlot != -1 && bottomSlot != slot) {
      // Since we are the primary sort, the entries in the
      // queue are bounded by bottomOrd:
      assert bottomOrd < lookup.length;
      if (reversed) {
        index = binarySearch(lookup, value, bottomOrd, lookup.length-1);
      } else {
        index = binarySearch(lookup, value, 0, bottomOrd);
      }
    } else {
      // Full binary search
      index = binarySearch(lookup, value);
    }

    if (index < 0) {
      // Value absent from this segment: use the ord of the next smaller
      // entry (insertionPoint - 1).
      index = -index - 2;
    }
    ords[slot] = index;
  }

  @Override
  public void copy(int slot, int doc) {
    final int ord = order[doc];
    ords[slot] = ord;
    assert ord >= 0;
    values[slot] = lookup[ord];
    readerGen[slot] = currentReaderGen;
  }

  @Override
  public void setNextReader(IndexReader reader, int docBase) throws IOException {
    StringIndex currentReaderValues = FieldCache.DEFAULT.getStringIndex(reader, field);
    currentReaderGen++;
    order = currentReaderValues.order;
    lookup = currentReaderValues.lookup;
    assert lookup.length > 0;
    // Keep the bottom slot's ord valid in the new segment's ord space.
    if (bottomSlot != -1) {
      convert(bottomSlot);
      bottomOrd = ords[bottomSlot];
    }
  }

  @Override
  public void setBottom(final int bottom) {
    bottomSlot = bottom;
    // If this slot's ord is stale (from a previous segment), re-resolve it.
    if (readerGen[bottom] != currentReaderGen) {
      convert(bottomSlot);
    }
    bottomOrd = ords[bottom];
    assert bottomOrd >= 0;
    assert bottomOrd < lookup.length;
    bottomValue = values[bottom];
  }

  @Override
  public Comparable value(int slot) {
    return values[slot];
  }

  public String[] getValues() {
    return values;
  }

  public int getBottomSlot() {
    return bottomSlot;
  }

  public String getField() {
    return field;
  }
}
/** Sorts by field's natural String sort order. All
* comparisons are done using String.compareTo, which is
* slow for medium to large result sets but possibly
* very fast for very small results sets. */
public static final class StringValComparator extends FieldComparator {
private String[] values;
private String[] currentReaderValues;
private final String field;
private String bottom;
StringValComparator(int numHits, String field) {
values = new String[numHits];
this.field = field;
}
@Override
public int compare(int slot1, int slot2) {
final String val1 = values[slot1];
final String val2 = values[slot2];
if (val1 == null) {
if (val2 == null) {
return 0;
}
return -1;
} else if (val2 == null) {
return 1;
}
return val1.compareTo(val2);
}
@Override
public int compareBottom(int doc) {
final String val2 = currentReaderValues[doc];
if (bottom == null) {
if (val2 == null) {
return 0;
}
return -1;
} else if (val2 == null) {
return 1;
}
return bottom.compareTo(val2);
}
@Override
public void copy(int slot, int doc) {
values[slot] = currentReaderValues[doc];
}
@Override
public void setNextReader(IndexReader reader, int docBase) throws IOException {
currentReaderValues = FieldCache.DEFAULT.getStrings(reader, field);
}
@Override
public void setBottom(final int bottom) {
this.bottom = values[bottom];
}
@Override
public Comparable value(int slot) {
return values[slot];
}
}
final protected static int binarySearch(String[] a, String key) {
return binarySearch(a, key, 0, a.length-1);
}
final protected static int binarySearch(String[] a, String key, int low, int high) {
while (low <= high) {
int mid = (low + high) >>> 1;
String midVal = a[mid];
int cmp;
if (midVal != null) {
cmp = midVal.compareTo(key);
} else {
cmp = -1;
}
if (cmp < 0)
low = mid + 1;
else if (cmp > 0)
high = mid - 1;
else
return mid;
}
return -(low + 1);
}
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/search/FieldComparator.java | Java | art | 26,716 |
package org.apache.lucene.search;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
/**
* A DocIdSet contains a set of doc ids. Implementing classes must
* only implement {@link #iterator} to provide access to the set.
*/
public abstract class DocIdSet {

  /** An empty {@code DocIdSet} instance for easy use, e.g. in Filters
   *  that hit no documents. Its iterator is permanently exhausted, and
   *  it reports itself as cacheable since iterating it costs nothing. */
  public static final DocIdSet EMPTY_DOCIDSET = new DocIdSet() {

    private final DocIdSetIterator iterator = new DocIdSetIterator() {
      @Override
      public int advance(int target) throws IOException { return NO_MORE_DOCS; }
      @Override
      public int docID() { return NO_MORE_DOCS; }
      @Override
      public int nextDoc() throws IOException { return NO_MORE_DOCS; }
    };

    @Override
    public DocIdSetIterator iterator() {
      return iterator;
    }

    @Override
    public boolean isCacheable() {
      return true;
    }
  };

  /** Provides a {@link DocIdSetIterator} to access the set.
   * This implementation can return <code>null</code> or
   * <code>{@linkplain #EMPTY_DOCIDSET}.iterator()</code> if there
   * are no docs that match. */
  public abstract DocIdSetIterator iterator() throws IOException;

  /**
   * This method is a hint for {@link CachingWrapperFilter}, if this
   * <code>DocIdSet</code> should be cached without copying it into a
   * BitSet. The default is to return <code>false</code>. If you have
   * your own <code>DocIdSet</code> implementation that iterates very
   * efficiently without doing disk I/O, override this method and
   * return <code>true</code>.
   */
  public boolean isCacheable() {
    return false;
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/search/DocIdSet.java | Java | art | 2,456 |
package org.apache.lucene.search;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.util.PriorityQueue;
/**
* A base class for all collectors that return a {@link TopDocs} output. This
* collector allows easy extension by providing a single constructor which
* accepts a {@link PriorityQueue} as well as protected members for that
* priority queue and a counter of the number of total hits.<br>
* Extending classes can override {@link #topDocs(int, int)} and
* {@link #getTotalHits()} in order to provide their own implementation.
*/
public abstract class TopDocsCollector<T extends ScoreDoc> extends Collector {

  // This is used in case topDocs() is called with illegal parameters, or there
  // simply aren't (enough) results.
  protected static final TopDocs EMPTY_TOPDOCS = new TopDocs(0, new ScoreDoc[0], Float.NaN);

  /**
   * The priority queue which holds the top documents. Note that different
   * implementations of PriorityQueue give different meaning to 'top documents'.
   * HitQueue for example aggregates the top scoring documents, while other PQ
   * implementations may hold documents sorted by other criteria.
   */
  protected PriorityQueue<T> pq;

  /** The total number of documents that the collector encountered. */
  protected int totalHits;

  protected TopDocsCollector(PriorityQueue<T> pq) {
    this.pq = pq;
  }

  /**
   * Populates the results array with the ScoreDoc instances. This can be
   * overridden in case a different ScoreDoc type should be returned.
   */
  protected void populateResults(ScoreDoc[] results, int howMany) {
    // pop() returns the least element first, so fill backwards to end
    // up with results in best-first order.
    for (int i = howMany - 1; i >= 0; i--) {
      results[i] = pq.pop();
    }
  }

  /**
   * Returns a {@link TopDocs} instance containing the given results. If
   * <code>results</code> is null it means there are no results to return,
   * either because there were 0 calls to collect() or because the arguments to
   * topDocs were invalid.
   */
  protected TopDocs newTopDocs(ScoreDoc[] results, int start) {
    return results == null ? EMPTY_TOPDOCS : new TopDocs(totalHits, results);
  }

  /** The total number of documents that matched this query. */
  public int getTotalHits() {
    return totalHits;
  }

  /** Returns the top docs that were collected by this collector. */
  public final TopDocs topDocs() {
    // In case pq was populated with sentinel values, there might be less
    // results than pq.size(). Therefore return all results until either
    // pq.size() or totalHits.
    return topDocs(0, totalHits < pq.size() ? totalHits : pq.size());
  }

  /**
   * Returns the documents in the range [start .. pq.size()) that were collected
   * by this collector. Note that if start >= pq.size(), an empty TopDocs is
   * returned.<br>
   * This method is convenient to call if the application always asks for the
   * last results, starting from the last 'page'.<br>
   * <b>NOTE:</b> you cannot call this method more than once for each search
   * execution. If you need to call it more than once, passing each time a
   * different <code>start</code>, you should call {@link #topDocs()} and work
   * with the returned {@link TopDocs} object, which will contain all the
   * results this search execution collected.
   */
  public final TopDocs topDocs(int start) {
    // In case pq was populated with sentinel values, there might be less
    // results than pq.size(). Therefore return all results until either
    // pq.size() or totalHits.
    return topDocs(start, totalHits < pq.size() ? totalHits : pq.size());
  }

  /**
   * Returns the documents in the range [start .. start+howMany) that were
   * collected by this collector. Note that if start >= pq.size(), an empty
   * TopDocs is returned, and if pq.size() - start < howMany, then only the
   * available documents in [start .. pq.size()) are returned.<br>
   * This method is useful to call in case pagination of search results is
   * allowed by the search application, as well as it attempts to optimize the
   * memory used by allocating only as much as requested by howMany.<br>
   * <b>NOTE:</b> you cannot call this method more than once for each search
   * execution. If you need to call it more than once, passing each time a
   * different range, you should call {@link #topDocs()} and work with the
   * returned {@link TopDocs} object, which will contain all the results this
   * search execution collected.
   */
  public final TopDocs topDocs(int start, int howMany) {
    // In case pq was populated with sentinel values, there might be less
    // results than pq.size(). Therefore return all results until either
    // pq.size() or totalHits.
    int size = totalHits < pq.size() ? totalHits : pq.size();

    // Don't bother to throw an exception, just return an empty TopDocs in case
    // the parameters are invalid or out of range.
    if (start < 0 || start >= size || howMany <= 0) {
      return newTopDocs(null, start);
    }

    // We know that start < pqsize, so just fix howMany.
    howMany = Math.min(size - start, howMany);
    ScoreDoc[] results = new ScoreDoc[howMany];

    // pq's pop() returns the 'least' element in the queue, therefore need
    // to discard the first ones, until we reach the requested range.
    // Note that this loop will usually not be executed, since the common usage
    // should be that the caller asks for the last howMany results. However it's
    // needed here for completeness.
    for (int i = pq.size() - start - howMany; i > 0; i--) { pq.pop(); }

    // Get the requested results from pq.
    populateResults(results, howMany);

    return newTopDocs(results, start);
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/search/TopDocsCollector.java | Java | art | 6,430 |
package org.apache.lucene.search;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.Serializable;
import java.io.IOException;
import java.util.Map;
import java.util.WeakHashMap;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.util.OpenBitSetDISI;
/**
* Wraps another filter's result and caches it. The purpose is to allow
* filters to simply filter, and then wrap with this class to add caching.
*/
public class CachingWrapperFilter extends Filter {
  // The wrapped filter whose DocIdSets are cached per reader.
  Filter filter;

  /**
   * Expert: Specifies how new deletions against a reopened
   * reader should be handled.
   *
   * <p>The default is IGNORE, which means the cache entry
   * will be re-used for a given segment, even when that
   * segment has been reopened due to changes in deletions.
   * This is a big performance gain, especially with
   * near-real-time readers, since you don't hit a cache
   * miss on every reopened reader for prior segments.</p>
   *
   * <p>However, in some cases this can cause invalid query
   * results, allowing deleted documents to be returned.
   * This only happens if the main query does not rule out
   * deleted documents on its own, such as a toplevel
   * ConstantScoreQuery. To fix this, use RECACHE to
   * re-create the cached filter (at a higher per-reopen
   * cost, but at faster subsequent search performance), or
   * use DYNAMIC to dynamically intersect deleted docs (fast
   * reopen time but some hit to search performance).</p>
   */
  public static enum DeletesMode {IGNORE, RECACHE, DYNAMIC};

  protected final FilterCache<DocIdSet> cache;

  // Keys cached DocIdSets either by the reader's core key (shared
  // across reopens that only changed deletions) or by its deletes key,
  // depending on the configured DeletesMode.
  static abstract class FilterCache<T> implements Serializable {

    /**
     * A transient Filter cache (package private because of test)
     */
    // NOTE: not final so that we can dynamically re-init
    // after de-serialize
    transient Map<Object,T> cache;

    private final DeletesMode deletesMode;

    public FilterCache(DeletesMode deletesMode) {
      this.deletesMode = deletesMode;
    }

    // Looks up the cached value for this reader. Which key is consulted
    // (core vs. deletes) depends on deletesMode; DYNAMIC falls back
    // from the exact deletes key to the core key and then filters out
    // deletions on the fly via mergeDeletes.
    public synchronized T get(IndexReader reader, Object coreKey, Object delCoreKey) throws IOException {
      T value;

      if (cache == null) {
        // Lazily (re-)created; the map is transient and lost on
        // serialization.
        cache = new WeakHashMap<Object,T>();
      }

      if (deletesMode == DeletesMode.IGNORE) {
        // key on core
        value = cache.get(coreKey);
      } else if (deletesMode == DeletesMode.RECACHE) {
        // key on deletes, if any, else core
        value = cache.get(delCoreKey);
      } else {

        assert deletesMode == DeletesMode.DYNAMIC;

        // first try for exact match
        value = cache.get(delCoreKey);

        if (value == null) {
          // now for core match, but dynamically AND NOT
          // deletions
          value = cache.get(coreKey);
          if (value != null && reader.hasDeletions()) {
            value = mergeDeletes(reader, value);
          }
        }
      }

      return value;
    }

    // Wraps a core-keyed cached value so that the reader's current
    // deletions are excluded (used only in DYNAMIC mode).
    protected abstract T mergeDeletes(IndexReader reader, T value);

    public synchronized void put(Object coreKey, Object delCoreKey, T value) {
      if (deletesMode == DeletesMode.IGNORE) {
        cache.put(coreKey, value);
      } else if (deletesMode == DeletesMode.RECACHE) {
        cache.put(delCoreKey, value);
      } else {
        // DYNAMIC: store under both keys so later lookups can hit
        // either the exact deletes state or the shared core.
        cache.put(coreKey, value);
        cache.put(delCoreKey, value);
      }
    }
  }

  /**
   * New deletes are ignored by default, which gives higher
   * cache hit rate on reopened readers. Most of the time
   * this is safe, because the filter will be AND'd with a
   * Query that fully enforces deletions. If instead you
   * need this filter to always enforce deletions, pass
   * either {@link DeletesMode#RECACHE} or {@link
   * DeletesMode#DYNAMIC}.
   * @param filter Filter to cache results of
   */
  public CachingWrapperFilter(Filter filter) {
    this(filter, DeletesMode.IGNORE);
  }

  /**
   * Expert: by default, the cached filter will be shared
   * across reopened segments that only had changes to their
   * deletions.
   *
   * @param filter Filter to cache results of
   * @param deletesMode See {@link DeletesMode}
   */
  public CachingWrapperFilter(Filter filter, DeletesMode deletesMode) {
    this.filter = filter;
    cache = new FilterCache<DocIdSet>(deletesMode) {
      @Override
      public DocIdSet mergeDeletes(final IndexReader r, final DocIdSet docIdSet) {
        // Lazily skip deleted docs without rebuilding the cached set.
        return new FilteredDocIdSet(docIdSet) {
          @Override
          protected boolean match(int docID) {
            return !r.isDeleted(docID);
          }
        };
      }
    };
  }

  /** Provide the DocIdSet to be cached, using the DocIdSet provided
   * by the wrapped Filter.
   * <p>This implementation returns the given {@link DocIdSet}, if {@link DocIdSet#isCacheable}
   * returns <code>true</code>, else it copies the {@link DocIdSetIterator} into
   * an {@link OpenBitSetDISI}.
   */
  protected DocIdSet docIdSetToCache(DocIdSet docIdSet, IndexReader reader) throws IOException {
    if (docIdSet == null) {
      // this is better than returning null, as the nonnull result can be cached
      return DocIdSet.EMPTY_DOCIDSET;
    } else if (docIdSet.isCacheable()) {
      return docIdSet;
    } else {
      final DocIdSetIterator it = docIdSet.iterator();
      // null is allowed to be returned by iterator(),
      // in this case we wrap with the empty set,
      // which is cacheable.
      return (it == null) ? DocIdSet.EMPTY_DOCIDSET : new OpenBitSetDISI(it, reader.maxDoc());
    }
  }

  // for testing
  int hitCount, missCount;

  @Override
  public DocIdSet getDocIdSet(IndexReader reader) throws IOException {

    final Object coreKey = reader.getFieldCacheKey();
    final Object delCoreKey = reader.hasDeletions() ? reader.getDeletesCacheKey() : coreKey;

    DocIdSet docIdSet = cache.get(reader, coreKey, delCoreKey);
    if (docIdSet != null) {
      hitCount++;
      return docIdSet;
    }

    missCount++;

    // cache miss
    docIdSet = docIdSetToCache(filter.getDocIdSet(reader), reader);

    if (docIdSet != null) {
      cache.put(coreKey, delCoreKey, docIdSet);
    }

    return docIdSet;
  }

  @Override
  public String toString() {
    return "CachingWrapperFilter("+filter+")";
  }

  @Override
  public boolean equals(Object o) {
    if (!(o instanceof CachingWrapperFilter)) return false;
    return this.filter.equals(((CachingWrapperFilter)o).filter);
  }

  @Override
  public int hashCode() {
    // XOR with an arbitrary constant so this wrapper hashes differently
    // from the wrapped filter itself.
    return filter.hashCode() ^ 0x1117BF25;
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/search/CachingWrapperFilter.java | Java | art | 7,234 |
package org.apache.lucene.search;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
/**
* A {@link Scorer} which wraps another scorer and caches the score of the
* current document. Successive calls to {@link #score()} will return the same
* result and will not invoke the wrapped Scorer's score() method, unless the
* current document has changed.<br>
* This class might be useful due to the changes done to the {@link Collector}
* interface, in which the score is not computed for a document by default, only
* if the collector requests it. Some collectors may need to use the score in
* several places, however all they have in hand is a {@link Scorer} object, and
* might end up computing the score of a document more than once.
*/
public class ScoreCachingWrappingScorer extends Scorer {
private Scorer scorer;
private int curDoc = -1;
private float curScore;
/** Creates a new instance by wrapping the given scorer. */
public ScoreCachingWrappingScorer(Scorer scorer) {
super(scorer.getSimilarity());
this.scorer = scorer;
}
@Override
protected boolean score(Collector collector, int max, int firstDocID) throws IOException {
return scorer.score(collector, max, firstDocID);
}
@Override
public Similarity getSimilarity() {
return scorer.getSimilarity();
}
@Override
public float score() throws IOException {
int doc = scorer.docID();
if (doc != curDoc) {
curScore = scorer.score();
curDoc = doc;
}
return curScore;
}
@Override
public int docID() {
return scorer.docID();
}
@Override
public int nextDoc() throws IOException {
return scorer.nextDoc();
}
@Override
public void score(Collector collector) throws IOException {
scorer.score(collector);
}
@Override
public int advance(int target) throws IOException {
return scorer.advance(target);
}
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/search/ScoreCachingWrappingScorer.java | Java | art | 2,685 |
package org.apache.lucene.search;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.util.Comparator;
import java.util.Date;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.TreeSet;
import org.apache.lucene.util.ThreadInterruptedException;
/**
* Filter caching singleton. It can be used
* to save filters locally for reuse.
* This class makes it possible to cache Filters even when using RMI, as it
* keeps the cache on the searcher side of the RMI connection.
*
* Also could be used as a persistent storage for any filter as long as the
* filter provides a proper hashCode(), as that is used as the key in the cache.
*
* The cache is periodically cleaned up from a separate thread to ensure the
* cache doesn't exceed the maximum size.
*/
public class FilterManager {

  // Lazily-created singleton; guarded by the class lock in getInstance().
  protected static FilterManager manager;

  /** The default maximum number of Filters in the cache */
  protected static final int DEFAULT_CACHE_CLEAN_SIZE = 100;
  /** The default frequency of cache cleanup */
  protected static final long DEFAULT_CACHE_SLEEP_TIME = 1000 * 60 * 10;

  /** The cache itself, keyed by {@code filter.hashCode()}. All access
   *  must synchronize on this map. */
  protected Map<Integer,FilterItem> cache;
  /** Maximum allowed cache size */
  protected int cacheCleanSize;
  /** Cache cleaning frequency */
  protected long cleanSleepTime;
  /** Cache cleaner that runs in a separate thread */
  protected FilterCleaner filterCleaner;

  public synchronized static FilterManager getInstance() {
    if (manager == null) {
      manager = new FilterManager();
    }
    return manager;
  }

  /**
   * Sets up the FilterManager singleton.
   */
  protected FilterManager() {
    cache = new HashMap<Integer,FilterItem>();
    cacheCleanSize = DEFAULT_CACHE_CLEAN_SIZE; // Let the cache get to 100 items
    cleanSleepTime = DEFAULT_CACHE_SLEEP_TIME; // 10 minutes between cleanings

    filterCleaner = new FilterCleaner();
    Thread fcThread = new Thread(filterCleaner);
    // set to be a Daemon so it doesn't have to be stopped
    fcThread.setDaemon(true);
    fcThread.start();
  }

  /**
   * Sets the max size that cache should reach before it is cleaned up
   * @param cacheCleanSize maximum allowed cache size
   */
  public void setCacheSize(int cacheCleanSize) {
    this.cacheCleanSize = cacheCleanSize;
  }

  /**
   * Sets the cache cleaning frequency in milliseconds.
   * @param cleanSleepTime cleaning frequency in milliseconds
   */
  public void setCleanThreadSleepTime(long cleanSleepTime) {
    this.cleanSleepTime = cleanSleepTime;
  }

  /**
   * Returns the cached version of the filter. Allows the caller to pass up
   * a small filter but this will keep a persistent version around and allow
   * the caching filter to do its job.
   *
   * <p>NOTE(review): the key is {@code filter.hashCode()} alone — two
   * distinct filters with colliding hash codes will share a slot and the
   * first one wins. Filters must provide a well-distributed hashCode().
   *
   * @param filter The input filter
   * @return The cached version of the filter
   */
  public Filter getFilter(Filter filter) {
    synchronized(cache) {
      FilterItem fi = cache.get(Integer.valueOf(filter.hashCode()));
      if (fi != null) {
        // Refresh the LRU timestamp on every hit.
        fi.timestamp = System.currentTimeMillis();
        return fi.filter;
      }
      cache.put(Integer.valueOf(filter.hashCode()), new FilterItem(filter));
      return filter;
    }
  }

  /**
   * Holds the filter and the last time the filter was used, to make LRU-based
   * cache cleaning possible.
   */
  protected class FilterItem {
    public Filter filter;
    public long timestamp;

    public FilterItem (Filter filter) {
      this.filter = filter;
      this.timestamp = System.currentTimeMillis();
    }
  }


  /**
   * Keeps the cache from getting too big.
   *
   * The SortedSet sortedFilterItems is used only to sort the items from the cache,
   * so when it's time to clean up we have the TreeSet sort the FilterItems by
   * timestamp.
   *
   * Removes 1.5 * the numbers of items to make the cache smaller.
   * For example:
   * If cache clean size is 10, and the cache is at 15, we would remove (15 - 10) * 1.5 = 7.5 round up to 8.
   * This way we clean the cache a bit more, and avoid having the cache cleaner having to do it frequently.
   */
  protected class FilterCleaner implements Runnable {

    private boolean running = true;
    private TreeSet<Map.Entry<Integer,FilterItem>> sortedFilterItems;

    public FilterCleaner() {
      sortedFilterItems = new TreeSet<Map.Entry<Integer,FilterItem>>(new Comparator<Map.Entry<Integer,FilterItem>>() {
        public int compare(Map.Entry<Integer,FilterItem> a, Map.Entry<Integer,FilterItem> b) {
          FilterItem fia = a.getValue();
          FilterItem fib = b.getValue();
          // Oldest (smallest timestamp) first.
          if (fia.timestamp < fib.timestamp) {
            return -1;
          }
          if (fia.timestamp > fib.timestamp) {
            return 1;
          }
          // Tie-break on the cache key: TreeSet discards elements that
          // compare equal, so returning 0 for distinct entries with the
          // same timestamp would silently drop them from the cleanup
          // pass (comparator inconsistent with equals).
          return a.getKey().compareTo(b.getKey());
        }
      });
    }

    public void run () {
      while (running) {
        // sort items from oldest to newest; we delete the oldest filters.
        // The size check and the cleanup happen under the cache lock so
        // we see a consistent view of the map.
        synchronized (cache) {
          if (cache.size() > cacheCleanSize) {
            // empty the temporary set
            sortedFilterItems.clear();
            sortedFilterItems.addAll(cache.entrySet());
            Iterator<Map.Entry<Integer,FilterItem>> it = sortedFilterItems.iterator();
            int numToDelete = (int) ((cache.size() - cacheCleanSize) * 1.5);
            int counter = 0;
            // loop over the set and delete all of the cache entries not used in a while
            while (it.hasNext() && counter++ < numToDelete) {
              Map.Entry<Integer,FilterItem> entry = it.next();
              cache.remove(entry.getKey());
            }
            // empty the set so we don't tie up the memory
            sortedFilterItems.clear();
          }
        }
        // take a nap
        try {
          Thread.sleep(cleanSleepTime);
        } catch (InterruptedException ie) {
          throw new ThreadInterruptedException(ie);
        }
      }
    }
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/search/FilterManager.java | Java | art | 7,016 |
package org.apache.lucene.search;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
/**
* Subclass of FilteredTermEnum for enumerating all terms that match the
* specified wildcard filter term.
* <p>
* Term enumerations are always ordered by Term.compareTo(). Each term in
* the enumeration is greater than all that precede it.
*/
public class WildcardTermEnum extends FilteredTermEnum {
  final Term searchTerm;
  final String field;
  // Pattern remainder after the literal prefix (starts at the first wildcard).
  final String text;
  // Literal (wildcard-free) prefix of the pattern; used to seed the enum.
  final String pre;
  final int preLen;
  boolean endEnum = false;

  /**
   * Creates a new <code>WildcardTermEnum</code>.
   * <p>
   * After calling the constructor the enumeration is already pointing to the first
   * valid term if such a term exists.
   */
  public WildcardTermEnum(IndexReader reader, Term term) throws IOException {
    super();
    searchTerm = term;
    field = searchTerm.field();
    final String searchTermText = searchTerm.text();

    // Split the pattern at the first wildcard ('*' or '?'), whichever
    // comes first.
    final int sidx = searchTermText.indexOf(WILDCARD_STRING);
    final int cidx = searchTermText.indexOf(WILDCARD_CHAR);
    int idx = sidx;
    if (idx == -1) {
      idx = cidx;
    }
    else if (cidx >= 0) {
      idx = Math.min(idx, cidx);
    }
    pre = idx != -1?searchTerm.text().substring(0,idx): "";

    preLen = pre.length();
    text = searchTermText.substring(preLen);
    // Seek the term enum to the first term >= the literal prefix.
    setEnum(reader.terms(new Term(searchTerm.field(), pre)));
  }

  @Override
  protected final boolean termCompare(Term term) {
    // NOTE(review): '==' on field relies on Term interning field names —
    // presumably guaranteed by Term; confirm before changing.
    if (field == term.field()) {
      String searchText = term.text();
      if (searchText.startsWith(pre)) {
        return wildcardEquals(text, 0, searchText, preLen);
      }
    }
    // Terms are enumerated in order, so once a term no longer carries the
    // prefix (or the field changes) no later term can match.
    endEnum = true;
    return false;
  }

  @Override
  public float difference() {
    // All wildcard matches score equally.
    return 1.0f;
  }

  @Override
  public final boolean endEnum() {
    return endEnum;
  }

  /********************************************
   * String equality with support for wildcards
   ********************************************/

  public static final char WILDCARD_STRING = '*';
  public static final char WILDCARD_CHAR = '?';

  /**
   * Determines if a word matches a wildcard pattern.
   * '*' matches any (possibly empty) sequence of characters; '?' matches
   * exactly one character. Matching starts at patternIdx/stringIdx.
   * <small>Work released by Granta Design Ltd after originally being done on
   * company time.</small>
   */
  public static final boolean wildcardEquals(String pattern, int patternIdx,
    String string, int stringIdx)
  {
    int p = patternIdx;

    for (int s = stringIdx; ; ++p, ++s)
    {
      // End of string yet?
      boolean sEnd = (s >= string.length());
      // End of pattern yet?
      boolean pEnd = (p >= pattern.length());

      // If we're looking at the end of the string...
      if (sEnd)
      {
        // Assume the only thing left on the pattern is/are wildcards
        boolean justWildcardsLeft = true;

        // Current wildcard position
        int wildcardSearchPos = p;
        // While we haven't found the end of the pattern,
        // and haven't encountered any non-wildcard characters
        while (wildcardSearchPos < pattern.length() && justWildcardsLeft)
        {
          // Check the character at the current position
          char wildchar = pattern.charAt(wildcardSearchPos);

          // If it's not a wildcard character, then there is more
          // pattern information after this/these wildcards.
          if (wildchar != WILDCARD_CHAR && wildchar != WILDCARD_STRING)
          {
            justWildcardsLeft = false;
          }
          else
          {
            // to prevent "cat" matches "ca??"
            // ('?' must consume a character; only trailing '*'s may
            // match the empty remainder)
            if (wildchar == WILDCARD_CHAR) {
              return false;
            }

            // Look at the next character
            wildcardSearchPos++;
          }
        }

        // This was a prefix wildcard search, and we've matched, so
        // return true.
        if (justWildcardsLeft)
        {
          return true;
        }
      }

      // If we've gone past the end of the string, or the pattern,
      // return false.
      if (sEnd || pEnd)
      {
        break;
      }

      // Match a single character, so continue.
      if (pattern.charAt(p) == WILDCARD_CHAR)
      {
        continue;
      }

      //
      if (pattern.charAt(p) == WILDCARD_STRING)
      {
        // Look at the character beyond the '*'.
        ++p;
        // Examine the string, starting at the last character, trying
        // every possible split point for the '*' (greedy, recursive).
        for (int i = string.length(); i >= s; --i)
        {
          if (wildcardEquals(pattern, p, string, i))
          {
            return true;
          }
        }
        break;
      }
      if (pattern.charAt(p) != string.charAt(s))
      {
        break;
      }
    }
    return false;
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/search/WildcardTermEnum.java | Java | art | 5,654 |
package org.apache.lucene.search;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import org.apache.lucene.index.TermPositions;
/** Expert: Scoring functionality for phrase queries.
* <br>A document is considered matching if it contains the phrase-query terms
* at "valid" positions. What "valid positions" are
* depends on the type of the phrase query: for an exact phrase query terms are required
* to appear in adjacent locations, while for a sloppy phrase query some distance between
* the terms is allowed. The abstract method {@link #phraseFreq()} of extending classes
* is invoked for each document containing all the phrase query terms, in order to
* compute the frequency of the phrase query in that document. A non zero frequency
* means a match.
*/
abstract class PhraseScorer extends Scorer {
  private Weight weight;
  protected byte[] norms;    // field norms used for score normalization; may be null
  protected float value;     // the query weight's value, cached at construction
  private boolean firstTime = true;  // true until iteration is initialized by init()
  private boolean more = true;       // false once any term's position stream is exhausted
  protected PhraseQueue pq;          // priority queue used to (re-)sort the position streams
  protected PhrasePositions first, last;  // singly linked list of per-term position streams
  private float freq; //phrase frequency in current doc as computed by phraseFreq().
  PhraseScorer(Weight weight, TermPositions[] tps, int[] offsets,
               Similarity similarity, byte[] norms) {
    super(similarity);
    this.norms = norms;
    this.weight = weight;
    this.value = weight.getValue();
    // convert tps to a list of phrase positions.
    // note: phrase-position differs from term-position in that its position
    // reflects the phrase offset: pp.pos = tp.pos - offset.
    // this allows to easily identify a matching (exact) phrase
    // when all PhrasePositions have exactly the same position.
    for (int i = 0; i < tps.length; i++) {
      PhrasePositions pp = new PhrasePositions(tps[i], offsets[i]);
      if (last != null) { // add next to end of list
        last.next = pp;
      } else {
        first = pp;
      }
      last = pp;
    }
    pq = new PhraseQueue(tps.length); // construct empty pq
    first.doc = -1; // docID() reports first.doc; -1 marks "iteration not started"
  }
  /** Doc id of the current match, taken from the head of the list. */
  @Override
  public int docID() { return first.doc; }
  @Override
  public int nextDoc() throws IOException {
    if (firstTime) {
      init();
      firstTime = false;
    } else if (more) {
      more = last.next(); // trigger further scanning
    }
    if (!doNext()) {
      first.doc = NO_MORE_DOCS;
    }
    return first.doc;
  }
  // next without initial increment: advances until all streams sit on the same
  // doc AND phraseFreq() is non-zero; returns false when streams are exhausted.
  private boolean doNext() throws IOException {
    while (more) {
      while (more && first.doc < last.doc) { // find doc w/ all the terms
        more = first.skipTo(last.doc); // skip first upto last
        firstToLast(); // and move it to the end
      }
      if (more) {
        // found a doc with all of the terms
        freq = phraseFreq(); // check for phrase
        if (freq == 0.0f) // no match
          more = last.next(); // trigger further scanning
        else
          return true; // found a match
      }
    }
    return false; // no more matches
  }
  @Override
  public float score() throws IOException {
    //System.out.println("scoring " + first.doc);
    float raw = getSimilarity().tf(freq) * value; // raw score
    return norms == null ? raw : raw * Similarity.decodeNorm(norms[first.doc]); // normalize
  }
  @Override
  public int advance(int target) throws IOException {
    firstTime = false;
    // skip every stream up to (at least) target, then re-establish list order
    for (PhrasePositions pp = first; more && pp != null; pp = pp.next) {
      more = pp.skipTo(target);
    }
    if (more) {
      sort(); // re-sort
    }
    if (!doNext()) {
      first.doc = NO_MORE_DOCS;
    }
    return first.doc;
  }
  /**
   * phrase frequency in current doc as computed by phraseFreq().
   */
  public final float currentFreq() { return freq; }
  /**
   * For a document containing all the phrase query terms, compute the
   * frequency of the phrase in that document.
   * A non zero frequency means a match.
   * <br>Note, that containing all phrase terms does not guarantee a match - they have to be found in matching locations.
   * @return frequency of the phrase in current doc, 0 if not found.
   */
  protected abstract float phraseFreq() throws IOException;
  // advance every stream to its first doc; sort the list if none is exhausted
  private void init() throws IOException {
    for (PhrasePositions pp = first; more && pp != null; pp = pp.next) {
      more = pp.next();
    }
    if (more) {
      sort();
    }
  }
  // rebuild the first..last linked list in priority-queue order
  private void sort() {
    pq.clear();
    for (PhrasePositions pp = first; pp != null; pp = pp.next) {
      pq.add(pp);
    }
    pqToList();
  }
  // drain the priority queue back into the first..last linked list
  protected final void pqToList() {
    last = first = null;
    while (pq.top() != null) {
      PhrasePositions pp = pq.pop();
      if (last != null) { // add next to end of list
        last.next = pp;
      } else
        first = pp;
      last = pp;
      pp.next = null;
    }
  }
  // rotate the list: the head (lowest doc) becomes the tail
  protected final void firstToLast() {
    last.next = first; // move first to end of list
    last = first;
    first = first.next;
    last.next = null;
  }
  @Override
  public String toString() { return "scorer(" + weight + ")"; }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/search/PhraseScorer.java | Java | art | 6,023 |
package org.apache.lucene.search;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.util.ToStringUtils;
import java.io.IOException;
import java.util.PriorityQueue;
/** Implements the fuzzy search query. The similarity measurement
* is based on the Levenshtein (edit distance) algorithm.
*
* Warning: this query is not very scalable with its default prefix
* length of 0 - in this case, *every* term will be enumerated and
* cause an edit score calculation.
*
*/
public class FuzzyQuery extends MultiTermQuery {
  public final static float defaultMinSimilarity = 0.5f;
  public final static int defaultPrefixLength = 0;
  private float minimumSimilarity;   // threshold in [0, 1); higher = stricter
  private int prefixLength;          // required literal prefix shared with the pattern
  private boolean termLongEnough = false;  // false => only an exact match can pass the threshold
  protected Term term;
  /**
   * Creates a FuzzyQuery that matches terms whose edit-distance-based
   * similarity to {@code term} is at least {@code minimumSimilarity}.
   * When {@code prefixLength} > 0, candidate terms must additionally share
   * a literal prefix of that length with the pattern term.
   *
   * @param term the pattern term to search for
   * @param minimumSimilarity similarity threshold in [0, 1); e.g. with 0.5 a
   *        term of the same length as the pattern matches when the edit
   *        distance between them is below {@code length(term)*0.5}
   * @param prefixLength length of the common (non-fuzzy) prefix
   * @throws IllegalArgumentException if minimumSimilarity is >= 1 or < 0,
   *         or if prefixLength < 0
   */
  public FuzzyQuery(Term term, float minimumSimilarity, int prefixLength) throws IllegalArgumentException {
    this.term = term;
    if (minimumSimilarity >= 1.0f) {
      throw new IllegalArgumentException("minimumSimilarity >= 1");
    }
    if (minimumSimilarity < 0.0f) {
      throw new IllegalArgumentException("minimumSimilarity < 0");
    }
    if (prefixLength < 0) {
      throw new IllegalArgumentException("prefixLength < 0");
    }
    // A pattern shorter than 1/(1-minSim) characters can only ever match
    // exactly; rewrite() degrades to a plain TermQuery in that case.
    if (term.text().length() > 1.0f / (1.0f - minimumSimilarity)) {
      this.termLongEnough = true;
    }
    this.minimumSimilarity = minimumSimilarity;
    this.prefixLength = prefixLength;
    rewriteMethod = SCORING_BOOLEAN_QUERY_REWRITE;
  }
  /**
   * Equivalent to {@link #FuzzyQuery(Term, float, int)} with the default
   * prefix length of 0.
   */
  public FuzzyQuery(Term term, float minimumSimilarity) throws IllegalArgumentException {
    this(term, minimumSimilarity, defaultPrefixLength);
  }
  /**
   * Equivalent to {@link #FuzzyQuery(Term, float, int)} with the default
   * minimum similarity (0.5f) and default prefix length (0).
   */
  public FuzzyQuery(Term term) {
    this(term, defaultMinSimilarity, defaultPrefixLength);
  }
  /**
   * Returns the minimum similarity required for this query to match.
   * @return float value between 0.0 and 1.0
   */
  public float getMinSimilarity() {
    return minimumSimilarity;
  }
  /**
   * Returns the non-fuzzy prefix length: the number of leading characters a
   * candidate term must share verbatim with the pattern term.
   */
  public int getPrefixLength() {
    return prefixLength;
  }
  @Override
  protected FilteredTermEnum getEnum(IndexReader reader) throws IOException {
    return new FuzzyTermEnum(reader, getTerm(), minimumSimilarity, prefixLength);
  }
  /** Returns the pattern term. */
  public Term getTerm() {
    return term;
  }
  /** FuzzyQuery is hard-wired to scoring boolean rewrite and cannot be changed. */
  @Override
  public void setRewriteMethod(RewriteMethod method) {
    throw new UnsupportedOperationException("FuzzyQuery cannot change rewrite method");
  }
  @Override
  public Query rewrite(IndexReader reader) throws IOException {
    if (!termLongEnough) {
      // The pattern is so short that only an exact match can clear the
      // similarity threshold.
      return new TermQuery(term);
    }
    final int maxSize = BooleanQuery.getMaxClauseCount();
    final PriorityQueue<ScoreTerm> stQueue = new PriorityQueue<ScoreTerm>();
    final FilteredTermEnum enumerator = getEnum(reader);
    try {
      // "reusable" is recycled: once the queue is full, the entry evicted by
      // poll() becomes the scratch object for the next candidate.
      ScoreTerm reusable = new ScoreTerm();
      while (true) {
        final Term candidate = enumerator.term();
        if (candidate == null) {
          break;
        }
        final float score = enumerator.difference();
        // Only queue candidates that can displace the current weakest entry.
        if (stQueue.size() < maxSize || score > stQueue.peek().score) {
          reusable.term = candidate;
          reusable.score = score;
          stQueue.offer(reusable);
          reusable = (stQueue.size() > maxSize) ? stQueue.poll() : new ScoreTerm();
        }
        if (!enumerator.next()) {
          break;
        }
      }
    } finally {
      enumerator.close();
    }
    // Expand the surviving candidates into an OR of boosted term queries.
    final BooleanQuery query = new BooleanQuery(true);
    for (final ScoreTerm st : stQueue) {
      final TermQuery tq = new TermQuery(st.term);
      tq.setBoost(getBoost() * st.score);
      query.add(tq, BooleanClause.Occur.SHOULD);
    }
    return query;
  }
  /** Mutable (term, score) pair; ordered so the queue head is the weakest entry. */
  private static final class ScoreTerm implements Comparable<ScoreTerm> {
    public Term term;
    public float score;
    public int compareTo(ScoreTerm other) {
      if (this.score == other.score) {
        return other.term.compareTo(this.term);
      }
      return Float.compare(this.score, other.score);
    }
  }
  @Override
  public String toString(String field) {
    final StringBuilder buffer = new StringBuilder();
    if (!term.field().equals(field)) {
      buffer.append(term.field()).append(':');
    }
    buffer.append(term.text()).append('~');
    buffer.append(Float.toString(minimumSimilarity));
    buffer.append(ToStringUtils.boost(getBoost()));
    return buffer.toString();
  }
  @Override
  public int hashCode() {
    int result = super.hashCode();
    result = 31 * result + Float.floatToIntBits(minimumSimilarity);
    result = 31 * result + prefixLength;
    result = 31 * result + (term == null ? 0 : term.hashCode());
    return result;
  }
  @Override
  public boolean equals(Object obj) {
    if (this == obj) {
      return true;
    }
    if (!super.equals(obj) || getClass() != obj.getClass()) {
      return false;
    }
    final FuzzyQuery other = (FuzzyQuery) obj;
    if (Float.floatToIntBits(minimumSimilarity) != Float.floatToIntBits(other.minimumSimilarity)) {
      return false;
    }
    if (prefixLength != other.prefixLength) {
      return false;
    }
    return term == null ? other.term == null : term.equals(other.term);
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/search/FuzzyQuery.java | Java | art | 7,492 |
package org.apache.lucene.search;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** Expert: Describes the score computation for document and query, and
* can distinguish a match independent of a positive value. */
public class ComplexExplanation extends Explanation {
  // Tri-state match flag: TRUE/FALSE when explicitly assigned, null when unknown.
  private Boolean match;
  public ComplexExplanation() {
    super();
  }
  public ComplexExplanation(boolean match, float value, String description) {
    // NOTE: the primitive "boolean" parameter (rather than "Boolean") is a
    // deliberate API choice that forces callers to commit to a match status.
    super(value, description);
    this.match = Boolean.valueOf(match);
  }
  /**
   * The match status of this explanation node.
   * @return May be null if match status is unknown
   */
  public Boolean getMatch() {
    return match;
  }
  /**
   * Sets the match status assigned to this explanation node.
   * @param match May be null if match status is unknown
   */
  public void setMatch(Boolean match) {
    this.match = match;
  }
  /**
   * Indicates whether or not this Explanation models a good match:
   * the explicit match flag when one is set, otherwise the superclass's
   * value-based default.
   * @see #getMatch
   */
  @Override
  public boolean isMatch() {
    final Boolean m = getMatch();
    if (m != null) {
      return m.booleanValue();
    }
    return super.isMatch();
  }
  @Override
  protected String getSummary() {
    if (getMatch() == null) {
      return super.getSummary();
    }
    final String tag = isMatch() ? "(MATCH) " : "(NON-MATCH) ";
    return getValue() + " = " + tag + getDescription();
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/search/ComplexExplanation.java | Java | art | 2,348 |
package org.apache.lucene.search;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.Serializable;
import java.util.ArrayList;
/** Expert: Describes the score computation for document and query. */
public class Explanation implements java.io.Serializable {
  private float value;                    // score contributed by this node
  private String description;             // human-readable label for this node
  private ArrayList<Explanation> details; // lazily created sub-explanations
  public Explanation() {}
  public Explanation(float value, String description) {
    this.value = value;
    this.description = description;
  }
  /**
   * Indicates whether or not this Explanation models a good match.
   *
   * <p>By default any positive value counts as a match.</p>
   * @see #getValue
   */
  public boolean isMatch() {
    return getValue() > 0.0f;
  }
  /** The value assigned to this explanation node. */
  public float getValue() {
    return value;
  }
  /** Sets the value assigned to this explanation node. */
  public void setValue(float value) {
    this.value = value;
  }
  /** A description of this explanation node. */
  public String getDescription() {
    return description;
  }
  /** Sets the description of this explanation node. */
  public void setDescription(String description) {
    this.description = description;
  }
  /**
   * A short one line summary which should contain all high level
   * information about this Explanation, without the "Details"
   */
  protected String getSummary() {
    return getValue() + " = " + getDescription();
  }
  /** The sub-nodes of this explanation node, or null when there are none. */
  public Explanation[] getDetails() {
    return details == null ? null : details.toArray(new Explanation[0]);
  }
  /** Adds a sub-node to this explanation node. */
  public void addDetail(Explanation detail) {
    if (details == null) {
      details = new ArrayList<Explanation>();
    }
    details.add(detail);
  }
  /** Render an explanation as text. */
  @Override
  public String toString() {
    return toString(0);
  }
  // Recursive text rendering: two spaces of indent per nesting level.
  protected String toString(int depth) {
    final StringBuilder sb = new StringBuilder();
    for (int level = 0; level < depth; level++) {
      sb.append("  ");
    }
    sb.append(getSummary()).append('\n');
    final Explanation[] subs = getDetails();
    if (subs != null) {
      for (final Explanation sub : subs) {
        sb.append(sub.toString(depth + 1));
      }
    }
    return sb.toString();
  }
  /** Render an explanation as HTML (nested unordered lists). */
  public String toHtml() {
    final StringBuilder sb = new StringBuilder();
    sb.append("<ul>\n").append("<li>").append(getSummary()).append("<br />\n");
    final Explanation[] subs = getDetails();
    if (subs != null) {
      for (final Explanation sub : subs) {
        sb.append(sub.toHtml());
      }
    }
    sb.append("</li>\n").append("</ul>\n");
    return sb.toString();
  }
  /**
   * Small Util class used to pass both an idf factor as well as an
   * explanation for that factor.
   *
   * This class will likely be held on a {@link Weight}, so be aware
   * before storing any large or un-serializable fields.
   */
  public static abstract class IDFExplanation implements Serializable {
    /**
     * @return the idf factor
     */
    public abstract float getIdf();
    /**
     * This should be calculated lazily if possible.
     *
     * @return the explanation for the idf factor.
     */
    public abstract String explain();
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/search/Explanation.java | Java | art | 4,408 |
package org.apache.lucene.search;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
/** Subclass of FilteredTermEnum for enumerating all terms that are similar
* to the specified filter term.
*
* <p>Term enumerations are always ordered by Term.compareTo(). Each term in
* the enumeration is greater than all that precede it.
*/
public final class FuzzyTermEnum extends FilteredTermEnum {
  /* The two Levenshtein matrix rows are allocated once and reused across
   * calls to similarity(), saving an allocation per enumerated term.
   */
  private int[] p;   // "previous" row of the edit-distance matrix
  private int[] d;   // "current" row of the edit-distance matrix
  private float similarity;          // similarity of the term most recently accepted by termCompare()
  private boolean endEnum = false;   // set once a term outside the prefix range is seen
  private Term searchTerm = null;    // the pattern term; nulled in close()
  private final String field;
  private final String text;         // pattern text beyond the required prefix
  private final String prefix;       // literal prefix every candidate must share
  private final float minimumSimilarity;
  private final float scale_factor;  // maps (similarity - minimum) into [0, 1] for difference()
  /**
   * Creates a FuzzyTermEnum with an empty prefix and a minSimilarity of 0.5f.
   * <p>
   * After calling the constructor the enumeration is already pointing to the first
   * valid term if such a term exists.
   *
   * @param reader the index to enumerate terms from
   * @param term the pattern term
   * @throws IOException if reading the term dictionary fails
   * @see #FuzzyTermEnum(IndexReader, Term, float, int)
   */
  public FuzzyTermEnum(IndexReader reader, Term term) throws IOException {
    this(reader, term, FuzzyQuery.defaultMinSimilarity, FuzzyQuery.defaultPrefixLength);
  }
  /**
   * Creates a FuzzyTermEnum with an empty prefix.
   * <p>
   * After calling the constructor the enumeration is already pointing to the first
   * valid term if such a term exists.
   *
   * @param reader the index to enumerate terms from
   * @param term the pattern term
   * @param minSimilarity the minimum required similarity
   * @throws IOException if reading the term dictionary fails
   * @see #FuzzyTermEnum(IndexReader, Term, float, int)
   */
  public FuzzyTermEnum(IndexReader reader, Term term, float minSimilarity) throws IOException {
    this(reader, term, minSimilarity, FuzzyQuery.defaultPrefixLength);
  }
  /**
   * Constructor for enumeration of all terms from specified <code>reader</code> which share a prefix of
   * length <code>prefixLength</code> with <code>term</code> and which have a fuzzy similarity &gt;
   * <code>minSimilarity</code>.
   * <p>
   * After calling the constructor the enumeration is already pointing to the first
   * valid term if such a term exists.
   *
   * @param reader Delivers terms.
   * @param term Pattern term.
   * @param minSimilarity Minimum required similarity for terms from the reader. Default value is 0.5f.
   * @param prefixLength Length of required common prefix. Default value is 0.
   * @throws IOException if reading the term dictionary fails
   */
  public FuzzyTermEnum(IndexReader reader, Term term, final float minSimilarity, final int prefixLength) throws IOException {
    super();
    if (minSimilarity >= 1.0f)
      throw new IllegalArgumentException("minimumSimilarity cannot be greater than or equal to 1");
    else if (minSimilarity < 0.0f)
      throw new IllegalArgumentException("minimumSimilarity cannot be less than 0");
    if(prefixLength < 0)
      throw new IllegalArgumentException("prefixLength cannot be less than 0");
    this.minimumSimilarity = minSimilarity;
    this.scale_factor = 1.0f / (1.0f - minimumSimilarity);
    this.searchTerm = term;
    this.field = searchTerm.field();
    //The prefix could be longer than the word.
    //It's kind of silly though. It means we must match the entire word.
    final int fullSearchTermLength = searchTerm.text().length();
    final int realPrefixLength = prefixLength > fullSearchTermLength ? fullSearchTermLength : prefixLength;
    this.text = searchTerm.text().substring(realPrefixLength);
    this.prefix = searchTerm.text().substring(0, realPrefixLength);
    this.p = new int[this.text.length()+1];
    this.d = new int[this.text.length()+1];
    // Position the enumeration at the first term starting with the prefix.
    setEnum(reader.terms(new Term(searchTerm.field(), prefix)));
  }
  /**
   * The termCompare method in FuzzyTermEnum uses Levenshtein distance to
   * calculate the distance between the given term and the comparing term.
   */
  @Override
  protected final boolean termCompare(Term term) {
    // NOTE(review): reference equality (==) on the field name assumes field
    // strings are interned — confirm against Term's implementation.
    if (field == term.field() && term.text().startsWith(prefix)) {
      final String target = term.text().substring(prefix.length());
      this.similarity = similarity(target);
      return (similarity > minimumSimilarity);
    }
    // Terms arrive in sorted order, so the first term outside the prefix
    // range marks the end of the enumeration.
    endEnum = true;
    return false;
  }
  /** {@inheritDoc} */
  @Override
  public final float difference() {
    // Rescale the raw similarity so that results lie in [0, 1].
    return (similarity - minimumSimilarity) * scale_factor;
  }
  /** {@inheritDoc} */
  @Override
  public final boolean endEnum() {
    return endEnum;
  }
  /******************************
   * Compute Levenshtein distance
   ******************************/
  /**
   * <p>Similarity returns a number that is 1.0f or less (including negative numbers)
   * based on how similar the Term is compared to a target term. It returns
   * exactly 0.0f when
   * <pre>
   * editDistance &gt; maximumEditDistance</pre>
   * Otherwise it returns:
   * <pre>
   * 1 - (editDistance / length)</pre>
   * where length is the length of the shortest term (text or target) including a
   * prefix that are identical and editDistance is the Levenshtein distance for
   * the two words.</p>
   *
   * <p>Embedded within this algorithm is a fail-fast Levenshtein distance
   * algorithm. The fail-fast algorithm differs from the standard Levenshtein
   * distance algorithm in that it is aborted if it is discovered that the
   * minimum distance between the words is greater than some threshold.
   *
   * <p>To calculate the maximum distance threshold we use the following formula:
   * <pre>
   * (1 - minimumSimilarity) * length</pre>
   * where length is the shortest term including any prefix that is not part of the
   * similarity comparison. This formula was derived by solving for what maximum value
   * of distance returns false for the following statements:
   * <pre>
   * similarity = 1 - ((float)distance / (float) (prefixLength + Math.min(textlen, targetlen)));
   * return (similarity > minimumSimilarity);</pre>
   * where distance is the Levenshtein distance for the two words.
   * </p>
   * <p>Levenshtein distance (also known as edit distance) is a measure of similarity
   * between two strings where the distance is measured as the number of character
   * deletions, insertions or substitutions required to transform one string to
   * the other string.
   * @param target the target word or phrase
   * @return the similarity, 0.0 or less indicates that it matches less than the required
   * threshold and 1.0 indicates that the text and target are identical
   */
  private float similarity(final String target) {
    final int m = target.length();
    final int n = text.length();
    if (n == 0) {
      //we don't have anything to compare. That means if we just add
      //the letters for m we get the new word
      return prefix.length() == 0 ? 0.0f : 1.0f - ((float) m / prefix.length());
    }
    if (m == 0) {
      return prefix.length() == 0 ? 0.0f : 1.0f - ((float) n / prefix.length());
    }
    final int maxDistance = calculateMaxDistance(m);
    if (maxDistance < Math.abs(m-n)) {
      //just adding the characters of m to n or vice-versa results in
      //too many edits
      //for example "pre" length is 3 and "prefixes" length is 8. We can see that
      //given this optimal circumstance, the edit distance cannot be less than 5.
      //which is 8-3 or more precisely Math.abs(3-8).
      //if our maximum edit distance is 4, then we can discard this word
      //without looking at it.
      return 0.0f;
    }
    // init matrix d: row 0 is the distance from the empty target prefix
    for (int i = 0; i<=n; ++i) {
      p[i] = i;
    }
    // start computing edit distance, one target character (column j) at a time
    for (int j = 1; j<=m; ++j) { // iterates through target
      int bestPossibleEditDistance = m;
      final char t_j = target.charAt(j-1); // jth character of t
      d[0] = j;
      for (int i=1; i<=n; ++i) { // iterates through text
        // minimum of cell to the left+1, to the top+1, diagonally left and up +(0|1)
        if (t_j != text.charAt(i-1)) {
          d[i] = Math.min(Math.min(d[i-1], p[i]), p[i-1]) + 1;
        } else {
          d[i] = Math.min(Math.min(d[i-1]+1, p[i]+1), p[i-1]);
        }
        bestPossibleEditDistance = Math.min(bestPossibleEditDistance, d[i]);
      }
      //After calculating row i, the best possible edit distance
      //can be found by found by finding the smallest value in a given column.
      //If the bestPossibleEditDistance is greater than the max distance, abort.
      if (j > maxDistance && bestPossibleEditDistance > maxDistance) { //equal is okay, but not greater
        //the closest the target can be to the text is just too far away.
        //this target is leaving the party early.
        return 0.0f;
      }
      // copy current distance counts to 'previous row' distance counts: swap p and d
      int _d[] = p;
      p = d;
      d = _d;
    }
    // our last action in the above loop was to switch d and p, so p now
    // actually has the most recent cost counts
    // this will return less than 0.0 when the edit distance is
    // greater than the number of characters in the shorter word.
    // but this was the formula that was previously used in FuzzyTermEnum,
    // so it has not been changed (even though minimumSimilarity must be
    // greater than 0.0)
    return 1.0f - ((float)p[n] / (float) (prefix.length() + Math.min(n, m)));
  }
  /**
   * The max Distance is the maximum Levenshtein distance for the text
   * compared to some other value that results in score that is
   * better than the minimum similarity.
   * @param m the length of the "other value"
   * @return the maximum levenshtein distance that we care about
   */
  private int calculateMaxDistance(int m) {
    return (int) ((1-minimumSimilarity) * (Math.min(text.length(), m) + prefix.length()));
  }
  /** {@inheritDoc} */
  @Override
  public void close() throws IOException {
    p = d = null;
    searchTerm = null;
    super.close(); //call super.close() and let the garbage collector do its work.
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/search/FuzzyTermEnum.java | Java | art | 10,887 |
package org.apache.lucene.search;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.Set;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermDocs;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.Explanation.IDFExplanation;
import org.apache.lucene.util.ToStringUtils;
/** A Query that matches documents containing a term.
This may be combined with other terms with a {@link BooleanQuery}.
*/
public class TermQuery extends Query {
private Term term;
  /** Weight implementation for {@link TermQuery}: computes and normalizes
   *  the query weight, creates scorers, and explains scores per document. */
  private class TermWeight extends Weight {
    private Similarity similarity;
    private float value;        // final per-match multiplier = queryWeight * idf
    private float idf;
    private float queryNorm;
    private float queryWeight;
    private IDFExplanation idfExp;
    public TermWeight(Searcher searcher)
      throws IOException {
      this.similarity = getSimilarity(searcher);
      // idf (and its explanation) are computed eagerly at construction.
      idfExp = similarity.idfExplain(term, searcher);
      idf = idfExp.getIdf();
    }
    @Override
    public String toString() { return "weight(" + TermQuery.this + ")"; }
    @Override
    public Query getQuery() { return TermQuery.this; }
    @Override
    public float getValue() { return value; }
    @Override
    public float sumOfSquaredWeights() {
      queryWeight = idf * getBoost(); // compute query weight
      return queryWeight * queryWeight; // square it
    }
    @Override
    public void normalize(float queryNorm) {
      this.queryNorm = queryNorm;
      queryWeight *= queryNorm; // normalize query weight
      value = queryWeight * idf; // idf for document
    }
    @Override
    public Scorer scorer(IndexReader reader, boolean scoreDocsInOrder, boolean topScorer) throws IOException {
      TermDocs termDocs = reader.termDocs(term);
      if (termDocs == null)
        return null; // no postings for this term in this reader
      return new TermScorer(this, termDocs, similarity, reader.norms(term.field()));
    }
    @Override
    public Explanation explain(IndexReader reader, int doc)
      throws IOException {
      ComplexExplanation result = new ComplexExplanation();
      result.setDescription("weight("+getQuery()+" in "+doc+"), product of:");
      Explanation expl = new Explanation(idf, idfExp.explain());
      // explain query weight
      Explanation queryExpl = new Explanation();
      queryExpl.setDescription("queryWeight(" + getQuery() + "), product of:");
      Explanation boostExpl = new Explanation(getBoost(), "boost");
      // only mention the boost when it differs from the 1.0 default
      if (getBoost() != 1.0f)
        queryExpl.addDetail(boostExpl);
      queryExpl.addDetail(expl);
      Explanation queryNormExpl = new Explanation(queryNorm,"queryNorm");
      queryExpl.addDetail(queryNormExpl);
      queryExpl.setValue(boostExpl.getValue() *
                         expl.getValue() *
                         queryNormExpl.getValue());
      result.addDetail(queryExpl);
      // explain field weight
      String field = term.field();
      ComplexExplanation fieldExpl = new ComplexExplanation();
      fieldExpl.setDescription("fieldWeight("+term+" in "+doc+
                               "), product of:");
      // look up the term frequency for this specific document
      Explanation tfExplanation = new Explanation();
      int tf = 0;
      TermDocs termDocs = reader.termDocs(term);
      if (termDocs != null) {
        try {
          if (termDocs.skipTo(doc) && termDocs.doc() == doc) {
            tf = termDocs.freq();
          }
        } finally {
          termDocs.close();
        }
        tfExplanation.setValue(similarity.tf(tf));
        tfExplanation.setDescription("tf(termFreq("+term+")="+tf+")");
      } else {
        tfExplanation.setValue(0.0f);
        tfExplanation.setDescription("no matching term");
      }
      fieldExpl.addDetail(tfExplanation);
      fieldExpl.addDetail(expl);
      Explanation fieldNormExpl = new Explanation();
      byte[] fieldNorms = reader.norms(field);
      float fieldNorm =
        fieldNorms!=null ? Similarity.decodeNorm(fieldNorms[doc]) : 1.0f;
      fieldNormExpl.setValue(fieldNorm);
      fieldNormExpl.setDescription("fieldNorm(field="+field+", doc="+doc+")");
      fieldExpl.addDetail(fieldNormExpl);
      fieldExpl.setMatch(Boolean.valueOf(tfExplanation.isMatch()));
      fieldExpl.setValue(tfExplanation.getValue() *
                         expl.getValue() *
                         fieldNormExpl.getValue());
      result.addDetail(fieldExpl);
      result.setMatch(fieldExpl.getMatch());
      // combine them
      result.setValue(queryExpl.getValue() * fieldExpl.getValue());
      // when the query part contributes a factor of exactly 1, the field
      // explanation alone is the whole story
      if (queryExpl.getValue() == 1.0f)
        return fieldExpl;
      return result;
    }
  }
/**
 * Constructs a query for the term <code>t</code>.
 *
 * @param t the term all matching documents must contain
 */
public TermQuery(Term t) {
  term = t;
}
/** Returns the term of this query. */
public Term getTerm() { return term; }
// Creates the per-search Weight; idf statistics are captured from the
// given searcher at construction time.
@Override
public Weight createWeight(Searcher searcher) throws IOException {
  return new TermWeight(searcher);
}
// A TermQuery contributes exactly its single term.
@Override
public void extractTerms(Set<Term> terms) {
  terms.add(getTerm());
}
/** Prints a user-readable version of this query. */
@Override
public String toString(String field) {
  final StringBuilder out = new StringBuilder();
  // Only qualify with the field name when printing outside our own field.
  if (!term.field().equals(field)) {
    out.append(term.field()).append(':');
  }
  return out.append(term.text())
            .append(ToStringUtils.boost(getBoost()))
            .toString();
}
/** Returns true iff <code>o</code> is equal to this. */
@Override
public boolean equals(Object o) {
  if (o instanceof TermQuery) {
    final TermQuery that = (TermQuery) o;
    // Equal when both the boost and the wrapped term agree.
    return this.getBoost() == that.getBoost()
        && this.term.equals(that.term);
  }
  return false;
}
/** Returns a hash code value for this object.*/
@Override
public int hashCode() {
  // XOR the boost's bit pattern with the term hash so equal queries
  // (same boost, same term) hash identically, per the equals() contract.
  return Float.floatToIntBits(getBoost()) ^ term.hashCode();
}
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/search/TermQuery.java | Java | art | 6,637 |
package org.apache.lucene.search;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.LinkedList;
import org.apache.lucene.analysis.NumericTokenStream; // for javadocs
import org.apache.lucene.document.NumericField; // for javadocs
import org.apache.lucene.util.NumericUtils;
import org.apache.lucene.util.ToStringUtils;
import org.apache.lucene.util.StringHelper;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermEnum;
/**
* <p>A {@link Query} that matches numeric values within a
* specified range. To use this, you must first index the
* numeric values using {@link NumericField} (expert: {@link
* NumericTokenStream}). If your terms are instead textual,
* you should use {@link TermRangeQuery}. {@link
* NumericRangeFilter} is the filter equivalent of this
* query.</p>
*
* <p>You create a new NumericRangeQuery with the static
* factory methods, eg:
*
* <pre>
* Query q = NumericRangeQuery.newFloatRange("weight", 0.3f, 0.10f, true, true);
* </pre>
*
* matches all documents whose float valued "weight" field
* ranges from 0.3 to 0.10, inclusive.
*
* <p>The performance of NumericRangeQuery is much better
* than the corresponding {@link TermRangeQuery} because the
* number of terms that must be searched is usually far
* fewer, thanks to trie indexing, described below.</p>
*
* <p>You can optionally specify a <a
* href="#precisionStepDesc"><code>precisionStep</code></a>
* when creating this query. This is necessary if you've
* changed this configuration from its default (4) during
* indexing. Lower values consume more disk space but speed
* up searching. Suitable values are between <b>1</b> and
* <b>8</b>. A good starting point to test is <b>4</b>,
* which is the default value for all <code>Numeric*</code>
* classes. See <a href="#precisionStepDesc">below</a> for
* details.
*
* <p>This query defaults to {@linkplain
* MultiTermQuery#CONSTANT_SCORE_AUTO_REWRITE_DEFAULT} for
* 32 bit (int/float) ranges with precisionStep ≤8 and 64
* bit (long/double) ranges with precisionStep ≤6.
* Otherwise it uses {@linkplain
* MultiTermQuery#CONSTANT_SCORE_FILTER_REWRITE} as the
* number of terms is likely to be high. With precision
* steps of ≤4, this query can be run with one of the
* BooleanQuery rewrite methods without changing
* BooleanQuery's default max clause count.
*
* <p><font color="red"><b>NOTE:</b> This API is experimental and
* might change in incompatible ways in the next release.</font>
*
* <br><h3>How it works</h3>
*
* <p>See the publication about <a target="_blank" href="http://www.panfmp.org">panFMP</a>,
* where this algorithm was described (referred to as <code>TrieRangeQuery</code>):
*
* <blockquote><strong>Schindler, U, Diepenbroek, M</strong>, 2008.
* <em>Generic XML-based Framework for Metadata Portals.</em>
* Computers & Geosciences 34 (12), 1947-1955.
* <a href="http://dx.doi.org/10.1016/j.cageo.2008.02.023"
* target="_blank">doi:10.1016/j.cageo.2008.02.023</a></blockquote>
*
* <p><em>A quote from this paper:</em> Because Apache Lucene is a full-text
* search engine and not a conventional database, it cannot handle numerical ranges
* (e.g., field value is inside user defined bounds, even dates are numerical values).
* We have developed an extension to Apache Lucene that stores
* the numerical values in a special string-encoded format with variable precision
* (all numerical values like doubles, longs, floats, and ints are converted to
* lexicographic sortable string representations and stored with different precisions
* (for a more detailed description of how the values are stored,
* see {@link NumericUtils}). A range is then divided recursively into multiple intervals for searching:
* The center of the range is searched only with the lowest possible precision in the <em>trie</em>,
* while the boundaries are matched more exactly. This reduces the number of terms dramatically.</p>
*
* <p>For the variant that stores long values in 8 different precisions (each reduced by 8 bits) that
* uses a lowest precision of 1 byte, the index contains only a maximum of 256 distinct values in the
* lowest precision. Overall, a range could consist of a theoretical maximum of
* <code>7*255*2 + 255 = 3825</code> distinct terms (when there is a term for every distinct value of an
* 8-byte-number in the index and the range covers almost all of them; a maximum of 255 distinct values is used
* because it would always be possible to reduce the full 256 values to one term with degraded precision).
* In practice, we have seen up to 300 terms in most cases (index with 500,000 metadata records
* and a uniform value distribution).</p>
*
* <a name="precisionStepDesc"><h3>Precision Step</h3>
* <p>You can choose any <code>precisionStep</code> when encoding values.
* Lower step values mean more precisions and so more terms in index (and index gets larger).
* On the other hand, the maximum number of terms to match is reduced, which optimizes query speed.
* The formula to calculate the maximum term count is:
* <pre>
* n = [ (bitsPerValue/precisionStep - 1) * (2^precisionStep - 1 ) * 2 ] + (2^precisionStep - 1 )
* </pre>
* <p><em>(this formula is only correct, when <code>bitsPerValue/precisionStep</code> is an integer;
* in other cases, the value must be rounded up and the last summand must contain the modulo of the division as
* precision step)</em>.
* For longs stored using a precision step of 4, <code>n = 15*15*2 + 15 = 465</code>, and for a precision
* step of 2, <code>n = 31*3*2 + 3 = 189</code>. But the faster search speed is reduced by more seeking
* in the term enum of the index. Because of this, the ideal <code>precisionStep</code> value can only
* be found out by testing. <b>Important:</b> You can index with a lower precision step value and test search speed
* using a multiple of the original step value.</p>
*
* <p>Good values for <code>precisionStep</code> depend on usage and data type:
* <ul>
* <li>The default for all data types is <b>4</b>, which is used, when no <code>precisionStep</code> is given.
* <li>Ideal value in most cases for <em>64 bit</em> data types <em>(long, double)</em> is <b>6</b> or <b>8</b>.
* <li>Ideal value in most cases for <em>32 bit</em> data types <em>(int, float)</em> is <b>4</b>.
* <li>For low cardinality fields larger precision steps are good. If the cardinality is < 100, it is
* fair to use {@link Integer#MAX_VALUE} (see below).
* <li>Steps <b>≥64</b> for <em>long/double</em> and <b>≥32</b> for <em>int/float</em> produces one token
* per value in the index and querying is as slow as a conventional {@link TermRangeQuery}. But it can be used
* to produce fields, that are solely used for sorting (in this case simply use {@link Integer#MAX_VALUE} as
* <code>precisionStep</code>). Using {@link NumericField NumericFields} for sorting
* is ideal, because building the field cache is much faster than with text-only numbers.
* These fields have one term per value and therefore also work with term enumeration for building distinct lists
* (e.g. facets / preselected values to search for).
* Sorting is also possible with range query optimized fields using one of the above <code>precisionSteps</code>.
* </ul>
*
* <p>Comparisons of the different types of RangeQueries on an index with about 500,000 docs showed
* that {@link TermRangeQuery} in boolean rewrite mode (with raised {@link BooleanQuery} clause count)
* took about 30-40 secs to complete, {@link TermRangeQuery} in constant score filter rewrite mode took 5 secs
* and executing this class took <100ms to complete (on an Opteron64 machine, Java 1.5, 8 bit
* precision step). This query type was developed for a geographic portal, where the performance for
* e.g. bounding boxes or exact date/time stamps is important.</p>
*
* @since 2.9
**/
public final class NumericRangeQuery<T extends Number> extends MultiTermQuery {

  /**
   * Internal constructor shared by every factory method.
   *
   * @param field         indexed field name (interned for fast identity compares)
   * @param precisionStep trie precision step used at index time; must be >= 1
   * @param valSize       bit width of the encoded values, 32 (int/float) or 64 (long/double)
   * @param min           lower bound, or null for an open lower end
   * @param max           upper bound, or null for an open upper end
   * @param minInclusive  whether the lower bound itself matches
   * @param maxInclusive  whether the upper bound itself matches
   */
  private NumericRangeQuery(final String field, final int precisionStep, final int valSize,
    T min, T max, final boolean minInclusive, final boolean maxInclusive
  ) {
    assert (valSize == 32 || valSize == 64);
    if (precisionStep < 1)
      throw new IllegalArgumentException("precisionStep must be >=1");
    this.field = StringHelper.intern(field);
    this.precisionStep = precisionStep;
    this.valSize = valSize;
    this.min = min;
    this.max = max;
    this.minInclusive = minInclusive;
    this.maxInclusive = maxInclusive;

    // For bigger precisionSteps this query likely
    // hits too many terms, so set to CONSTANT_SCORE_FILTER right off
    // (especially as the FilteredTermEnum is costly if wasted only for AUTO tests because it
    // creates new enums from IndexReader for each sub-range)
    switch (valSize) {
      case 64:
        setRewriteMethod( (precisionStep > 6) ?
          CONSTANT_SCORE_FILTER_REWRITE :
          CONSTANT_SCORE_AUTO_REWRITE_DEFAULT
        );
        break;
      case 32:
        setRewriteMethod( (precisionStep > 8) ?
          CONSTANT_SCORE_FILTER_REWRITE :
          CONSTANT_SCORE_AUTO_REWRITE_DEFAULT
        );
        break;
      default:
        // should never happen
        throw new IllegalArgumentException("valSize must be 32 or 64");
    }

    // shortcut if upper bound == lower bound
    if (min != null && min.equals(max)) {
      setRewriteMethod(CONSTANT_SCORE_BOOLEAN_QUERY_REWRITE);
    }
  }

  /**
   * Factory that creates a <code>NumericRangeQuery</code>, that queries a <code>long</code>
   * range using the given <a href="#precisionStepDesc"><code>precisionStep</code></a>.
   * You can have half-open ranges (which are in fact &lt;/&le; or &gt;/&ge; queries)
   * by setting the min or max value to <code>null</code>. By setting inclusive to false, it will
   * match all documents excluding the bounds, with inclusive on, the boundaries are hits, too.
   */
  public static NumericRangeQuery<Long> newLongRange(final String field, final int precisionStep,
    Long min, Long max, final boolean minInclusive, final boolean maxInclusive
  ) {
    return new NumericRangeQuery<Long>(field, precisionStep, 64, min, max, minInclusive, maxInclusive);
  }

  /**
   * Factory that creates a <code>NumericRangeQuery</code>, that queries a <code>long</code>
   * range using the default <code>precisionStep</code> {@link NumericUtils#PRECISION_STEP_DEFAULT} (4).
   * You can have half-open ranges (which are in fact &lt;/&le; or &gt;/&ge; queries)
   * by setting the min or max value to <code>null</code>. By setting inclusive to false, it will
   * match all documents excluding the bounds, with inclusive on, the boundaries are hits, too.
   */
  public static NumericRangeQuery<Long> newLongRange(final String field,
    Long min, Long max, final boolean minInclusive, final boolean maxInclusive
  ) {
    return new NumericRangeQuery<Long>(field, NumericUtils.PRECISION_STEP_DEFAULT, 64, min, max, minInclusive, maxInclusive);
  }

  /**
   * Factory that creates a <code>NumericRangeQuery</code>, that queries a <code>int</code>
   * range using the given <a href="#precisionStepDesc"><code>precisionStep</code></a>.
   * You can have half-open ranges (which are in fact &lt;/&le; or &gt;/&ge; queries)
   * by setting the min or max value to <code>null</code>. By setting inclusive to false, it will
   * match all documents excluding the bounds, with inclusive on, the boundaries are hits, too.
   */
  public static NumericRangeQuery<Integer> newIntRange(final String field, final int precisionStep,
    Integer min, Integer max, final boolean minInclusive, final boolean maxInclusive
  ) {
    return new NumericRangeQuery<Integer>(field, precisionStep, 32, min, max, minInclusive, maxInclusive);
  }

  /**
   * Factory that creates a <code>NumericRangeQuery</code>, that queries a <code>int</code>
   * range using the default <code>precisionStep</code> {@link NumericUtils#PRECISION_STEP_DEFAULT} (4).
   * You can have half-open ranges (which are in fact &lt;/&le; or &gt;/&ge; queries)
   * by setting the min or max value to <code>null</code>. By setting inclusive to false, it will
   * match all documents excluding the bounds, with inclusive on, the boundaries are hits, too.
   */
  public static NumericRangeQuery<Integer> newIntRange(final String field,
    Integer min, Integer max, final boolean minInclusive, final boolean maxInclusive
  ) {
    return new NumericRangeQuery<Integer>(field, NumericUtils.PRECISION_STEP_DEFAULT, 32, min, max, minInclusive, maxInclusive);
  }

  /**
   * Factory that creates a <code>NumericRangeQuery</code>, that queries a <code>double</code>
   * range using the given <a href="#precisionStepDesc"><code>precisionStep</code></a>.
   * You can have half-open ranges (which are in fact &lt;/&le; or &gt;/&ge; queries)
   * by setting the min or max value to <code>null</code>. By setting inclusive to false, it will
   * match all documents excluding the bounds, with inclusive on, the boundaries are hits, too.
   */
  public static NumericRangeQuery<Double> newDoubleRange(final String field, final int precisionStep,
    Double min, Double max, final boolean minInclusive, final boolean maxInclusive
  ) {
    return new NumericRangeQuery<Double>(field, precisionStep, 64, min, max, minInclusive, maxInclusive);
  }

  /**
   * Factory that creates a <code>NumericRangeQuery</code>, that queries a <code>double</code>
   * range using the default <code>precisionStep</code> {@link NumericUtils#PRECISION_STEP_DEFAULT} (4).
   * You can have half-open ranges (which are in fact &lt;/&le; or &gt;/&ge; queries)
   * by setting the min or max value to <code>null</code>. By setting inclusive to false, it will
   * match all documents excluding the bounds, with inclusive on, the boundaries are hits, too.
   */
  public static NumericRangeQuery<Double> newDoubleRange(final String field,
    Double min, Double max, final boolean minInclusive, final boolean maxInclusive
  ) {
    return new NumericRangeQuery<Double>(field, NumericUtils.PRECISION_STEP_DEFAULT, 64, min, max, minInclusive, maxInclusive);
  }

  /**
   * Factory that creates a <code>NumericRangeQuery</code>, that queries a <code>float</code>
   * range using the given <a href="#precisionStepDesc"><code>precisionStep</code></a>.
   * You can have half-open ranges (which are in fact &lt;/&le; or &gt;/&ge; queries)
   * by setting the min or max value to <code>null</code>. By setting inclusive to false, it will
   * match all documents excluding the bounds, with inclusive on, the boundaries are hits, too.
   */
  public static NumericRangeQuery<Float> newFloatRange(final String field, final int precisionStep,
    Float min, Float max, final boolean minInclusive, final boolean maxInclusive
  ) {
    return new NumericRangeQuery<Float>(field, precisionStep, 32, min, max, minInclusive, maxInclusive);
  }

  /**
   * Factory that creates a <code>NumericRangeQuery</code>, that queries a <code>float</code>
   * range using the default <code>precisionStep</code> {@link NumericUtils#PRECISION_STEP_DEFAULT} (4).
   * You can have half-open ranges (which are in fact &lt;/&le; or &gt;/&ge; queries)
   * by setting the min or max value to <code>null</code>. By setting inclusive to false, it will
   * match all documents excluding the bounds, with inclusive on, the boundaries are hits, too.
   */
  public static NumericRangeQuery<Float> newFloatRange(final String field,
    Float min, Float max, final boolean minInclusive, final boolean maxInclusive
  ) {
    return new NumericRangeQuery<Float>(field, NumericUtils.PRECISION_STEP_DEFAULT, 32, min, max, minInclusive, maxInclusive);
  }

  // Enumerates the sub-range terms that cover [min, max] in the trie index.
  @Override
  protected FilteredTermEnum getEnum(final IndexReader reader) throws IOException {
    return new NumericRangeTermEnum(reader);
  }

  /** Returns the field name for this query */
  public String getField() { return field; }

  /** Returns <code>true</code> if the lower endpoint is inclusive */
  public boolean includesMin() { return minInclusive; }

  /** Returns <code>true</code> if the upper endpoint is inclusive */
  public boolean includesMax() { return maxInclusive; }

  /** Returns the lower value of this range query */
  public T getMin() { return min; }

  /** Returns the upper value of this range query */
  public T getMax() { return max; }

  // Renders e.g. "field:[1 TO 5}" with '*' standing in for open bounds.
  @Override
  public String toString(final String field) {
    final StringBuilder sb = new StringBuilder();
    if (!this.field.equals(field)) sb.append(this.field).append(':');
    return sb.append(minInclusive ? '[' : '{')
      .append((min == null) ? "*" : min.toString())
      .append(" TO ")
      .append((max == null) ? "*" : max.toString())
      .append(maxInclusive ? ']' : '}')
      .append(ToStringUtils.boost(getBoost()))
      .toString();
  }

  @Override
  public final boolean equals(final Object o) {
    if (o==this) return true;
    if (!super.equals(o))
      return false;
    if (o instanceof NumericRangeQuery) {
      final NumericRangeQuery q=(NumericRangeQuery)o;
      return (
        // field identity compare is valid because field is always interned
        field==q.field &&
        (q.min == null ? min == null : q.min.equals(min)) &&
        (q.max == null ? max == null : q.max.equals(max)) &&
        minInclusive == q.minInclusive &&
        maxInclusive == q.maxInclusive &&
        precisionStep == q.precisionStep
      );
    }
    return false;
  }

  @Override
  public final int hashCode() {
    int hash = super.hashCode();
    // NOTE(review): '+' binds tighter than '^' in Java, so the first line
    // evaluates as field.hashCode() ^ (0x4565fd66 + precisionStep) ^ 0x64365465,
    // not two independently masked terms. Still a valid, equals-consistent
    // hash; flagged in case the masks were meant to apply separately.
    hash += field.hashCode()^0x4565fd66 + precisionStep^0x64365465;
    if (min != null) hash += min.hashCode()^0x14fa55fb;
    if (max != null) hash += max.hashCode()^0x733fa5fe;
    return hash +
      (Boolean.valueOf(minInclusive).hashCode()^0x14fa55fb)+
      (Boolean.valueOf(maxInclusive).hashCode()^0x733fa5fe);
  }

  // field must be interned after reading from stream
  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
    in.defaultReadObject();
    field = StringHelper.intern(field);
  }

  // members (package private, to be also fast accessible by NumericRangeTermEnum)
  String field;                            // interned field name
  final int precisionStep, valSize;        // trie step and value bit width (32/64)
  final T min, max;                        // bounds; null means open-ended
  final boolean minInclusive,maxInclusive; // whether each bound itself matches

  /**
   * Subclass of FilteredTermEnum for enumerating all terms that match the
   * sub-ranges for trie range queries.
   * <p>
   * WARNING: This term enumeration is not guaranteed to be always ordered by
   * {@link Term#compareTo}.
   * The ordering depends on how {@link NumericUtils#splitLongRange} and
   * {@link NumericUtils#splitIntRange} generates the sub-ranges. For
   * {@link MultiTermQuery} ordering is not relevant.
   */
  private final class NumericRangeTermEnum extends FilteredTermEnum {

    private final IndexReader reader;
    // Flat list of (lower, upper) prefix-coded bound pairs, consumed two at a time.
    private final LinkedList<String> rangeBounds = new LinkedList<String>();
    private final Term termTemplate = new Term(field);
    private String currentUpperBound = null;

    /**
     * Splits the numeric range into trie sub-ranges (after adjusting
     * exclusive bounds inward by one) and positions on the first term.
     */
    NumericRangeTermEnum(final IndexReader reader) throws IOException {
      this.reader = reader;
      switch (valSize) {
        case 64: {
          // lower
          long minBound = Long.MIN_VALUE;
          if (min instanceof Long) {
            minBound = min.longValue();
          } else if (min instanceof Double) {
            minBound = NumericUtils.doubleToSortableLong(min.doubleValue());
          }
          if (!minInclusive && min != null) {
            // exclusive bound at Long.MAX_VALUE leaves an empty range
            if (minBound == Long.MAX_VALUE) break;
            minBound++;
          }
          // upper
          long maxBound = Long.MAX_VALUE;
          if (max instanceof Long) {
            maxBound = max.longValue();
          } else if (max instanceof Double) {
            maxBound = NumericUtils.doubleToSortableLong(max.doubleValue());
          }
          if (!maxInclusive && max != null) {
            // exclusive bound at Long.MIN_VALUE leaves an empty range
            if (maxBound == Long.MIN_VALUE) break;
            maxBound--;
          }
          NumericUtils.splitLongRange(new NumericUtils.LongRangeBuilder() {
            @Override
            public final void addRange(String minPrefixCoded, String maxPrefixCoded) {
              rangeBounds.add(minPrefixCoded);
              rangeBounds.add(maxPrefixCoded);
            }
          }, precisionStep, minBound, maxBound);
          break;
        }

        case 32: {
          // lower
          int minBound = Integer.MIN_VALUE;
          if (min instanceof Integer) {
            minBound = min.intValue();
          } else if (min instanceof Float) {
            minBound = NumericUtils.floatToSortableInt(min.floatValue());
          }
          if (!minInclusive && min != null) {
            if (minBound == Integer.MAX_VALUE) break;
            minBound++;
          }
          // upper
          int maxBound = Integer.MAX_VALUE;
          if (max instanceof Integer) {
            maxBound = max.intValue();
          } else if (max instanceof Float) {
            maxBound = NumericUtils.floatToSortableInt(max.floatValue());
          }
          if (!maxInclusive && max != null) {
            if (maxBound == Integer.MIN_VALUE) break;
            maxBound--;
          }
          NumericUtils.splitIntRange(new NumericUtils.IntRangeBuilder() {
            @Override
            public final void addRange(String minPrefixCoded, String maxPrefixCoded) {
              rangeBounds.add(minPrefixCoded);
              rangeBounds.add(maxPrefixCoded);
            }
          }, precisionStep, minBound, maxBound);
          break;
        }

        default:
          // should never happen
          throw new IllegalArgumentException("valSize must be 32 or 64");
      }

      // seek to first term
      next();
    }

    @Override
    public float difference() {
      return 1.0f;
    }

    /** this is a dummy, it is not used by this class. */
    @Override
    protected boolean endEnum() {
      throw new UnsupportedOperationException("not implemented");
    }

    /** this is a dummy, it is not used by this class. */
    @Override
    protected void setEnum(TermEnum tenum) {
      throw new UnsupportedOperationException("not implemented");
    }

    /**
     * Compares if current upper bound is reached,
     * this also updates the term count for statistics.
     * In contrast to {@link FilteredTermEnum}, a return value
     * of <code>false</code> ends iterating the current enum
     * and forwards to the next sub-range.
     */
    @Override
    protected boolean termCompare(Term term) {
      // field identity compare is safe: both sides are interned
      return (term.field() == field && term.text().compareTo(currentUpperBound) <= 0);
    }

    /** Increments the enumeration to the next element. True if one exists. */
    @Override
    public boolean next() throws IOException {
      // if a current term exists, the actual enum is initialized:
      // try change to next term, if no such term exists, fall-through
      if (currentTerm != null) {
        assert actualEnum != null;
        if (actualEnum.next()) {
          currentTerm = actualEnum.term();
          if (termCompare(currentTerm))
            return true;
        }
      }
      // if all above fails, we go forward to the next enum,
      // if one is available
      currentTerm = null;
      while (rangeBounds.size() >= 2) {
        assert rangeBounds.size() % 2 == 0;
        // close the current enum and read next bounds
        if (actualEnum != null) {
          actualEnum.close();
          actualEnum = null;
        }
        final String lowerBound = rangeBounds.removeFirst();
        this.currentUpperBound = rangeBounds.removeFirst();
        // create a new enum
        actualEnum = reader.terms(termTemplate.createTerm(lowerBound));
        currentTerm = actualEnum.term();
        if (currentTerm != null && termCompare(currentTerm))
          return true;
        // clear the current term for next iteration
        currentTerm = null;
      }

      // no more sub-range enums available
      assert rangeBounds.size() == 0 && currentTerm == null;
      return false;
    }

    /** Closes the enumeration to further activity, freeing resources. */
    @Override
    public void close() throws IOException {
      rangeBounds.clear();
      currentUpperBound = null;
      super.close();
    }
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/search/NumericRangeQuery.java | Java | art | 25,210 |
package org.apache.lucene.search;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermDocs;
import org.apache.lucene.util.ToStringUtils;
import java.util.Set;
import java.io.IOException;
/**
* A query that matches all documents.
*
*/
public class MatchAllDocsQuery extends Query {

  /** Field whose norms provide per-document boosts; null means constant scores. */
  private final String normsField;

  public MatchAllDocsQuery() {
    this(null);
  }

  /**
   * @param normsField Field used for normalization factor (document boost). Null if nothing.
   */
  public MatchAllDocsQuery(String normsField) {
    this.normsField = normsField;
  }

  /** Scorer that walks every non-deleted document via termDocs(null). */
  private class MatchAllScorer extends Scorer {
    final TermDocs termDocs;   // enumerates all non-deleted docs
    final float score;         // constant value taken from the weight
    final byte[] norms;        // optional per-doc boost bytes, may be null
    private int doc = -1;

    MatchAllScorer(IndexReader reader, Similarity similarity, Weight w,
        byte[] norms) throws IOException {
      super(similarity);
      this.termDocs = reader.termDocs(null);
      this.score = w.getValue();
      this.norms = norms;
    }

    @Override
    public int docID() {
      return doc;
    }

    @Override
    public int nextDoc() throws IOException {
      if (termDocs.next()) {
        doc = termDocs.doc();
      } else {
        doc = NO_MORE_DOCS;
      }
      return doc;
    }

    @Override
    public float score() {
      if (norms == null) {
        return score;
      }
      // fold in the document boost decoded from the norms array
      return score * Similarity.decodeNorm(norms[docID()]);
    }

    @Override
    public int advance(int target) throws IOException {
      if (termDocs.skipTo(target)) {
        doc = termDocs.doc();
      } else {
        doc = NO_MORE_DOCS;
      }
      return doc;
    }
  }

  private class MatchAllDocsWeight extends Weight {
    private Similarity similarity;
    private float queryWeight;
    private float queryNorm;

    public MatchAllDocsWeight(Searcher searcher) {
      this.similarity = searcher.getSimilarity();
    }

    @Override
    public String toString() {
      return "weight(" + MatchAllDocsQuery.this + ")";
    }

    @Override
    public Query getQuery() {
      return MatchAllDocsQuery.this;
    }

    @Override
    public float getValue() {
      return queryWeight;
    }

    @Override
    public float sumOfSquaredWeights() {
      // raw weight is just the boost; no idf for a match-all query
      queryWeight = getBoost();
      return queryWeight * queryWeight;
    }

    @Override
    public void normalize(float queryNorm) {
      this.queryNorm = queryNorm;
      queryWeight *= this.queryNorm;
    }

    @Override
    public Scorer scorer(IndexReader reader, boolean scoreDocsInOrder, boolean topScorer) throws IOException {
      final byte[] norms = (normsField == null) ? null : reader.norms(normsField);
      return new MatchAllScorer(reader, similarity, this, norms);
    }

    @Override
    public Explanation explain(IndexReader reader, int doc) {
      // explain query weight
      Explanation queryExpl = new ComplexExplanation
        (true, getValue(), "MatchAllDocsQuery, product of:");
      if (getBoost() != 1.0f) {
        queryExpl.addDetail(new Explanation(getBoost(),"boost"));
      }
      queryExpl.addDetail(new Explanation(queryNorm,"queryNorm"));
      return queryExpl;
    }
  }

  @Override
  public Weight createWeight(Searcher searcher) {
    return new MatchAllDocsWeight(searcher);
  }

  @Override
  public void extractTerms(Set<Term> terms) {
    // matches every document; contributes no terms
  }

  @Override
  public String toString(String field) {
    return "*:*" + ToStringUtils.boost(getBoost());
  }

  @Override
  public boolean equals(Object o) {
    return (o instanceof MatchAllDocsQuery)
        && this.getBoost() == ((MatchAllDocsQuery) o).getBoost();
  }

  @Override
  public int hashCode() {
    return Float.floatToIntBits(getBoost()) ^ 0x1AA71190;
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/search/MatchAllDocsQuery.java | Java | art | 4,522 |
package org.apache.lucene.search;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import org.apache.lucene.index.IndexReader;
/**
* Constrains search results to only match those which also match a provided
* query.
*
* <p> This could be used, for example, with a {@link TermRangeQuery} on a suitably
* formatted date field to implement date filtering. One could re-use a single
* QueryFilter that matches, e.g., only documents modified within the last
* week. The QueryFilter and TermRangeQuery would only need to be reconstructed
* once per day.
*
* @version $Id:$
*/
public class QueryWrapperFilter extends Filter {

  private Query query;

  /** Constructs a filter which only matches documents matching
   * <code>query</code>.
   */
  public QueryWrapperFilter(Query query) {
    this.query = query;
  }

  @Override
  public DocIdSet getDocIdSet(final IndexReader reader) throws IOException {
    // Weigh the query once per reader; each iterator() call then produces
    // a fresh scorer over that weight.
    final Weight weight = query.weight(new IndexSearcher(reader));
    return new DocIdSet() {
      @Override
      public DocIdSetIterator iterator() throws IOException {
        return weight.scorer(reader, true, false);
      }
      @Override
      public boolean isCacheable() {
        // backed by a live scorer tied to this reader, so never cacheable
        return false;
      }
    };
  }

  @Override
  public String toString() {
    return "QueryWrapperFilter(" + query + ")";
  }

  @Override
  public boolean equals(Object o) {
    if (o instanceof QueryWrapperFilter) {
      return this.query.equals(((QueryWrapperFilter) o).query);
    }
    return false;
  }

  @Override
  public int hashCode() {
    return query.hashCode() ^ 0x923F64B9;
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/search/QueryWrapperFilter.java | Java | art | 2,373 |
package org.apache.lucene.search;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
/** A Scorer for queries with a required part and an optional part.
 * Delays skipTo() on the optional part until a score() is needed.
 * <br>
 * This <code>Scorer</code> implements {@link Scorer#skipTo(int)}.
 */
class ReqOptSumScorer extends Scorer {
  /** The required scorer passed from the constructor; it alone drives
   * matching (nextDoc/advance/docID all delegate to it). */
  private final Scorer reqScorer;
  /** The optional scorer passed from the constructor; used for scoring only.
   * Set to null as soon as its advance() is exhausted, after which only the
   * required score is returned. */
  private Scorer optScorer;

  /** Construct a <code>ReqOptScorer</code>.
   * @param reqScorer The required scorer. This must match.
   * @param optScorer The optional scorer. This is used for scoring only.
   */
  public ReqOptSumScorer(
      Scorer reqScorer,
      Scorer optScorer)
  {
    super(null); // No similarity used.
    this.reqScorer = reqScorer;
    this.optScorer = optScorer;
  }

  @Override
  public int nextDoc() throws IOException {
    return reqScorer.nextDoc();
  }

  @Override
  public int advance(int target) throws IOException {
    return reqScorer.advance(target);
  }

  @Override
  public int docID() {
    return reqScorer.docID();
  }

  /** Returns the score of the current document matching the query.
   * Initially invalid, until {@link #next()} is called the first time.
   * @return The score of the required scorer, eventually increased by the score
   * of the optional scorer when it also matches the current document.
   */
  @Override
  public float score() throws IOException {
    int curDoc = reqScorer.docID();
    float reqScore = reqScorer.score();
    if (optScorer == null) {
      // Optional part previously exhausted; only the required score remains.
      return reqScore;
    }

    int optScorerDoc = optScorer.docID();
    // Lazily advance the optional scorer up to the current required doc;
    // drop it entirely once it runs out of documents.
    if (optScorerDoc < curDoc && (optScorerDoc = optScorer.advance(curDoc)) == NO_MORE_DOCS) {
      optScorer = null;
      return reqScore;
    }

    return optScorerDoc == curDoc ? reqScore + optScorer.score() : reqScore;
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/search/ReqOptSumScorer.java | Java | art | 2,731 |
package org.apache.lucene.search;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.text.Collator;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.util.ToStringUtils;
/**
 * A Query that matches documents within a range of terms.
 *
 * <p>This query matches the documents looking for terms that fall into the
 * supplied range according to {@link
 * String#compareTo(String)}, unless a <code>Collator</code> is provided. It is not intended
 * for numerical ranges; use {@link NumericRangeQuery} instead.
 *
 * <p>This query uses the {@link
 * MultiTermQuery#CONSTANT_SCORE_AUTO_REWRITE_DEFAULT}
 * rewrite method.
 * @since 2.9
 */
public class TermRangeQuery extends MultiTermQuery {
  // All state is assigned exactly once in the constructor; final guards
  // against accidental mutation after the query is handed to a searcher.
  private final String lowerTerm;
  private final String upperTerm;
  private final Collator collator;
  private final String field;
  private final boolean includeLower;
  private final boolean includeUpper;

  /**
   * Constructs a query selecting all terms greater/equal than <code>lowerTerm</code>
   * but less/equal than <code>upperTerm</code>.
   *
   * <p>
   * If an endpoint is null, it is said
   * to be "open". Either or both endpoints may be open. Open endpoints may not
   * be exclusive (you can't select all but the first or last term without
   * explicitly specifying the term to exclude.)
   *
   * @param field The field that holds both lower and upper terms.
   * @param lowerTerm
   *          The term text at the lower end of the range
   * @param upperTerm
   *          The term text at the upper end of the range
   * @param includeLower
   *          If true, the <code>lowerTerm</code> is
   *          included in the range.
   * @param includeUpper
   *          If true, the <code>upperTerm</code> is
   *          included in the range.
   */
  public TermRangeQuery(String field, String lowerTerm, String upperTerm, boolean includeLower, boolean includeUpper) {
    this(field, lowerTerm, upperTerm, includeLower, includeUpper, null);
  }

  /** Constructs a query selecting all terms greater/equal than
   * <code>lowerTerm</code> but less/equal than <code>upperTerm</code>.
   * <p>
   * If an endpoint is null, it is said
   * to be "open". Either or both endpoints may be open. Open endpoints may not
   * be exclusive (you can't select all but the first or last term without
   * explicitly specifying the term to exclude.)
   * <p>
   * If <code>collator</code> is not null, it will be used to decide whether
   * index terms are within the given range, rather than using the Unicode code
   * point order in which index terms are stored.
   * <p>
   * <strong>WARNING:</strong> Using this constructor and supplying a non-null
   * value in the <code>collator</code> parameter will cause every single
   * index Term in the Field referenced by lowerTerm and/or upperTerm to be
   * examined. Depending on the number of index Terms in this Field, the
   * operation could be very slow.
   *
   * @param lowerTerm The Term text at the lower end of the range
   * @param upperTerm The Term text at the upper end of the range
   * @param includeLower
   *          If true, the <code>lowerTerm</code> is
   *          included in the range.
   * @param includeUpper
   *          If true, the <code>upperTerm</code> is
   *          included in the range.
   * @param collator The collator to use to collate index Terms, to determine
   *  their membership in the range bounded by <code>lowerTerm</code> and
   *  <code>upperTerm</code>.
   */
  public TermRangeQuery(String field, String lowerTerm, String upperTerm, boolean includeLower, boolean includeUpper,
                        Collator collator) {
    this.field = field;
    this.lowerTerm = lowerTerm;
    this.upperTerm = upperTerm;
    this.includeLower = includeLower;
    this.includeUpper = includeUpper;
    this.collator = collator;
  }

  /** Returns the field name for this query */
  public String getField() { return field; }

  /** Returns the lower value of this range query */
  public String getLowerTerm() { return lowerTerm; }

  /** Returns the upper value of this range query */
  public String getUpperTerm() { return upperTerm; }

  /** Returns <code>true</code> if the lower endpoint is inclusive */
  public boolean includesLower() { return includeLower; }

  /** Returns <code>true</code> if the upper endpoint is inclusive */
  public boolean includesUpper() { return includeUpper; }

  /** Returns the collator used to determine range inclusion, if any. */
  public Collator getCollator() { return collator; }

  @Override
  protected FilteredTermEnum getEnum(IndexReader reader) throws IOException {
    return new TermRangeTermEnum(reader, field, lowerTerm,
        upperTerm, includeLower, includeUpper, collator);
  }

  /** Prints a user-readable version of this query. */
  @Override
  public String toString(String field) {
    StringBuilder buffer = new StringBuilder();
    // Only prefix the field name when it differs from the default field.
    if (!getField().equals(field)) {
      buffer.append(getField());
      buffer.append(":");
    }
    buffer.append(includeLower ? '[' : '{');
    buffer.append(lowerTerm != null ? lowerTerm : "*");
    buffer.append(" TO ");
    buffer.append(upperTerm != null ? upperTerm : "*");
    buffer.append(includeUpper ? ']' : '}');
    buffer.append(ToStringUtils.boost(getBoost()));
    return buffer.toString();
  }

  @Override
  public int hashCode() {
    final int prime = 31;
    int result = super.hashCode();
    result = prime * result + ((collator == null) ? 0 : collator.hashCode());
    result = prime * result + ((field == null) ? 0 : field.hashCode());
    result = prime * result + (includeLower ? 1231 : 1237);
    result = prime * result + (includeUpper ? 1231 : 1237);
    result = prime * result + ((lowerTerm == null) ? 0 : lowerTerm.hashCode());
    result = prime * result + ((upperTerm == null) ? 0 : upperTerm.hashCode());
    return result;
  }

  @Override
  public boolean equals(Object obj) {
    if (this == obj)
      return true;
    if (!super.equals(obj))
      return false;
    if (getClass() != obj.getClass())
      return false;
    TermRangeQuery other = (TermRangeQuery) obj;
    if (collator == null) {
      if (other.collator != null)
        return false;
    } else if (!collator.equals(other.collator))
      return false;
    if (field == null) {
      if (other.field != null)
        return false;
    } else if (!field.equals(other.field))
      return false;
    if (includeLower != other.includeLower)
      return false;
    if (includeUpper != other.includeUpper)
      return false;
    if (lowerTerm == null) {
      if (other.lowerTerm != null)
        return false;
    } else if (!lowerTerm.equals(other.lowerTerm))
      return false;
    if (upperTerm == null) {
      if (other.upperTerm != null)
        return false;
    } else if (!upperTerm.equals(other.upperTerm))
      return false;
    return true;
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/search/TermRangeQuery.java | Java | art | 7,695 |
package org.apache.lucene.search;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermDocs;
import org.apache.lucene.index.TermEnum;
import org.apache.lucene.util.OpenBitSet;
/**
 * A wrapper for {@link MultiTermQuery}, that exposes its
 * functionality as a {@link Filter}.
 * <P>
 * <code>MultiTermQueryWrapperFilter</code> is not designed to
 * be used by itself. Normally you subclass it to provide a Filter
 * counterpart for a {@link MultiTermQuery} subclass.
 * <P>
 * For example, {@link TermRangeFilter} and {@link PrefixFilter} extend
 * <code>MultiTermQueryWrapperFilter</code>.
 * This class also provides the functionality behind
 * {@link MultiTermQuery#CONSTANT_SCORE_FILTER_REWRITE};
 * this is why it is not abstract.
 */
public class MultiTermQueryWrapperFilter<Q extends MultiTermQuery> extends Filter {

  /** The wrapped query; assigned once and never modified. */
  protected final Q query;

  /**
   * Wrap a {@link MultiTermQuery} as a Filter.
   */
  protected MultiTermQueryWrapperFilter(Q query) {
    this.query = query;
  }

  @Override
  public String toString() {
    // query.toString should be ok for the filter, too, if the query boost is 1.0f
    return query.toString();
  }

  @Override
  public final boolean equals(final Object o) {
    if (o==this) return true;
    if (o==null) return false;
    if (this.getClass().equals(o.getClass())) {
      // Wildcard cast instead of a raw type avoids an unchecked warning;
      // the type parameter is irrelevant for query equality.
      return this.query.equals( ((MultiTermQueryWrapperFilter<?>)o).query );
    }
    return false;
  }

  @Override
  public final int hashCode() {
    return query.hashCode();
  }

  /**
   * Expert: Return the number of unique terms visited during execution of the filter.
   * If there are many of them, you may consider using another filter type
   * or optimize your total term count in index.
   * <p>This method is not thread safe, be sure to only call it when no filter is running!
   * If you re-use the same filter instance for another
   * search, be sure to first reset the term counter
   * with {@link #clearTotalNumberOfTerms}.
   * @see #clearTotalNumberOfTerms
   */
  public int getTotalNumberOfTerms() {
    return query.getTotalNumberOfTerms();
  }

  /**
   * Expert: Resets the counting of unique terms.
   * Do this before executing the filter.
   * @see #getTotalNumberOfTerms
   */
  public void clearTotalNumberOfTerms() {
    query.clearTotalNumberOfTerms();
  }

  /**
   * Returns a DocIdSet with documents that should be
   * permitted in search results.
   */
  @Override
  public DocIdSet getDocIdSet(IndexReader reader) throws IOException {
    final TermEnum enumerator = query.getEnum(reader);
    try {
      // if current term in enum is null, the enum is empty -> shortcut
      if (enumerator.term() == null)
        return DocIdSet.EMPTY_DOCIDSET;
      // else fill into a OpenBitSet
      final OpenBitSet bitSet = new OpenBitSet(reader.maxDoc());
      // Scratch buffers for bulk-reading doc ids and (unused) frequencies.
      final int[] docs = new int[32];
      final int[] freqs = new int[32];
      TermDocs termDocs = reader.termDocs();
      try {
        int termCount = 0;
        // For every term accepted by the enum, set the bit of each matching doc.
        do {
          Term term = enumerator.term();
          if (term == null)
            break;
          termCount++;
          termDocs.seek(term);
          while (true) {
            final int count = termDocs.read(docs, freqs);
            if (count != 0) {
              for(int i=0;i<count;i++) {
                bitSet.set(docs[i]);
              }
            } else {
              break;
            }
          }
        } while (enumerator.next());

        query.incTotalNumberOfTerms(termCount);

      } finally {
        termDocs.close();
      }
      return bitSet;
    } finally {
      enumerator.close();
    }
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/search/MultiTermQueryWrapperFilter.java | Java | art | 4,527 |
package org.apache.lucene.search;
import org.apache.lucene.index.FieldInvertState;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** Expert: Delegating scoring implementation. Useful in {@link
 * Query#getSimilarity(Searcher)} implementations, to override only certain
 * methods of a Searcher's Similarity implementation.. */
public class SimilarityDelegator extends Similarity {

  /** The wrapped Similarity; assigned once, every method delegates to it. */
  private final Similarity delegee;

  /** Construct a {@link Similarity} that delegates all methods to another.
   *
   * @param delegee the Similarity implementation to delegate to
   */
  public SimilarityDelegator(Similarity delegee) {
    this.delegee = delegee;
  }

  @Override
  public float computeNorm(String fieldName, FieldInvertState state) {
    return delegee.computeNorm(fieldName, state);
  }

  @Override
  public float lengthNorm(String fieldName, int numTerms) {
    return delegee.lengthNorm(fieldName, numTerms);
  }

  @Override
  public float queryNorm(float sumOfSquaredWeights) {
    return delegee.queryNorm(sumOfSquaredWeights);
  }

  @Override
  public float tf(float freq) {
    return delegee.tf(freq);
  }

  @Override
  public float sloppyFreq(int distance) {
    return delegee.sloppyFreq(distance);
  }

  @Override
  public float idf(int docFreq, int numDocs) {
    return delegee.idf(docFreq, numDocs);
  }

  @Override
  public float coord(int overlap, int maxOverlap) {
    return delegee.coord(overlap, maxOverlap);
  }

  @Override
  public float scorePayload(int docId, String fieldName, int start, int end, byte [] payload, int offset, int length) {
    return delegee.scorePayload(docId, fieldName, start, end, payload, offset, length);
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/search/SimilarityDelegator.java | Java | art | 2,441 |
package org.apache.lucene.search;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.index.TermPositions;
import java.io.IOException;
import java.util.HashMap;
final class SloppyPhraseScorer extends PhraseScorer {
    // Maximum allowed total distance between matched phrase positions.
    private int slop;
    // pps that share a term position with another pp (null when the query has no repeats).
    private PhrasePositions repeats[];
    private PhrasePositions tmpPos[]; // for flipping repeating pps.
    // True once the one-time repeats detection below has been performed.
    private boolean checkedRepeats;

    SloppyPhraseScorer(Weight weight, TermPositions[] tps, int[] offsets, Similarity similarity,
                       int slop, byte[] norms) {
        super(weight, tps, offsets, similarity, norms);
        this.slop = slop;
    }

    /**
     * Score a candidate doc for all slop-valid position-combinations (matches)
     * encountered while traversing/hopping the PhrasePositions.
     * <br> The score contribution of a match depends on the distance:
     * <br> - highest score for distance=0 (exact match).
     * <br> - score gets lower as distance gets higher.
     * <br>Example: for query "a b"~2, a document "x a b a y" can be scored twice:
     * once for "a b" (distance=0), and once for "b a" (distance=2).
     * <br>Possibly not all valid combinations are encountered, because for efficiency
     * we always propagate the least PhrasePosition. This allows to base on
     * PriorityQueue and move forward faster.
     * As result, for example, document "a b c b a"
     * would score differently for queries "a b c"~4 and "c b a"~4, although
     * they really are equivalent.
     * Similarly, for doc "a b c b a f g", query "c b"~2
     * would get same score as "g f"~2, although "c b"~2 could be matched twice.
     * We may want to fix this in the future (currently not, for performance reasons).
     */
    @Override
    protected final float phraseFreq() throws IOException {
        int end = initPhrasePositions();
        float freq = 0.0f;
        boolean done = (end<0);
        while (!done) {
            // Pop the pp with the smallest position and try to tighten the window.
            PhrasePositions pp = pq.pop();
            int start = pp.position;
            int next = pq.top().position;
            boolean tpsDiffer = true;
            for (int pos = start; pos <= next || !tpsDiffer; pos = pp.position) {
                if (pos<=next && tpsDiffer)
                    start = pos; // advance pp to min window
                if (!pp.nextPosition()) {
                    done = true; // ran out of a term -- done
                    break;
                }
                PhrasePositions pp2 = null;
                // Only pps marked as repeating need the (costly) collision check.
                tpsDiffer = !pp.repeats || (pp2 = termPositionsDiffer(pp))==null;
                if (pp2!=null && pp2!=pp) {
                    pp = flip(pp,pp2); // flip pp to pp2
                }
            }
            int matchLength = end - start;
            if (matchLength <= slop)
                freq += getSimilarity().sloppyFreq(matchLength); // score match
            if (pp.position > end)
                end = pp.position;
            pq.add(pp); // restore pq
        }
        return freq;
    }

    // flip pp2 and pp in the queue: pop until finding pp2, insert back all but pp2, insert pp back.
    // assumes: pp!=pp2, pp2 in pq, pp not in pq.
    // called only when there are repeating pps.
    private PhrasePositions flip(PhrasePositions pp, PhrasePositions pp2) {
        int n=0;
        PhrasePositions pp3;
        //pop until finding pp2
        while ((pp3=pq.pop()) != pp2) {
            tmpPos[n++] = pp3;  // tmpPos buffers the popped entries for re-insertion
        }
        //insert back all but pp2
        for (n--; n>=0; n--) {
            pq.insertWithOverflow(tmpPos[n]);
        }
        //insert pp back
        pq.add(pp);
        return pp2;
    }

    /**
     * Init PhrasePositions in place.
     * There is a one time initialization for this scorer:
     * <br>- Put in repeats[] each pp that has another pp with same position in the doc.
     * <br>- Also mark each such pp by pp.repeats = true.
     * <br>Later can consult with repeats[] in termPositionsDiffer(pp), making that check efficient.
     * In particular, this allows to score queries with no repetitions with no overhead due to this computation.
     * <br>- Example 1 - query with no repetitions: "ho my"~2
     * <br>- Example 2 - query with repetitions: "ho my my"~2
     * <br>- Example 3 - query with repetitions: "my ho my"~2
     * <br>Init per doc w/repeats in query, includes propagating some repeating pp's to avoid false phrase detection.
     * @return end (max position), or -1 if any term ran out (i.e. done)
     * @throws IOException
     */
    private int initPhrasePositions() throws IOException {
        int end = 0;

        // no repeats at all (most common case is also the simplest one)
        if (checkedRepeats && repeats==null) {
            // build queue from list
            pq.clear();
            for (PhrasePositions pp = first; pp != null; pp = pp.next) {
                pp.firstPosition();
                if (pp.position > end)
                    end = pp.position;
                pq.add(pp); // build pq from list
            }
            return end;
        }

        // position the pp's
        for (PhrasePositions pp = first; pp != null; pp = pp.next)
            pp.firstPosition();

        // one time initialization for this scorer
        if (!checkedRepeats) {
            checkedRepeats = true;
            // check for repeats
            HashMap<PhrasePositions, Object> m = null;
            for (PhrasePositions pp = first; pp != null; pp = pp.next) {
                int tpPos = pp.position + pp.offset;
                for (PhrasePositions pp2 = pp.next; pp2 != null; pp2 = pp2.next) {
                    int tpPos2 = pp2.position + pp2.offset;
                    if (tpPos2 == tpPos) {
                        if (m == null)
                            m = new HashMap<PhrasePositions, Object>();
                        pp.repeats = true;
                        pp2.repeats = true;
                        m.put(pp,null);
                        m.put(pp2,null);
                    }
                }
            }
            if (m!=null)
                repeats = m.keySet().toArray(new PhrasePositions[0]);
        }

        // with repeats must advance some repeating pp's so they all start with differing tp's
        if (repeats!=null) {
            for (int i = 0; i < repeats.length; i++) {
                PhrasePositions pp = repeats[i];
                PhrasePositions pp2;
                while ((pp2 = termPositionsDiffer(pp)) != null) {
                    if (!pp2.nextPosition()) // out of pps that do not differ, advance the pp with higher offset
                        return -1; // ran out of a term -- done
                }
            }
        }

        // build queue from list
        pq.clear();
        for (PhrasePositions pp = first; pp != null; pp = pp.next) {
            if (pp.position > end)
                end = pp.position;
            pq.add(pp); // build pq from list
        }

        if (repeats!=null) {
            // sized to the queue so flip() can always buffer every popped entry
            tmpPos = new PhrasePositions[pq.size()];
        }
        return end;
    }

    /**
     * We disallow two pp's to have the same TermPosition, thereby verifying multiple occurrences
     * in the query of the same word would go elsewhere in the matched doc.
     * @return null if differ (i.e. valid) otherwise return the higher offset PhrasePositions
     * out of the first two PPs found to not differ.
     */
    private PhrasePositions termPositionsDiffer(PhrasePositions pp) {
        // efficiency note: a more efficient implementation could keep a map between repeating
        // pp's, so that if pp1a, pp1b, pp1c are repeats term1, and pp2a, pp2b are repeats
        // of term2, pp2a would only be checked against pp2b but not against pp1a, pp1b, pp1c.
        // However this would complicate code, for a rather rare case, so choice is to compromise here.
        int tpPos = pp.position + pp.offset;
        for (int i = 0; i < repeats.length; i++) {
            PhrasePositions pp2 = repeats[i];
            if (pp2 == pp)
                continue;
            int tpPos2 = pp2.position + pp2.offset;
            if (tpPos2 == tpPos)
                return pp.offset > pp2.offset ? pp : pp2; // do not differ: return the one with higher offset.
        }
        return null;
    }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/search/SloppyPhraseScorer.java | Java | art | 9,146 |
package org.apache.lucene.search;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.document.Document;
import org.apache.lucene.document.FieldSelector;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.util.ReaderUtil;
import org.apache.lucene.util.DummyConcurrentLock;
import java.io.IOException;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.Callable;
import java.util.concurrent.locks.Lock;
/** Implements search over a set of <code>Searchables</code>.
*
* <p>Applications usually need only call the inherited {@link #search(Query,int)}
* or {@link #search(Query,Filter,int)} methods.
*/
public class MultiSearcher extends Searcher {
/**
* Document Frequency cache acting as a Dummy-Searcher. This class is no
* full-fledged Searcher, but only supports the methods necessary to
* initialize Weights.
*/
private static class CachedDfSource extends Searcher {
private final Map<Term,Integer> dfMap; // Map from Terms to corresponding doc freqs
private final int maxDoc; // document count
public CachedDfSource(Map<Term,Integer> dfMap, int maxDoc, Similarity similarity) {
this.dfMap = dfMap;
this.maxDoc = maxDoc;
setSimilarity(similarity);
}
@Override
public int docFreq(Term term) {
int df;
try {
df = dfMap.get(term).intValue();
} catch (NullPointerException e) {
throw new IllegalArgumentException("df for term " + term.text()
+ " not available");
}
return df;
}
@Override
public int[] docFreqs(Term[] terms) {
final int[] result = new int[terms.length];
for (int i = 0; i < terms.length; i++) {
result[i] = docFreq(terms[i]);
}
return result;
}
@Override
public int maxDoc() {
return maxDoc;
}
@Override
public Query rewrite(Query query) {
// this is a bit of a hack. We know that a query which
// creates a Weight based on this Dummy-Searcher is
// always already rewritten (see preparedWeight()).
// Therefore we just return the unmodified query here
return query;
}
@Override
public void close() {
throw new UnsupportedOperationException();
}
@Override
public Document doc(int i) {
throw new UnsupportedOperationException();
}
@Override
public Document doc(int i, FieldSelector fieldSelector) {
throw new UnsupportedOperationException();
}
@Override
public Explanation explain(Weight weight,int doc) {
throw new UnsupportedOperationException();
}
@Override
public void search(Weight weight, Filter filter, Collector results) {
throw new UnsupportedOperationException();
}
@Override
public TopDocs search(Weight weight,Filter filter,int n) {
throw new UnsupportedOperationException();
}
@Override
public TopFieldDocs search(Weight weight,Filter filter,int n,Sort sort) {
throw new UnsupportedOperationException();
}
}
private Searchable[] searchables;
private int[] starts;
private int maxDoc = 0;
  /** Creates a searcher which searches <i>searchers</i>. */
  public MultiSearcher(Searchable... searchables) throws IOException {
    this.searchables = searchables;

    starts = new int[searchables.length + 1];	  // build starts array
    for (int i = 0; i < searchables.length; i++) {
      starts[i] = maxDoc;                         // first global doc id of sub-searcher i
      maxDoc += searchables[i].maxDoc();          // compute maxDocs
    }
    starts[searchables.length] = maxDoc;          // sentinel: one past the last doc id
  }
  /** Return the array of {@link Searchable}s this searches.
   * Note: the internal array is exposed directly; callers must not modify it. */
  public Searchable[] getSearchables() {
    return searchables;
  }
  /** Returns the array of global doc-id offsets, one per sub-searcher plus a
   * trailing sentinel equal to {@link #maxDoc()}. */
  protected int[] getStarts() {
    return starts;
  }
// inherit javadoc
@Override
public void close() throws IOException {
for (int i = 0; i < searchables.length; i++)
searchables[i].close();
}
@Override
public int docFreq(Term term) throws IOException {
int docFreq = 0;
for (int i = 0; i < searchables.length; i++)
docFreq += searchables[i].docFreq(term);
return docFreq;
}
  // inherit javadoc
  @Override
  public Document doc(int n) throws CorruptIndexException, IOException {
    int i = subSearcher(n);			  // find searcher index
    return searchables[i].doc(n - starts[i]);	  // dispatch to searcher with its local doc id
  }
  // inherit javadoc
  @Override
  public Document doc(int n, FieldSelector fieldSelector) throws CorruptIndexException, IOException {
    int i = subSearcher(n);			  // find searcher index
    return searchables[i].doc(n - starts[i], fieldSelector);	  // dispatch to searcher with its local doc id
  }
  /** Returns index of the searcher for document <code>n</code> in the array
   * used to construct this searcher. */
  public int subSearcher(int n) {                 // find searcher for doc n:
    // binary search over the starts[] offsets
    return ReaderUtil.subIndex(n, starts);
  }
  /** Returns the document number of document <code>n</code> within its
   * sub-index. */
  public int subDoc(int n) {
    // subtract the owning sub-searcher's global offset
    return n - starts[subSearcher(n)];
  }
  /** Returns the total maxDoc, i.e. the sum over all sub-searchers
   * (precomputed in the constructor). */
  @Override
  public int maxDoc() throws IOException {
    return maxDoc;
  }
  /** Searches each sub-searcher sequentially, merging top hits into a single
   * shared {@link HitQueue} with doc ids rebased to the global numbering. */
  @Override
  public TopDocs search(Weight weight, Filter filter, int nDocs)
      throws IOException {

    final HitQueue hq = new HitQueue(nDocs, false);
    int totalHits = 0;

    for (int i = 0; i < searchables.length; i++) { // search each searcher
      // DummyConcurrentLock: same merge code path as the parallel searcher,
      // but without real locking since this loop is single-threaded.
      final TopDocs docs = new MultiSearcherCallableNoSort(DummyConcurrentLock.INSTANCE,
          searchables[i], weight, filter, nDocs, hq, i, starts).call();
      totalHits += docs.totalHits; // update totalHits
    }

    final ScoreDoc[] scoreDocs = new ScoreDoc[hq.size()];
    for (int i = hq.size()-1; i >= 0; i--)	  // put docs in array, best score first
      scoreDocs[i] = hq.pop();

    float maxScore = (totalHits==0) ? Float.NEGATIVE_INFINITY : scoreDocs[0].score;

    return new TopDocs(totalHits, scoreDocs, maxScore);
  }
  /** Sorted variant: searches each sub-searcher sequentially, merging hits
   * into a {@link FieldDocSortedHitQueue} ordered by <code>sort</code>. */
  @Override
  public TopFieldDocs search (Weight weight, Filter filter, int n, Sort sort) throws IOException {
    FieldDocSortedHitQueue hq = new FieldDocSortedHitQueue(n);
    int totalHits = 0;

    float maxScore=Float.NEGATIVE_INFINITY;

    for (int i = 0; i < searchables.length; i++) { // search each searcher
      // single-threaded merge; see search(Weight,Filter,int) for the lock note
      final TopFieldDocs docs = new MultiSearcherCallableWithSort(DummyConcurrentLock.INSTANCE,
          searchables[i], weight, filter, n, hq, sort, i, starts).call();
      totalHits += docs.totalHits; // update totalHits
      maxScore = Math.max(maxScore, docs.getMaxScore());
    }

    final ScoreDoc[] scoreDocs = new ScoreDoc[hq.size()];
    for (int i = hq.size() - 1; i >= 0; i--)	  // put docs in array, best first
      scoreDocs[i] = hq.pop();

    return new TopFieldDocs (totalHits, scoreDocs, hq.getFields(), maxScore);
  }
  // inherit javadoc
  @Override
  public void search(Weight weight, Filter filter, final Collector collector)
  throws IOException {
    for (int i = 0; i < searchables.length; i++) {

      final int start = starts[i];

      // Wrap the caller's collector so that docBase is shifted by this
      // sub-searcher's global offset; collect() itself stays relative.
      final Collector hc = new Collector() {
        @Override
        public void setScorer(Scorer scorer) throws IOException {
          collector.setScorer(scorer);
        }
        @Override
        public void collect(int doc) throws IOException {
          collector.collect(doc);
        }
        @Override
        public void setNextReader(IndexReader reader, int docBase) throws IOException {
          collector.setNextReader(reader, start + docBase);
        }
        @Override
        public boolean acceptsDocsOutOfOrder() {
          return collector.acceptsDocsOutOfOrder();
        }
      };

      searchables[i].search(weight, filter, hc);
    }
  }
  /** Rewrites the query against every sub-searcher and combines the results
   * into a single query.
   * NOTE(review): assumes at least one sub-searcher (queries[0]) — confirm
   * callers never construct an empty MultiSearcher. */
  @Override
  public Query rewrite(Query original) throws IOException {
    final Query[] queries = new Query[searchables.length];
    for (int i = 0; i < searchables.length; i++) {
      queries[i] = searchables[i].rewrite(original);
    }
    return queries[0].combine(queries);
  }
  /** Explains the score of <code>doc</code> by delegating to the owning
   * sub-searcher with the doc id rebased to that sub-index. */
  @Override
  public Explanation explain(Weight weight, int doc) throws IOException {
    final int i = subSearcher(doc);			  // find searcher index
    return searchables[i].explain(weight, doc - starts[i]); // dispatch to searcher
  }
  /**
   * Create weight in multiple index scenario.
   *
   * Distributed query processing is done in the following steps:
   * 1. rewrite query
   * 2. extract necessary terms
   * 3. collect dfs for these terms from the Searchables
   * 4. create query weight using aggregate dfs.
   * 5. distribute that weight to Searchables
   * 6. merge results
   *
   * Steps 1-4 are done here, 5+6 in the search() methods
   *
   * @return rewritten queries
   */
  @Override
  protected Weight createWeight(Query original) throws IOException {
    // step 1
    final Query rewrittenQuery = rewrite(original);

    // step 2
    final Set<Term> terms = new HashSet<Term>();
    rewrittenQuery.extractTerms(terms);

    // step3: sum each term's doc freq across all sub-searchers
    final Term[] allTermsArray = new Term[terms.size()];
    terms.toArray(allTermsArray);
    int[] aggregatedDfs = new int[terms.size()];
    for (int i = 0; i < searchables.length; i++) {
      int[] dfs = searchables[i].docFreqs(allTermsArray);
      for(int j=0; j<aggregatedDfs.length; j++){
        aggregatedDfs[j] += dfs[j];
      }
    }

    final HashMap<Term,Integer> dfMap = new HashMap<Term,Integer>();
    for(int i=0; i<allTermsArray.length; i++) {
      dfMap.put(allTermsArray[i], Integer.valueOf(aggregatedDfs[i]));
    }

    // step4: weight the already-rewritten query against the cached dfs so all
    // sub-searchers score with identical idf values
    final int numDocs = maxDoc();
    final CachedDfSource cacheSim = new CachedDfSource(dfMap, numDocs, getSimilarity());

    return rewrittenQuery.weight(cacheSim);
  }
/**
 * A {@link Callable} that runs an unsorted search against one
 * {@link Searchable} and merges the resulting hits into a
 * {@link HitQueue} shared by all sub-searches. (Despite the historical
 * name this is not a Thread subclass; it may be run on an executor or
 * called inline.)
 */
static class MultiSearcherCallableNoSort implements Callable<TopDocs> {

  private final Lock lock;              // serializes inserts into the shared queue
  private final Searchable searchable;  // the single sub-searcher this task queries
  private final Weight weight;
  private final Filter filter;
  private final int nDocs;
  private final int i;                  // index of this searchable; selects starts[i]
  private final HitQueue hq;            // queue shared across all callables
  private final int[] starts;           // per-searchable doc-id base offsets

  public MultiSearcherCallableNoSort(Lock lock, Searchable searchable, Weight weight,
      Filter filter, int nDocs, HitQueue hq, int i, int[] starts) {
    this.lock = lock;
    this.searchable = searchable;
    this.weight = weight;
    this.filter = filter;
    this.nDocs = nDocs;
    this.hq = hq;
    this.i = i;
    this.starts = starts;
  }

  public TopDocs call() throws IOException {
    final TopDocs result = searchable.search(weight, filter, nDocs);
    // Merge this searcher's hits into the shared queue, rebasing each doc
    // id into the global id space first.
    for (final ScoreDoc hit : result.scoreDocs) {
      hit.doc += starts[i]; // convert to absolute doc id
      // the queue is shared between threads, so inserts must be serialized
      //it would be so nice if we had a thread-safe insert
      final boolean rejected;
      lock.lock();
      try {
        rejected = (hit == hq.insertWithOverflow(hit));
      } finally {
        lock.unlock();
      }
      // Hits arrive in descending score order: once one fails to make the
      // queue, none of the remaining ones can either.
      if (rejected) {
        break;
      }
    }
    return result;
  }
}
/**
 * Searches a single {@link Searchable} with a {@link Sort} and merges its
 * hits into a {@link FieldDocSortedHitQueue} shared by all sub-searches.
 * (Despite the historical name, this is a {@link Callable} that may run on
 * an executor or be called inline, not a Thread subclass.)
 */
static class MultiSearcherCallableWithSort implements Callable<TopFieldDocs> {
private final Lock lock; // serializes access to the shared hit queue
private final Searchable searchable; // the single sub-searcher this task queries
private final Weight weight; // pre-built weight, shared by all tasks
private final Filter filter; // filter passed through to the sub-search
private final int nDocs; // number of top docs requested from the sub-search
private final int i; // index of this searchable; selects starts[i]
private final FieldDocSortedHitQueue hq; // queue shared across all callables
private final int[] starts; // per-searchable doc-id base offsets
private final Sort sort; // sort criteria applied by the sub-search
public MultiSearcherCallableWithSort(Lock lock, Searchable searchable, Weight weight,
Filter filter, int nDocs, FieldDocSortedHitQueue hq, Sort sort, int i, int[] starts) {
this.lock = lock;
this.searchable = searchable;
this.weight = weight;
this.filter = filter;
this.nDocs = nDocs;
this.hq = hq;
this.i = i;
this.starts = starts;
this.sort = sort;
}
// Runs the sorted sub-search, rewrites any SortField.DOC tie-break values
// into the global doc-id space, then merges the hits into the shared queue.
public TopFieldDocs call() throws IOException {
final TopFieldDocs docs = searchable.search (weight, filter, nDocs, sort);
// If one of the Sort fields is FIELD_DOC, need to fix its values, so that
// it will break ties by doc Id properly. Otherwise, it will compare to
// 'relative' doc Ids, that belong to two different searchables.
for (int j = 0; j < docs.fields.length; j++) {
if (docs.fields[j].getType() == SortField.DOC) {
// iterate over the score docs and change their fields value
for (int j2 = 0; j2 < docs.scoreDocs.length; j2++) {
FieldDoc fd = (FieldDoc) docs.scoreDocs[j2];
fd.fields[j] = Integer.valueOf(((Integer) fd.fields[j]).intValue() + starts[i]);
}
break; // at most one DOC sort field needs fixing
}
}
// Publish the sort fields to the shared queue before inserting any hits;
// the lock keeps concurrent callables from interleaving with inserts.
lock.lock();
try {
hq.setFields(docs.fields);
} finally {
lock.unlock();
}
final ScoreDoc[] scoreDocs = docs.scoreDocs;
for (int j = 0; j < scoreDocs.length; j++) { // merge scoreDocs into hq
final FieldDoc fieldDoc = (FieldDoc) scoreDocs[j];
fieldDoc.doc += starts[i]; // convert doc to the global doc-id space
//it would be so nice if we had a thread-safe insert
lock.lock();
try {
// insertWithOverflow returns the ejected entry; getting our own doc
// back means it did not make the queue. Hits arrive in sort order,
// so once one is rejected the remaining ones would be too.
if (fieldDoc == hq.insertWithOverflow(fieldDoc))
break;
} finally {
lock.unlock();
}
}
return docs;
}
}
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/search/MultiSearcher.java | Java | art | 14,284 |
package org.apache.lucene.search;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.Iterator;
import java.util.NoSuchElementException;
import java.util.concurrent.CompletionService;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executor;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Callable;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.util.NamedThreadFactory;
import org.apache.lucene.util.ThreadInterruptedException;
/** Implements parallel search over a set of <code>Searchables</code>.
 *
 * <p>Applications usually need only call the inherited {@link #search(Query,int)}
 * or {@link #search(Query,Filter,int)} methods.
 */
public class ParallelMultiSearcher extends MultiSearcher {
// Executor that runs one task per sub-searcher; created in the constructor
// and shut down in close().
private final ExecutorService executor;
// NOTE(review): these shadow state held by MultiSearcher; starts is the
// superclass's offset array obtained via getStarts().
private final Searchable[] searchables;
private final int[] starts;
/** Creates a {@link Searchable} which searches <i>searchables</i>. */
public ParallelMultiSearcher(Searchable... searchables) throws IOException {
super(searchables);
this.searchables = searchables;
this.starts = getStarts();
// Unbounded cached pool: threads are created on demand, one per
// concurrently searched Searchable, and named after this class.
executor = Executors.newCachedThreadPool(new NamedThreadFactory(this.getClass().getSimpleName()));
}
/**
 * Executes each {@link Searchable}'s docFreq() in its own thread and waits for each search to complete and merge
 * the results back together.
 */
@Override
public int docFreq(final Term term) throws IOException {
final ExecutionHelper<Integer> runner = new ExecutionHelper<Integer>(executor);
for(int i = 0; i < searchables.length; i++) {
final Searchable searchable = searchables[i]; // effectively-final copy for the closure
runner.submit(new Callable<Integer>() {
public Integer call() throws IOException {
return Integer.valueOf(searchable.docFreq(term));
}
});
}
// Sum the per-searcher frequencies as tasks complete (completion order).
int docFreq = 0;
for (Integer num : runner) {
docFreq += num.intValue();
}
return docFreq;
}
/**
 * A search implementation which executes each
 * {@link Searchable} in its own thread and waits for each search to complete and merge
 * the results back together.
 */
@Override
public TopDocs search(Weight weight, Filter filter, int nDocs) throws IOException {
final HitQueue hq = new HitQueue(nDocs, false); // shared queue; guarded by lock
final Lock lock = new ReentrantLock();
final ExecutionHelper<TopDocs> runner = new ExecutionHelper<TopDocs>(executor);
for (int i = 0; i < searchables.length; i++) { // search each searchable
runner.submit(
new MultiSearcherCallableNoSort(lock, searchables[i], weight, filter, nDocs, hq, i, starts));
}
// Aggregate hit counts and max score while the callables fill hq.
int totalHits = 0;
float maxScore = Float.NEGATIVE_INFINITY;
for (final TopDocs topDocs : runner) {
totalHits += topDocs.totalHits;
maxScore = Math.max(maxScore, topDocs.getMaxScore());
}
final ScoreDoc[] scoreDocs = new ScoreDoc[hq.size()];
for (int i = hq.size() - 1; i >= 0; i--) // put docs in array, best hit first
scoreDocs[i] = hq.pop();
return new TopDocs(totalHits, scoreDocs, maxScore);
}
/**
 * A search implementation allowing sorting which spans a new thread for each
 * Searchable, waits for each search to complete and merges
 * the results back together.
 */
@Override
public TopFieldDocs search(Weight weight, Filter filter, int nDocs, Sort sort) throws IOException {
if (sort == null) throw new NullPointerException(); // sort is required here
final FieldDocSortedHitQueue hq = new FieldDocSortedHitQueue(nDocs);
final Lock lock = new ReentrantLock(); // guards hq across callables
final ExecutionHelper<TopFieldDocs> runner = new ExecutionHelper<TopFieldDocs>(executor);
for (int i = 0; i < searchables.length; i++) { // search each searchable
runner.submit(
new MultiSearcherCallableWithSort(lock, searchables[i], weight, filter, nDocs, hq, sort, i, starts));
}
int totalHits = 0;
float maxScore = Float.NEGATIVE_INFINITY;
for (final TopFieldDocs topFieldDocs : runner) {
totalHits += topFieldDocs.totalHits;
maxScore = Math.max(maxScore, topFieldDocs.getMaxScore());
}
final ScoreDoc[] scoreDocs = new ScoreDoc[hq.size()];
for (int i = hq.size() - 1; i >= 0; i--) // put docs in array, best hit first
scoreDocs[i] = hq.pop();
return new TopFieldDocs(totalHits, scoreDocs, hq.getFields(), maxScore);
}
/** Lower-level search API.
 *
 * <p>{@link Collector#collect(int)} is called for every matching document.
 *
 * <p>Applications should only use this if they need <i>all</i> of the
 * matching documents. The high-level search API ({@link
 * Searcher#search(Query,int)}) is usually more efficient, as it skips
 * non-high-scoring hits.
 *
 * <p>This method cannot be parallelized, because {@link Collector}
 * supports no concurrent access.
 *
 * @param weight to match documents
 * @param filter if non-null, a bitset used to eliminate some documents
 * @param collector to receive hits
 */
@Override
public void search(final Weight weight, final Filter filter, final Collector collector)
throws IOException {
// Sequential: each sub-searcher is visited in turn, with the caller's
// collector wrapped so doc bases are rebased into the global id space.
for (int i = 0; i < searchables.length; i++) {
final int start = starts[i];
final Collector hc = new Collector() {
@Override
public void setScorer(final Scorer scorer) throws IOException {
collector.setScorer(scorer);
}
@Override
public void collect(final int doc) throws IOException {
collector.collect(doc);
}
@Override
public void setNextReader(final IndexReader reader, final int docBase) throws IOException {
collector.setNextReader(reader, start + docBase);
}
@Override
public boolean acceptsDocsOutOfOrder() {
return collector.acceptsDocsOutOfOrder();
}
};
searchables[i].search(weight, filter, hc);
}
}
@Override
public void close() throws IOException {
// NOTE(review): shutdown() stops accepting new tasks but does not wait
// for in-flight ones; confirm whether awaitTermination is desired here.
executor.shutdown();
super.close();
}
/**
 * A helper class that wraps a {@link CompletionService} and provides an
 * iterable interface to the completed {@link Callable} instances.
 *
 * @param <T>
 *          the type of the {@link Callable} return value
 */
private static final class ExecutionHelper<T> implements Iterator<T>, Iterable<T> {
private final CompletionService<T> service;
private int numTasks; // outstanding (submitted but not yet consumed) tasks
ExecutionHelper(final Executor executor) {
this.service = new ExecutorCompletionService<T>(executor);
}
public boolean hasNext() {
return numTasks > 0;
}
public void submit(Callable<T> task) {
this.service.submit(task);
++numTasks;
}
// Blocks until the next task completes; results arrive in completion
// order, not submission order.
public T next() {
if(!this.hasNext())
throw new NoSuchElementException();
try {
return service.take().get();
} catch (InterruptedException e) {
throw new ThreadInterruptedException(e);
} catch (ExecutionException e) {
// NOTE(review): this wraps any cause (including IOException) in an
// unchecked RuntimeException, losing the checked exception type.
throw new RuntimeException(e);
} finally {
--numTasks;
}
}
public void remove() {
throw new UnsupportedOperationException();
}
public Iterator<T> iterator() {
// use the shortcut here - this is only used in a private context
return this;
}
}
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/search/ParallelMultiSearcher.java | Java | art | 8,248 |