code stringlengths 1 2.01M | repo_name stringlengths 3 62 | path stringlengths 1 267 | language stringclasses 231
values | license stringclasses 13
values | size int64 1 2.01M |
|---|---|---|---|---|---|
package org.apache.lucene.store;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.HashSet;
/**
* Implements {@link LockFactory} for a single in-process instance,
* meaning all locking will take place through this one instance.
* Only use this {@link LockFactory} when you are certain all
* IndexReaders and IndexWriters for a given index are running
* against a single shared in-process Directory instance. This is
* currently the default locking for RAMDirectory.
*
* @see LockFactory
*/
public class SingleInstanceLockFactory extends LockFactory {

  // Set of lock names currently held.  Shared with every
  // SingleInstanceLock this factory creates, which scopes all locking
  // to this one factory (and hence Directory) instance.
  private final HashSet<String> locks = new HashSet<String>();

  /**
   * Returns a {@link SingleInstanceLock} backed by this factory's private
   * lock-name set.
   *
   * <p>The lock prefix is deliberately ignored: the private HashSet
   * instance already scopes the locking to this single Directory
   * instance, so no prefix is needed to disambiguate lock names.</p>
   *
   * @param lockName name of the lock to create
   */
  @Override
  public Lock makeLock(String lockName) {
    return new SingleInstanceLock(locks, lockName);
  }

  /**
   * Forcibly clears the named lock by removing it from the shared set.
   * {@code HashSet.remove} is a harmless no-op for an absent name, so no
   * prior containment check is needed.
   *
   * @param lockName name of the lock to clear
   */
  @Override
  public void clearLock(String lockName) throws IOException {
    synchronized(locks) {
      locks.remove(lockName);
    }
  }
}
/**
 * A {@link Lock} whose held/free state is simply membership of its name
 * in a HashSet shared with the {@link SingleInstanceLockFactory} that
 * created it.  The shared set doubles as the synchronization monitor.
 */
class SingleInstanceLock extends Lock {

  String lockName;
  private HashSet<String> locks;

  public SingleInstanceLock(HashSet<String> locks, String lockName) {
    this.locks = locks;
    this.lockName = lockName;
  }

  @Override
  public boolean obtain() throws IOException {
    synchronized (locks) {
      // add() reports whether the name was newly inserted, which is
      // exactly "did we just acquire the lock".
      return locks.add(lockName);
    }
  }

  @Override
  public void release() {
    synchronized (locks) {
      locks.remove(lockName);
    }
  }

  @Override
  public boolean isLocked() {
    synchronized (locks) {
      return locks.contains(lockName);
    }
  }

  @Override
  public String toString() {
    final String base = super.toString();
    return base + ": " + lockName;
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/store/SingleInstanceLockFactory.java | Java | art | 2,556 |
package org.apache.lucene.store;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.File;
import java.io.IOException;
/**
* <p>Implements {@link LockFactory} using {@link
* File#createNewFile()}.</p>
*
* <p><b>NOTE:</b> the <a target="_top"
* href="http://java.sun.com/j2se/1.4.2/docs/api/java/io/File.html#createNewFile()">javadocs
* for <code>File.createNewFile</code></a> contain a vague
* yet spooky warning about not using the API for file
* locking. This warning was added due to <a target="_top"
* href="http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=4676183">this
* bug</a>, and in fact the only known problem with using
* this API for locking is that the Lucene write lock may
* not be released when the JVM exits abnormally.</p>
* <p>When this happens, a {@link LockObtainFailedException}
* is hit when trying to create a writer, in which case you
* need to explicitly clear the lock file first. You can
* either manually remove the file, or use the {@link
* org.apache.lucene.index.IndexWriter#unlock(Directory)}
* API. But, first be certain that no writer is in fact
* writing to the index otherwise you can easily corrupt
* your index.</p>
*
* <p>If you suspect that this or any other LockFactory is
* not working properly in your environment, you can easily
* test it by using {@link VerifyingLockFactory}, {@link
* LockVerifyServer} and {@link LockStressTest}.</p>
*
* @see LockFactory
*/
public class SimpleFSLockFactory extends FSLockFactory {

  /**
   * Create a SimpleFSLockFactory instance, with null (unset)
   * lock directory. When you pass this factory to a {@link FSDirectory}
   * subclass, the lock directory is automatically set to the
   * directory itself. Be sure to create one instance for each directory
   * you create!
   */
  public SimpleFSLockFactory() throws IOException {
    this((File) null);
  }

  /**
   * Instantiate using the provided directory (as a File instance).
   * @param lockDir where lock files should be created.
   */
  public SimpleFSLockFactory(File lockDir) throws IOException {
    setLockDir(lockDir);
  }

  /**
   * Instantiate using the provided directory name (String).
   * @param lockDirName where lock files should be created.
   */
  public SimpleFSLockFactory(String lockDirName) throws IOException {
    setLockDir(new File(lockDirName));
  }

  /**
   * Returns a {@link SimpleFSLock} for the given name, prefixed with the
   * factory's lock prefix when one is set.
   */
  @Override
  public Lock makeLock(String lockName) {
    if (lockPrefix != null) {
      lockName = lockPrefix + "-" + lockName;
    }
    return new SimpleFSLock(lockDir, lockName);
  }

  /**
   * Forcibly deletes the lock file for the named lock, if it exists.
   *
   * @throws IOException if the lock file exists but cannot be deleted.
   */
  @Override
  public void clearLock(String lockName) throws IOException {
    if (lockDir.exists()) {
      if (lockPrefix != null) {
        lockName = lockPrefix + "-" + lockName;
      }
      File lockFile = new File(lockDir, lockName);
      if (lockFile.exists() && !lockFile.delete()) {
        throw new IOException("Cannot delete " + lockFile);
      }
    }
  }
}
/**
 * A {@link Lock} backed by a marker file on disk: the lock is held while
 * the file exists, and acquisition is the atomic
 * {@code File.createNewFile()} call.
 */
class SimpleFSLock extends Lock {

  File lockFile;
  File lockDir;

  public SimpleFSLock(File lockDir, String lockFileName) {
    this.lockDir = lockDir;
    lockFile = new File(lockDir, lockFileName);
  }

  @Override
  public boolean obtain() throws IOException {
    ensureLockDir();
    // Atomically creates the marker file; false means someone else
    // already holds the lock.
    return lockFile.createNewFile();
  }

  // Verify that lockDir exists as a directory, creating it if necessary.
  private void ensureLockDir() throws IOException {
    if (lockDir.exists()) {
      if (!lockDir.isDirectory()) {
        throw new IOException("Found regular file where directory expected: " +
                              lockDir.getAbsolutePath());
      }
    } else if (!lockDir.mkdirs()) {
      throw new IOException("Cannot create directory: " +
                            lockDir.getAbsolutePath());
    }
  }

  @Override
  public void release() throws LockReleaseFailedException {
    if (lockFile.exists() && !lockFile.delete()) {
      throw new LockReleaseFailedException("failed to delete " + lockFile);
    }
  }

  @Override
  public boolean isLocked() {
    return lockFile.exists();
  }

  @Override
  public String toString() {
    return "SimpleFSLock@" + lockFile;
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/store/SimpleFSLockFactory.java | Java | art | 4,780 |
package org.apache.lucene.store;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
/**
* A memory-resident {@link IndexOutput} implementation.
*
* <p>For Lucene internal use.</p>
*/
public class RAMOutputStream extends IndexOutput {
  static final int BUFFER_SIZE = 1024;

  private RAMFile file;            // backing in-memory file
  private byte[] currentBuffer;    // buffer currently written to, or null before first write
  private int currentBufferIndex;  // index of currentBuffer within file, -1 before first write
  private int bufferPosition;      // next write offset inside currentBuffer
  private long bufferStart;        // absolute file position of currentBuffer[0]
  private int bufferLength;        // currentBuffer.length

  /** Construct an empty output buffer. */
  public RAMOutputStream() {
    this(new RAMFile());
  }

  public RAMOutputStream(RAMFile f) {
    file = f;
    // make sure that we switch to the
    // first needed buffer lazily
    currentBufferIndex = -1;
    currentBuffer = null;
  }

  /** Copy the current contents of this buffer to the named output. */
  public void writeTo(IndexOutput out) throws IOException {
    flush();
    final long end = file.length;
    long pos = 0;
    int buffer = 0;
    while (pos < end) {
      int length = BUFFER_SIZE;
      long nextPos = pos + length;
      if (nextPos > end) {                        // at the last buffer
        length = (int) (end - pos);
      }
      out.writeBytes(file.getBuffer(buffer++), length);
      pos = nextPos;
    }
  }

  /** Resets this to an empty file. */
  public void reset() {
    currentBuffer = null;
    currentBufferIndex = -1;
    bufferPosition = 0;
    bufferStart = 0;
    bufferLength = 0;
    file.setLength(0);
  }

  @Override
  public void close() throws IOException {
    flush();
  }

  @Override
  public void seek(long pos) throws IOException {
    // set the file length in case we seek back
    // and flush() has not been called yet
    setFileLength();
    if (pos < bufferStart || pos >= bufferStart + bufferLength) {
      currentBufferIndex = (int) (pos / BUFFER_SIZE);
      switchCurrentBuffer();
    }
    bufferPosition = (int) (pos % BUFFER_SIZE);
  }

  @Override
  public long length() {
    return file.length;
  }

  @Override
  public void writeByte(byte b) throws IOException {
    if (bufferPosition == bufferLength) {
      currentBufferIndex++;
      switchCurrentBuffer();
    }
    currentBuffer[bufferPosition++] = b;
  }

  @Override
  public void writeBytes(byte[] b, int offset, int len) throws IOException {
    assert b != null;
    while (len > 0) {
      if (bufferPosition == bufferLength) {
        currentBufferIndex++;
        switchCurrentBuffer();
      }
      // copy as much as fits in the current buffer, then loop
      int remainInBuffer = currentBuffer.length - bufferPosition;
      int bytesToCopy = len < remainInBuffer ? len : remainInBuffer;
      System.arraycopy(b, offset, currentBuffer, bufferPosition, bytesToCopy);
      offset += bytesToCopy;
      len -= bytesToCopy;
      bufferPosition += bytesToCopy;
    }
  }

  // Points currentBuffer at currentBufferIndex, allocating a new buffer
  // when appending past the end of the file.
  private void switchCurrentBuffer() throws IOException {
    if (currentBufferIndex == file.numBuffers()) {
      currentBuffer = file.addBuffer(BUFFER_SIZE);
    } else {
      currentBuffer = file.getBuffer(currentBufferIndex);
    }
    bufferPosition = 0;
    bufferStart = (long) BUFFER_SIZE * (long) currentBufferIndex;
    bufferLength = currentBuffer.length;
  }

  // Extends the file's logical length to the current write position, if needed.
  private void setFileLength() {
    long pointer = bufferStart + bufferPosition;
    if (pointer > file.length) {
      file.setLength(pointer);
    }
  }

  @Override
  public void flush() throws IOException {
    file.setLastModified(System.currentTimeMillis());
    setFileLength();
  }

  @Override
  public long getFilePointer() {
    return currentBufferIndex < 0 ? 0 : bufferStart + bufferPosition;
  }

  /** Returns byte usage of all buffers. */
  public long sizeInBytes() {
    // Cast before multiplying: numBuffers() * BUFFER_SIZE would be
    // computed in 32-bit int and overflow once the file holds more
    // than 2 GB worth of buffers.
    return (long) file.numBuffers() * BUFFER_SIZE;
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/store/RAMOutputStream.java | Java | art | 4,472 |
package org.apache.lucene.store;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
/**
* Use this {@link LockFactory} to disable locking entirely.
* Only one instance of this lock is created. You should call {@link
* #getNoLockFactory()} to get the instance.
*
* @see LockFactory
*/
/**
 * A {@link LockFactory} that disables locking entirely: every lock it
 * hands out succeeds immediately and holds nothing.  Obtain the shared
 * instance via {@link #getNoLockFactory()}.
 */
public class NoLockFactory extends LockFactory {

  // One shared no-op lock, returned from every makeLock call.
  private static NoLock singletonLock = new NoLock();
  private static NoLockFactory singleton = new NoLockFactory();

  /** Returns the shared factory instance. */
  public static NoLockFactory getNoLockFactory() {
    return singleton;
  }

  @Override
  public Lock makeLock(String lockName) {
    // The name is irrelevant: locking is disabled, so all callers share
    // the same always-succeeding lock.
    return singletonLock;
  }

  @Override
  public void clearLock(String lockName) {}
}
/**
 * The no-op {@link Lock} handed out by {@link NoLockFactory}: obtaining
 * always succeeds, releasing does nothing, and it never reports itself
 * as held.
 */
class NoLock extends Lock {

  @Override
  public boolean obtain() throws IOException {
    // Locking is disabled, so acquisition trivially succeeds.
    return true;
  }

  @Override
  public void release() {
    // Nothing was acquired, so there is nothing to release.
  }

  @Override
  public boolean isLocked() {
    return false;
  }

  @Override
  public String toString() {
    return "NoLock";
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/store/NoLockFactory.java | Java | art | 1,812 |
package org.apache.lucene.store;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.io.File;
/**
* Simple standalone tool that forever acquires & releases a
* lock using a specific LockFactory. Run without any args
* to see usage.
*
* @see VerifyingLockFactory
* @see LockVerifyServer
*/
public class LockStressTest {

  /**
   * Entry point; run with no args to print the usage message.
   *
   * <p>Instantiates the named LockFactory, wraps it in a
   * {@link VerifyingLockFactory} pointed at a running
   * {@link LockVerifyServer}, then loops forever obtaining and releasing
   * the lock, printing "l" for each successful obtain and "x" for each
   * failed attempt.</p>
   */
  public static void main(String[] args) throws Exception {
    if (args.length != 6) {
      System.out.println("\nUsage: java org.apache.lucene.store.LockStressTest myID verifierHostOrIP verifierPort lockFactoryClassName lockDirName sleepTimeMS\n" +
                         "\n" +
                         " myID = int from 0 .. 255 (should be unique for test process)\n" +
                         " verifierHostOrIP = host name or IP address where LockVerifyServer is running\n" +
                         " verifierPort = port that LockVerifyServer is listening on\n" +
                         " lockFactoryClassName = primary LockFactory class that we will use\n" +
                         " lockDirName = path to the lock directory (only set for Simple/NativeFSLockFactory)\n" +
                         " sleepTimeMS = milliseconds to pause between each lock obtain/release\n" +
                         "\n" +
                         "You should run multiple instances of this process, each with its own\n" +
                         "unique ID, and each pointing to the same lock directory, to verify\n" +
                         "that locking is working correctly.\n" +
                         "\n" +
                         "Make sure you are first running LockVerifyServer.\n" +
                         "\n");
      System.exit(1);
    }

    final int myID = Integer.parseInt(args[0]);
    if (myID < 0 || myID > 255) {
      System.out.println("myID must be a unique int 0..255");
      System.exit(1);
    }

    final String verifierHost = args[1];
    final int verifierPort = Integer.parseInt(args[2]);
    final String lockFactoryClassName = args[3];
    final String lockDirName = args[4];
    final int sleepTimeMS = Integer.parseInt(args[5]);

    LockFactory lockFactory;
    try {
      lockFactory = Class.forName(lockFactoryClassName).asSubclass(LockFactory.class).newInstance();
    } catch (IllegalAccessException e) {
      // Chain the original exception as the cause instead of discarding it,
      // so the real failure is visible in the stack trace.
      throw (IOException) new IOException("IllegalAccessException when instantiating LockClass " + lockFactoryClassName).initCause(e);
    } catch (InstantiationException e) {
      throw (IOException) new IOException("InstantiationException when instantiating LockClass " + lockFactoryClassName).initCause(e);
    } catch (ClassCastException e) {
      throw (IOException) new IOException("unable to cast LockClass " + lockFactoryClassName + " instance to a LockFactory").initCause(e);
    } catch (ClassNotFoundException e) {
      throw (IOException) new IOException("unable to find LockClass " + lockFactoryClassName).initCause(e);
    }

    File lockDir = new File(lockDirName);
    if (lockFactory instanceof FSLockFactory) {
      ((FSLockFactory) lockFactory).setLockDir(lockDir);
    }
    lockFactory.setLockPrefix("test");

    LockFactory verifyLF = new VerifyingLockFactory((byte) myID, lockFactory, verifierHost, verifierPort);
    Lock l = verifyLF.makeLock("test.lock");

    while(true) {
      boolean obtained = false;
      try {
        obtained = l.obtain(10);
      } catch (LockObtainFailedException e) {
        System.out.print("x");
      }
      if (obtained) {
        System.out.print("l");
        l.release();
      }
      Thread.sleep(sleepTimeMS);
    }
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/store/LockStressTest.java | Java | art | 4,295 |
<!doctype html public "-//w3c//dtd html 4.0 transitional//en">
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
</head>
<body>
Binary I/O API, used for all index data.
</body>
</html>
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/store/package.html | HTML | art | 1,001 |
package org.apache.lucene.store;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.File;
/**
* Base class for file system based locking implementation.
*/
/**
 * Base class for file system based locking implementation.
 */
public abstract class FSLockFactory extends LockFactory {

  /** Directory for the lock files. */
  protected File lockDir = null;

  /**
   * Set the lock directory.  May only be called once, to initialize the
   * lock directory; a second call fails with
   * {@link IllegalStateException}.  {@link FSDirectory} uses this to
   * point the factory at itself; subclasses may also call it from their
   * constructors.
   */
  protected final void setLockDir(File lockDir) {
    if (this.lockDir != null) {
      throw new IllegalStateException("You can set the lock directory for this factory only once.");
    }
    this.lockDir = lockDir;
  }

  /** Retrieve the lock directory. */
  public File getLockDir() {
    return lockDir;
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/store/FSLockFactory.java | Java | art | 1,667 |
package org.apache.lucene.store;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.zip.CRC32;
import java.util.zip.Checksum;
/** Reads bytes through to a primary IndexInput, computing
 * a checksum of everything read as it goes. Note that you cannot use seek(). */
public class ChecksumIndexInput extends IndexInput {
// delegate input that every read is forwarded to
IndexInput main;
// running CRC32 over all bytes read so far
Checksum digest;
public ChecksumIndexInput(IndexInput main) {
this.main = main;
digest = new CRC32();
}
// Reads a single byte from the delegate and folds it into the checksum.
@Override
public byte readByte() throws IOException {
final byte b = main.readByte();
digest.update(b);
return b;
}
// Bulk-reads from the delegate and folds the read region into the checksum.
@Override
public void readBytes(byte[] b, int offset, int len)
throws IOException {
main.readBytes(b, offset, len);
digest.update(b, offset, len);
}
/** Returns the checksum (CRC32) of all bytes read so far. */
public long getChecksum() {
return digest.getValue();
}
@Override
public void close() throws IOException {
main.close();
}
@Override
public long getFilePointer() {
return main.getFilePointer();
}
// Seeking would make the running checksum meaningless, so it is forbidden.
@Override
public void seek(long pos) {
throw new RuntimeException("not allowed");
}
@Override
public long length() {
return main.length();
}
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/store/ChecksumIndexInput.java | Java | art | 1,937 |
package org.apache.lucene.store;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.net.Socket;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
/**
* A {@link LockFactory} that wraps another {@link
* LockFactory} and verifies that each lock obtain/release
* is "correct" (never results in two processes holding the
* lock at the same time). It does this by contacting an
* external server ({@link LockVerifyServer}) to assert that
* at most one process holds the lock at a time. To use
* this, you should also run {@link LockVerifyServer} on the
* host & port matching what you pass to the constructor.
*
* @see LockVerifyServer
* @see LockStressTest
*/
public class VerifyingLockFactory extends LockFactory {

  LockFactory lf;   // the wrapped factory under test
  byte id;          // unique client id reported to the verify server
  String host;      // LockVerifyServer host or IP
  int port;         // LockVerifyServer port

  private class CheckedLock extends Lock {
    private Lock lock;

    public CheckedLock(Lock lock) {
      this.lock = lock;
    }

    /**
     * Reports a lock state transition (1 = obtained, 0 = released) to the
     * LockVerifyServer, and fails if the server reports the lock was held
     * by someone else at the same time.
     */
    private void verify(byte message) {
      try {
        Socket s = new Socket(host, port);
        try {
          OutputStream out = s.getOutputStream();
          out.write(id);
          out.write(message);
          InputStream in = s.getInputStream();
          int result = in.read();
          if (result != 0)
            throw new RuntimeException("lock was double acquired");
        } finally {
          // Always release the socket (closing it closes its streams
          // too); previously it leaked whenever anything above threw.
          s.close();
        }
      } catch (Exception e) {
        throw new RuntimeException(e);
      }
    }

    @Override
    public synchronized boolean obtain(long lockWaitTimeout)
      throws LockObtainFailedException, IOException {
      boolean obtained = lock.obtain(lockWaitTimeout);
      if (obtained)
        verify((byte) 1);
      return obtained;
    }

    @Override
    public synchronized boolean obtain()
      throws LockObtainFailedException, IOException {
      // NOTE(review): this path does not notify the verify server; only
      // the timeout variant above is checked.  Kept as-is to preserve
      // behavior — confirm whether this is intentional.
      return lock.obtain();
    }

    @Override
    public synchronized boolean isLocked() throws IOException {
      return lock.isLocked();
    }

    @Override
    public synchronized void release() throws IOException {
      if (isLocked()) {
        verify((byte) 0);
        lock.release();
      }
    }
  }

  /**
   * @param id should be a unique id across all clients
   * @param lf the LockFactory that we are testing
   * @param host host or IP where {@link LockVerifyServer}
        is running
   * @param port the port {@link LockVerifyServer} is
        listening on
   */
  public VerifyingLockFactory(byte id, LockFactory lf, String host, int port) throws IOException {
    this.id = id;
    this.lf = lf;
    this.host = host;
    this.port = port;
  }

  @Override
  public synchronized Lock makeLock(String lockName) {
    return new CheckedLock(lf.makeLock(lockName));
  }

  @Override
  public synchronized void clearLock(String lockName)
    throws IOException {
    lf.clearLock(lockName);
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/store/VerifyingLockFactory.java | Java | art | 3,624 |
package org.apache.lucene.store;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
/** A straightforward implementation of {@link FSDirectory}
* using java.io.RandomAccessFile. However, this class has
* poor concurrent performance (multiple threads will
* bottleneck) as it synchronizes when multiple threads
* read from the same file. It's usually better to use
* {@link NIOFSDirectory} or {@link MMapDirectory} instead. */
public class SimpleFSDirectory extends FSDirectory {
/** Create a new SimpleFSDirectory for the named location.
*
* @param path the path of the directory
* @param lockFactory the lock factory to use, or null for the default
* ({@link NativeFSLockFactory});
* @throws IOException
*/
public SimpleFSDirectory(File path, LockFactory lockFactory) throws IOException {
super(path, lockFactory);
}
/** Create a new SimpleFSDirectory for the named location and {@link NativeFSLockFactory}.
*
* @param path the path of the directory
* @throws IOException
*/
public SimpleFSDirectory(File path) throws IOException {
super(path, null);
}
/** Creates an IndexOutput for the file with the given name. */
@Override
public IndexOutput createOutput(String name) throws IOException {
initOutput(name);
return new SimpleFSIndexOutput(new File(directory, name));
}
/** Creates an IndexInput for the file with the given name. */
@Override
public IndexInput openInput(String name, int bufferSize) throws IOException {
ensureOpen();
return new SimpleFSIndexInput(new File(directory, name), bufferSize, getReadChunkSize());
}
// IndexInput over a RandomAccessFile.  The Descriptor (and hence the OS
// file handle) is shared between this instance and all of its clones,
// which is why every read synchronizes on it.
protected static class SimpleFSIndexInput extends BufferedIndexInput {
// RandomAccessFile that also tracks its open state and caches its
// length and current seek position.
protected static class Descriptor extends RandomAccessFile {
// remember if the file is open, so that we don't try to close it
// more than once
protected volatile boolean isOpen;
// last position seeked/read to; lets readInternal skip redundant seeks
long position;
// file length captured once at open time
final long length;
public Descriptor(File file, String mode) throws IOException {
super(file, mode);
isOpen=true;
length=length();
}
@Override
public void close() throws IOException {
if (isOpen) {
isOpen=false;
super.close();
}
}
}
// shared with clones (shallow copy in clone()), hence the synchronization
protected final Descriptor file;
// true for instances produced by clone(); clones must not close the
// shared descriptor
boolean isClone;
// LUCENE-1566 - maximum read length on a 32bit JVM to prevent incorrect OOM
protected final int chunkSize;
public SimpleFSIndexInput(File path, int bufferSize, int chunkSize) throws IOException {
super(bufferSize);
file = new Descriptor(path, "r");
this.chunkSize = chunkSize;
}
/** IndexInput methods */
// Reads len bytes at the current (buffered) file pointer into b[offset..].
// Serialized on the shared descriptor because clones read from the same
// underlying handle; throws IOException on premature EOF.
@Override
protected void readInternal(byte[] b, int offset, int len)
throws IOException {
synchronized (file) {
long position = getFilePointer();
// only seek when the shared handle is not already at our position
if (position != file.position) {
file.seek(position);
file.position = position;
}
int total = 0;
try {
do {
final int readLength;
if (total + chunkSize > len) {
readLength = len - total;
} else {
// LUCENE-1566 - work around JVM Bug by breaking very large reads into chunks
readLength = chunkSize;
}
final int i = file.read(b, offset + total, readLength);
if (i == -1) {
throw new IOException("read past EOF");
}
file.position += i;
total += i;
} while (total < len);
} catch (OutOfMemoryError e) {
// propagate OOM up and add a hint for 32bit VM Users hitting the bug
// with a large chunk size in the fast path.
final OutOfMemoryError outOfMemoryError = new OutOfMemoryError(
"OutOfMemoryError likely caused by the Sun VM Bug described in "
+ "https://issues.apache.org/jira/browse/LUCENE-1566; try calling FSDirectory.setReadChunkSize "
+ "with a a value smaller than the current chunks size (" + chunkSize + ")");
outOfMemoryError.initCause(e);
throw outOfMemoryError;
}
}
}
@Override
public void close() throws IOException {
// only close the file if this is not a clone
if (!isClone) file.close();
}
// No-op: the actual seek is deferred until readInternal, which compares
// against the descriptor's cached position.
@Override
protected void seekInternal(long position) {
}
@Override
public long length() {
return file.length;
}
// Shallow copy sharing the same Descriptor; marked as a clone so that
// close() leaves the shared handle open.
@Override
public Object clone() {
SimpleFSIndexInput clone = (SimpleFSIndexInput)super.clone();
clone.isClone = true;
return clone;
}
/** Method used for testing. Returns true if the underlying
* file descriptor is valid.
*/
boolean isFDValid() throws IOException {
return file.getFD().valid();
}
}
// IndexOutput over a RandomAccessFile opened in read-write mode.
protected static class SimpleFSIndexOutput extends BufferedIndexOutput {
RandomAccessFile file = null;
// remember if the file is open, so that we don't try to close it
// more than once
private volatile boolean isOpen;
public SimpleFSIndexOutput(File path) throws IOException {
file = new RandomAccessFile(path, "rw");
isOpen = true;
}
/** output methods: */
@Override
public void flushBuffer(byte[] b, int offset, int size) throws IOException {
file.write(b, offset, size);
}
// Flushes via super.close(), then closes the underlying file.  If the
// flush throws, the file is still closed but any secondary close failure
// is suppressed so the original exception propagates.
@Override
public void close() throws IOException {
// only close the file if it has not been closed yet
if (isOpen) {
boolean success = false;
try {
super.close();
success = true;
} finally {
isOpen = false;
if (!success) {
try {
file.close();
} catch (Throwable t) {
// Suppress so we don't mask original exception
}
} else
file.close();
}
}
}
/** Random-access methods */
@Override
public void seek(long pos) throws IOException {
super.seek(pos);
file.seek(pos);
}
@Override
public long length() throws IOException {
return file.length();
}
@Override
public void setLength(long length) throws IOException {
file.setLength(length);
}
}
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/store/SimpleFSDirectory.java | Java | art | 7,150 |
package org.apache.lucene.store;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.lang.management.ManagementFactory;
import java.nio.channels.FileChannel;
import java.nio.channels.FileLock;
import java.io.File;
import java.io.RandomAccessFile;
import java.io.IOException;
import java.util.HashSet;
import java.util.Random;
/**
* <p>Implements {@link LockFactory} using native OS file
* locks. Note that because this LockFactory relies on
* java.nio.* APIs for locking, any problems with those APIs
* will cause locking to fail. Specifically, on certain NFS
* environments the java.nio.* locks will fail (the lock can
* incorrectly be double acquired) whereas {@link
* SimpleFSLockFactory} worked perfectly in those same
* environments. For NFS based access to an index, it's
* recommended that you try {@link SimpleFSLockFactory}
* first and work around the one limitation that a lock file
* could be left when the JVM exits abnormally.</p>
*
* <p>The primary benefit of {@link NativeFSLockFactory} is
* that lock files will be properly removed (by the OS) if
* the JVM has an abnormal exit.</p>
*
* <p>Note that, unlike {@link SimpleFSLockFactory}, the existence of
* leftover lock files in the filesystem on exiting the JVM
* is fine because the OS will free the locks held against
* these files even though the files still remain.</p>
*
* <p>If you suspect that this or any other LockFactory is
* not working properly in your environment, you can easily
* test it by using {@link VerifyingLockFactory}, {@link
* LockVerifyServer} and {@link LockStressTest}.</p>
*
* @see LockFactory
*/
public class NativeFSLockFactory extends FSLockFactory {

  // Set once the one-time locking sanity check below has run.
  private volatile boolean tested = false;

  // Simple test to verify locking system is "working". On
  // NFS, if it's misconfigured, you can hit long (35
  // second) timeouts which cause Lock.obtain to take far
  // too long (it assumes the obtain() call takes zero
  // time).
  private synchronized void acquireTestLock() {
    if (tested) return;
    tested = true;
    // Ensure that lockDir exists and is a directory.
    if (!lockDir.exists()) {
      if (!lockDir.mkdirs())
        throw new RuntimeException("Cannot create directory: " +
                                   lockDir.getAbsolutePath());
    } else if (!lockDir.isDirectory()) {
      throw new RuntimeException("Found regular file where directory expected: " +
                                 lockDir.getAbsolutePath());
    }
    // add the RuntimeMXBean's name to the lock file, to reduce the chance for
    // name collisions when this code is invoked by multiple JVMs (such as in
    // our tests). On most systems, the name includes the process Id.
    // Also, remove any non-alphanumeric characters, so that the lock file will
    // be created for sure on all systems.
    // NOTE: the character class must use ranges ("a-z"); the previous pattern
    // "[^a..zA..Z0..9]+" treated '.' as a literal, so it stripped genuine
    // alphanumerics while letting '.' survive into the lock-file name.
    String randomLockName = "lucene-"
        + ManagementFactory.getRuntimeMXBean().getName().replaceAll("[^a-zA-Z0-9]+", "") + "-"
        + Long.toString(new Random().nextInt(), Character.MAX_RADIX)
        + "-test.lock";
    Lock l = makeLock(randomLockName);
    try {
      l.obtain();
      l.release();
      // If the test lock failed to delete after all the attempts, attempt a
      // delete when the JVM exits.
      File lockFile = new File(lockDir, randomLockName);
      if (lockFile.exists()) {
        lockFile.deleteOnExit();
      }
    } catch (IOException e) {
      RuntimeException e2 = new RuntimeException("Failed to acquire random test lock; please verify filesystem for lock directory '" + lockDir + "' supports locking");
      e2.initCause(e);
      throw e2;
    }
  }

  /**
   * Create a NativeFSLockFactory instance, with null (unset)
   * lock directory. When you pass this factory to a {@link FSDirectory}
   * subclass, the lock directory is automatically set to the
   * directory itself. Be sure to create one instance for each directory
   * you create!
   */
  public NativeFSLockFactory() throws IOException {
    this((File) null);
  }

  /**
   * Create a NativeFSLockFactory instance, storing lock
   * files into the specified lockDirName:
   *
   * @param lockDirName where lock files are created.
   */
  public NativeFSLockFactory(String lockDirName) throws IOException {
    this(new File(lockDirName));
  }

  /**
   * Create a NativeFSLockFactory instance, storing lock
   * files into the specified lockDir:
   *
   * @param lockDir where lock files are created.
   */
  public NativeFSLockFactory(File lockDir) throws IOException {
    setLockDir(lockDir);
  }

  /** Returns a {@link NativeFSLock} for {@code lockName} (prefixed with this
   *  factory's lock prefix, if any), running the one-time sanity check first. */
  @Override
  public synchronized Lock makeLock(String lockName) {
    acquireTestLock();
    if (lockPrefix != null)
      lockName = lockPrefix + "-" + lockName;
    return new NativeFSLock(lockDir, lockName);
  }

  /** Deletes the named lock file if present.
   *  @throws IOException if the file exists but cannot be deleted. */
  @Override
  public void clearLock(String lockName) throws IOException {
    // Note that this isn't strictly required anymore
    // because the existence of these files does not mean
    // they are locked, but, still do this in case people
    // really want to see the files go away:
    if (lockDir.exists()) {
      if (lockPrefix != null) {
        lockName = lockPrefix + "-" + lockName;
      }
      File lockFile = new File(lockDir, lockName);
      if (lockFile.exists() && !lockFile.delete()) {
        throw new IOException("Cannot delete " + lockFile);
      }
    }
  }
}
class NativeFSLock extends Lock {
// Underlying file, channel and OS lock; all three are non-null only while
// this instance holds the lock (all access is synchronized on "this").
private RandomAccessFile f;
private FileChannel channel;
private FileLock lock;
private File path;
private File lockDir;
/*
* The javadocs for FileChannel state that you should have
* a single instance of a FileChannel (per JVM) for all
* locking against a given file. To ensure this, we have
* a single (static) HashSet that contains the file paths
* of all currently locked locks. This protects against
* possible cases where different Directory instances in
* one JVM (each with their own NativeFSLockFactory
* instance) have set the same lock dir and lock prefix.
*/
private static HashSet<String> LOCK_HELD = new HashSet<String>();
public NativeFSLock(File lockDir, String lockFileName) {
this.lockDir = lockDir;
path = new File(lockDir, lockFileName);
}
// True only while this instance holds the OS-level FileLock.
private synchronized boolean lockExists() {
return lock != null;
}
@Override
public synchronized boolean obtain() throws IOException {
if (lockExists()) {
// Our instance is already locked:
return false;
}
// Ensure that lockDir exists and is a directory.
if (!lockDir.exists()) {
if (!lockDir.mkdirs())
throw new IOException("Cannot create directory: " +
lockDir.getAbsolutePath());
} else if (!lockDir.isDirectory()) {
throw new IOException("Found regular file where directory expected: " +
lockDir.getAbsolutePath());
}
String canonicalPath = path.getCanonicalPath();
boolean markedHeld = false;
try {
// Make sure nobody else in-process has this lock held
// already, and, mark it held if not:
synchronized(LOCK_HELD) {
if (LOCK_HELD.contains(canonicalPath)) {
// Someone else in this JVM already has the lock:
return false;
} else {
// This "reserves" the fact that we are the one
// thread trying to obtain this lock, so we own
// the only instance of a channel against this
// file:
LOCK_HELD.add(canonicalPath);
markedHeld = true;
}
}
try {
f = new RandomAccessFile(path, "rw");
} catch (IOException e) {
// On Windows, we can get intermittent "Access
// Denied" here. So, we treat this as failure to
// acquire the lock, but, store the reason in case
// there is in fact a real error case.
failureReason = e;
f = null;
}
if (f != null) {
try {
channel = f.getChannel();
try {
// tryLock is non-blocking: it returns null when another
// process already holds the lock on this file.
lock = channel.tryLock();
} catch (IOException e) {
// At least on OS X, we will sometimes get an
// intermittent "Permission Denied" IOException,
// which seems to simply mean "you failed to get
// the lock". But other IOExceptions could be
// "permanent" (eg, locking is not supported via
// the filesystem). So, we record the failure
// reason here; the timeout obtain (usually the
// one calling us) will use this as "root cause"
// if it fails to get the lock.
failureReason = e;
} finally {
// Acquisition failed: unwind by closing the channel...
if (lock == null) {
try {
channel.close();
} finally {
channel = null;
}
}
}
} finally {
// ...and the file, so no descriptor leaks on the failure path.
if (channel == null) {
try {
f.close();
} finally {
f = null;
}
}
}
}
} finally {
// On any failure path, give back the in-process reservation taken above.
if (markedHeld && !lockExists()) {
synchronized(LOCK_HELD) {
if (LOCK_HELD.contains(canonicalPath)) {
LOCK_HELD.remove(canonicalPath);
}
}
}
}
return lockExists();
}
@Override
public synchronized void release() throws IOException {
if (lockExists()) {
try {
lock.release();
} finally {
// Nested finally blocks guarantee the channel, the file and the
// in-process reservation are all cleaned up even if a close throws.
lock = null;
try {
channel.close();
} finally {
channel = null;
try {
f.close();
} finally {
f = null;
synchronized(LOCK_HELD) {
LOCK_HELD.remove(path.getCanonicalPath());
}
}
}
}
// LUCENE-2421: we don't care anymore if the file cannot be deleted
// because it's held up by another process (e.g. AntiVirus). NativeFSLock
// does not depend on the existence/absence of the lock file
path.delete();
} else {
// if we don't hold the lock, and somebody still called release(), for
// example as a result of calling IndexWriter.unlock(), we should attempt
// to obtain the lock and release it. If the obtain fails, it means the
// lock cannot be released, and we should throw a proper exception rather
// than silently failing/not doing anything.
boolean obtained = false;
try {
if (!(obtained = obtain())) {
throw new LockReleaseFailedException(
"Cannot forcefully unlock a NativeFSLock which is held by another indexer component: "
+ path);
}
} finally {
if (obtained) {
release();
}
}
}
}
@Override
public synchronized boolean isLocked() {
// The test for is isLocked is not directly possible with native file locks:
// First a shortcut, if a lock reference in this instance is available
if (lockExists()) return true;
// Look if lock file is present; if not, there can definitely be no lock!
if (!path.exists()) return false;
// Try to obtain and release (if was locked) the lock
try {
boolean obtained = obtain();
if (obtained) release();
return !obtained;
} catch (IOException ioe) {
return false;
}
}
@Override
public String toString() {
return "NativeFSLock@" + path;
}
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/store/NativeFSLockFactory.java | Java | art | 12,185 |
package org.apache.lucene.store;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.io.FileNotFoundException;
import java.io.Serializable;
import java.util.HashMap;
import java.util.Set;
import org.apache.lucene.util.ThreadInterruptedException;
/**
* A memory-resident {@link Directory} implementation. Locking
* implementation is by default the {@link SingleInstanceLockFactory}
* but can be changed with {@link #setLockFactory}.
*/
public class RAMDirectory extends Directory implements Serializable {

  // "1L", not "1l": the lowercase suffix is easily misread as the digit 1.
  private static final long serialVersionUID = 1L;

  // File name -> in-memory contents.  All map access is synchronized on "this".
  HashMap<String,RAMFile> fileMap = new HashMap<String,RAMFile>();
  // Total bytes across all files; kept consistent with fileMap under "this".
  long sizeInBytes;

  // *****
  // Lock acquisition sequence: RAMDirectory, then RAMFile
  // *****

  /** Constructs an empty {@link Directory}. */
  public RAMDirectory() {
    setLockFactory(new SingleInstanceLockFactory());
  }

  /**
   * Creates a new <code>RAMDirectory</code> instance from a different
   * <code>Directory</code> implementation.  This can be used to load
   * a disk-based index into memory.
   * <P>
   * This should be used only with indices that can fit into memory.
   * <P>
   * Note that the resulting <code>RAMDirectory</code> instance is fully
   * independent from the original <code>Directory</code> (it is a
   * complete copy).  Any subsequent changes to the
   * original <code>Directory</code> will not be visible in the
   * <code>RAMDirectory</code> instance.
   *
   * @param dir a <code>Directory</code> value
   * @exception IOException if an error occurs
   */
  public RAMDirectory(Directory dir) throws IOException {
    this(dir, false);
  }

  private RAMDirectory(Directory dir, boolean closeDir) throws IOException {
    this();
    Directory.copy(dir, this, closeDir);
  }

  /** Returns a snapshot array of all file names in this directory. */
  @Override
  public synchronized final String[] listAll() {
    ensureOpen();
    Set<String> fileNames = fileMap.keySet();
    // toArray replaces the hand-rolled copy loop; we hold "this", so the
    // key set cannot change while the snapshot is taken.
    return fileNames.toArray(new String[fileNames.size()]);
  }

  /** Returns true iff the named file exists in this directory. */
  @Override
  public final boolean fileExists(String name) {
    ensureOpen();
    RAMFile file;
    synchronized (this) {
      file = fileMap.get(name);
    }
    return file != null;
  }

  /** Returns the time the named file was last modified.
   * @throws IOException if the file does not exist
   */
  @Override
  public final long fileModified(String name) throws IOException {
    ensureOpen();
    RAMFile file;
    synchronized (this) {
      file = fileMap.get(name);
    }
    if (file == null)
      throw new FileNotFoundException(name);
    return file.getLastModified();
  }

  /** Set the modified time of an existing file to now.
   * @throws IOException if the file does not exist
   */
  @Override
  public void touchFile(String name) throws IOException {
    ensureOpen();
    RAMFile file;
    synchronized (this) {
      file = fileMap.get(name);
    }
    if (file == null)
      throw new FileNotFoundException(name);
    // Spin (with minimal sleeps) until the millisecond clock ticks, so the
    // stored timestamp is strictly newer than any taken earlier this ms.
    long ts2, ts1 = System.currentTimeMillis();
    do {
      try {
        Thread.sleep(0, 1);
      } catch (InterruptedException ie) {
        throw new ThreadInterruptedException(ie);
      }
      ts2 = System.currentTimeMillis();
    } while (ts1 == ts2);
    file.setLastModified(ts2);
  }

  /** Returns the length in bytes of a file in the directory.
   * @throws IOException if the file does not exist
   */
  @Override
  public final long fileLength(String name) throws IOException {
    ensureOpen();
    RAMFile file;
    synchronized (this) {
      file = fileMap.get(name);
    }
    if (file == null)
      throw new FileNotFoundException(name);
    return file.getLength();
  }

  /** Return total size in bytes of all files in this
   *  directory.  This is currently quantized to
   *  RAMOutputStream.BUFFER_SIZE. */
  public synchronized final long sizeInBytes() {
    ensureOpen();
    return sizeInBytes;
  }

  /** Removes an existing file in the directory.
   * @throws IOException if the file does not exist
   */
  @Override
  public synchronized void deleteFile(String name) throws IOException {
    ensureOpen();
    RAMFile file = fileMap.get(name);
    if (file != null) {
      fileMap.remove(name);
      // Detach the file so late writers can no longer affect our accounting.
      file.directory = null;
      sizeInBytes -= file.sizeInBytes;
    } else
      throw new FileNotFoundException(name);
  }

  /** Creates a new, empty file in the directory with the given name.
   *  Returns a stream writing this file.  Silently replaces (and detaches)
   *  any existing file of the same name. */
  @Override
  public IndexOutput createOutput(String name) throws IOException {
    ensureOpen();
    RAMFile file = new RAMFile(this);
    synchronized (this) {
      RAMFile existing = fileMap.get(name);
      if (existing != null) {
        sizeInBytes -= existing.sizeInBytes;
        existing.directory = null;
      }
      fileMap.put(name, file);
    }
    return new RAMOutputStream(file);
  }

  /** Returns a stream reading an existing file.
   * @throws FileNotFoundException if the file does not exist
   */
  @Override
  public IndexInput openInput(String name) throws IOException {
    ensureOpen();
    RAMFile file;
    synchronized (this) {
      file = fileMap.get(name);
    }
    if (file == null)
      throw new FileNotFoundException(name);
    return new RAMInputStream(file);
  }

  /** Closes the store to future operations, releasing associated memory. */
  @Override
  public void close() {
    isOpen = false;
    fileMap = null;
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/store/RAMDirectory.java | Java | art | 6,234 |
package org.apache.lucene.store;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* This exception is thrown when there is an attempt to
* access something that has already been closed.
*/
public class AlreadyClosedException extends IllegalStateException {
  // This class is Serializable via IllegalStateException; pin the stream
  // version explicitly instead of relying on the compiler-computed default.
  private static final long serialVersionUID = 1L;

  /**
   * @param message describes which resource was accessed after being closed.
   */
  public AlreadyClosedException(String message) {
    super(message);
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/store/AlreadyClosedException.java | Java | art | 1,096 |
package org.apache.lucene.store;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.io.Closeable;
import java.util.Map;
import java.util.HashMap;
/** Abstract base class for input from a file in a {@link Directory}. A
* random-access input stream. Used for all Lucene index input operations.
* @see Directory
*/
public abstract class IndexInput implements Cloneable,Closeable {
private boolean preUTF8Strings; // true if we are reading old (modified UTF8) string format
/** Reads and returns a single byte.
* @see IndexOutput#writeByte(byte)
*/
public abstract byte readByte() throws IOException;
/** Reads a specified number of bytes into an array at the specified offset.
* @param b the array to read bytes into
* @param offset the offset in the array to start storing bytes
* @param len the number of bytes to read
* @see IndexOutput#writeBytes(byte[],int)
*/
public abstract void readBytes(byte[] b, int offset, int len)
throws IOException;
/** Reads a specified number of bytes into an array at the
* specified offset with control over whether the read
* should be buffered (callers who have their own buffer
* should pass in "false" for useBuffer). Currently only
* {@link BufferedIndexInput} respects this parameter.
* @param b the array to read bytes into
* @param offset the offset in the array to start storing bytes
* @param len the number of bytes to read
* @param useBuffer set to false if the caller will handle
* buffering.
* @see IndexOutput#writeBytes(byte[],int)
*/
public void readBytes(byte[] b, int offset, int len, boolean useBuffer)
throws IOException
{
// Default to ignoring useBuffer entirely
readBytes(b, offset, len);
}
/** Reads four bytes and returns an int.
* Bytes are combined big-endian: the first byte read is the most significant.
* @see IndexOutput#writeInt(int)
*/
public int readInt() throws IOException {
return ((readByte() & 0xFF) << 24) | ((readByte() & 0xFF) << 16)
| ((readByte() & 0xFF) << 8) | (readByte() & 0xFF);
}
/** Reads an int stored in variable-length format. Reads between one and
* five bytes. Smaller values take fewer bytes. Negative numbers are not
* supported.
* @see IndexOutput#writeVInt(int)
*/
public int readVInt() throws IOException {
// Each byte carries 7 payload bits; the high bit flags that another
// byte follows. Payload is accumulated low bits first.
byte b = readByte();
int i = b & 0x7F;
for (int shift = 7; (b & 0x80) != 0; shift += 7) {
b = readByte();
i |= (b & 0x7F) << shift;
}
return i;
}
/** Reads eight bytes and returns a long.
* Composed big-endian from two consecutive readInt() values.
* @see IndexOutput#writeLong(long)
*/
public long readLong() throws IOException {
return (((long)readInt()) << 32) | (readInt() & 0xFFFFFFFFL);
}
/** Reads a long stored in variable-length format. Reads between one and
* nine bytes. Smaller values take fewer bytes. Negative numbers are not
* supported. */
public long readVLong() throws IOException {
// Same continuation-bit scheme as readVInt, widened to 64 bits.
byte b = readByte();
long i = b & 0x7F;
for (int shift = 7; (b & 0x80) != 0; shift += 7) {
b = readByte();
i |= (b & 0x7FL) << shift;
}
return i;
}
/** Call this if readString should read characters stored
* in the old modified UTF8 format (length in java chars
* and java's modified UTF8 encoding). This is used for
* indices written pre-2.4 See LUCENE-510 for details. */
public void setModifiedUTF8StringsMode() {
preUTF8Strings = true;
}
/** Reads a string.
* In the current format the VInt length prefix counts UTF-8 bytes;
* in the pre-2.4 format (see setModifiedUTF8StringsMode) it counts chars.
* @see IndexOutput#writeString(String)
*/
public String readString() throws IOException {
if (preUTF8Strings)
return readModifiedUTF8String();
int length = readVInt();
final byte[] bytes = new byte[length];
readBytes(bytes, 0, length);
return new String(bytes, 0, length, "UTF-8");
}
// Pre-2.4 path: length is a count of Java chars, not bytes.
private String readModifiedUTF8String() throws IOException {
int length = readVInt();
final char[] chars = new char[length];
readChars(chars, 0, length);
return new String(chars, 0, length);
}
/** Reads Lucene's old "modified UTF-8" encoded
* characters into an array.
* @param buffer the array to read characters into
* @param start the offset in the array to start storing characters
* @param length the number of characters to read
* @see IndexOutput#writeChars(String,int,int)
* @deprecated -- please use readString or readBytes
* instead, and construct the string
* from those utf8 bytes
*/
public void readChars(char[] buffer, int start, int length)
throws IOException {
final int end = start + length;
for (int i = start; i < end; i++) {
byte b = readByte();
if ((b & 0x80) == 0)
// 0xxxxxxx: single-byte sequence
buffer[i] = (char)(b & 0x7F);
else if ((b & 0xE0) != 0xE0) {
// 110xxxxx 10xxxxxx: two-byte sequence
buffer[i] = (char)(((b & 0x1F) << 6)
| (readByte() & 0x3F));
} else
// 1110xxxx 10xxxxxx 10xxxxxx: three-byte sequence
buffer[i] = (char)(((b & 0x0F) << 12)
| ((readByte() & 0x3F) << 6)
| (readByte() & 0x3F));
}
}
/**
* Expert
*
* Similar to {@link #readChars(char[], int, int)} but does not do any conversion operations on the bytes it is reading in. It still
* has to invoke {@link #readByte()} just as {@link #readChars(char[], int, int)} does, but it does not need a buffer to store anything
* and it does not have to do any of the bitwise operations, since we don't actually care what is in the byte except to determine
* how many more bytes to read
* @param length The number of chars to read
* @deprecated this method operates on old "modified utf8" encoded
* strings
*/
public void skipChars(int length) throws IOException{
for (int i = 0; i < length; i++) {
byte b = readByte();
if ((b & 0x80) == 0){
//do nothing, we only need one byte
}
else if ((b & 0xE0) != 0xE0) {
readByte();//read an additional byte
} else{
//read two additional bytes.
readByte();
readByte();
}
}
}
/** Closes the stream to further operations. */
public abstract void close() throws IOException;
/** Returns the current position in this file, where the next read will
* occur.
* @see #seek(long)
*/
public abstract long getFilePointer();
/** Sets current position in this file, where the next read will occur.
* @see #getFilePointer()
*/
public abstract void seek(long pos) throws IOException;
/** The number of bytes in the file. */
public abstract long length();
/** Returns a clone of this stream.
*
* <p>Clones of a stream access the same data, and are positioned at the same
* point as the stream they were cloned from.
*
* <p>Expert: Subclasses must ensure that clones may be positioned at
* different points in the input from each other and from the stream they
* were cloned from.
*/
@Override
public Object clone() {
IndexInput clone = null;
try {
clone = (IndexInput)super.clone();
} catch (CloneNotSupportedException e) {} // cannot happen: IndexInput implements Cloneable
return clone;
}
/** Reads a count-prefixed sequence of key/value string pairs into a map.
* Format: readInt() count, then count pairs of readString() key, value. */
public Map<String,String> readStringStringMap() throws IOException {
final Map<String,String> map = new HashMap<String,String>();
final int count = readInt();
for(int i=0;i<count;i++) {
final String key = readString();
final String val = readString();
map.put(key, val);
}
return map;
}
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/store/IndexInput.java | Java | art | 8,074 |
package org.apache.lucene.store;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.InetSocketAddress;
import java.net.ServerSocket;
import java.net.Socket;
/**
* Simple standalone server that must be running when you
* use {@link VerifyingLockFactory}. This server simply
* verifies at most one process holds the lock at a time.
* Run without any args to see usage.
*
* @see VerifyingLockFactory
* @see LockStressTest
*/
public class LockVerifyServer {

  /** Formats elapsed seconds since {@code startTime} as "[Ns] " for log lines. */
  private static String getTime(long startTime) {
    return "[" + ((System.currentTimeMillis()-startTime)/1000) + "s] ";
  }

  public static void main(String[] args) throws IOException {
    if (args.length != 1) {
      System.out.println("\nUsage: java org.apache.lucene.store.LockVerifyServer port\n");
      System.exit(1);
    }
    final int port = Integer.parseInt(args[0]);
    // Create the socket unbound so SO_REUSEADDR can be set *before* binding;
    // calling setReuseAddress after the ServerSocket(int) constructor has
    // already bound the socket has no effect on the bind that just happened.
    ServerSocket s = new ServerSocket();
    s.setReuseAddress(true);
    s.bind(new InetSocketAddress(port));
    System.out.println("\nReady on port " + port + "...");
    // Wire protocol per connection: client sends (id, command) bytes;
    // command 1 = "I acquired the lock", 0 = "I released the lock".
    // Server replies 1 on a detected violation, 0 otherwise.
    int lockedID = 0;
    long startTime = System.currentTimeMillis();
    while(true) {
      Socket cs = s.accept();
      OutputStream out = cs.getOutputStream();
      InputStream in = cs.getInputStream();
      int id = in.read();
      int command = in.read();
      boolean err = false;
      if (command == 1) {
        // Locked
        if (lockedID != 0) {
          err = true;
          System.out.println(getTime(startTime) + " ERROR: id " + id + " got lock, but " + lockedID + " already holds the lock");
        }
        lockedID = id;
      } else if (command == 0) {
        if (lockedID != id) {
          err = true;
          System.out.println(getTime(startTime) + " ERROR: id " + id + " released the lock, but " + lockedID + " is the one holding the lock");
        }
        lockedID = 0;
      } else
        throw new RuntimeException("unrecognized command " + command);
      System.out.print(".");
      if (err)
        out.write(1);
      else
        out.write(0);
      out.close();
      in.close();
      cs.close();
    }
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/store/LockVerifyServer.java | Java | art | 2,877 |
package org.apache.lucene.store;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.util.ArrayList;
import java.io.Serializable;
/** For Lucene internal use */
public class RAMFile implements Serializable {

  // "1L", not "1l": the lowercase suffix is easily misread as the digit 1.
  private static final long serialVersionUID = 1L;

  // Backing storage: a list of byte buffers.  Guarded by "this".
  protected ArrayList<byte[]> buffers = new ArrayList<byte[]>();
  long length;
  RAMDirectory directory;
  protected long sizeInBytes;

  // This is publicly modifiable via Directory.touchFile(), so direct access not supported
  private long lastModified = System.currentTimeMillis();

  // File used as buffer, in no RAMDirectory
  protected RAMFile() {}

  RAMFile(RAMDirectory directory) {
    this.directory = directory;
  }

  // For non-stream access from thread that might be concurrent with writing
  public synchronized long getLength() {
    return length;
  }

  protected synchronized void setLength(long length) {
    this.length = length;
  }

  // For non-stream access from thread that might be concurrent with writing
  public synchronized long getLastModified() {
    return lastModified;
  }

  protected synchronized void setLastModified(long lastModified) {
    this.lastModified = lastModified;
  }

  /** Allocates a new buffer of {@code size} bytes, appends it to this file
   *  and updates both this file's and the owning directory's byte counts.
   *  The two monitors are taken one after the other, never nested, which
   *  keeps us clear of the documented RAMDirectory-then-RAMFile lock order. */
  protected final byte[] addBuffer(int size) {
    byte[] buffer = newBuffer(size);
    synchronized (this) {
      buffers.add(buffer);
      sizeInBytes += size;
    }
    if (directory != null) {
      synchronized (directory) {
        directory.sizeInBytes += size;
      }
    }
    return buffer;
  }

  protected final synchronized byte[] getBuffer(int index) {
    return buffers.get(index);
  }

  protected final synchronized int numBuffers() {
    return buffers.size();
  }

  /**
   * Expert: allocate a new buffer.
   * Subclasses can allocate differently.
   * @param size size of allocated buffer.
   * @return allocated buffer.
   */
  protected byte[] newBuffer(int size) {
    return new byte[size];
  }

  public synchronized long getSizeInBytes() {
    return sizeInBytes;
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/store/RAMFile.java | Java | art | 2,757 |
package org.apache.lucene.store;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
/** Base implementation class for buffered {@link IndexOutput}. */
public abstract class BufferedIndexOutput extends IndexOutput {
static final int BUFFER_SIZE = 16384;
private final byte[] buffer = new byte[BUFFER_SIZE];
private long bufferStart = 0; // position in file of buffer
private int bufferPosition = 0; // position in buffer
/** Writes a single byte.
* @see IndexInput#readByte()
*/
@Override
public void writeByte(byte b) throws IOException {
if (bufferPosition >= BUFFER_SIZE)
flush();
buffer[bufferPosition++] = b;
}
/** Writes an array of bytes.
* @param b the bytes to write
* @param length the number of bytes to write
* @see IndexInput#readBytes(byte[],int,int)
*/
@Override
public void writeBytes(byte[] b, int offset, int length) throws IOException {
int bytesLeft = BUFFER_SIZE - bufferPosition;
// is there enough space in the buffer?
if (bytesLeft >= length) {
// we add the data to the end of the buffer
System.arraycopy(b, offset, buffer, bufferPosition, length);
bufferPosition += length;
// if the buffer is full, flush it
if (BUFFER_SIZE - bufferPosition == 0)
flush();
} else {
// is data larger then buffer?
if (length > BUFFER_SIZE) {
// we flush the buffer
if (bufferPosition > 0)
flush();
// and write data at once
flushBuffer(b, offset, length);
bufferStart += length;
} else {
// we fill/flush the buffer (until the input is written)
int pos = 0; // position in the input data
int pieceLength;
while (pos < length) {
pieceLength = (length - pos < bytesLeft) ? length - pos : bytesLeft;
System.arraycopy(b, pos + offset, buffer, bufferPosition, pieceLength);
pos += pieceLength;
bufferPosition += pieceLength;
// if the buffer is full, flush it
bytesLeft = BUFFER_SIZE - bufferPosition;
if (bytesLeft == 0) {
flush();
bytesLeft = BUFFER_SIZE;
}
}
}
}
}
/** Forces any buffered output to be written. */
@Override
public void flush() throws IOException {
flushBuffer(buffer, bufferPosition);
bufferStart += bufferPosition;
bufferPosition = 0;
}
  /** Expert: implements buffer write. Writes bytes at the current position in
   * the output.
   * <p>Convenience overload that writes starting at offset 0.</p>
   * @param b the bytes to write
   * @param len the number of bytes to write
   */
  private void flushBuffer(byte[] b, int len) throws IOException {
    flushBuffer(b, 0, len);
  }
  /** Expert: implements buffer write. Writes bytes at the current position in
   * the output.
   * <p>Subclasses implement the actual transfer to the underlying storage.</p>
   * @param b the bytes to write
   * @param offset the offset in the byte array
   * @param len the number of bytes to write
   */
  protected abstract void flushBuffer(byte[] b, int offset, int len) throws IOException;
  /** Closes this stream to further operations.
   * <p>Flushes any remaining buffered bytes before closing.</p> */
  @Override
  public void close() throws IOException {
    flush();
  }
/** Returns the current position in this file, where the next write will
* occur.
* @see #seek(long)
*/
@Override
public long getFilePointer() {
return bufferStart + bufferPosition;
}
  /** Sets current position in this file, where the next write will occur.
   * <p>Any buffered bytes are flushed first so they land at the old
   * position; no bounds checking is performed on {@code pos} here.</p>
   * @see #getFilePointer()
   */
  @Override
  public void seek(long pos) throws IOException {
    flush();
    bufferStart = pos;
  }
  /** The number of bytes in the file.
   * @return the file length in bytes
   */
  @Override
  public abstract long length() throws IOException;
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/store/BufferedIndexOutput.java | Java | art | 4,473 |
package org.apache.lucene.store;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.Set;
/**
* Expert: A Directory instance that switches files between
* two other Directory instances.
* <p>Files with the specified extensions are placed in the
* primary directory; others are placed in the secondary
* directory. The provided Set must not change once passed
* to this class, and must allow multiple threads to call
* contains at once.</p>
*
* <p><b>NOTE</b>: this API is new and experimental and is
* subject to suddenly change in the next release.
*/
public class FileSwitchDirectory extends Directory {
  private final Directory secondaryDir;
  private final Directory primaryDir;
  // Extensions (without the leading dot) routed to primaryDir; everything
  // else goes to secondaryDir.  Must not change after construction and must
  // support concurrent contains() calls.
  private final Set<String> primaryExtensions;
  // When true, close() also closes both delegates (exactly once).
  private boolean doClose;

  /**
   * Creates a directory that routes files by extension.
   * @param primaryExtensions extensions (no leading dot) handled by {@code primaryDir}
   * @param primaryDir delegate for files with a primary extension
   * @param secondaryDir delegate for all other files
   * @param doClose if true, {@link #close} closes both delegates as well
   */
  public FileSwitchDirectory(Set<String> primaryExtensions, Directory primaryDir, Directory secondaryDir, boolean doClose) {
    this.primaryExtensions = primaryExtensions;
    this.primaryDir = primaryDir;
    this.secondaryDir = secondaryDir;
    this.doClose = doClose;
    // locking is delegated to the primary directory's lock factory
    this.lockFactory = primaryDir.getLockFactory();
  }

  /** Return the primary directory */
  public Directory getPrimaryDir() {
    return primaryDir;
  }

  /** Return the secondary directory */
  public Directory getSecondaryDir() {
    return secondaryDir;
  }

  /** Closes this directory and, if requested at construction, both
   *  delegates.  The primary is closed even if closing the secondary
   *  throws; the flag guarantees the delegates are closed at most once. */
  @Override
  public void close() throws IOException {
    if (doClose) {
      try {
        secondaryDir.close();
      } finally {
        primaryDir.close();
      }
      doClose = false;
    }
  }

  /** Lists the files of both delegates, primary files first. */
  @Override
  public String[] listAll() throws IOException {
    String[] primaryFiles = primaryDir.listAll();
    String[] secondaryFiles = secondaryDir.listAll();
    String[] files = new String[primaryFiles.length + secondaryFiles.length];
    System.arraycopy(primaryFiles, 0, files, 0, primaryFiles.length);
    System.arraycopy(secondaryFiles, 0, files, primaryFiles.length, secondaryFiles.length);
    return files;
  }

  /** Utility method to return a file's extension: the text after the last
   *  '.', or "" when the name contains no dot. */
  public static String getExtension(String name) {
    int i = name.lastIndexOf('.');
    if (i == -1) {
      return "";
    }
    // one-arg substring is equivalent to substring(i+1, name.length())
    return name.substring(i + 1);
  }

  /** Picks the delegate responsible for the given file name. */
  private Directory getDirectory(String name) {
    String ext = getExtension(name);
    return primaryExtensions.contains(ext) ? primaryDir : secondaryDir;
  }

  @Override
  public boolean fileExists(String name) throws IOException {
    return getDirectory(name).fileExists(name);
  }

  @Override
  public long fileModified(String name) throws IOException {
    return getDirectory(name).fileModified(name);
  }

  @Override
  public void touchFile(String name) throws IOException {
    getDirectory(name).touchFile(name);
  }

  @Override
  public void deleteFile(String name) throws IOException {
    getDirectory(name).deleteFile(name);
  }

  @Override
  public long fileLength(String name) throws IOException {
    return getDirectory(name).fileLength(name);
  }

  @Override
  public IndexOutput createOutput(String name) throws IOException {
    return getDirectory(name).createOutput(name);
  }

  @Override
  public void sync(String name) throws IOException {
    getDirectory(name).sync(name);
  }

  @Override
  public IndexInput openInput(String name) throws IOException {
    return getDirectory(name).openInput(name);
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/store/FileSwitchDirectory.java | Java | art | 4,172 |
package org.apache.lucene.store;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to You under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
import java.io.File;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
/**
* An {@link FSDirectory} implementation that uses
* java.nio's FileChannel's positional read, which allows
* multiple threads to read from the same file without
* synchronizing.
*
* <p>This class only uses FileChannel when reading; writing
* is achieved with {@link SimpleFSDirectory.SimpleFSIndexOutput}.
*
* <p><b>NOTE</b>: NIOFSDirectory is not recommended on Windows because of a bug
* in how FileChannel.read is implemented in Sun's JRE.
* Inside of the implementation the position is apparently
* synchronized. See <a
* href="http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=6265734">here</a>
* for details.
*/
public class NIOFSDirectory extends FSDirectory {
  /** Create a new NIOFSDirectory for the named location.
   *
   * @param path the path of the directory
   * @param lockFactory the lock factory to use, or null for the default
   * ({@link NativeFSLockFactory});
   * @throws IOException
   */
  public NIOFSDirectory(File path, LockFactory lockFactory) throws IOException {
    super(path, lockFactory);
  }
  /** Create a new NIOFSDirectory for the named location and {@link NativeFSLockFactory}.
   *
   * @param path the path of the directory
   * @throws IOException
   */
  public NIOFSDirectory(File path) throws IOException {
    super(path, null);
  }
  /** Creates an IndexInput for the file with the given name. */
  @Override
  public IndexInput openInput(String name, int bufferSize) throws IOException {
    ensureOpen();
    return new NIOFSIndexInput(new File(getFile(), name), bufferSize, getReadChunkSize());
  }
  /** Creates an IndexOutput for the file with the given name.
   * <p>Only reads use NIO; writes go through the plain
   * RandomAccessFile-based output.</p> */
  @Override
  public IndexOutput createOutput(String name) throws IOException {
    initOutput(name);
    return new SimpleFSDirectory.SimpleFSIndexOutput(new File(directory, name));
  }
  /** IndexInput that reads via FileChannel positional reads, so multiple
   *  threads can read the same file without synchronizing on a shared
   *  file pointer. */
  protected static class NIOFSIndexInput extends SimpleFSDirectory.SimpleFSIndexInput {
    private ByteBuffer byteBuf; // wraps the buffer for NIO
    // cache of the last non-own buffer we wrapped, to avoid re-wrapping on
    // repeated calls with the same array (e.g. from compound file readers)
    private byte[] otherBuffer;
    private ByteBuffer otherByteBuf;
    final FileChannel channel;
    public NIOFSIndexInput(File path, int bufferSize, int chunkSize) throws IOException {
      super(path, bufferSize, chunkSize);
      channel = file.getChannel();
    }
    @Override
    protected void newBuffer(byte[] newBuffer) {
      super.newBuffer(newBuffer);
      // pre-wrap our own buffer once so the common read path skips wrapping
      byteBuf = ByteBuffer.wrap(newBuffer);
    }
    @Override
    public void close() throws IOException {
      // clones share the underlying file, so only the original closes it
      if (!isClone && file.isOpen) {
        // Close the channel & file
        try {
          channel.close();
        } finally {
          file.close();
        }
      }
    }
    @Override
    protected void readInternal(byte[] b, int offset, int len) throws IOException {
      final ByteBuffer bb;
      // Determine the ByteBuffer we should use
      if (b == buffer && 0 == offset) {
        // Use our own pre-wrapped byteBuf:
        assert byteBuf != null;
        byteBuf.clear();
        byteBuf.limit(len);
        bb = byteBuf;
      } else {
        if (offset == 0) {
          if (otherBuffer != b) {
            // Now wrap this other buffer; with compound
            // file, we are repeatedly called with its
            // buffer, so we wrap it once and then re-use it
            // on subsequent calls
            otherBuffer = b;
            otherByteBuf = ByteBuffer.wrap(b);
          } else
            otherByteBuf.clear();
          otherByteBuf.limit(len);
          bb = otherByteBuf;
        } else {
          // Always wrap when offset != 0
          bb = ByteBuffer.wrap(b, offset, len);
        }
      }
      int readOffset = bb.position();
      int readLength = bb.limit() - readOffset;
      assert readLength == len;
      long pos = getFilePointer();
      try {
        // a positional read may return fewer bytes than requested, so loop
        while (readLength > 0) {
          final int limit;
          if (readLength > chunkSize) {
            // LUCENE-1566 - work around JVM Bug by breaking
            // very large reads into chunks
            limit = readOffset + chunkSize;
          } else {
            limit = readOffset + readLength;
          }
          bb.limit(limit);
          int i = channel.read(bb, pos);
          if (i == -1) {
            throw new IOException("read past EOF");
          }
          pos += i;
          readOffset += i;
          readLength -= i;
        }
      } catch (OutOfMemoryError e) {
        // propagate OOM up and add a hint for 32bit VM Users hitting the bug
        // with a large chunk size in the fast path.
        final OutOfMemoryError outOfMemoryError = new OutOfMemoryError(
              "OutOfMemoryError likely caused by the Sun VM Bug described in "
              + "https://issues.apache.org/jira/browse/LUCENE-1566; try calling FSDirectory.setReadChunkSize "
              + "with a a value smaller than the current chunk size (" + chunkSize + ")");
        outOfMemoryError.initCause(e);
        throw outOfMemoryError;
      }
    }
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/store/NIOFSDirectory.java | Java | art | 5,910 |
package org.apache.lucene.store;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.io.Closeable;
import java.util.Map;
import org.apache.lucene.util.UnicodeUtil;
/** Abstract base class for output to a file in a Directory. A random-access
* output stream. Used for all Lucene index output operations.
* @see Directory
* @see IndexInput
*/
public abstract class IndexOutput implements Closeable {
  /** Writes a single byte.
   * @see IndexInput#readByte()
   */
  public abstract void writeByte(byte b) throws IOException;

  /** Writes an array of bytes, starting at offset 0.
   * @param b the bytes to write
   * @param length the number of bytes to write
   * @see IndexInput#readBytes(byte[],int,int)
   */
  public void writeBytes(byte[] b, int length) throws IOException {
    writeBytes(b, 0, length);
  }

  /** Writes an array of bytes.
   * @param b the bytes to write
   * @param offset the offset in the byte array
   * @param length the number of bytes to write
   * @see IndexInput#readBytes(byte[],int,int)
   */
  public abstract void writeBytes(byte[] b, int offset, int length) throws IOException;

  /** Writes an int as four bytes, most significant byte first.
   * @see IndexInput#readInt()
   */
  public void writeInt(int i) throws IOException {
    writeByte((byte)(i >> 24));
    writeByte((byte)(i >> 16));
    writeByte((byte)(i >> 8));
    writeByte((byte) i);
  }

  /** Writes an int in a variable-length format. Writes between one and
   * five bytes. Smaller values take fewer bytes. Negative numbers are not
   * supported.
   * @see IndexInput#readVInt()
   */
  public void writeVInt(int i) throws IOException {
    // emit 7 bits at a time, low-order group first; high bit set means
    // "more bytes follow"
    while ((i & ~0x7F) != 0) {
      writeByte((byte)((i & 0x7f) | 0x80));
      i >>>= 7;
    }
    writeByte((byte)i);
  }

  /** Writes a long as eight bytes, most significant byte first.
   * @see IndexInput#readLong()
   */
  public void writeLong(long i) throws IOException {
    writeInt((int) (i >> 32));
    writeInt((int) i);
  }

  /** Writes a long in a variable-length format. Writes between one and
   * nine bytes. Smaller values take fewer bytes. Negative numbers are not
   * supported.
   * @see IndexInput#readVLong()
   */
  public void writeVLong(long i) throws IOException {
    while ((i & ~0x7F) != 0) {
      writeByte((byte)((i & 0x7f) | 0x80));
      i >>>= 7;
    }
    writeByte((byte)i);
  }

  /** Writes a string as a VInt byte-length followed by the UTF-8 encoded
   * bytes.
   * @see IndexInput#readString()
   */
  public void writeString(String s) throws IOException {
    final UnicodeUtil.UTF8Result utf8Result = new UnicodeUtil.UTF8Result();
    UnicodeUtil.UTF16toUTF8(s, 0, s.length(), utf8Result);
    writeVInt(utf8Result.length);
    writeBytes(utf8Result.result, 0, utf8Result.length);
  }

  /** Writes a sub sequence of characters from s as the old
   * format (modified UTF-8 encoded bytes).
   * @param s the source of the characters
   * @param start the first character in the sequence
   * @param length the number of characters in the sequence
   * @deprecated -- please pre-convert to utf8 bytes
   * instead or use {@link #writeString}
   */
  @Deprecated
  public void writeChars(String s, int start, int length)
       throws IOException {
    final int end = start + length;
    for (int i = start; i < end; i++) {
      final int code = (int)s.charAt(i);
      if (code >= 0x01 && code <= 0x7F)
        writeByte((byte)code);
      else if (((code >= 0x80) && (code <= 0x7FF)) || code == 0) {
        // two-byte form; note U+0000 is encoded as 0xC0 0x80 (modified UTF-8)
        writeByte((byte)(0xC0 | (code >> 6)));
        writeByte((byte)(0x80 | (code & 0x3F)));
      } else {
        writeByte((byte)(0xE0 | (code >>> 12)));
        writeByte((byte)(0x80 | ((code >> 6) & 0x3F)));
        writeByte((byte)(0x80 | (code & 0x3F)));
      }
    }
  }

  /** Writes a sub sequence of characters from char[] as
   * the old format (modified UTF-8 encoded bytes).
   * @param s the source of the characters
   * @param start the first character in the sequence
   * @param length the number of characters in the sequence
   * @deprecated -- please pre-convert to utf8 bytes instead or use {@link #writeString}
   */
  @Deprecated
  public void writeChars(char[] s, int start, int length)
    throws IOException {
    final int end = start + length;
    for (int i = start; i < end; i++) {
      final int code = (int)s[i];
      if (code >= 0x01 && code <= 0x7F)
        writeByte((byte)code);
      else if (((code >= 0x80) && (code <= 0x7FF)) || code == 0) {
        writeByte((byte)(0xC0 | (code >> 6)));
        writeByte((byte)(0x80 | (code & 0x3F)));
      } else {
        writeByte((byte)(0xE0 | (code >>> 12)));
        writeByte((byte)(0x80 | ((code >> 6) & 0x3F)));
        writeByte((byte)(0x80 | (code & 0x3F)));
      }
    }
  }

  // Constant: must be final so it cannot be reassigned at runtime.
  private static final int COPY_BUFFER_SIZE = 16384;
  // Lazily allocated scratch buffer used only by copyBytes.
  private byte[] copyBuffer;

  /** Copy numBytes bytes from input to ourself. */
  public void copyBytes(IndexInput input, long numBytes) throws IOException {
    assert numBytes >= 0: "numBytes=" + numBytes;
    long left = numBytes;
    if (copyBuffer == null)
      copyBuffer = new byte[COPY_BUFFER_SIZE];
    while(left > 0) {
      final int toCopy;
      if (left > COPY_BUFFER_SIZE)
        toCopy = COPY_BUFFER_SIZE;
      else
        toCopy = (int) left;
      input.readBytes(copyBuffer, 0, toCopy);
      writeBytes(copyBuffer, 0, toCopy);
      left -= toCopy;
    }
  }

  /** Forces any buffered output to be written. */
  public abstract void flush() throws IOException;

  /** Closes this stream to further operations. */
  public abstract void close() throws IOException;

  /** Returns the current position in this file, where the next write will
   * occur.
   * @see #seek(long)
   */
  public abstract long getFilePointer();

  /** Sets current position in this file, where the next write will occur.
   * @see #getFilePointer()
   */
  public abstract void seek(long pos) throws IOException;

  /** The number of bytes in the file. */
  public abstract long length() throws IOException;

  /** Set the file length. By default, this method does
   * nothing (it's optional for a Directory to implement
   * it). But, certain Directory implementations (for
   * example @see FSDirectory) can use this to inform the
   * underlying IO system to pre-allocate the file to the
   * specified size. If the length is longer than the
   * current file length, the bytes added to the file are
   * undefined. Otherwise the file is truncated.
   * @param length file length
   */
  public void setLength(long length) throws IOException {}

  /** Writes the given map as its size followed by alternating key/value
   * strings; a null map is written as size 0. */
  public void writeStringStringMap(Map<String,String> map) throws IOException {
    if (map == null) {
      writeInt(0);
    } else {
      writeInt(map.size());
      for(final Map.Entry<String, String> entry: map.entrySet()) {
        writeString(entry.getKey());
        writeString(entry.getValue());
      }
    }
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/store/IndexOutput.java | Java | art | 7,464 |
package org.apache.lucene.store;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.util.ThreadInterruptedException;
import java.io.IOException;
/** An interprocess mutex lock.
* <p>Typical use might look like:<pre>
* new Lock.With(directory.makeLock("my.lock")) {
* public Object doBody() {
* <i>... code to execute while locked ...</i>
* }
* }.run();
* </pre>
*
* @see Directory#makeLock(String)
*/
public abstract class Lock {
  /** How long {@link #obtain(long)} waits, in milliseconds,
   * in between attempts to acquire the lock. */
  public static long LOCK_POLL_INTERVAL = 1000;

  /** Pass this value to {@link #obtain(long)} to try
   * forever to obtain the lock. */
  public static final long LOCK_OBTAIN_WAIT_FOREVER = -1;

  /** Attempts to obtain exclusive access and immediately return
   * upon success or failure.
   * @return true iff exclusive access is obtained
   */
  public abstract boolean obtain() throws IOException;

  /**
   * If a lock obtain called, this failureReason may be set
   * with the "root cause" Exception as to why the lock was
   * not obtained.
   */
  protected Throwable failureReason;

  /** Attempts to obtain an exclusive lock within amount of
   * time given. Polls once per {@link #LOCK_POLL_INTERVAL}
   * (currently 1000) milliseconds until lockWaitTimeout is
   * passed.
   * @param lockWaitTimeout length of time to wait in
   * milliseconds or {@link
   * #LOCK_OBTAIN_WAIT_FOREVER} to retry forever
   * @return true if lock was obtained
   * @throws LockObtainFailedException if lock wait times out
   * @throws IllegalArgumentException if lockWaitTimeout is
   * out of bounds
   * @throws IOException if obtain() throws IOException
   */
  public boolean obtain(long lockWaitTimeout) throws LockObtainFailedException, IOException {
    // Validate the argument *before* attempting the lock.  The previous
    // code called obtain() first, so an invalid timeout could acquire the
    // lock and then throw IllegalArgumentException, leaking the acquisition.
    if (lockWaitTimeout < 0 && lockWaitTimeout != LOCK_OBTAIN_WAIT_FOREVER)
      throw new IllegalArgumentException("lockWaitTimeout should be LOCK_OBTAIN_WAIT_FOREVER or a non-negative number (got " + lockWaitTimeout + ")");
    failureReason = null;
    boolean locked = obtain();
    long maxSleepCount = lockWaitTimeout / LOCK_POLL_INTERVAL;
    long sleepCount = 0;
    while (!locked) {
      if (lockWaitTimeout != LOCK_OBTAIN_WAIT_FOREVER && sleepCount++ >= maxSleepCount) {
        String reason = "Lock obtain timed out: " + this.toString();
        if (failureReason != null) {
          reason += ": " + failureReason;
        }
        LockObtainFailedException e = new LockObtainFailedException(reason);
        if (failureReason != null) {
          e.initCause(failureReason);
        }
        throw e;
      }
      try {
        Thread.sleep(LOCK_POLL_INTERVAL);
      } catch (InterruptedException ie) {
        // ThreadInterruptedException restores/propagates the interrupt state
        throw new ThreadInterruptedException(ie);
      }
      locked = obtain();
    }
    return locked;
  }

  /** Releases exclusive access. */
  public abstract void release() throws IOException;

  /** Returns true if the resource is currently locked. Note that one must
   * still call {@link #obtain()} before using the resource. */
  public abstract boolean isLocked() throws IOException;

  /** Utility class for executing code with exclusive access. */
  public abstract static class With {
    private Lock lock;
    private long lockWaitTimeout;

    /** Constructs an executor that will grab the named lock. */
    public With(Lock lock, long lockWaitTimeout) {
      this.lock = lock;
      this.lockWaitTimeout = lockWaitTimeout;
    }

    /** Code to execute with exclusive access. */
    protected abstract Object doBody() throws IOException;

    /** Calls {@link #doBody} while <i>lock</i> is obtained. Blocks if lock
     * cannot be obtained immediately. Retries to obtain lock once per second
     * until it is obtained, or until it has tried ten times. Lock is released when
     * {@link #doBody} exits.
     * @throws LockObtainFailedException if lock could not
     * be obtained
     * @throws IOException if {@link Lock#obtain} throws IOException
     */
    public Object run() throws LockObtainFailedException, IOException {
      boolean locked = false;
      try {
        locked = lock.obtain(lockWaitTimeout);
        return doBody();
      } finally {
        // release only if we actually acquired, even when doBody throws
        if (locked)
          lock.release();
      }
    }
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/store/Lock.java | Java | art | 5,118 |
package org.apache.lucene.store;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.FileNotFoundException;
/**
* This exception is thrown when you try to list a
* non-existent directory.
*/
/**
 * Signals an attempt to list the contents of a directory that
 * does not exist.
 */
public class NoSuchDirectoryException extends FileNotFoundException {
  /** Creates the exception with the given detail message. */
  public NoSuchDirectoryException(String message) {
    super(message);
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/store/NoSuchDirectoryException.java | Java | art | 1,112 |
package org.apache.lucene.store;
import java.io.IOException;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* A memory-resident {@link IndexInput} implementation.
*/
class RAMInputStream extends IndexInput implements Cloneable {
  static final int BUFFER_SIZE = RAMOutputStream.BUFFER_SIZE;
  private RAMFile file;
  private long length;
  private byte[] currentBuffer;     // buffer currently being read, null until first read
  private int currentBufferIndex;   // index of currentBuffer within the file's buffer list
  private int bufferPosition;       // next read position inside currentBuffer
  private long bufferStart;         // absolute file offset of currentBuffer's first byte
  private int bufferLength;         // number of valid bytes in currentBuffer
  RAMInputStream(RAMFile f) throws IOException {
    file = f;
    length = file.length;
    // buffer indices are ints, so reject files with too many buffers
    if (length/BUFFER_SIZE >= Integer.MAX_VALUE) {
      throw new IOException("Too large RAMFile! "+length);
    }
    // make sure that we switch to the
    // first needed buffer lazily
    currentBufferIndex = -1;
    currentBuffer = null;
  }
  /** No underlying resources to release for an in-memory stream. */
  @Override
  public void close() {
    // nothing to do here
  }
  @Override
  public long length() {
    return length;
  }
  @Override
  public byte readByte() throws IOException {
    // exhausted the current buffer: advance to the next one (EOF-checked)
    if (bufferPosition >= bufferLength) {
      currentBufferIndex++;
      switchCurrentBuffer(true);
    }
    return currentBuffer[bufferPosition++];
  }
  @Override
  public void readBytes(byte[] b, int offset, int len) throws IOException {
    // copy buffer-by-buffer until len bytes have been delivered
    while (len > 0) {
      if (bufferPosition >= bufferLength) {
        currentBufferIndex++;
        switchCurrentBuffer(true);
      }
      int remainInBuffer = bufferLength - bufferPosition;
      int bytesToCopy = len < remainInBuffer ? len : remainInBuffer;
      System.arraycopy(currentBuffer, bufferPosition, b, offset, bytesToCopy);
      offset += bytesToCopy;
      len -= bytesToCopy;
      bufferPosition += bytesToCopy;
    }
  }
  /** Makes currentBufferIndex the active buffer and recomputes the derived
   *  bookkeeping (bufferStart/bufferLength/bufferPosition).
   *  @param enforceEOF when true, reading past the last buffer throws;
   *         when false (used by seek), the position is parked so a later
   *         read triggers EOF instead. */
  private final void switchCurrentBuffer(boolean enforceEOF) throws IOException {
    if (currentBufferIndex >= file.numBuffers()) {
      // end of file reached, no more buffers left
      if (enforceEOF)
        throw new IOException("Read past EOF");
      else {
        // Force EOF if a read takes place at this position
        currentBufferIndex--;
        bufferPosition = BUFFER_SIZE;
      }
    } else {
      currentBuffer = file.getBuffer(currentBufferIndex);
      bufferPosition = 0;
      // long math to avoid int overflow for large files
      bufferStart = (long) BUFFER_SIZE * (long) currentBufferIndex;
      long buflen = length - bufferStart;
      // the last buffer may be only partially filled
      bufferLength = buflen > BUFFER_SIZE ? BUFFER_SIZE : (int) buflen;
    }
  }
  @Override
  public long getFilePointer() {
    // before the first buffer switch, the logical position is 0
    return currentBufferIndex < 0 ? 0 : bufferStart + bufferPosition;
  }
  @Override
  public void seek(long pos) throws IOException {
    // only switch buffers when pos falls outside the current one
    if (currentBuffer==null || pos < bufferStart || pos >= bufferStart + BUFFER_SIZE) {
      currentBufferIndex = (int) (pos / BUFFER_SIZE);
      switchCurrentBuffer(false);
    }
    bufferPosition = (int) (pos % BUFFER_SIZE);
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/store/RAMInputStream.java | Java | art | 3,594 |
package org.apache.lucene.store;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.File;
import java.io.FilenameFilter;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import org.apache.lucene.util.ThreadInterruptedException;
import org.apache.lucene.util.Constants;
/**
* <a name="subclasses"/>
* Base class for Directory implementations that store index
* files in the file system. There are currently three core
* subclasses:
*
* <ul>
*
* <li> {@link SimpleFSDirectory} is a straightforward
* implementation using java.io.RandomAccessFile.
* However, it has poor concurrent performance
* (multiple threads will bottleneck) as it
* synchronizes when multiple threads read from the
* same file.
*
* <li> {@link NIOFSDirectory} uses java.nio's
* FileChannel's positional io when reading to avoid
* synchronization when reading from the same file.
* Unfortunately, due to a Windows-only <a
* href="http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=6265734">Sun
* JRE bug</a> this is a poor choice for Windows, but
* on all other platforms this is the preferred
* choice.
*
* <li> {@link MMapDirectory} uses memory-mapped IO when
* reading. This is a good choice if you have plenty
* of virtual memory relative to your index size, eg
* if you are running on a 64 bit JRE, or you are
* running on a 32 bit JRE but your index sizes are
* small enough to fit into the virtual memory space.
* Java has currently the limitation of not being able to
* unmap files from user code. The files are unmapped, when GC
* releases the byte buffers. Due to
* <a href="http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=4724038">
* this bug</a> in Sun's JRE, MMapDirectory's {@link IndexInput#close}
* is unable to close the underlying OS file handle. Only when
* GC finally collects the underlying objects, which could be
* quite some time later, will the file handle be closed.
* This will consume additional transient disk usage: on Windows,
* attempts to delete or overwrite the files will result in an
* exception; on other platforms, which typically have a "delete on
* last close" semantics, while such operations will succeed, the bytes
* are still consuming space on disk. For many applications this
* limitation is not a problem (e.g. if you have plenty of disk space,
* and you don't rely on overwriting files on Windows) but it's still
* an important limitation to be aware of. This class supplies a
* (possibly dangerous) workaround mentioned in the bug report,
* which may fail on non-Sun JVMs.
* </ul>
*
* Unfortunately, because of system peculiarities, there is
* no single overall best implementation. Therefore, we've
* added the {@link #open} method, to allow Lucene to choose
* the best FSDirectory implementation given your
* environment, and the known limitations of each
* implementation. For users who have no reason to prefer a
* specific implementation, it's best to simply use {@link
* #open}. For all others, you should instantiate the
* desired implementation directly.
*
* <p>The locking implementation is by default {@link
* NativeFSLockFactory}, but can be changed by
* passing in a custom {@link LockFactory} instance.
*
* @see Directory
*/
public abstract class FSDirectory extends Directory {
  // Shared MD5 digester; presumably used to hash directory paths into
  // lock-file names -- confirm at usage sites (not visible in this chunk).
  private static MessageDigest DIGESTER;
  static {
    try {
      DIGESTER = MessageDigest.getInstance("MD5");
    } catch (NoSuchAlgorithmException e) {
      // MD5 is a required JCA algorithm, so this is effectively unreachable
      throw new RuntimeException(e.toString(), e);
    }
  }
  // Returns the canonical form of the given path.  NOTE: despite the
  // original comment, this does NOT create the directory; creation is
  // deferred to createDir().
  private static File getCanonicalPath(File file) throws IOException {
    return new File(file.getCanonicalPath());
  }
  // true once the directory has been verified (and created if missing)
  private boolean checked;
  /** Creates the underlying directory on first call; later calls are
   *  no-ops thanks to the {@code checked} flag. */
  final void createDir() throws IOException {
    if (!checked) {
      if (!directory.exists())
        if (!directory.mkdirs())
          throw new IOException("Cannot create directory: " + directory);
      checked = true;
    }
  }
  /** Initializes the directory to create a new file with the given name.
   * This method should be used in {@link #createOutput}.
   * <p>Ensures the directory exists and removes any existing file of the
   * same name so the new output starts empty.</p> */
  protected final void initOutput(String name) throws IOException {
    ensureOpen();
    createDir();
    File file = new File(directory, name);
    if (file.exists() && !file.delete()) // delete existing, if any
      throw new IOException("Cannot overwrite: " + file);
  }
  /** The underlying filesystem directory */
  protected File directory = null;
  /** Create a new FSDirectory for the named location (ctor for subclasses).
   * @param path the path of the directory
   * @param lockFactory the lock factory to use, or null for the default
   * ({@link NativeFSLockFactory});
   * @throws IOException
   */
  protected FSDirectory(File path, LockFactory lockFactory) throws IOException {
    // canonicalize so the path comparisons below compare real locations
    path = getCanonicalPath(path);
    // new ctors use always NativeFSLockFactory as default:
    if (lockFactory == null) {
      lockFactory = new NativeFSLockFactory();
    }
    directory = path;
    if (directory.exists() && !directory.isDirectory())
      throw new NoSuchDirectoryException("file '" + directory + "' exists but is not a directory");
    setLockFactory(lockFactory);
    // for filesystem based LockFactory, delete the lockPrefix, if the locks are placed
    // in index dir. If no index dir is given, set ourselves
    if (lockFactory instanceof FSLockFactory) {
      final FSLockFactory lf = (FSLockFactory) lockFactory;
      final File dir = lf.getLockDir();
      // if the lock factory has no lockDir set, use the this directory as lockDir
      if (dir == null) {
        lf.setLockDir(this.directory);
        lf.setLockPrefix(null);
      } else if (dir.getCanonicalPath().equals(this.directory.getCanonicalPath())) {
        // locks live inside the index directory itself, so no prefix is needed
        lf.setLockPrefix(null);
      }
    }
  }
/** Creates an FSDirectory instance, trying to pick the
* best implementation given the current environment.
* The directory returned uses the {@link NativeFSLockFactory}.
*
* <p>Currently this returns {@link NIOFSDirectory}
* on non-Windows JREs and {@link SimpleFSDirectory}
* on Windows.
*
* <p><b>NOTE</b>: this method may suddenly change which
* implementation is returned from release to release, in
* the event that higher performance defaults become
* possible; if the precise implementation is important to
* your application, please instantiate it directly,
* instead. On 64 bit systems, it may also good to
* return {@link MMapDirectory}, but this is disabled
* because of officially missing unmap support in Java.
* For optimal performance you should consider using
* this implementation on 64 bit JVMs.
*
* <p>See <a href="#subclasses">above</a> */
public static FSDirectory open(File path) throws IOException {
return open(path, null);
}
/** Just like {@link #open(File)}, but allows you to
* also specify a custom {@link LockFactory}. */
public static FSDirectory open(File path, LockFactory lockFactory) throws IOException {
/* For testing:
MMapDirectory dir=new MMapDirectory(path, lockFactory);
dir.setUseUnmap(true);
return dir;
*/
if (Constants.WINDOWS) {
return new SimpleFSDirectory(path, lockFactory);
} else {
return new NIOFSDirectory(path, lockFactory);
}
}
/** Lists all files (not subdirectories) in the
* directory. This method never returns null (throws
* {@link IOException} instead).
*
* @throws NoSuchDirectoryException if the directory
* does not exist, or does exist but is not a
* directory.
* @throws IOException if list() returns null */
public static String[] listAll(File dir) throws IOException {
if (!dir.exists())
throw new NoSuchDirectoryException("directory '" + dir + "' does not exist");
else if (!dir.isDirectory())
throw new NoSuchDirectoryException("file '" + dir + "' exists but is not a directory");
// Exclude subdirs
String[] result = dir.list(new FilenameFilter() {
public boolean accept(File dir, String file) {
return !new File(dir, file).isDirectory();
}
});
if (result == null)
throw new IOException("directory '" + dir + "' exists and is a directory, but cannot be listed: list() returned null");
return result;
}
/** Lists all files (not subdirectories) in the
* directory.
* @see #listAll(File) */
@Override
public String[] listAll() throws IOException {
ensureOpen();
return listAll(directory);
}
/** Returns true iff a file with the given name exists. */
@Override
public boolean fileExists(String name) {
ensureOpen();
File file = new File(directory, name);
return file.exists();
}
/** Returns the time the named file was last modified. */
@Override
public long fileModified(String name) {
ensureOpen();
File file = new File(directory, name);
return file.lastModified();
}
/** Returns the time the named file was last modified. */
public static long fileModified(File directory, String name) {
File file = new File(directory, name);
return file.lastModified();
}
/** Set the modified time of an existing file to now. */
@Override
public void touchFile(String name) {
ensureOpen();
File file = new File(directory, name);
file.setLastModified(System.currentTimeMillis());
}
/** Returns the length in bytes of a file in the directory. */
@Override
public long fileLength(String name) {
ensureOpen();
File file = new File(directory, name);
return file.length();
}
/** Removes an existing file in the directory. */
@Override
public void deleteFile(String name) throws IOException {
ensureOpen();
File file = new File(directory, name);
if (!file.delete())
throw new IOException("Cannot delete " + file);
}
@Override
public void sync(String name) throws IOException {
ensureOpen();
File fullFile = new File(directory, name);
boolean success = false;
int retryCount = 0;
IOException exc = null;
while(!success && retryCount < 5) {
retryCount++;
RandomAccessFile file = null;
try {
try {
file = new RandomAccessFile(fullFile, "rw");
file.getFD().sync();
success = true;
} finally {
if (file != null)
file.close();
}
} catch (IOException ioe) {
if (exc == null)
exc = ioe;
try {
// Pause 5 msec
Thread.sleep(5);
} catch (InterruptedException ie) {
throw new ThreadInterruptedException(ie);
}
}
}
if (!success)
// Throw original exception
throw exc;
}
// Inherit javadoc
@Override
public IndexInput openInput(String name) throws IOException {
ensureOpen();
return openInput(name, BufferedIndexInput.BUFFER_SIZE);
}
/**
* So we can do some byte-to-hexchar conversion below
*/
private static final char[] HEX_DIGITS =
{'0','1','2','3','4','5','6','7','8','9','a','b','c','d','e','f'};
@Override
public String getLockID() {
ensureOpen();
String dirName; // name to be hashed
try {
dirName = directory.getCanonicalPath();
} catch (IOException e) {
throw new RuntimeException(e.toString(), e);
}
byte digest[];
synchronized (DIGESTER) {
digest = DIGESTER.digest(dirName.getBytes());
}
StringBuilder buf = new StringBuilder();
buf.append("lucene-");
for (int i = 0; i < digest.length; i++) {
int b = digest[i];
buf.append(HEX_DIGITS[(b >> 4) & 0xf]);
buf.append(HEX_DIGITS[b & 0xf]);
}
return buf.toString();
}
/** Closes the store to future operations. */
@Override
public synchronized void close() {
isOpen = false;
}
public File getFile() {
ensureOpen();
return directory;
}
/** For debug output. */
@Override
public String toString() {
return this.getClass().getName() + "@" + directory;
}
/**
* Default read chunk size. This is a conditional
* default: on 32bit JVMs, it defaults to 100 MB. On
* 64bit JVMs, it's <code>Integer.MAX_VALUE</code>.
* @see #setReadChunkSize
*/
public static final int DEFAULT_READ_CHUNK_SIZE = Constants.JRE_IS_64BIT ? Integer.MAX_VALUE: 100 * 1024 * 1024;
// LUCENE-1566
private int chunkSize = DEFAULT_READ_CHUNK_SIZE;
/**
* Sets the maximum number of bytes read at once from the
* underlying file during {@link IndexInput#readBytes}.
* The default value is {@link #DEFAULT_READ_CHUNK_SIZE};
*
* <p> This was introduced due to <a
* href="http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=6478546">Sun
* JVM Bug 6478546</a>, which throws an incorrect
* OutOfMemoryError when attempting to read too many bytes
* at once. It only happens on 32bit JVMs with a large
* maximum heap size.</p>
*
* <p>Changes to this value will not impact any
* already-opened {@link IndexInput}s. You should call
* this before attempting to open an index on the
* directory.</p>
*
* <p> <b>NOTE</b>: This value should be as large as
* possible to reduce any possible performance impact. If
* you still encounter an incorrect OutOfMemoryError,
* trying lowering the chunk size.</p>
*/
public final void setReadChunkSize(int chunkSize) {
// LUCENE-1566
if (chunkSize <= 0) {
throw new IllegalArgumentException("chunkSize must be positive");
}
if (!Constants.JRE_IS_64BIT) {
this.chunkSize = chunkSize;
}
}
/**
* The maximum number of bytes to read at once from the
* underlying file during {@link IndexInput#readBytes}.
* @see #setReadChunkSize
*/
public final int getReadChunkSize() {
// LUCENE-1566
return chunkSize;
}
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/store/FSDirectory.java | Java | art | 14,984 |
package org.apache.lucene.store;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.zip.CRC32;
import java.util.zip.Checksum;
/** An {@link IndexOutput} decorator that forwards every byte to a delegate
 *  output while folding it into a running CRC32 checksum.  Because the
 *  checksum reflects the exact byte sequence written, {@link #seek} is
 *  not supported and always throws. */
public class ChecksumIndexOutput extends IndexOutput {
  IndexOutput main;
  Checksum digest;
  /** Wraps the given delegate output; the checksum starts at zero. */
  public ChecksumIndexOutput(IndexOutput main) {
    this.main = main;
    digest = new CRC32();
  }
  /** Returns the checksum of all bytes written so far. */
  public long getChecksum() {
    return digest.getValue();
  }
  @Override
  public void writeByte(byte b) throws IOException {
    // Fold into the checksum first, then forward to the delegate.
    digest.update(b);
    main.writeByte(b);
  }
  @Override
  public void writeBytes(byte[] b, int offset, int length) throws IOException {
    digest.update(b, offset, length);
    main.writeBytes(b, offset, length);
  }
  @Override
  public void flush() throws IOException {
    main.flush();
  }
  @Override
  public void close() throws IOException {
    main.close();
  }
  @Override
  public long getFilePointer() {
    return main.getFilePointer();
  }
  @Override
  public long length() throws IOException {
    return main.length();
  }
  @Override
  public void seek(long pos) {
    // Random-access writes would invalidate the running checksum.
    throw new RuntimeException("not allowed");
  }
  /**
   * Starts, but does not complete, the commit of this file (= writing of
   * the final checksum at the end).  Phase 1 of a two-phase commit: a
   * deliberately wrong value (checksum - 1) is written to verify that a
   * long can be appended, without actually "committing" the file.  After
   * this you must call {@link #finishCommit} and then {@link #close} to
   * complete the commit.
   */
  public void prepareCommit() throws IOException {
    final long placeholder = getChecksum() - 1;
    // Remember where the checksum goes, so finishCommit() overwrites it.
    final long checksumPos = main.getFilePointer();
    main.writeLong(placeholder);
    main.flush();
    main.seek(checksumPos);
  }
  /** Phase 2: overwrites the placeholder with the real checksum.  See
   *  {@link #prepareCommit}. */
  public void finishCommit() throws IOException {
    main.writeLong(getChecksum());
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/store/ChecksumIndexOutput.java | Java | art | 2,862 |
package org.apache.lucene.queryParser;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.MultiPhraseQuery;
import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.util.Version;
/**
 * A QueryParser which constructs queries to search multiple fields.
 *
 * @version $Revision: 829231 $
 */
public class MultiFieldQueryParser extends QueryParser
{
  protected String[] fields;
  protected Map<String,Float> boosts;
  /**
   * Creates a MultiFieldQueryParser that also applies per-field boosts.
   *
   * <p>Behaves like {@link #MultiFieldQueryParser(Version, String[], Analyzer)},
   * but each generated field query is additionally boosted by the value
   * found in {@code boosts} for its field (if any).  E.g. with boosts
   * (title=>5 body=>10) a parsed query looks like:</p>
   *
   * <code>
   * +(title:term1^5.0 body:term1^10.0) +(title:term2^5.0 body:term2^10.0)
   * </code>
   */
  public MultiFieldQueryParser(Version matchVersion, String[] fields, Analyzer analyzer, Map<String,Float> boosts) {
    this(matchVersion, fields, analyzer);
    this.boosts = boosts;
  }
  /**
   * Creates a MultiFieldQueryParser.
   *
   * <p>When parse(String query) is called, each query term is expanded
   * across all given fields.  For a two-term query over fields
   * <code>title</code> and <code>body</code> this produces:</p>
   *
   * <code>
   * (title:term1 body:term1) (title:term2 body:term2)
   * </code>
   *
   * <p>and, with setDefaultOperator(AND_OPERATOR):</p>
   *
   * <code>
   * +(title:term1 body:term1) +(title:term2 body:term2)
   * </code>
   *
   * <p>In other words, all the query's terms must appear, but it doesn't
   * matter in what fields they appear.</p>
   */
  public MultiFieldQueryParser(Version matchVersion, String[] fields, Analyzer analyzer) {
    super(matchVersion, null, analyzer);
    this.fields = fields;
  }
  /** Builds a field query; a null field expands the term over all
   *  configured fields (applying boosts, if given) as a disjunction. */
  protected Query getFieldQuery(String field, String queryText, int slop) throws ParseException {
    if (field != null) {
      Query q = super.getFieldQuery(field, queryText);
      applySlop(q, slop);
      return q;
    }
    List<BooleanClause> clauses = new ArrayList<BooleanClause>();
    for (String fieldName : fields) {
      Query q = super.getFieldQuery(fieldName, queryText);
      if (q == null)
        continue;
      // Apply the user-supplied per-field boost, when present.
      if (boosts != null) {
        Float boost = boosts.get(fieldName);
        if (boost != null) {
          q.setBoost(boost.floatValue());
        }
      }
      applySlop(q, slop);
      clauses.add(new BooleanClause(q, BooleanClause.Occur.SHOULD));
    }
    if (clauses.isEmpty())  // happens for stopwords
      return null;
    return getBooleanQuery(clauses, true);
  }
  // Sets the slop on phrase-like queries; all other query types are
  // left untouched (slop has no meaning for them).
  private void applySlop(Query q, int slop) {
    if (q instanceof PhraseQuery) {
      ((PhraseQuery) q).setSlop(slop);
    } else if (q instanceof MultiPhraseQuery) {
      ((MultiPhraseQuery) q).setSlop(slop);
    }
  }
  protected Query getFieldQuery(String field, String queryText) throws ParseException {
    return getFieldQuery(field, queryText, 0);
  }
  /** Fuzzy query; a null field expands over all configured fields. */
  protected Query getFuzzyQuery(String field, String termStr, float minSimilarity) throws ParseException
  {
    if (field != null) {
      return super.getFuzzyQuery(field, termStr, minSimilarity);
    }
    List<BooleanClause> clauses = new ArrayList<BooleanClause>();
    for (String fieldName : fields) {
      clauses.add(new BooleanClause(getFuzzyQuery(fieldName, termStr, minSimilarity),
          BooleanClause.Occur.SHOULD));
    }
    return getBooleanQuery(clauses, true);
  }
  /** Prefix query; a null field expands over all configured fields. */
  protected Query getPrefixQuery(String field, String termStr) throws ParseException
  {
    if (field != null) {
      return super.getPrefixQuery(field, termStr);
    }
    List<BooleanClause> clauses = new ArrayList<BooleanClause>();
    for (String fieldName : fields) {
      clauses.add(new BooleanClause(getPrefixQuery(fieldName, termStr),
          BooleanClause.Occur.SHOULD));
    }
    return getBooleanQuery(clauses, true);
  }
  /** Wildcard query; a null field expands over all configured fields. */
  protected Query getWildcardQuery(String field, String termStr) throws ParseException {
    if (field != null) {
      return super.getWildcardQuery(field, termStr);
    }
    List<BooleanClause> clauses = new ArrayList<BooleanClause>();
    for (String fieldName : fields) {
      clauses.add(new BooleanClause(getWildcardQuery(fieldName, termStr),
          BooleanClause.Occur.SHOULD));
    }
    return getBooleanQuery(clauses, true);
  }
  /** Range query; a null field expands over all configured fields. */
  protected Query getRangeQuery(String field, String part1, String part2, boolean inclusive) throws ParseException {
    if (field != null) {
      return super.getRangeQuery(field, part1, part2, inclusive);
    }
    List<BooleanClause> clauses = new ArrayList<BooleanClause>();
    for (String fieldName : fields) {
      clauses.add(new BooleanClause(getRangeQuery(fieldName, part1, part2, inclusive),
          BooleanClause.Occur.SHOULD));
    }
    return getBooleanQuery(clauses, true);
  }
  /**
   * Parses one query string per field and ORs the results together,
   * effectively constructing:
   * <pre>
   * <code>
   * (field1:query1) (field2:query2) (field3:query3)...(fieldx:queryx)
   * </code>
   * </pre>
   * @param matchVersion Lucene version to match; this is passed through to QueryParser.
   * @param queries Queries strings to parse
   * @param fields Fields to search on
   * @param analyzer Analyzer to use
   * @throws ParseException if query parsing fails
   * @throws IllegalArgumentException if the length of the queries array differs
   *  from the length of the fields array
   */
  public static Query parse(Version matchVersion, String[] queries, String[] fields,
      Analyzer analyzer) throws ParseException
  {
    if (queries.length != fields.length)
      throw new IllegalArgumentException("queries.length != fields.length");
    BooleanQuery booleanQuery = new BooleanQuery();
    for (int i = 0; i < fields.length; i++)
    {
      QueryParser parser = new QueryParser(matchVersion, fields[i], analyzer);
      Query q = parser.parse(queries[i]);
      // Skip empty results (e.g. a query of only stopwords).
      if (q!=null &&
          (!(q instanceof BooleanQuery) || ((BooleanQuery)q).getClauses().length>0)) {
        booleanQuery.add(q, BooleanClause.Occur.SHOULD);
      }
    }
    return booleanQuery;
  }
  /**
   * Parses one query string against several fields, with a per-field
   * occurrence flag (required / optional / prohibited).
   * <p><pre>
   * Usage:
   * <code>
   * String[] fields = {"filename", "contents", "description"};
   * BooleanClause.Occur[] flags = {BooleanClause.Occur.SHOULD,
   *                BooleanClause.Occur.MUST,
   *                BooleanClause.Occur.MUST_NOT};
   * MultiFieldQueryParser.parse("query", fields, flags, analyzer);
   * </code>
   * </pre>
   * produces:
   * <pre>
   * <code>
   * (filename:query) +(contents:query) -(description:query)
   * </code>
   * </pre>
   *
   * @param matchVersion Lucene version to match; this is passed through to QueryParser.
   * @param query Query string to parse
   * @param fields Fields to search on
   * @param flags Flags describing the fields
   * @param analyzer Analyzer to use
   * @throws ParseException if query parsing fails
   * @throws IllegalArgumentException if the length of the fields array differs
   *  from the length of the flags array
   */
  public static Query parse(Version matchVersion, String query, String[] fields,
      BooleanClause.Occur[] flags, Analyzer analyzer) throws ParseException {
    if (fields.length != flags.length)
      throw new IllegalArgumentException("fields.length != flags.length");
    BooleanQuery booleanQuery = new BooleanQuery();
    for (int i = 0; i < fields.length; i++) {
      QueryParser parser = new QueryParser(matchVersion, fields[i], analyzer);
      Query q = parser.parse(query);
      // Skip empty results (e.g. a query of only stopwords).
      if (q!=null &&
          (!(q instanceof BooleanQuery) || ((BooleanQuery)q).getClauses().length>0)) {
        booleanQuery.add(q, flags[i]);
      }
    }
    return booleanQuery;
  }
  /**
   * Parses one query string per field, with a per-field occurrence flag.
   * <p><pre>
   * Usage:
   * <code>
   * String[] query = {"query1", "query2", "query3"};
   * String[] fields = {"filename", "contents", "description"};
   * BooleanClause.Occur[] flags = {BooleanClause.Occur.SHOULD,
   *                BooleanClause.Occur.MUST,
   *                BooleanClause.Occur.MUST_NOT};
   * MultiFieldQueryParser.parse(query, fields, flags, analyzer);
   * </code>
   * </pre>
   * produces:
   * <pre>
   * <code>
   * (filename:query1) +(contents:query2) -(description:query3)
   * </code>
   * </pre>
   *
   * @param matchVersion Lucene version to match; this is passed through to QueryParser.
   * @param queries Queries string to parse
   * @param fields Fields to search on
   * @param flags Flags describing the fields
   * @param analyzer Analyzer to use
   * @throws ParseException if query parsing fails
   * @throws IllegalArgumentException if the length of the queries, fields,
   *  and flags array differ
   */
  public static Query parse(Version matchVersion, String[] queries, String[] fields, BooleanClause.Occur[] flags,
      Analyzer analyzer) throws ParseException
  {
    if (!(queries.length == fields.length && queries.length == flags.length))
      throw new IllegalArgumentException("queries, fields, and flags array have have different length");
    BooleanQuery booleanQuery = new BooleanQuery();
    for (int i = 0; i < fields.length; i++)
    {
      QueryParser parser = new QueryParser(matchVersion, fields[i], analyzer);
      Query q = parser.parse(queries[i]);
      // Skip empty results (e.g. a query of only stopwords).
      if (q!=null &&
          (!(q instanceof BooleanQuery) || ((BooleanQuery)q).getClauses().length>0)) {
        booleanQuery.add(q, flags[i]);
      }
    }
    return booleanQuery;
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/queryParser/MultiFieldQueryParser.java | Java | art | 11,731 |
<!doctype html public "-//w3c//dtd html 4.0 transitional//en">
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
</head>
<body>
A simple query parser implemented with JavaCC.
<p>Note that JavaCC defines lots of public classes, methods and fields
that do not need to be public. These clutter the documentation.
Sorry.
<p>Note that because JavaCC defines a class named <tt>Token</tt>, <tt>org.apache.lucene.analysis.Token</tt>
must always be fully qualified in source code in this package.
<p><b>NOTE</b>: contrib/queryparser has an alternative queryparser that matches the syntax of this one, but is more modular,
enabling substantial customization to how a query is created.
</body>
</html>
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/queryParser/package.html | HTML | art | 1,522 |
// FastCharStream.java
package org.apache.lucene.queryParser;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
import java.io.*;
/** An efficient implementation of JavaCC's CharStream interface. <p>Note that
 * this does not do line-number counting, but instead keeps track of the
 * character position of the token in the input, as required by Lucene's {@link
 * org.apache.lucene.analysis.Token} API.
 * */
public final class FastCharStream implements CharStream {
  char[] buffer = null;          // lazily allocated, grows by doubling
  int bufferLength = 0;          // end of valid chars
  int bufferPosition = 0;        // next char to read
  int tokenStart = 0;            // offset in buffer
  int bufferStart = 0;           // position in file of buffer
  Reader input;                  // source of chars
  /** Constructs from a Reader. */
  public FastCharStream(Reader r) {
    input = r;
  }
  // Returns the next character, refilling the buffer from the Reader when
  // it is exhausted.  Throws IOException("read past eof") at end of input.
  public final char readChar() throws IOException {
    if (bufferPosition >= bufferLength)
      refill();
    return buffer[bufferPosition++];
  }
  // Shifts the current token to the front of the buffer (or grows the
  // buffer if the token already starts at offset 0 and the buffer is
  // full), then reads more characters from the Reader into the free tail.
  private final void refill() throws IOException {
    // Number of already-read chars belonging to the current token that
    // must be preserved across the refill.
    int newPosition = bufferLength - tokenStart;
    if (tokenStart == 0) {                       // token won't fit in buffer
      if (buffer == null) {                      // first time: alloc buffer
        buffer = new char[2048];
      } else if (bufferLength == buffer.length) { // grow buffer
        char[] newBuffer = new char[buffer.length*2];
        System.arraycopy(buffer, 0, newBuffer, 0, bufferLength);
        buffer = newBuffer;
      }
    } else {                                     // shift token to front
      System.arraycopy(buffer, tokenStart, buffer, 0, newPosition);
    }
    bufferLength = newPosition;                  // update state
    bufferPosition = newPosition;
    bufferStart += tokenStart;                   // buffer now starts later in the file
    tokenStart = 0;
    int charsRead =                              // fill space in buffer
      input.read(buffer, newPosition, buffer.length-newPosition);
    if (charsRead == -1)
      throw new IOException("read past eof");
    else
      bufferLength += charsRead;
  }
  // Marks the start of a new token and returns its first character.
  public final char BeginToken() throws IOException {
    tokenStart = bufferPosition;
    return readChar();
  }
  // Pushes back the given number of already-read characters.
  public final void backup(int amount) {
    bufferPosition -= amount;
  }
  // Returns the text of the current token (from tokenStart to here).
  public final String GetImage() {
    return new String(buffer, tokenStart, bufferPosition - tokenStart);
  }
  // Returns the last len characters read.
  public final char[] GetSuffix(int len) {
    char[] value = new char[len];
    System.arraycopy(buffer, bufferPosition - len, value, 0, len);
    return value;
  }
  // Closes the underlying Reader; close failures are logged and swallowed.
  public final void Done() {
    try {
      input.close();
    } catch (IOException e) {
      System.err.println("Caught: " + e + "; ignoring.");
    }
  }
  // "Columns" report absolute character offsets in the input; lines are
  // not tracked and are always reported as 1 (see class javadoc).
  public final int getColumn() {
    return bufferStart + bufferPosition;
  }
  public final int getLine() {
    return 1;
  }
  public final int getEndColumn() {
    return bufferStart + bufferPosition;
  }
  public final int getEndLine() {
    return 1;
  }
  public final int getBeginColumn() {
    return bufferStart + tokenStart;
  }
  public final int getBeginLine() {
    return 1;
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/queryParser/FastCharStream.java | Java | art | 3,707 |
package org.apache.lucene.index;
import org.apache.lucene.util.StringHelper;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
  A Term is the unit of search: a word of text together with the name of
  the field it occurred in.  The field name is always interned.
  Terms may also represent non-word tokens such as dates, email addresses
  or urls, encoded as strings. */
public final class Term implements Comparable<Term>, java.io.Serializable {
  String field;
  String text;
  /** Constructs a Term with the given field and text.
   * <p>Note that a null field or null text value results in undefined
   * behavior for most Lucene APIs that accept a Term parameter. */
  public Term(String fld, String txt) {
    field = StringHelper.intern(fld);
    text = txt;
  }
  /** Constructs a Term with the given field and empty text.
   * This serves two purposes: 1) reuse of a Term with the same field.
   * 2) pattern for a query.
   *
   * @param fld
   */
  public Term(String fld) {
    this(fld, "", true);
  }
  // Package-private ctor: callers that already hold an interned field
  // name pass intern=false to skip the (relatively costly) intern call.
  Term(String fld, String txt, boolean intern) {
    field = intern ? StringHelper.intern(fld) : fld; // field names are interned
    text = txt;                                      // unless already known to be
  }
  /** Returns the field of this term, an interned string. The field indicates
    the part of a document which this term came from. */
  public final String field() { return field; }
  /** Returns the text of this term. In the case of words, this is simply the
    text of the word. In the case of dates and other types, this is an
    encoding of the object as a string. */
  public final String text() { return text; }
  /**
   * Optimized construction of new Terms by reusing same field as this Term
   * - avoids field.intern() overhead
   * @param text The text of the new term (field is implicitly same as this Term instance)
   * @return A new Term
   */
  public Term createTerm(String text)
  {
    return new Term(field,text,false);
  }
  /** Two Terms are equal iff they have the same class, field and text
   *  (null-safe on both components). */
  @Override
  public boolean equals(Object obj) {
    if (this == obj)
      return true;
    if (obj == null || getClass() != obj.getClass())
      return false;
    Term that = (Term) obj;
    if (field == null ? that.field != null : !field.equals(that.field))
      return false;
    return text == null ? that.text == null : text.equals(that.text);
  }
  @Override
  public int hashCode() {
    // Standard 31-based combination of the two (possibly null) components.
    final int prime = 31;
    int result = prime + ((field == null) ? 0 : field.hashCode());
    return prime * result + ((text == null) ? 0 : text.hashCode());
  }
  /** Orders terms first by field, then by text.  Returns a negative
    integer, zero, or a positive integer as this term sorts before,
    equal to, or after the argument. */
  public final int compareTo(Term other) {
    // Field names are interned, so identity comparison suffices here.
    if (field == other.field)
      return text.compareTo(other.text);
    return field.compareTo(other.field);
  }
  /** Resets the field and text of a Term. */
  final void set(String fld, String txt) {
    field = fld;
    text = txt;
  }
  @Override
  public final String toString() { return field + ":" + text; }
  // Re-intern the field after deserialization so the identity-based
  // comparison in compareTo keeps working.
  private void readObject(java.io.ObjectInputStream in)
    throws java.io.IOException, ClassNotFoundException
  {
    in.defaultReadObject();
    field = StringHelper.intern(field);
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/Term.java | Java | art | 4,472 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.document.Document;
import org.apache.lucene.document.FieldSelector;
import org.apache.lucene.search.Similarity;
import org.apache.lucene.store.*;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.Closeable;
import java.util.Arrays;
import java.util.Collection;
import java.util.Map;
/** IndexReader is an abstract class, providing an interface for accessing an
index. Search of an index is done entirely through this abstract interface,
so that any subclass which implements it is searchable.
<p> Concrete subclasses of IndexReader are usually constructed with a call to
one of the static <code>open()</code> methods, e.g. {@link
#open(Directory, boolean)}.
<p> For efficiency, in this API documents are often referred to via
<i>document numbers</i>, non-negative integers which each name a unique
document in the index. These document numbers are ephemeral--they may change
as documents are added to and deleted from an index. Clients should thus not
rely on a given document having the same number between sessions.
<p> An IndexReader can be opened on a directory for which an IndexWriter is
opened already, but it cannot be used to delete documents from the index then.
<p>
<b>NOTE</b>: for backwards API compatibility, several methods are not listed
as abstract, but have no useful implementations in this base class and
instead always throw UnsupportedOperationException. Subclasses are
strongly encouraged to override these methods, but in many cases may not
need to.
</p>
<p>
<b>NOTE</b>: as of 2.4, it's possible to open a read-only
IndexReader using the static open methods that accept the
boolean readOnly parameter. Such a reader has better
concurrency as it's not necessary to synchronize on the
isDeleted method. You must specify false if you want to
make changes with the resulting IndexReader.
</p>
 <a name="thread-safety"></a><p><b>NOTE</b>: {@link
 IndexReader} instances are completely thread
safe, meaning multiple threads can call any of its methods,
concurrently. If your application requires external
synchronization, you should <b>not</b> synchronize on the
<code>IndexReader</code> instance; use your own
(non-Lucene) objects instead.
*/
public abstract class IndexReader implements Cloneable,Closeable {
/**
* Constants describing field properties, for example used for
* {@link IndexReader#getFieldNames(FieldOption)}.
*/
  public static final class FieldOption {
    // Human-readable name of the option; also returned by toString().
    private String option;
    // Private constructors implement the typesafe-enum pattern: the only
    // instances are the constants declared below. The no-arg variant is
    // unused by visible code.
    private FieldOption() { }
    private FieldOption(String option) {
      this.option = option;
    }
    @Override
    public String toString() {
      return this.option;
    }
    /** All fields */
    public static final FieldOption ALL = new FieldOption ("ALL");
    /** All indexed fields */
    public static final FieldOption INDEXED = new FieldOption ("INDEXED");
    /** All fields that store payloads */
    public static final FieldOption STORES_PAYLOADS = new FieldOption ("STORES_PAYLOADS");
    /** All fields that omit tf */
    public static final FieldOption OMIT_TERM_FREQ_AND_POSITIONS = new FieldOption ("OMIT_TERM_FREQ_AND_POSITIONS");
    /** All fields which are not indexed */
    public static final FieldOption UNINDEXED = new FieldOption ("UNINDEXED");
    /** All fields which are indexed with termvectors enabled */
    public static final FieldOption INDEXED_WITH_TERMVECTOR = new FieldOption ("INDEXED_WITH_TERMVECTOR");
    /** All fields which are indexed but don't have termvectors enabled */
    public static final FieldOption INDEXED_NO_TERMVECTOR = new FieldOption ("INDEXED_NO_TERMVECTOR");
    /** All fields with termvectors enabled. Please note that only standard termvector fields are returned */
    public static final FieldOption TERMVECTOR = new FieldOption ("TERMVECTOR");
    /** All fields with termvectors with position values enabled */
    public static final FieldOption TERMVECTOR_WITH_POSITION = new FieldOption ("TERMVECTOR_WITH_POSITION");
    /** All fields with termvectors with offset values enabled */
    public static final FieldOption TERMVECTOR_WITH_OFFSET = new FieldOption ("TERMVECTOR_WITH_OFFSET");
    /** All fields with termvectors with offset values and position values enabled */
    public static final FieldOption TERMVECTOR_WITH_POSITION_OFFSET = new FieldOption ("TERMVECTOR_WITH_POSITION_OFFSET");
  }
  private boolean closed;                      // NOTE(review): never written in this chunk; presumably set by close() — confirm
  protected boolean hasChanges;                // true once deletions/norm updates are pending commit
  private int refCount;                        // outstanding references; the reader truly closes when this reaches 0
  static int DEFAULT_TERMS_INDEX_DIVISOR = 1;  // default terms-index subsampling used by the open(...) overloads
  /** Expert: returns the current refCount for this reader */
  public synchronized int getRefCount() {
    // synchronized: refCount is mutated under the reader's monitor in incRef/decRef.
    return refCount;
  }
/**
* Expert: increments the refCount of this IndexReader
* instance. RefCounts are used to determine when a
* reader can be closed safely, i.e. as soon as there are
* no more references. Be sure to always call a
* corresponding {@link #decRef}, in a finally clause;
* otherwise the reader may never be closed. Note that
* {@link #close} simply calls decRef(), which means that
* the IndexReader will not really be closed until {@link
* #decRef} has been called for all outstanding
* references.
*
* @see #decRef
*/
  public synchronized void incRef() {
    assert refCount > 0;
    ensureOpen();  // also rejects incRef on an already-closed reader without assertions enabled
    refCount++;
  }
/**
* Expert: decreases the refCount of this IndexReader
* instance. If the refCount drops to 0, then pending
* changes (if any) are committed to the index and this
* reader is closed.
*
* @throws IOException in case an IOException occurs in commit() or doClose()
*
* @see #incRef
*/
  public synchronized void decRef() throws IOException {
    assert refCount > 0;
    ensureOpen();
    if (refCount == 1) {
      // Last reference: flush pending changes, then release resources.
      commit();
      doClose();
    }
    // Decremented only after commit()/doClose() succeed; if either throws,
    // refCount is unchanged and the caller may retry decRef().
    refCount--;
  }
  protected IndexReader() {
    // The creator implicitly holds the first reference.
    refCount = 1;
  }
  /**
   * Verifies this reader still holds at least one reference.
   * @throws AlreadyClosedException if this IndexReader is closed
   */
  protected final void ensureOpen() throws AlreadyClosedException {
    if (refCount <= 0) {
      throw new AlreadyClosedException("this IndexReader is closed");
    }
  }
 /** Returns an IndexReader reading the index in the given
* Directory, with readOnly=true.
* @param directory the index directory
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
*/
  public static IndexReader open(final Directory directory) throws CorruptIndexException, IOException {
    // Read-only open with default terms-index divisor; no deletion policy or explicit commit.
    return open(directory, null, null, true, DEFAULT_TERMS_INDEX_DIVISOR);
  }
/** Returns an IndexReader reading the index in the given
* Directory. You should pass readOnly=true, since it
* gives much better concurrent performance, unless you
* intend to do write operations (delete documents or
* change norms) with the reader.
* @param directory the index directory
* @param readOnly true if no changes (deletions, norms) will be made with this IndexReader
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
*/
  public static IndexReader open(final Directory directory, boolean readOnly) throws CorruptIndexException, IOException {
    // Caller chooses read-only vs. writable; other parameters take defaults.
    return open(directory, null, null, readOnly, DEFAULT_TERMS_INDEX_DIVISOR);
  }
/** Expert: returns an IndexReader reading the index in the given
* {@link IndexCommit}. You should pass readOnly=true, since it
* gives much better concurrent performance, unless you
* intend to do write operations (delete documents or
* change norms) with the reader.
* @param commit the commit point to open
* @param readOnly true if no changes (deletions, norms) will be made with this IndexReader
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
*/
  public static IndexReader open(final IndexCommit commit, boolean readOnly) throws CorruptIndexException, IOException {
    // Opens a specific commit point; the Directory comes from the commit itself.
    return open(commit.getDirectory(), null, commit, readOnly, DEFAULT_TERMS_INDEX_DIVISOR);
  }
/** Expert: returns an IndexReader reading the index in
* the given Directory, with a custom {@link
* IndexDeletionPolicy}. You should pass readOnly=true,
* since it gives much better concurrent performance,
* unless you intend to do write operations (delete
* documents or change norms) with the reader.
* @param directory the index directory
* @param deletionPolicy a custom deletion policy (only used
* if you use this reader to perform deletes or to set
* norms); see {@link IndexWriter} for details.
* @param readOnly true if no changes (deletions, norms) will be made with this IndexReader
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
*/
  public static IndexReader open(final Directory directory, IndexDeletionPolicy deletionPolicy, boolean readOnly) throws CorruptIndexException, IOException {
    // Custom deletion policy (only relevant if this reader performs deletes/norm updates).
    return open(directory, deletionPolicy, null, readOnly, DEFAULT_TERMS_INDEX_DIVISOR);
  }
/** Expert: returns an IndexReader reading the index in
* the given Directory, with a custom {@link
* IndexDeletionPolicy}. You should pass readOnly=true,
* since it gives much better concurrent performance,
* unless you intend to do write operations (delete
* documents or change norms) with the reader.
* @param directory the index directory
* @param deletionPolicy a custom deletion policy (only used
* if you use this reader to perform deletes or to set
* norms); see {@link IndexWriter} for details.
* @param readOnly true if no changes (deletions, norms) will be made with this IndexReader
* @param termInfosIndexDivisor Subsamples which indexed
* terms are loaded into RAM. This has the same effect as {@link
* IndexWriter#setTermIndexInterval} except that setting
* must be done at indexing time while this setting can be
* set per reader. When set to N, then one in every
* N*termIndexInterval terms in the index is loaded into
* memory. By setting this to a value > 1 you can reduce
* memory usage, at the expense of higher latency when
* loading a TermInfo. The default value is 1. Set this
* to -1 to skip loading the terms index entirely.
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
*/
  public static IndexReader open(final Directory directory, IndexDeletionPolicy deletionPolicy, boolean readOnly, int termInfosIndexDivisor) throws CorruptIndexException, IOException {
    // Same as above, plus an explicit terms-index divisor (see javadoc above for semantics).
    return open(directory, deletionPolicy, null, readOnly, termInfosIndexDivisor);
  }
/** Expert: returns an IndexReader reading the index in
* the given Directory, using a specific commit and with
* a custom {@link IndexDeletionPolicy}. You should pass
* readOnly=true, since it gives much better concurrent
* performance, unless you intend to do write operations
* (delete documents or change norms) with the reader.
* @param commit the specific {@link IndexCommit} to open;
* see {@link IndexReader#listCommits} to list all commits
* in a directory
* @param deletionPolicy a custom deletion policy (only used
* if you use this reader to perform deletes or to set
* norms); see {@link IndexWriter} for details.
* @param readOnly true if no changes (deletions, norms) will be made with this IndexReader
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
*/
  public static IndexReader open(final IndexCommit commit, IndexDeletionPolicy deletionPolicy, boolean readOnly) throws CorruptIndexException, IOException {
    // Specific commit point + custom deletion policy; default terms-index divisor.
    return open(commit.getDirectory(), deletionPolicy, commit, readOnly, DEFAULT_TERMS_INDEX_DIVISOR);
  }
/** Expert: returns an IndexReader reading the index in
* the given Directory, using a specific commit and with
* a custom {@link IndexDeletionPolicy}. You should pass
* readOnly=true, since it gives much better concurrent
* performance, unless you intend to do write operations
* (delete documents or change norms) with the reader.
* @param commit the specific {@link IndexCommit} to open;
* see {@link IndexReader#listCommits} to list all commits
* in a directory
* @param deletionPolicy a custom deletion policy (only used
* if you use this reader to perform deletes or to set
* norms); see {@link IndexWriter} for details.
* @param readOnly true if no changes (deletions, norms) will be made with this IndexReader
* @param termInfosIndexDivisor Subsamples which indexed
* terms are loaded into RAM. This has the same effect as {@link
* IndexWriter#setTermIndexInterval} except that setting
* must be done at indexing time while this setting can be
* set per reader. When set to N, then one in every
* N*termIndexInterval terms in the index is loaded into
* memory. By setting this to a value > 1 you can reduce
* memory usage, at the expense of higher latency when
* loading a TermInfo. The default value is 1. Set this
* to -1 to skip loading the terms index entirely.
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
*/
  public static IndexReader open(final IndexCommit commit, IndexDeletionPolicy deletionPolicy, boolean readOnly, int termInfosIndexDivisor) throws CorruptIndexException, IOException {
    // Most explicit public overload: commit point, deletion policy, read-only flag, divisor.
    return open(commit.getDirectory(), deletionPolicy, commit, readOnly, termInfosIndexDivisor);
  }
  // All public open() overloads funnel into this single implementation,
  // which delegates to DirectoryReader.
  private static IndexReader open(final Directory directory, final IndexDeletionPolicy deletionPolicy, final IndexCommit commit, final boolean readOnly, int termInfosIndexDivisor) throws CorruptIndexException, IOException {
    return DirectoryReader.open(directory, deletionPolicy, commit, readOnly, termInfosIndexDivisor);
  }
/**
* Refreshes an IndexReader if the index has changed since this instance
* was (re)opened.
* <p>
* Opening an IndexReader is an expensive operation. This method can be used
* to refresh an existing IndexReader to reduce these costs. This method
* tries to only load segments that have changed or were created after the
* IndexReader was (re)opened.
* <p>
* If the index has not changed since this instance was (re)opened, then this
* call is a NOOP and returns this instance. Otherwise, a new instance is
* returned. The old instance is <b>not</b> closed and remains usable.<br>
* <p>
* If the reader is reopened, even though they share
* resources internally, it's safe to make changes
* (deletions, norms) with the new reader. All shared
* mutable state obeys "copy on write" semantics to ensure
* the changes are not seen by other readers.
* <p>
* You can determine whether a reader was actually reopened by comparing the
* old instance with the instance returned by this method:
* <pre>
* IndexReader reader = ...
* ...
 * IndexReader newReader = reader.reopen();
* if (newReader != reader) {
* ... // reader was reopened
* reader.close();
* }
* reader = newReader;
* ...
* </pre>
*
* Be sure to synchronize that code so that other threads,
* if present, can never use reader after it has been
* closed and before it's switched to newReader.
*
* <p><b>NOTE</b>: If this reader is a near real-time
* reader (obtained from {@link IndexWriter#getReader()},
* reopen() will simply call writer.getReader() again for
* you, though this may change in the future.
*
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
*/
  public synchronized IndexReader reopen() throws CorruptIndexException, IOException {
    // Base class cannot reopen; Directory-backed subclasses override this.
    throw new UnsupportedOperationException("This reader does not support reopen().");
  }
  /** Just like {@link #reopen()}, except you can change the
   * readOnly of the original reader. If the index is
   * unchanged but readOnly is different then a new reader
   * will be returned. */
  public synchronized IndexReader reopen(boolean openReadOnly) throws CorruptIndexException, IOException {
    // Default implementation: unsupported; see reopen() above.
    throw new UnsupportedOperationException("This reader does not support reopen().");
  }
  /** Expert: reopen this reader on a specific commit point.
   * This always returns a readOnly reader. If the
   * specified commit point matches what this reader is
   * already on, and this reader is already readOnly, then
   * this same instance is returned; if it is not already
   * readOnly, a readOnly clone is returned. */
  public synchronized IndexReader reopen(final IndexCommit commit) throws CorruptIndexException, IOException {
    // Default implementation: unsupported; see reopen() above.
    throw new UnsupportedOperationException("This reader does not support reopen(IndexCommit).");
  }
/**
* Efficiently clones the IndexReader (sharing most
* internal state).
* <p>
* On cloning a reader with pending changes (deletions,
* norms), the original reader transfers its write lock to
* the cloned reader. This means only the cloned reader
* may make further changes to the index, and commit the
* changes to the index on close, but the old reader still
* reflects all changes made up until it was cloned.
* <p>
* Like {@link #reopen()}, it's safe to make changes to
* either the original or the cloned reader: all shared
* mutable state obeys "copy on write" semantics to ensure
* the changes are not seen by other readers.
* <p>
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
*/
  @Override
  public synchronized Object clone() {
    // Cloning is optional for subclasses; the base class does not implement it.
    throw new UnsupportedOperationException("This reader does not implement clone()");
  }
  /**
   * Clones the IndexReader and optionally changes readOnly. A readOnly
   * reader cannot open a writeable reader.
   * @throws CorruptIndexException if the index is corrupt
   * @throws IOException if there is a low-level IO error
   */
  public synchronized IndexReader clone(boolean openReadOnly) throws CorruptIndexException, IOException {
    // Default implementation: unsupported, mirroring clone() above.
    throw new UnsupportedOperationException("This reader does not implement clone()");
  }
/**
* Returns the directory associated with this index. The Default
* implementation returns the directory specified by subclasses when
* delegating to the IndexReader(Directory) constructor, or throws an
* UnsupportedOperationException if one was not specified.
* @throws UnsupportedOperationException if no directory
*/
  public Directory directory() {
    ensureOpen();  // even the unsupported default rejects calls on a closed reader
    throw new UnsupportedOperationException("This reader does not support this method.");
  }
/**
* Returns the time the index in the named directory was last modified.
* Do not use this to check whether the reader is still up-to-date, use
* {@link #isCurrent()} instead.
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
*/
public static long lastModified(final Directory directory2) throws CorruptIndexException, IOException {
return ((Long) new SegmentInfos.FindSegmentsFile(directory2) {
@Override
public Object doBody(String segmentFileName) throws IOException {
return Long.valueOf(directory2.fileModified(segmentFileName));
}
}.run()).longValue();
}
/**
* Reads version number from segments files. The version number is
* initialized with a timestamp and then increased by one for each change of
* the index.
*
* @param directory where the index resides.
* @return version number.
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
*/
  public static long getCurrentVersion(Directory directory) throws CorruptIndexException, IOException {
    // Delegates to SegmentInfos, which reads the current segments_N file.
    return SegmentInfos.readCurrentVersion(directory);
  }
/**
* Reads commitUserData, previously passed to {@link
* IndexWriter#commit(Map)}, from current index
* segments file. This will return null if {@link
* IndexWriter#commit(Map)} has never been called for
* this index.
*
* @param directory where the index resides.
* @return commit userData.
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
*
* @see #getCommitUserData()
*/
  public static Map<String,String> getCommitUserData(Directory directory) throws CorruptIndexException, IOException {
    // Delegates to SegmentInfos, which reads the current segments_N file.
    return SegmentInfos.readCurrentUserData(directory);
  }
/**
* Version number when this IndexReader was opened. Not
* implemented in the IndexReader base class.
*
* <p>If this reader is based on a Directory (ie, was
* created by calling {@link #open}, or {@link #reopen} on
* a reader based on a Directory), then this method
* returns the version recorded in the commit that the
* reader opened. This version is advanced every time
* {@link IndexWriter#commit} is called.</p>
*
* <p>If instead this reader is a near real-time reader
* (ie, obtained by a call to {@link
* IndexWriter#getReader}, or by calling {@link #reopen}
* on a near real-time reader), then this method returns
* the version of the last commit done by the writer.
* Note that even as further changes are made with the
* writer, the version will not changed until a commit is
* completed. Thus, you should not rely on this method to
* determine when a near real-time reader should be
* opened. Use {@link #isCurrent} instead.</p>
*
* @throws UnsupportedOperationException unless overridden in subclass
*/
  public long getVersion() {
    // Not implemented in the base class; Directory-backed readers override this.
    throw new UnsupportedOperationException("This reader does not support this method.");
  }
  /**
   * Retrieve the String userData optionally passed to
   * IndexWriter#commit. This will return null if {@link
   * IndexWriter#commit(Map)} has never been called for
   * this index.
   *
   * @see #getCommitUserData(Directory)
   */
  public Map<String,String> getCommitUserData() {
    // Not implemented in the base class; Directory-backed readers override this.
    throw new UnsupportedOperationException("This reader does not support this method.");
  }
/**
* Check whether any new changes have occurred to the
* index since this reader was opened.
*
* <p>If this reader is based on a Directory (ie, was
* created by calling {@link #open}, or {@link #reopen} on
* a reader based on a Directory), then this method checks
* if any further commits (see {@link IndexWriter#commit}
* have occurred in that directory).</p>
*
* <p>If instead this reader is a near real-time reader
* (ie, obtained by a call to {@link
* IndexWriter#getReader}, or by calling {@link #reopen}
* on a near real-time reader), then this method checks if
 * either a new commit has occurred, or any new
* uncommitted changes have taken place via the writer.
* Note that even if the writer has only performed
* merging, this method will still return false.</p>
*
* <p>In any event, if this returns false, you should call
* {@link #reopen} to get a new reader that sees the
* changes.</p>
*
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
* @throws UnsupportedOperationException unless overridden in subclass
*/
  public boolean isCurrent() throws CorruptIndexException, IOException {
    // Not implemented in the base class; Directory-backed readers override this.
    throw new UnsupportedOperationException("This reader does not support this method.");
  }
  /**
   * Checks is the index is optimized (if it has a single segment and
   * no deletions). Not implemented in the IndexReader base class.
   * @return <code>true</code> if the index is optimized; <code>false</code> otherwise
   * @throws UnsupportedOperationException unless overridden in subclass
   */
  public boolean isOptimized() {
    // Not implemented in the base class; Directory-backed readers override this.
    throw new UnsupportedOperationException("This reader does not support this method.");
  }
/**
* Return an array of term frequency vectors for the specified document.
* The array contains a vector for each vectorized field in the document.
* Each vector contains terms and frequencies for all terms in a given vectorized field.
* If no such fields existed, the method returns null. The term vectors that are
* returned may either be of type {@link TermFreqVector}
* or of type {@link TermPositionVector} if
* positions or offsets have been stored.
*
* @param docNumber document for which term frequency vectors are returned
* @return array of term frequency vectors. May be null if no term vectors have been
* stored for the specified document.
* @throws IOException if index cannot be accessed
* @see org.apache.lucene.document.Field.TermVector
*/
  // Term-vector access: subclasses implement the actual retrieval.
  abstract public TermFreqVector[] getTermFreqVectors(int docNumber)
          throws IOException;
  /**
   * Return a term frequency vector for the specified document and field. The
   * returned vector contains terms and frequencies for the terms in
   * the specified field of this document, if the field had the storeTermVector
   * flag set. If termvectors had been stored with positions or offsets, a
   * {@link TermPositionVector} is returned.
   *
   * @param docNumber document for which the term frequency vector is returned
   * @param field field for which the term frequency vector is returned.
   * @return term frequency vector May be null if field does not exist in the specified
   * document or term vector was not stored.
   * @throws IOException if index cannot be accessed
   * @see org.apache.lucene.document.Field.TermVector
   */
  abstract public TermFreqVector getTermFreqVector(int docNumber, String field)
          throws IOException;
  /**
   * Load the Term Vector into a user-defined data structure instead of relying on the parallel arrays of
   * the {@link TermFreqVector}.
   * @param docNumber The number of the document to load the vector for
   * @param field The name of the field to load
   * @param mapper The {@link TermVectorMapper} to process the vector. Must not be null
   * @throws IOException if term vectors cannot be accessed or if they do not exist on the field and doc. specified.
   *
   */
  abstract public void getTermFreqVector(int docNumber, String field, TermVectorMapper mapper) throws IOException;
  /**
   * Map all the term vectors for all fields in a Document
   * @param docNumber The number of the document to load the vector for
   * @param mapper The {@link TermVectorMapper} to process the vector. Must not be null
   * @throws IOException if term vectors cannot be accessed or if they do not exist on the field and doc. specified.
   */
  abstract public void getTermFreqVector(int docNumber, TermVectorMapper mapper) throws IOException;
/**
* Returns <code>true</code> if an index exists at the specified directory.
 * <code>false</code> is returned if the directory does not exist or there is no index in it.
* @param directory the directory to check for an index
* @return <code>true</code> if an index exists; <code>false</code> otherwise
* @throws IOException if there is a problem with accessing the index
*/
  public static boolean indexExists(Directory directory) throws IOException {
    // Generation -1 means no segments file was found in the directory.
    return SegmentInfos.getCurrentSegmentGeneration(directory) != -1;
  }
  /** Returns the number of documents in this index. */
  public abstract int numDocs();
  /** Returns one greater than the largest possible document number.
   * This may be used to, e.g., determine how big to allocate an array which
   * will have an element for every document number in an index.
   */
  public abstract int maxDoc();
  /** Returns the number of deleted documents. */
  public int numDeletedDocs() {
    // maxDoc() counts every document slot; numDocs() counts only live documents.
    return maxDoc() - numDocs();
  }
/**
* Returns the stored fields of the <code>n</code><sup>th</sup>
* <code>Document</code> in this index.
* <p>
* <b>NOTE:</b> for performance reasons, this method does not check if the
* requested document is deleted, and therefore asking for a deleted document
* may yield unspecified results. Usually this is not required, however you
* can call {@link #isDeleted(int)} with the requested document ID to verify
* the document is not deleted.
*
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
*/
  public Document document(int n) throws CorruptIndexException, IOException {
    ensureOpen();
    return document(n, null);  // null FieldSelector => load all stored fields
  }
/**
* Get the {@link org.apache.lucene.document.Document} at the <code>n</code>
* <sup>th</sup> position. The {@link FieldSelector} may be used to determine
* what {@link org.apache.lucene.document.Field}s to load and how they should
* be loaded. <b>NOTE:</b> If this Reader (more specifically, the underlying
* <code>FieldsReader</code>) is closed before the lazy
* {@link org.apache.lucene.document.Field} is loaded an exception may be
* thrown. If you want the value of a lazy
* {@link org.apache.lucene.document.Field} to be available after closing you
* must explicitly load it or fetch the Document again with a new loader.
* <p>
* <b>NOTE:</b> for performance reasons, this method does not check if the
* requested document is deleted, and therefore asking for a deleted document
* may yield unspecified results. Usually this is not required, however you
* can call {@link #isDeleted(int)} with the requested document ID to verify
* the document is not deleted.
*
* @param n Get the document at the <code>n</code><sup>th</sup> position
* @param fieldSelector The {@link FieldSelector} to use to determine what
* Fields should be loaded on the Document. May be null, in which case
* all Fields will be loaded.
* @return The stored fields of the
* {@link org.apache.lucene.document.Document} at the nth position
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
* @see org.apache.lucene.document.Fieldable
* @see org.apache.lucene.document.FieldSelector
* @see org.apache.lucene.document.SetBasedFieldSelector
* @see org.apache.lucene.document.LoadFirstFieldSelector
*/
  // TODO: when the API may change, consider taking a Set<String> of field names
  // instead of a FieldSelector (comment predates the generics migration).
  public abstract Document document(int n, FieldSelector fieldSelector) throws CorruptIndexException, IOException;
  /** Returns true if document <i>n</i> has been deleted */
  public abstract boolean isDeleted(int n);
  /** Returns true if any documents have been deleted */
  public abstract boolean hasDeletions();
  /** Returns true if there are norms stored for this field. */
  public boolean hasNorms(String field) throws IOException {
    // Backward-compatible default: materializes the norms array just to test
    // for null. SegmentReader overrides this with an efficient implementation.
    ensureOpen();
    return norms(field) != null;
  }
  /** Returns the byte-encoded normalization factor for the named field of
   * every document. This is used by the search code to score documents.
   *
   * @see org.apache.lucene.document.Field#setBoost(float)
   */
  public abstract byte[] norms(String field) throws IOException;
  /** Reads the byte-encoded normalization factor for the named field of every
   * document into {@code bytes}, starting at {@code offset}. This is used by
   * the search code to score documents.
   *
   * @see org.apache.lucene.document.Field#setBoost(float)
   */
  public abstract void norms(String field, byte[] bytes, int offset)
    throws IOException;
/** Expert: Resets the normalization factor for the named field of the named
* document. The norm represents the product of the field's {@link
* org.apache.lucene.document.Fieldable#setBoost(float) boost} and its {@link Similarity#lengthNorm(String,
* int) length normalization}. Thus, to preserve the length normalization
* values when resetting this, one should base the new value upon the old.
*
* <b>NOTE:</b> If this field does not store norms, then
* this method call will silently do nothing.
*
* @see #norms(String)
* @see Similarity#decodeNorm(byte)
* @throws StaleReaderException if the index has changed
* since this reader was opened
* @throws CorruptIndexException if the index is corrupt
* @throws LockObtainFailedException if another writer
* has this index open (<code>write.lock</code> could not
* be obtained)
* @throws IOException if there is a low-level IO error
*/
  public synchronized void setNorm(int doc, String field, byte value)
          throws StaleReaderException, CorruptIndexException, LockObtainFailedException, IOException {
    ensureOpen();
    acquireWriteLock();            // may throw if another writer holds the lock or the index changed
    hasChanges = true;             // marked before applying, so pending changes are committed on close
    doSetNorm(doc, field, value);
  }
  /** Implements setNorm in subclass.*/
  protected abstract void doSetNorm(int doc, String field, byte value)
          throws CorruptIndexException, IOException;
/** Expert: Resets the normalization factor for the named field of the named
* document.
*
* @see #norms(String)
* @see Similarity#decodeNorm(byte)
*
* @throws StaleReaderException if the index has changed
* since this reader was opened
* @throws CorruptIndexException if the index is corrupt
* @throws LockObtainFailedException if another writer
* has this index open (<code>write.lock</code> could not
* be obtained)
* @throws IOException if there is a low-level IO error
*/
  public void setNorm(int doc, String field, float value)
          throws StaleReaderException, CorruptIndexException, LockObtainFailedException, IOException {
    ensureOpen();
    // Encode the float to Lucene's single-byte norm format, then delegate to
    // the byte overload (which acquires the write lock).
    setNorm(doc, field, Similarity.encodeNorm(value));
  }
  /** Returns an enumeration of all the terms in the index. The
   * enumeration is ordered by Term.compareTo(). Each term is greater
   * than all that precede it in the enumeration. Note that after
   * calling terms(), {@link TermEnum#next()} must be called
   * on the resulting enumeration before calling other methods such as
   * {@link TermEnum#term()}.
   * @throws IOException if there is a low-level IO error
   */
  public abstract TermEnum terms() throws IOException;
  /** Returns an enumeration of all terms starting at a given term. If
   * the given term does not exist, the enumeration is positioned at the
   * first term greater than the supplied term. The enumeration is
   * ordered by Term.compareTo(). Each term is greater than all that
   * precede it in the enumeration.
   * @throws IOException if there is a low-level IO error
   */
  public abstract TermEnum terms(Term t) throws IOException;
  /** Returns the number of documents containing the term <code>t</code>.
   * @throws IOException if there is a low-level IO error
   */
  public abstract int docFreq(Term t) throws IOException;
/** Returns an enumeration of all the documents which contain
* <code>term</code>. For each document, the document number, the frequency of
* the term in that document is also provided, for use in
* search scoring. If term is null, then all non-deleted
* docs are returned with freq=1.
* Thus, this method implements the mapping:
* <p><ul>
* Term => <docNum, freq><sup>*</sup>
* </ul>
* <p>The enumeration is ordered by document number. Each document number
* is greater than all that precede it in the enumeration.
* @throws IOException if there is a low-level IO error
*/
public TermDocs termDocs(Term term) throws IOException {
ensureOpen();
TermDocs termDocs = termDocs();
termDocs.seek(term);
return termDocs;
}
  /** Returns an unpositioned {@link TermDocs} enumerator.
   * Callers must seek it (e.g. via {@link TermDocs#seek}) before use.
   * @throws IOException if there is a low-level IO error
   */
  public abstract TermDocs termDocs() throws IOException;
/** Returns an enumeration of all the documents which contain
* <code>term</code>. For each document, in addition to the document number
* and frequency of the term in that document, a list of all of the ordinal
* positions of the term in the document is available. Thus, this method
* implements the mapping:
*
* <p><ul>
* Term => <docNum, freq,
* <pos<sub>1</sub>, pos<sub>2</sub>, ...
* pos<sub>freq-1</sub>>
* ><sup>*</sup>
* </ul>
* <p> This positional information facilitates phrase and proximity searching.
* <p>The enumeration is ordered by document number. Each document number is
* greater than all that precede it in the enumeration.
* @throws IOException if there is a low-level IO error
*/
public TermPositions termPositions(Term term) throws IOException {
ensureOpen();
TermPositions termPositions = termPositions();
termPositions.seek(term);
return termPositions;
}
  /** Returns an unpositioned {@link TermPositions} enumerator.
   * Callers must seek it before use.
   * @throws IOException if there is a low-level IO error
   */
  public abstract TermPositions termPositions() throws IOException;
  /** Deletes the document numbered <code>docNum</code>. Once a document is
   * deleted it will not appear in TermDocs or TermPostitions enumerations.
   * Attempts to read its field with the {@link #document}
   * method will result in an error. The presence of this document may still be
   * reflected in the {@link #docFreq} statistic, though
   * this will be corrected eventually as the index is further modified.
   *
   * @throws StaleReaderException if the index has changed
   * since this reader was opened
   * @throws CorruptIndexException if the index is corrupt
   * @throws LockObtainFailedException if another writer
   * has this index open (<code>write.lock</code> could not
   * be obtained)
   * @throws IOException if there is a low-level IO error
   */
  public synchronized void deleteDocument(int docNum) throws StaleReaderException, CorruptIndexException, LockObtainFailedException, IOException {
    ensureOpen();
    // Acquire the write lock first; if it fails we must not mark the
    // reader dirty or apply the deletion.
    acquireWriteLock();
    hasChanges = true;
    doDelete(docNum);
  }
  /** Implements deletion of the document numbered <code>docNum</code>.
   * Called with the write lock already held and {@code hasChanges} set.
   * Applications should call {@link #deleteDocument(int)} or {@link #deleteDocuments(Term)}.
   */
  protected abstract void doDelete(int docNum) throws CorruptIndexException, IOException;
/** Deletes all documents that have a given <code>term</code> indexed.
* This is useful if one uses a document field to hold a unique ID string for
* the document. Then to delete such a document, one merely constructs a
* term with the appropriate field and the unique ID string as its text and
* passes it to this method.
* See {@link #deleteDocument(int)} for information about when this deletion will
* become effective.
*
* @return the number of documents deleted
* @throws StaleReaderException if the index has changed
* since this reader was opened
* @throws CorruptIndexException if the index is corrupt
* @throws LockObtainFailedException if another writer
* has this index open (<code>write.lock</code> could not
* be obtained)
* @throws IOException if there is a low-level IO error
*/
public int deleteDocuments(Term term) throws StaleReaderException, CorruptIndexException, LockObtainFailedException, IOException {
ensureOpen();
TermDocs docs = termDocs(term);
if (docs == null) return 0;
int n = 0;
try {
while (docs.next()) {
deleteDocument(docs.doc());
n++;
}
} finally {
docs.close();
}
return n;
}
  /** Undeletes all documents currently marked as deleted in this index.
   *
   * @throws StaleReaderException if the index has changed
   * since this reader was opened
   * @throws LockObtainFailedException if another writer
   * has this index open (<code>write.lock</code> could not
   * be obtained)
   * @throws CorruptIndexException if the index is corrupt
   * @throws IOException if there is a low-level IO error
   */
  public synchronized void undeleteAll() throws StaleReaderException, CorruptIndexException, LockObtainFailedException, IOException {
    ensureOpen();
    // Same protocol as deleteDocument: lock first, then mark dirty,
    // then apply the change in the subclass.
    acquireWriteLock();
    hasChanges = true;
    doUndeleteAll();
  }
  /** Implements actual undeleteAll() in subclass.
   * Called with the write lock already held and {@code hasChanges} set. */
  protected abstract void doUndeleteAll() throws CorruptIndexException, IOException;
  /** Does nothing by default. Subclasses that require a write lock for
   * index modifications must implement this method.
   * Implementations should throw {@link LockObtainFailedException} when the
   * lock cannot be obtained (see DirectoryReader's override). */
  protected synchronized void acquireWriteLock() throws IOException {
    /* NOOP */
  }
  /**
   * Commits any pending changes (from delete, undeleteAll, or setNorm
   * operations) to the index by delegating to {@link #commit()}.
   *
   * @throws IOException if there is a low-level IO error
   */
  public final synchronized void flush() throws IOException {
    ensureOpen();
    commit();
  }
  /**
   * Commits any pending changes, recording the given user data with the
   * commit.
   *
   * @param commitUserData Opaque Map (String -> String)
   *  that's recorded into the segments file in the index,
   *  and retrievable by {@link
   *  IndexReader#getCommitUserData}.
   * @throws IOException if there is a low-level IO error
   */
  public final synchronized void flush(Map<String, String> commitUserData) throws IOException {
    ensureOpen();
    commit(commitUserData);
  }
  /**
   * Commit changes resulting from delete, undeleteAll, or
   * setNorm operations
   *
   * If an exception is hit, then either no changes or all
   * changes will have been committed to the index
   * (transactional semantics).
   * @throws IOException if there is a low-level IO error
   */
  protected final synchronized void commit() throws IOException {
    // Commit with no user data recorded in the segments file.
    commit(null);
  }
/**
* Commit changes resulting from delete, undeleteAll, or
* setNorm operations
*
* If an exception is hit, then either no changes or all
* changes will have been committed to the index
* (transactional semantics).
* @throws IOException if there is a low-level IO error
*/
public final synchronized void commit(Map<String, String> commitUserData) throws IOException {
if (hasChanges) {
doCommit(commitUserData);
}
hasChanges = false;
}
  /** Implements commit. Called only when {@code hasChanges} is true.
   * @param commitUserData opaque user data to record with the commit (may be null) */
  protected abstract void doCommit(Map<String, String> commitUserData) throws IOException;
  /**
   * Closes files associated with this index.
   * Also saves any new deletions to disk.
   * No other methods should be called after this has been called.
   * Calling close() more than once is a no-op (only the first call
   * decrements the reference count).
   * @throws IOException if there is a low-level IO error
   */
  public final synchronized void close() throws IOException {
    if (!closed) {
      decRef();
      closed = true;
    }
  }
  /** Implements close. Invoked when the reference count drops to zero
   * (see {@link #close()}), not directly by {@code close()} itself. */
  protected abstract void doClose() throws IOException;
  /**
   * Get a list of unique field names that exist in this index and have the specified
   * field option information.
   * @param fldOption specifies which field option should be available for the returned fields
   * @return Collection of Strings indicating the names of the fields.
   * @see IndexReader.FieldOption
   */
  public abstract Collection<String> getFieldNames(FieldOption fldOption);
  /**
   * Expert: return the IndexCommit that this reader has
   * opened. This method is only implemented by those
   * readers that correspond to a Directory with its own
   * segments_N file.
   *
   * <p>The base implementation always throws
   * {@link UnsupportedOperationException}.</p>
   *
   * <p><b>WARNING</b>: this API is new and experimental and
   * may suddenly change.</p>
   */
  public IndexCommit getIndexCommit() throws IOException {
    throw new UnsupportedOperationException("This reader does not support this method.");
  }
/**
* Prints the filename and size of each file within a given compound file.
* Add the -extract flag to extract files to the current working directory.
* In order to make the extracted version of the index work, you have to copy
* the segments file from the compound index into the directory where the extracted files are stored.
* @param args Usage: org.apache.lucene.index.IndexReader [-extract] <cfsfile>
*/
public static void main(String [] args) {
String filename = null;
boolean extract = false;
for (int i = 0; i < args.length; ++i) {
if (args[i].equals("-extract")) {
extract = true;
} else if (filename == null) {
filename = args[i];
}
}
if (filename == null) {
System.out.println("Usage: org.apache.lucene.index.IndexReader [-extract] <cfsfile>");
return;
}
Directory dir = null;
CompoundFileReader cfr = null;
try {
File file = new File(filename);
String dirname = file.getAbsoluteFile().getParent();
filename = file.getName();
dir = FSDirectory.open(new File(dirname));
cfr = new CompoundFileReader(dir, filename);
String [] files = cfr.listAll();
Arrays.sort(files); // sort the array of filename so that the output is more readable
for (int i = 0; i < files.length; ++i) {
long len = cfr.fileLength(files[i]);
if (extract) {
System.out.println("extract " + files[i] + " with " + len + " bytes to local directory...");
IndexInput ii = cfr.openInput(files[i]);
FileOutputStream f = new FileOutputStream(files[i]);
// read and write with a small buffer, which is more effective than reading byte by byte
byte[] buffer = new byte[1024];
int chunk = buffer.length;
while(len > 0) {
final int bufLen = (int) Math.min(chunk, len);
ii.readBytes(buffer, 0, bufLen);
f.write(buffer, 0, bufLen);
len -= bufLen;
}
f.close();
ii.close();
}
else
System.out.println(files[i] + ": " + len + " bytes");
}
} catch (IOException ioe) {
ioe.printStackTrace();
}
finally {
try {
if (dir != null)
dir.close();
if (cfr != null)
cfr.close();
}
catch (IOException ioe) {
ioe.printStackTrace();
}
}
}
  /** Returns all commit points that exist in the Directory.
   * Normally, because the default is {@link
   * KeepOnlyLastCommitDeletionPolicy}, there would be only
   * one commit point. But if you're using a custom {@link
   * IndexDeletionPolicy} then there could be many commits.
   * Once you have a given commit, you can open a reader on
   * it by calling {@link IndexReader#open(IndexCommit,boolean)}
   * There must be at least one commit in
   * the Directory, else this method throws {@link
   * java.io.IOException}. Note that if a commit is in
   * progress while this method is running, that commit
   * may or may not be returned array. */
  public static Collection<IndexCommit> listCommits(Directory dir) throws IOException {
    // Delegates to DirectoryReader, which knows how to enumerate segments_N files.
    return DirectoryReader.listCommits(dir);
  }
  /** Expert: returns the sequential sub readers that this
   * reader is logically composed of. For example,
   * IndexSearcher uses this API to drive searching by one
   * sub reader at a time. If this reader is not composed
   * of sequential child readers, it should return null.
   * If this method returns an empty array, that means this
   * reader is a null reader (for example a MultiReader
   * that has no sub readers).
   * <p>
   * NOTE: You should not try using sub-readers returned by
   * this method to make any changes (setNorm, deleteDocument,
   * etc.). While this might succeed for one composite reader
   * (like MultiReader), it will most likely lead to index
   * corruption for other readers (like DirectoryReader obtained
   * through {@link #open}. Use the parent reader directly.
   * <p>The base implementation returns null (no sub readers). */
  public IndexReader[] getSequentialSubReaders() {
    return null;
  }
  /** Expert: key used by FieldCache to associate cached values with this
   * reader; the base implementation uses the reader instance itself. */
  public Object getFieldCacheKey() {
    return this;
  }
  /** Expert. Warning: this returns null if the reader has
   * no deletions.
   * NOTE(review): this base implementation always returns {@code this},
   * never null — the null behavior described above presumably applies to
   * overriding subclasses; confirm against concrete readers. */
  public Object getDeletesCacheKey() {
    return this;
  }
  /** Returns the number of unique terms (across all fields)
   * in this reader.
   *
   * This method returns long, even though internally
   * Lucene cannot handle more than 2^31 unique terms, for
   * a possible future when this limitation is removed.
   *
   * <p>The base implementation always throws; subclasses that can compute
   * the count cheaply should override.</p>
   *
   * @throws UnsupportedOperationException if this count
   *  cannot be easily determined (eg Multi*Readers).
   *  Instead, you should call {@link
   *  #getSequentialSubReaders} and ask each sub reader for
   *  its unique term count. */
  public long getUniqueTermCount() throws IOException {
    throw new UnsupportedOperationException("this reader does not implement getUniqueTermCount()");
  }
  /** For IndexReader implementations that use
   * TermInfosReader to read terms, this returns the
   * current indexDivisor as specified when the reader was
   * opened.
   * <p>The base implementation always throws
   * {@link UnsupportedOperationException}.</p>
   */
  public int getTermInfosIndexDivisor() {
    throw new UnsupportedOperationException("This reader does not support this method.");
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/IndexReader.java | Java | art | 49,757 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.FieldSelector;
import org.apache.lucene.search.DefaultSimilarity;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.Lock;
import org.apache.lucene.store.LockObtainFailedException;
import org.apache.lucene.search.FieldCache; // not great (circular); used only to purge FieldCache entry on close
/**
* An IndexReader which reads indexes with multiple segments.
*/
class DirectoryReader extends IndexReader implements Cloneable {
  protected Directory directory;
  protected boolean readOnly;
  // Non-null only for near-real-time readers obtained via IndexWriter.getReader()
  IndexWriter writer;
  private IndexDeletionPolicy deletionPolicy;
  // Files assumed to have been sync'd to stable storage already (populated
  // only for non-readOnly readers; see the constructors)
  private final HashSet<String> synced = new HashSet<String>();
  // Held while this reader performs deletes/setNorm; null when not acquired
  private Lock writeLock;
  private SegmentInfos segmentInfos;
  // Snapshot of segmentInfos at open time (used by the NRT constructor)
  private SegmentInfos segmentInfosStart;
  // Set when the on-disk index advanced past this reader's version; the
  // reader is then invalid for write operations (see acquireWriteLock)
  private boolean stale;
  private final int termInfosIndexDivisor;
  private boolean rollbackHasChanges;
  private SegmentInfos rollbackSegmentInfos;
  private SegmentReader[] subReaders;
  private int[] starts; // 1st docno for each segment
  // Per-field whole-index norms, rebuilt lazily in norms(String)
  private Map<String,byte[]> normsCache = new HashMap<String,byte[]>();
  private int maxDoc = 0;
  private int numDocs = -1;   // -1 means "not yet computed"; see numDocs()
  private boolean hasDeletions = false;
  /** Opens a (possibly read-only) DirectoryReader over the newest (or the
   * given) commit, retrying via FindSegmentsFile when the segments file is
   * concurrently replaced by a writer. */
  static IndexReader open(final Directory directory, final IndexDeletionPolicy deletionPolicy, final IndexCommit commit, final boolean readOnly,
                          final int termInfosIndexDivisor) throws CorruptIndexException, IOException {
    return (IndexReader) new SegmentInfos.FindSegmentsFile(directory) {
      @Override
      protected Object doBody(String segmentFileName) throws CorruptIndexException, IOException {
        // Read the chosen segments_N and build the matching reader flavor.
        SegmentInfos infos = new SegmentInfos();
        infos.read(directory, segmentFileName);
        if (readOnly)
          return new ReadOnlyDirectoryReader(directory, infos, deletionPolicy, termInfosIndexDivisor);
        else
          return new DirectoryReader(directory, infos, deletionPolicy, false, termInfosIndexDivisor);
      }
    }.run(commit);
  }
  /** Construct reading the named set of readers.
   * Opens one SegmentReader per segment in {@code sis}; on failure, every
   * reader already opened is closed before the exception propagates. */
  DirectoryReader(Directory directory, SegmentInfos sis, IndexDeletionPolicy deletionPolicy, boolean readOnly, int termInfosIndexDivisor) throws IOException {
    this.directory = directory;
    this.readOnly = readOnly;
    this.segmentInfos = sis;
    this.deletionPolicy = deletionPolicy;
    this.termInfosIndexDivisor = termInfosIndexDivisor;
    if (!readOnly) {
      // We assume that this segments_N was previously
      // properly sync'd:
      synced.addAll(sis.files(directory, true));
    }
    // To reduce the chance of hitting FileNotFound
    // (and having to retry), we open segments in
    // reverse because IndexWriter merges & deletes
    // the newest segments first.
    SegmentReader[] readers = new SegmentReader[sis.size()];
    for (int i = sis.size()-1; i >= 0; i--) {
      boolean success = false;
      try {
        readers[i] = SegmentReader.get(readOnly, sis.info(i), termInfosIndexDivisor);
        success = true;
      } finally {
        if (!success) {
          // Close all readers we had opened: because we iterate downward,
          // the already-opened readers live at indices i+1 .. size-1.
          for(i++;i<sis.size();i++) {
            try {
              readers[i].close();
            } catch (Throwable ignore) {
              // keep going - we want to clean up as much as possible
            }
          }
        }
      }
    }
    initialize(readers);
  }
  // Used by near real-time search: wraps the writer's in-flight SegmentInfos,
  // borrowing read-only segment reader clones from the writer's reader pool.
  DirectoryReader(IndexWriter writer, SegmentInfos infos, int termInfosIndexDivisor) throws IOException {
    this.directory = writer.getDirectory();
    this.readOnly = true;
    segmentInfos = infos;
    segmentInfosStart = (SegmentInfos) infos.clone();
    this.termInfosIndexDivisor = termInfosIndexDivisor;
    if (!readOnly) {
      // NOTE(review): readOnly was just set to true above, so this branch
      // is dead here; it mirrors the other constructors' bookkeeping.
      // We assume that this segments_N was previously
      // properly sync'd:
      synced.addAll(infos.files(directory, true));
    }
    // IndexWriter synchronizes externally before calling
    // us, which ensures infos will not change; so there's
    // no need to process segments in reverse order
    final int numSegments = infos.size();
    SegmentReader[] readers = new SegmentReader[numSegments];
    final Directory dir = writer.getDirectory();
    int upto = 0;
    for (int i=0;i<numSegments;i++) {
      boolean success = false;
      try {
        final SegmentInfo info = infos.info(i);
        // Only segments living in the writer's own Directory are visible;
        // foreign-Directory segments are skipped (see the trim below).
        if (info.dir == dir) {
          readers[upto++] = writer.readerPool.getReadOnlyClone(info, true, termInfosIndexDivisor);
        }
        success = true;
      } finally {
        if (!success) {
          // Close all readers we had opened:
          for(upto--;upto>=0;upto--) {
            try {
              readers[upto].close();
            } catch (Throwable ignore) {
              // keep going - we want to clean up as much as possible
            }
          }
        }
      }
    }
    this.writer = writer;
    if (upto < readers.length) {
      // This means some segments were in a foreign Directory:
      // trim the array down to the readers actually obtained.
      SegmentReader[] newReaders = new SegmentReader[upto];
      System.arraycopy(readers, 0, newReaders, 0, upto);
      readers = newReaders;
    }
    initialize(readers);
  }
  /** This constructor is only used for {@link #reopen()}.
   * Reuses unchanged SegmentReaders from {@code oldReaders} (incRef'ing
   * shared ones), opens/reopens the rest, and migrates still-valid entries
   * of {@code oldNormsCache} into the new reader's norms cache. */
  DirectoryReader(Directory directory, SegmentInfos infos, SegmentReader[] oldReaders, int[] oldStarts,
                  Map<String,byte[]> oldNormsCache, boolean readOnly, boolean doClone, int termInfosIndexDivisor) throws IOException {
    this.directory = directory;
    this.readOnly = readOnly;
    this.segmentInfos = infos;
    this.termInfosIndexDivisor = termInfosIndexDivisor;
    if (!readOnly) {
      // We assume that this segments_N was previously
      // properly sync'd:
      synced.addAll(infos.files(directory, true));
    }
    // we put the old SegmentReaders in a map, that allows us
    // to lookup a reader using its segment name
    Map<String,Integer> segmentReaders = new HashMap<String,Integer>();
    if (oldReaders != null) {
      // create a Map SegmentName->SegmentReader
      for (int i = 0; i < oldReaders.length; i++) {
        segmentReaders.put(oldReaders[i].getSegmentName(), Integer.valueOf(i));
      }
    }
    SegmentReader[] newReaders = new SegmentReader[infos.size()];
    // remember which readers are shared between the old and the re-opened
    // DirectoryReader - we have to incRef those readers
    boolean[] readerShared = new boolean[infos.size()];
    for (int i = infos.size() - 1; i>=0; i--) {
      // find SegmentReader for this segment
      Integer oldReaderIndex = segmentReaders.get(infos.info(i).name);
      if (oldReaderIndex == null) {
        // this is a new segment, no old SegmentReader can be reused
        newReaders[i] = null;
      } else {
        // there is an old reader for this segment - we'll try to reopen it
        newReaders[i] = oldReaders[oldReaderIndex.intValue()];
      }
      boolean success = false;
      try {
        SegmentReader newReader;
        if (newReaders[i] == null || infos.info(i).getUseCompoundFile() != newReaders[i].getSegmentInfo().getUseCompoundFile()) {
          // We should never see a totally new segment during cloning
          assert !doClone;
          // this is a new reader; in case we hit an exception we can close it safely
          newReader = SegmentReader.get(readOnly, infos.info(i), termInfosIndexDivisor);
        } else {
          newReader = newReaders[i].reopenSegment(infos.info(i), doClone, readOnly);
        }
        if (newReader == newReaders[i]) {
          // this reader will be shared between the old and the new one,
          // so we must incRef it
          readerShared[i] = true;
          newReader.incRef();
        } else {
          readerShared[i] = false;
          newReaders[i] = newReader;
        }
        success = true;
      } finally {
        if (!success) {
          // Cleanup: we iterate downward, so readers at indices > i are the
          // ones already prepared and must be released.
          for (i++; i < infos.size(); i++) {
            if (newReaders[i] != null) {
              try {
                if (!readerShared[i]) {
                  // this is a new subReader that is not used by the old one,
                  // we can close it
                  newReaders[i].close();
                } else {
                  // this subReader is also used by the old reader, so instead
                  // closing we must decRef it
                  newReaders[i].decRef();
                }
              } catch (IOException ignore) {
                // keep going - we want to clean up as much as possible
              }
            }
          }
        }
      }
    }
    // initialize the readers to calculate maxDoc before we try to reuse the old normsCache
    initialize(newReaders);
    // try to copy unchanged norms from the old normsCache to the new one
    if (oldNormsCache != null) {
      for (Map.Entry<String,byte[]> entry: oldNormsCache.entrySet()) {
        String field = entry.getKey();
        if (!hasNorms(field)) {
          continue;
        }
        byte[] oldBytes = entry.getValue();
        byte[] bytes = new byte[maxDoc()];
        for (int i = 0; i < subReaders.length; i++) {
          Integer oldReaderIndex = segmentReaders.get(subReaders[i].getSegmentName());
          // this SegmentReader was not re-opened, we can copy all of its norms
          if (oldReaderIndex != null &&
               (oldReaders[oldReaderIndex.intValue()] == subReaders[i]
                 || oldReaders[oldReaderIndex.intValue()].norms.get(field) == subReaders[i].norms.get(field))) {
            // we don't have to synchronize here: either this constructor is called from a SegmentReader,
            // in which case no old norms cache is present, or it is called from MultiReader.reopen(),
            // which is synchronized
            System.arraycopy(oldBytes, oldStarts[oldReaderIndex.intValue()], bytes, starts[i], starts[i+1] - starts[i]);
          } else {
            subReaders[i].norms(field, bytes, starts[i]);
          }
        }
        normsCache.put(field, bytes); // update cache
      }
    }
  }
  /** Records the sub readers and builds the cumulative {@code starts} array
   * (starts[i] = first docno of segment i; starts[length] = maxDoc), also
   * accumulating {@code maxDoc} and the {@code hasDeletions} flag. */
  private void initialize(SegmentReader[] subReaders) {
    this.subReaders = subReaders;
    starts = new int[subReaders.length + 1];    // build starts array
    for (int i = 0; i < subReaders.length; i++) {
      starts[i] = maxDoc;
      maxDoc += subReaders[i].maxDoc();      // compute maxDocs
      if (subReaders[i].hasDeletions())
        hasDeletions = true;
    }
    starts[subReaders.length] = maxDoc;
  }
  /** Clones this reader, preserving its current readOnly flavor.
   * Checked exceptions from {@link #clone(boolean)} are wrapped in a
   * RuntimeException because Object.clone() cannot declare them. */
  @Override
  public final synchronized Object clone() {
    try {
      return clone(readOnly); // Preserve current readOnly
    } catch (Exception ex) {
      throw new RuntimeException(ex);
    }
  }
  /** Clones this reader as read-only or writable. When cloning to a
   * writable reader, the write lock (if held) and pending-changes state
   * are transferred to the clone and cleared on this reader. */
  @Override
  public final synchronized IndexReader clone(boolean openReadOnly) throws CorruptIndexException, IOException {
    // doReopen with doClone=true always clones the segment infos snapshot.
    DirectoryReader newReader = doReopen((SegmentInfos) segmentInfos.clone(), true, openReadOnly);
    if (this != newReader) {
      newReader.deletionPolicy = deletionPolicy;
    }
    newReader.writer = writer;
    // If we're cloning a non-readOnly reader, move the
    // writeLock (if there is one) to the new reader:
    if (!openReadOnly && writeLock != null) {
      // In near real-time search, reader is always readonly
      assert writer == null;
      newReader.writeLock = writeLock;
      newReader.hasChanges = hasChanges;
      newReader.hasDeletions = hasDeletions;
      // This reader no longer owns the lock nor the pending changes.
      writeLock = null;
      hasChanges = false;
    }
    return newReader;
  }
  /** Reopens this reader against the latest index state, keeping the
   * current readOnly flavor; may return {@code this} if nothing changed. */
  @Override
  public final IndexReader reopen() throws CorruptIndexException, IOException {
    // Preserve current readOnly
    return doReopen(readOnly, null);
  }
  /** Reopens this reader with the requested readOnly flavor against the
   * latest index state. */
  @Override
  public final IndexReader reopen(boolean openReadOnly) throws CorruptIndexException, IOException {
    return doReopen(openReadOnly, null);
  }
  /** Reopens this reader on a specific commit point; the result is always
   * read-only (openReadOnly is forced to true). */
  @Override
  public final IndexReader reopen(final IndexCommit commit) throws CorruptIndexException, IOException {
    return doReopen(true, commit);
  }
  /** Reopen path for near-real-time readers: validates the arguments and
   * asks the owning IndexWriter for a fresh reader. */
  private final IndexReader doReopenFromWriter(boolean openReadOnly, IndexCommit commit) throws CorruptIndexException, IOException {
    assert readOnly;
    if (!openReadOnly) {
      throw new IllegalArgumentException("a reader obtained from IndexWriter.getReader() can only be reopened with openReadOnly=true (got false)");
    }
    if (commit != null) {
      throw new IllegalArgumentException("a reader obtained from IndexWriter.getReader() cannot currently accept a commit");
    }
    // TODO: right now we *always* make a new reader; in
    // the future we could have write make some effort to
    // detect that no changes have occurred
    return writer.getReader();
  }
private IndexReader doReopen(final boolean openReadOnly, IndexCommit commit) throws CorruptIndexException, IOException {
ensureOpen();
assert commit == null || openReadOnly;
// If we were obtained by writer.getReader(), re-ask the
// writer to get a new reader.
if (writer != null) {
return doReopenFromWriter(openReadOnly, commit);
} else {
return doReopenNoWriter(openReadOnly, commit);
}
}
  /** Segments-file based reopen: returns {@code this} (or a flavor-changing
   * clone) when nothing changed, otherwise reads the target segments_N and
   * builds a new reader sharing unchanged sub readers. */
  private synchronized IndexReader doReopenNoWriter(final boolean openReadOnly, IndexCommit commit) throws CorruptIndexException, IOException {
    if (commit == null) {
      if (hasChanges) {
        // We have changes, which means we are not readOnly:
        assert readOnly == false;
        // and we hold the write lock:
        assert writeLock != null;
        // so no other writer holds the write lock, which
        // means no changes could have been done to the index:
        assert isCurrent();
        if (openReadOnly) {
          return clone(openReadOnly);
        } else {
          return this;
        }
      } else if (isCurrent()) {
        if (openReadOnly != readOnly) {
          // Just fallback to clone
          return clone(openReadOnly);
        } else {
          return this;
        }
      }
    } else {
      if (directory != commit.getDirectory())
        throw new IOException("the specified commit does not match the specified Directory");
      // Already positioned on the requested commit? Then reopen is a no-op
      // (or a flavor-changing clone).
      if (segmentInfos != null && commit.getSegmentsFileName().equals(segmentInfos.getCurrentSegmentFileName())) {
        if (readOnly != openReadOnly) {
          // Just fallback to clone
          return clone(openReadOnly);
        } else {
          return this;
        }
      }
    }
    // Something changed: read the (new) segments file, retrying on races
    // with a concurrent writer via FindSegmentsFile.
    return (IndexReader) new SegmentInfos.FindSegmentsFile(directory) {
      @Override
      protected Object doBody(String segmentFileName) throws CorruptIndexException, IOException {
        SegmentInfos infos = new SegmentInfos();
        infos.read(directory, segmentFileName);
        return doReopen(infos, false, openReadOnly);
      }
    }.run(commit);
  }
private synchronized DirectoryReader doReopen(SegmentInfos infos, boolean doClone, boolean openReadOnly) throws CorruptIndexException, IOException {
DirectoryReader reader;
if (openReadOnly) {
reader = new ReadOnlyDirectoryReader(directory, infos, subReaders, starts, normsCache, doClone, termInfosIndexDivisor);
} else {
reader = new DirectoryReader(directory, infos, subReaders, starts, normsCache, false, doClone, termInfosIndexDivisor);
}
return reader;
}
  /** Version number when this IndexReader was opened.
   * Taken from the SegmentInfos this reader was opened on. */
  @Override
  public long getVersion() {
    ensureOpen();
    return segmentInfos.getVersion();
  }
  /** Returns all term vectors for document {@code n} by dispatching to the
   * segment that contains it (doc number translated to a segment-local id). */
  @Override
  public TermFreqVector[] getTermFreqVectors(int n) throws IOException {
    ensureOpen();
    int i = readerIndex(n);        // find segment num
    return subReaders[i].getTermFreqVectors(n - starts[i]); // dispatch to segment
  }
  /** Returns the term vector of one field of document {@code n},
   * dispatching to the owning segment. */
  @Override
  public TermFreqVector getTermFreqVector(int n, String field)
      throws IOException {
    ensureOpen();
    int i = readerIndex(n);        // find segment num
    return subReaders[i].getTermFreqVector(n - starts[i], field);
  }
  /** Streams one field's term vector of the given document into
   * {@code mapper}, dispatching to the owning segment. */
  @Override
  public void getTermFreqVector(int docNumber, String field, TermVectorMapper mapper) throws IOException {
    ensureOpen();
    int i = readerIndex(docNumber);        // find segment num
    subReaders[i].getTermFreqVector(docNumber - starts[i], field, mapper);
  }
  /** Streams all term vectors of the given document into {@code mapper},
   * dispatching to the owning segment. */
  @Override
  public void getTermFreqVector(int docNumber, TermVectorMapper mapper) throws IOException {
    ensureOpen();
    int i = readerIndex(docNumber);        // find segment num
    subReaders[i].getTermFreqVector(docNumber - starts[i], mapper);
  }
  /**
   * Checks is the index is optimized (if it has a single segment and no deletions)
   * @return <code>true</code> if the index is optimized; <code>false</code> otherwise
   */
  @Override
  public boolean isOptimized() {
    ensureOpen();
    return segmentInfos.size() == 1 && !hasDeletions();
  }
@Override
public int numDocs() {
// Don't call ensureOpen() here (it could affect performance)
// NOTE: multiple threads may wind up init'ing
// numDocs... but that's harmless
if (numDocs == -1) { // check cache
int n = 0; // cache miss--recompute
for (int i = 0; i < subReaders.length; i++)
n += subReaders[i].numDocs(); // sum from readers
numDocs = n;
}
return numDocs;
}
  /** Returns one greater than the largest document number, precomputed
   * in {@link #initialize} as the sum of all segment maxDocs. */
  @Override
  public int maxDoc() {
    // Don't call ensureOpen() here (it could affect performance)
    return maxDoc;
  }
  // inherit javadoc
  /** Loads the stored fields of document {@code n} (filtered by
   * {@code fieldSelector}) from the segment that owns it. */
  @Override
  public Document document(int n, FieldSelector fieldSelector) throws CorruptIndexException, IOException {
    ensureOpen();
    int i = readerIndex(n);                          // find segment num
    return subReaders[i].document(n - starts[i], fieldSelector);    // dispatch to segment reader
  }
  /** Returns true if document {@code n} is marked deleted, dispatching to
   * the segment that owns it. */
  @Override
  public boolean isDeleted(int n) {
    // Don't call ensureOpen() here (it could affect performance)
    final int i = readerIndex(n);                           // find segment num
    return subReaders[i].isDeleted(n - starts[i]);    // dispatch to segment reader
  }
  /** Returns the cached deletions flag, set in {@link #initialize} and
   * maintained by {@link #doDelete} / {@link #doUndeleteAll}. */
  @Override
  public boolean hasDeletions() {
    // Don't call ensureOpen() here (it could affect performance)
    return hasDeletions;
  }
  /** Applies a deletion by dispatching to the owning segment; invalidates
   * the cached numDocs and raises the deletions flag. */
  @Override
  protected void doDelete(int n) throws CorruptIndexException, IOException {
    numDocs = -1;                             // invalidate cache
    int i = readerIndex(n);                   // find segment num
    subReaders[i].deleteDocument(n - starts[i]);      // dispatch to segment reader
    hasDeletions = true;
  }
  /** Clears deletions in every segment, then resets the deletions flag and
   * invalidates the cached numDocs. */
  @Override
  protected void doUndeleteAll() throws CorruptIndexException, IOException {
    for (int i = 0; i < subReaders.length; i++)
      subReaders[i].undeleteAll();
    hasDeletions = false;
    numDocs = -1;                                 // invalidate cache
  }
  /** Maps a composite doc number to the index of the segment that owns it. */
  private int readerIndex(int n) {    // find reader for doc n:
    return readerIndex(n, this.starts, this.subReaders.length);
  }
final static int readerIndex(int n, int[] starts, int numSubReaders) { // find reader for doc n:
int lo = 0; // search starts array
int hi = numSubReaders - 1; // for first element less
while (hi >= lo) {
int mid = (lo + hi) >>> 1;
int midValue = starts[mid];
if (n < midValue)
hi = mid - 1;
else if (n > midValue)
lo = mid + 1;
else { // found a match
while (mid+1 < numSubReaders && starts[mid+1] == midValue) {
mid++; // scan to last match
}
return mid;
}
}
return hi;
}
@Override
public boolean hasNorms(String field) throws IOException {
ensureOpen();
for (int i = 0; i < subReaders.length; i++) {
if (subReaders[i].hasNorms(field)) return true;
}
return false;
}
  /** Returns the whole-index norms array for {@code field}, assembling it
   * from the segments on first request and caching it in normsCache;
   * returns null when no segment has norms for the field. */
  @Override
  public synchronized byte[] norms(String field) throws IOException {
    ensureOpen();
    byte[] bytes = normsCache.get(field);
    if (bytes != null)
      return bytes;       // cache hit
    if (!hasNorms(field))
      return null;
    // Cache miss: concatenate each segment's norms at its start offset.
    bytes = new byte[maxDoc()];
    for (int i = 0; i < subReaders.length; i++)
      subReaders[i].norms(field, bytes, starts[i]);
    normsCache.put(field, bytes);      // update cache
    return bytes;
  }
  /** Copies the norms for {@code field} into {@code result} starting at
   * {@code offset}; when the field has no norms, fills with the default
   * norm for boost 1.0 instead. */
  @Override
  public synchronized void norms(String field, byte[] result, int offset)
      throws IOException {
    ensureOpen();
    byte[] bytes = normsCache.get(field);
    if (bytes==null && !hasNorms(field)) {
      // NOTE(review): fills through result.length rather than
      // offset + maxDoc() — presumably callers pass a buffer sized to the
      // copied range; confirm against callers.
      Arrays.fill(result, offset, result.length, DefaultSimilarity.encodeNorm(1.0f));
    } else if (bytes != null) {                         // cache hit
      System.arraycopy(bytes, 0, result, offset, maxDoc());
    } else {
      for (int i = 0; i < subReaders.length; i++) {     // read from segments
        subReaders[i].norms(field, result, offset + starts[i]);
      }
    }
  }
  /** Applies a norm update by dispatching to the owning segment; the
   * whole-index cache entry for the field is dropped first so it will be
   * rebuilt on next access. */
  @Override
  protected void doSetNorm(int n, String field, byte value)
      throws CorruptIndexException, IOException {
    synchronized (normsCache) {
      normsCache.remove(field);                         // clear cache
    }
    int i = readerIndex(n);                           // find segment num
    subReaders[i].setNorm(n-starts[i], field, value); // dispatch
  }
  /** Returns a merged term enumerator over all segments, unpositioned. */
  @Override
  public TermEnum terms() throws IOException {
    ensureOpen();
    return new MultiTermEnum(this, subReaders, starts, null);
  }
  /** Returns a merged term enumerator over all segments, positioned at
   * (or just after) {@code term}. */
  @Override
  public TermEnum terms(Term term) throws IOException {
    ensureOpen();
    return new MultiTermEnum(this, subReaders, starts, term);
  }
@Override
public int docFreq(Term t) throws IOException {
ensureOpen();
int total = 0; // sum freqs in segments
for (int i = 0; i < subReaders.length; i++)
total += subReaders[i].docFreq(t);
return total;
}
  /** Returns a {@link TermDocs} spanning all segments; seek it before use. */
  @Override
  public TermDocs termDocs() throws IOException {
    ensureOpen();
    return new MultiTermDocs(this, subReaders, starts);
  }
  /** Returns a {@link TermPositions} spanning all segments; seek it before use. */
  @Override
  public TermPositions termPositions() throws IOException {
    ensureOpen();
    return new MultiTermPositions(this, subReaders, starts);
  }
  /**
   * Tries to acquire the WriteLock on this directory. this method is only valid if this IndexReader is directory
   * owner.
   *
   * @throws StaleReaderException if the index has changed since this reader was opened
   * @throws CorruptIndexException if the index is corrupt
   * @throws org.apache.lucene.store.LockObtainFailedException
   * if another writer has this index open (<code>write.lock</code> could not be
   * obtained)
   * @throws IOException if there is a low-level IO error
   */
  @Override
  protected void acquireWriteLock() throws StaleReaderException, CorruptIndexException, LockObtainFailedException, IOException {
    if (readOnly) {
      // NOTE: we should not reach this code w/ the core
      // IndexReader classes; however, an external subclass
      // of IndexReader could reach this.
      ReadOnlySegmentReader.noWrite();
    }
    if (segmentInfos != null) {
      ensureOpen();
      if (stale)
        throw new StaleReaderException("IndexReader out of date and no longer valid for delete, undelete, or setNorm operations");
      if (writeLock == null) {
        // The local deliberately shadows the field: the field is only assigned
        // once the lock has actually been obtained.
        Lock writeLock = directory.makeLock(IndexWriter.WRITE_LOCK_NAME);
        if (!writeLock.obtain(IndexWriter.WRITE_LOCK_TIMEOUT)) // obtain write lock
          throw new LockObtainFailedException("Index locked for write: " + writeLock);
        this.writeLock = writeLock;
        // we have to check whether index has changed since this reader was opened.
        // if so, this reader is no longer valid for deletion; release the lock
        // again and mark the reader permanently stale.
        if (SegmentInfos.readCurrentVersion(directory) > segmentInfos.getVersion()) {
          stale = true;
          this.writeLock.release();
          this.writeLock = null;
          throw new StaleReaderException("IndexReader out of date and no longer valid for delete, undelete, or setNorm operations");
        }
      }
    }
  }
  /**
   * Commit changes resulting from delete, undeleteAll, or setNorm operations
   * <p/>
   * If an exception is hit, then either no changes or all changes will have been committed to the index (transactional
   * semantics).
   *
   * @param commitUserData opaque user data recorded in the new segments_N file
   * @throws IOException if there is a low-level IO error
   */
  @Override
  protected void doCommit(Map<String,String> commitUserData) throws IOException {
    if (hasChanges) {
      segmentInfos.setUserData(commitUserData);
      // Default deleter (for backwards compatibility) is
      // KeepOnlyLastCommitDeleter:
      IndexFileDeleter deleter = new IndexFileDeleter(directory,
                                                      deletionPolicy == null ? new KeepOnlyLastCommitDeletionPolicy() : deletionPolicy,
                                                      segmentInfos, null, null);
      // Checkpoint the state we are about to change, in
      // case we have to roll back:
      startCommit();
      boolean success = false;
      try {
        // Flush each segment's pending changes (deletes/norms) to disk.
        for (int i = 0; i < subReaders.length; i++)
          subReaders[i].commit();
        // Sync all files we just wrote
        final Collection<String> files = segmentInfos.files(directory, false);
        for (final String fileName : files) {
          if (!synced.contains(fileName)) {
            assert directory.fileExists(fileName);
            directory.sync(fileName);
            synced.add(fileName);
          }
        }
        // Writing the new segments_N is the atomic commit point.
        segmentInfos.commit(directory);
        success = true;
      } finally {
        if (!success) {
          // Rollback changes that were made to
          // SegmentInfos but failed to get [fully]
          // committed.  This way this reader instance
          // remains consistent (matched to what's
          // actually in the index):
          rollbackCommit();
          // Recompute deletable files & remove them (so
          // partially written .del files, etc, are
          // removed):
          deleter.refresh();
        }
      }
      // Have the deleter remove any now unreferenced
      // files due to this commit:
      deleter.checkpoint(segmentInfos, true);
      deleter.close();
      if (writeLock != null) {
        writeLock.release(); // release write lock
        writeLock = null;
      }
    }
    hasChanges = false;
  }
  /**
   * Snapshots commit-relevant state (the hasChanges flag, a clone of
   * segmentInfos, and each sub-reader's own state) so a failed commit can be
   * undone by {@link #rollbackCommit}.
   */
  void startCommit() {
    rollbackHasChanges = hasChanges;
    rollbackSegmentInfos = (SegmentInfos) segmentInfos.clone();
    for (int i = 0; i < subReaders.length; i++) {
      subReaders[i].startCommit();
    }
  }
  /** Restores the state captured by {@link #startCommit} after a failed commit. */
  void rollbackCommit() {
    hasChanges = rollbackHasChanges;
    for (int i = 0; i < segmentInfos.size(); i++) {
      // Rollback each segmentInfo.  Because the
      // SegmentReader holds a reference to the
      // SegmentInfo we can't [easily] just replace
      // segmentInfos, so we reset it in place instead:
      segmentInfos.info(i).reset(rollbackSegmentInfos.info(i));
    }
    rollbackSegmentInfos = null;
    for (int i = 0; i < subReaders.length; i++) {
      subReaders[i].rollbackCommit();
    }
  }
  /** Returns the user data map recorded in the segments_N this reader opened. */
  @Override
  public Map<String,String> getCommitUserData() {
    ensureOpen();
    return segmentInfos.getUserData();
  }
@Override
public boolean isCurrent() throws CorruptIndexException, IOException {
ensureOpen();
if (writer == null || writer.isClosed()) {
// we loaded SegmentInfos from the directory
return SegmentInfos.readCurrentVersion(directory) == segmentInfos.getVersion();
} else {
return writer.nrtIsCurrent(segmentInfosStart);
}
}
@Override
protected synchronized void doClose() throws IOException {
IOException ioe = null;
normsCache = null;
for (int i = 0; i < subReaders.length; i++) {
// try to close each reader, even if an exception is thrown
try {
subReaders[i].decRef();
} catch (IOException e) {
if (ioe == null) ioe = e;
}
}
// NOTE: only needed in case someone had asked for
// FieldCache for top-level reader (which is generally
// not a good idea):
FieldCache.DEFAULT.purge(this);
// throw the first exception
if (ioe != null) throw ioe;
}
  /** Returns the union of field names matching {@code fieldNames} across all sub-readers. */
  @Override
  public Collection<String> getFieldNames (IndexReader.FieldOption fieldNames) {
    ensureOpen();
    return getFieldNames(fieldNames, this.subReaders);
  }
static Collection<String> getFieldNames (IndexReader.FieldOption fieldNames, IndexReader[] subReaders) {
// maintain a unique set of field names
Set<String> fieldSet = new HashSet<String>();
for (IndexReader reader : subReaders) {
Collection<String> names = reader.getFieldNames(fieldNames);
fieldSet.addAll(names);
}
return fieldSet;
}
  /** Returns the wrapped sub-readers (the internal array itself, not a copy — callers must not modify it). */
  @Override
  public IndexReader[] getSequentialSubReaders() {
    return subReaders;
  }
  /** Returns the directory this index resides in. */
  @Override
  public Directory directory() {
    // Don't ensureOpen here -- in certain cases, when a
    // cloned/reopened reader needs to commit, it may call
    // this method on the closed original reader
    return directory;
  }
  /** Returns the term-infos index divisor this reader was opened with. */
  @Override
  public int getTermInfosIndexDivisor() {
    return termInfosIndexDivisor;
  }
  /**
   * Expert: return the IndexCommit that this reader has opened.
   * <p/>
   * <p><b>WARNING</b>: this API is new and experimental and may suddenly change.</p>
   *
   * @return a snapshot describing the commit point this reader sees
   * @throws IOException if the commit's file listing cannot be computed
   */
  @Override
  public IndexCommit getIndexCommit() throws IOException {
    return new ReaderCommit(segmentInfos, directory);
  }
/** @see org.apache.lucene.index.IndexReader#listCommits */
public static Collection<IndexCommit> listCommits(Directory dir) throws IOException {
final String[] files = dir.listAll();
Collection<IndexCommit> commits = new ArrayList<IndexCommit>();
SegmentInfos latest = new SegmentInfos();
latest.read(dir);
final long currentGen = latest.getGeneration();
commits.add(new ReaderCommit(latest, dir));
for(int i=0;i<files.length;i++) {
final String fileName = files[i];
if (fileName.startsWith(IndexFileNames.SEGMENTS) &&
!fileName.equals(IndexFileNames.SEGMENTS_GEN) &&
SegmentInfos.generationFromSegmentsFileName(fileName) < currentGen) {
SegmentInfos sis = new SegmentInfos();
try {
// IOException allowed to throw there, in case
// segments_N is corrupt
sis.read(dir, fileName);
} catch (FileNotFoundException fnfe) {
// LUCENE-948: on NFS (and maybe others), if
// you have writers switching back and forth
// between machines, it's very likely that the
// dir listing will be stale and will claim a
// file segments_X exists when in fact it
// doesn't. So, we catch this and handle it
// as if the file does not exist
sis = null;
}
if (sis != null)
commits.add(new ReaderCommit(sis, dir));
}
}
return commits;
}
  /**
   * IndexCommit view over a SegmentInfos snapshot.  All state is captured
   * eagerly in the constructor so the commit stays describable even after the
   * index moves on.  This view does not support deletion.
   */
  private static final class ReaderCommit extends IndexCommit {
    private String segmentsFileName;
    Collection<String> files;  // unmodifiable snapshot of the commit's files
    Directory dir;
    long generation;
    long version;
    final boolean isOptimized;
    final Map<String,String> userData;
    ReaderCommit(SegmentInfos infos, Directory dir) throws IOException {
      segmentsFileName = infos.getCurrentSegmentFileName();
      this.dir = dir;
      userData = infos.getUserData();
      // NOTE(review): second arg of files() presumably includes the
      // segments_N file itself — confirm against SegmentInfos.files.
      files = Collections.unmodifiableCollection(infos.files(dir, true));
      version = infos.getVersion();
      generation = infos.getGeneration();
      // "Optimized" == a single segment with no deletions.
      isOptimized = infos.size() == 1 && !infos.info(0).hasDeletions();
    }
    @Override
    public boolean isOptimized() {
      return isOptimized;
    }
    @Override
    public String getSegmentsFileName() {
      return segmentsFileName;
    }
    @Override
    public Collection<String> getFileNames() {
      return files;
    }
    @Override
    public Directory getDirectory() {
      return dir;
    }
    @Override
    public long getVersion() {
      return version;
    }
    @Override
    public long getGeneration() {
      return generation;
    }
    @Override
    public boolean isDeleted() {
      // A commit a reader has open is, by construction, not deleted.
      return false;
    }
    @Override
    public Map<String,String> getUserData() {
      return userData;
    }
    @Override
    public void delete() {
      throw new UnsupportedOperationException("This IndexCommit does not support deletions");
    }
  }
  /**
   * TermEnum that merges the sorted TermEnums of several sub-readers into one
   * sorted enumeration, using a priority queue of per-segment cursors.
   */
  static class MultiTermEnum extends TermEnum {
    IndexReader topReader; // used for matching TermEnum to TermDocs
    private SegmentMergeQueue queue;  // orders segment cursors by current term
    private Term term;                // current merged term (null when exhausted)
    private int docFreq;              // docFreq of `term` summed over matching segments
    final SegmentMergeInfo[] matchingSegments; // null terminated array of matching segments
    /**
     * @param topReader reader this enum belongs to (identity-checked by MultiTermDocs.seek)
     * @param readers   sub-readers to merge
     * @param starts    doc-id base of each sub-reader
     * @param t         if non-null, start at the first term &gt;= t
     */
    public MultiTermEnum(IndexReader topReader, IndexReader[] readers, int[] starts, Term t)
      throws IOException {
      this.topReader = topReader;
      queue = new SegmentMergeQueue(readers.length);
      matchingSegments = new SegmentMergeInfo[readers.length+1];
      for (int i = 0; i < readers.length; i++) {
        IndexReader reader = readers[i];
        TermEnum termEnum;
        if (t != null) {
          termEnum = reader.terms(t);
        } else
          termEnum = reader.terms();
        SegmentMergeInfo smi = new SegmentMergeInfo(starts[i], termEnum, reader);
        smi.ord = i;
        // A seeded enum (t != null) is already positioned; an unseeded one
        // must be advanced once before it holds a term.
        if (t == null ? smi.next() : termEnum.term() != null)
          queue.add(smi); // initialize queue
        else
          smi.close(); // segment contributes no terms
      }
      if (t != null && queue.size() > 0) {
        // Position on the first merged term so term()/docFreq() are valid.
        next();
      }
    }
    @Override
    public boolean next() throws IOException {
      // Advance every segment that matched the previous term; exhausted
      // segments are closed, the rest re-enter the queue.
      for (int i=0; i<matchingSegments.length; i++) {
        SegmentMergeInfo smi = matchingSegments[i];
        if (smi==null) break;
        if (smi.next())
          queue.add(smi);
        else
          smi.close(); // done with segment
      }
      int numMatchingSegments = 0;
      matchingSegments[0] = null;
      SegmentMergeInfo top = queue.top();
      if (top == null) {
        term = null;
        return false; // all segments exhausted
      }
      term = top.term;
      docFreq = 0;
      // Pop every segment positioned on the same smallest term, summing freqs
      // and remembering the matches for the next advance.
      while (top != null && term.compareTo(top.term) == 0) {
        matchingSegments[numMatchingSegments++] = top;
        queue.pop();
        docFreq += top.termEnum.docFreq(); // increment freq
        top = queue.top();
      }
      matchingSegments[numMatchingSegments] = null; // null-terminate
      return true;
    }
    /** Returns the current merged term, or null when the enum is exhausted. */
    @Override
    public Term term() {
      return term;
    }
    /** Returns the docFreq of the current term, summed over matching segments. */
    @Override
    public int docFreq() {
      return docFreq;
    }
    @Override
    public void close() throws IOException {
      // Closing the queue closes any SegmentMergeInfos still held by it.
      queue.close();
    }
  }
  /**
   * TermDocs over multiple sub-readers: walks the segments in order, mapping
   * each segment's doc ids into the composite space by adding the segment's
   * start offset.  When seeked via a MultiTermEnum from the same top-level
   * reader, only segments known to contain the term are visited.
   */
  static class MultiTermDocs implements TermDocs {
    IndexReader topReader; // used for matching TermEnum to TermDocs
    protected IndexReader[] readers;
    protected int[] starts;    // doc-id base of each sub-reader
    protected Term term;       // term this enum is positioned on (null until seek)
    protected int base = 0;    // doc-id base of the segment being scanned
    protected int pointer = 0; // index of the next segment to consult
    private TermDocs[] readerTermDocs;  // lazily-created per-segment enums
    protected TermDocs current; // per-segment TermDocs currently being scanned
    private MultiTermEnum tenum; // the term enum used for seeking... can be null
    int matchingSegmentPos; // position into the matching segments from tenum
    SegmentMergeInfo smi; // current segment merge info... can be null
    public MultiTermDocs(IndexReader topReader, IndexReader[] r, int[] s) {
      this.topReader = topReader;
      readers = r;
      starts = s;
      readerTermDocs = new TermDocs[r.length];
    }
    /** Current doc id, rebased into the composite reader's doc-id space. */
    public int doc() {
      return base + current.doc();
    }
    public int freq() {
      return current.freq();
    }
    /** Positions this enum at {@code term}, resetting all per-scan state. */
    public void seek(Term term) {
      this.term = term;
      this.base = 0;
      this.pointer = 0;
      this.current = null;
      this.tenum = null;
      this.smi = null;
      this.matchingSegmentPos = 0;
    }
    public void seek(TermEnum termEnum) throws IOException {
      seek(termEnum.term());
      // If the enum is a MultiTermEnum over the *same* top-level reader, reuse
      // its per-segment positioning so non-matching segments are skipped.
      if (termEnum instanceof MultiTermEnum) {
        tenum = (MultiTermEnum)termEnum;
        if (topReader != tenum.topReader)
          tenum = null;
      }
    }
    public boolean next() throws IOException {
      for(;;) {
        if (current!=null && current.next()) {
          return true;
        }
        else if (pointer < readers.length) {
          // Current segment exhausted (or not started): move to the next one.
          if (tenum != null) {
            // Only consult segments the term enum says contain the term.
            smi = tenum.matchingSegments[matchingSegmentPos++];
            if (smi==null) {
              pointer = readers.length; // no further matching segments
              return false;
            }
            pointer = smi.ord;
          }
          base = starts[pointer];
          current = termDocs(pointer++);
        } else {
          return false;
        }
      }
    }
    /** Optimized implementation: bulk-reads doc/freq pairs from the current segment, rebasing doc ids. */
    public int read(final int[] docs, final int[] freqs) throws IOException {
      while (true) {
        while (current == null) {
          if (pointer < readers.length) { // try next segment
            if (tenum != null) {
              smi = tenum.matchingSegments[matchingSegmentPos++];
              if (smi==null) {
                pointer = readers.length;
                return 0;
              }
              pointer = smi.ord;
            }
            base = starts[pointer];
            current = termDocs(pointer++);
          } else {
            return 0;
          }
        }
        int end = current.read(docs, freqs);
        if (end == 0) { // none left in segment
          current = null;
        } else { // got some
          final int b = base; // adjust doc numbers
          for (int i = 0; i < end; i++)
            docs[i] += b;
          return end;
        }
      }
    }
    /* A Possible future optimization could skip entire segments */
    public boolean skipTo(int target) throws IOException {
      for(;;) {
        // target is rebased into the current segment's doc-id space.
        if (current != null && current.skipTo(target-base)) {
          return true;
        } else if (pointer < readers.length) {
          if (tenum != null) {
            SegmentMergeInfo smi = tenum.matchingSegments[matchingSegmentPos++];
            if (smi==null) {
              pointer = readers.length;
              return false;
            }
            pointer = smi.ord;
          }
          base = starts[pointer];
          current = termDocs(pointer++);
        } else
          return false;
      }
    }
    /** Returns (creating on first use) segment i's TermDocs, seeked to the current term. */
    private TermDocs termDocs(int i) throws IOException {
      TermDocs result = readerTermDocs[i];
      if (result == null)
        result = readerTermDocs[i] = termDocs(readers[i]);
      if (smi != null) {
        // Seek via the segment's own TermEnum (already positioned on term).
        assert(smi.ord == i);
        assert(smi.termEnum.term().equals(term));
        result.seek(smi.termEnum);
      } else {
        result.seek(term);
      }
      return result;
    }
    protected TermDocs termDocs(IndexReader reader)
      throws IOException {
      // NOTE(review): when term is null, termDocs(null) is presumably the
      // "all documents" form of IndexReader.termDocs(Term) — confirm there.
      return term==null ? reader.termDocs(null) : reader.termDocs();
    }
    public void close() throws IOException {
      // Close every per-segment enum we ever created.
      for (int i = 0; i < readerTermDocs.length; i++) {
        if (readerTermDocs[i] != null)
          readerTermDocs[i].close();
      }
    }
  }
  /**
   * TermPositions over multiple sub-readers.  Extends MultiTermDocs; the
   * per-segment enums created below are TermPositions instances, stored by the
   * superclass as TermDocs, so the delegating methods downcast {@code current}.
   */
  static class MultiTermPositions extends MultiTermDocs implements TermPositions {
    public MultiTermPositions(IndexReader topReader, IndexReader[] r, int[] s) {
      super(topReader,r,s);
    }
    @Override
    protected TermDocs termDocs(IndexReader reader) throws IOException {
      // Always a TermPositions, so the casts below are safe.
      return reader.termPositions();
    }
    public int nextPosition() throws IOException {
      return ((TermPositions)current).nextPosition();
    }
    public int getPayloadLength() {
      return ((TermPositions)current).getPayloadLength();
    }
    public byte[] getPayload(byte[] data, int offset) throws IOException {
      return ((TermPositions)current).getPayload(data, offset);
    }
    // TODO: Remove warning after API has been finalized
    public boolean isPayloadAvailable() {
      return ((TermPositions) current).isPayloadAvailable();
    }
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/DirectoryReader.java | Java | art | 41,205 |
package org.apache.lucene.index;
/**
* Copyright 2007 The Apache Software Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * Maps term-vector data into a caller-defined structure instead of the
 * parallel-array form returned by
 * {@link org.apache.lucene.index.IndexReader#getTermFreqVector(int,String)}.
 * <p/>
 * Implementations are responsible for their own thread-safety.
 *
 *
 **/
public abstract class TermVectorMapper {
  private boolean ignoringPositions;
  private boolean ignoringOffsets;

  /** Creates a mapper that accepts both positions and offsets when they are stored. */
  protected TermVectorMapper() {
  }

  /**
   * @param ignoringPositions true if this mapper should tell Lucene to ignore positions even if they are stored
   * @param ignoringOffsets   similar to ignoringPositions
   */
  protected TermVectorMapper(boolean ignoringPositions, boolean ignoringOffsets) {
    this.ignoringPositions = ignoringPositions;
    this.ignoringOffsets = ignoringOffsets;
  }

  /**
   * Tells the mapper what to expect for a field: the field name, the number of
   * terms, and whether offset/position data will be supplied.  Called once per
   * field, always before any {@link #map(String,int,TermVectorOffsetInfo[],int[])}
   * calls for that field.
   *
   * @param field          the field the vector is for
   * @param numTerms       the number of terms that need to be mapped
   * @param storeOffsets   true if the mapper should expect offset information
   * @param storePositions true if the mapper should expect position information
   */
  public abstract void setExpectations(String field, int numTerms, boolean storeOffsets, boolean storePositions);

  /**
   * Maps one term of the vector into the implementation's own structure.
   *
   * @param term      the term to add to the vector
   * @param frequency the frequency of the term in the document
   * @param offsets   null if offsets are not stored, otherwise the term's offsets in the field
   * @param positions null if positions are not stored, otherwise the term's positions in the field
   */
  public abstract void map(String term, int frequency, TermVectorOffsetInfo [] offsets, int [] positions);

  /**
   * Indicates whether Lucene may skip stored positions entirely for this
   * mapper.  Defaults to false, meaning positions are loaded when stored.
   *
   * @return true to skip stored positions
   */
  public boolean isIgnoringPositions() {
    return ignoringPositions;
  }

  /**
   * Same principle as {@link #isIgnoringPositions()}, applied to offsets;
   * false by default.
   *
   * @return true to skip stored offsets
   */
  public boolean isIgnoringOffsets() {
    return ignoringOffsets;
  }

  /**
   * Receives the index of the document whose term vector is currently being
   * mapped, once per top-level call to a term vector reader.  The default
   * implementation ignores the number; override when it is needed.
   * <p/>
   * NOTE: document numbers are internal to Lucene and subject to change
   * depending on indexing operations.
   *
   * @param documentNumber index of the document currently being mapped
   */
  public void setDocumentNumber(int documentNumber) {
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/TermVectorMapper.java | Java | art | 3,820 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.util.StringHelper;
import org.apache.lucene.util.UnicodeUtil;
import java.io.IOException;
/**
 * Writes the three term-vector files of a segment: the index file (tvx), the
 * document file (tvd) and the field file (tvf).
 */
final class TermVectorsWriter {
  private IndexOutput tvx = null, tvd = null, tvf = null;
  private FieldInfos fieldInfos;
  // Two scratch buffers, alternated between consecutive terms so each term's
  // UTF-8 bytes can be prefix-delta encoded against the previous term's bytes.
  final UnicodeUtil.UTF8Result[] utf8Results = new UnicodeUtil.UTF8Result[] {new UnicodeUtil.UTF8Result(),
                                                                             new UnicodeUtil.UTF8Result()};
  /**
   * Opens the tvx/tvd/tvf outputs for {@code segment} and stamps each with the
   * current format version.  If opening any of the files fails, the ones
   * already opened are closed again so no file handles leak (the original
   * exception is propagated; close failures during cleanup are swallowed).
   */
  public TermVectorsWriter(Directory directory, String segment,
                           FieldInfos fieldInfos)
    throws IOException {
    boolean success = false;
    try {
      // Open files for TermVector storage
      tvx = directory.createOutput(segment + "." + IndexFileNames.VECTORS_INDEX_EXTENSION);
      tvx.writeInt(TermVectorsReader.FORMAT_CURRENT);
      tvd = directory.createOutput(segment + "." + IndexFileNames.VECTORS_DOCUMENTS_EXTENSION);
      tvd.writeInt(TermVectorsReader.FORMAT_CURRENT);
      tvf = directory.createOutput(segment + "." + IndexFileNames.VECTORS_FIELDS_EXTENSION);
      tvf.writeInt(TermVectorsReader.FORMAT_CURRENT);
      this.fieldInfos = fieldInfos;
      success = true;
    } finally {
      if (!success) {
        // Constructor is failing: release whatever was opened so far.
        closeStream(tvf, closeStream(tvd, closeStream(tvx, null)));
      }
    }
  }
  /**
   * Add a complete document specified by all its term vectors. If document has no
   * term vectors, add value for tvx.
   *
   * @param vectors the document's term vectors, or null for none
   * @throws IOException if writing any of the three streams fails
   */
  public final void addAllDocVectors(TermFreqVector[] vectors)
    throws IOException {
    // The tvx entry records where this document starts in tvd and tvf.
    tvx.writeLong(tvd.getFilePointer());
    tvx.writeLong(tvf.getFilePointer());
    if (vectors != null) {
      final int numFields = vectors.length;
      tvd.writeVInt(numFields);
      long[] fieldPointers = new long[numFields];
      for (int i=0; i<numFields; i++) {
        fieldPointers[i] = tvf.getFilePointer();
        final int fieldNumber = fieldInfos.fieldNumber(vectors[i].getField());
        // 1st pass: write field numbers to tvd
        tvd.writeVInt(fieldNumber);
        final int numTerms = vectors[i].size();
        tvf.writeVInt(numTerms);
        final TermPositionVector tpVector;
        final byte bits;
        final boolean storePositions;
        final boolean storeOffsets;
        if (vectors[i] instanceof TermPositionVector) {
          // May have positions & offsets
          tpVector = (TermPositionVector) vectors[i];
          storePositions = tpVector.size() > 0 && tpVector.getTermPositions(0) != null;
          storeOffsets = tpVector.size() > 0 && tpVector.getOffsets(0) != null;
          bits = (byte) ((storePositions ? TermVectorsReader.STORE_POSITIONS_WITH_TERMVECTOR : 0) +
                         (storeOffsets ? TermVectorsReader.STORE_OFFSET_WITH_TERMVECTOR : 0));
        } else {
          tpVector = null;
          bits = 0;
          storePositions = false;
          storeOffsets = false;
        }
        tvf.writeVInt(bits);
        final String[] terms = vectors[i].getTerms();
        final int[] freqs = vectors[i].getTermFrequencies();
        // Terms are written prefix-compressed: shared-prefix length, delta
        // length, then only the differing suffix bytes.
        int utf8Upto = 0;
        utf8Results[1].length = 0;
        for (int j=0; j<numTerms; j++) {
          UnicodeUtil.UTF16toUTF8(terms[j], 0, terms[j].length(), utf8Results[utf8Upto]);
          int start = StringHelper.bytesDifference(utf8Results[1-utf8Upto].result,
                                                   utf8Results[1-utf8Upto].length,
                                                   utf8Results[utf8Upto].result,
                                                   utf8Results[utf8Upto].length);
          int length = utf8Results[utf8Upto].length - start;
          tvf.writeVInt(start);                           // write shared prefix length
          tvf.writeVInt(length);                          // write delta length
          tvf.writeBytes(utf8Results[utf8Upto].result, start, length);  // write delta bytes
          utf8Upto = 1-utf8Upto;
          final int termFreq = freqs[j];
          tvf.writeVInt(termFreq);
          if (storePositions) {
            final int[] positions = tpVector.getTermPositions(j);
            if (positions == null)
              throw new IllegalStateException("Trying to write positions that are null!");
            assert positions.length == termFreq;
            // use delta encoding for positions
            int lastPosition = 0;
            for(int k=0;k<positions.length;k++) {
              final int position = positions[k];
              tvf.writeVInt(position-lastPosition);
              lastPosition = position;
            }
          }
          if (storeOffsets) {
            final TermVectorOffsetInfo[] offsets = tpVector.getOffsets(j);
            if (offsets == null)
              throw new IllegalStateException("Trying to write offsets that are null!");
            assert offsets.length == termFreq;
            // use delta encoding for offsets: start is delta vs previous end,
            // end is delta vs its own start
            int lastEndOffset = 0;
            for(int k=0;k<offsets.length;k++) {
              final int startOffset = offsets[k].getStartOffset();
              final int endOffset = offsets[k].getEndOffset();
              tvf.writeVInt(startOffset-lastEndOffset);
              tvf.writeVInt(endOffset-startOffset);
              lastEndOffset = endOffset;
            }
          }
        }
      }
      // 2nd pass: write field pointers to tvd (delta-encoded)
      if (numFields > 1) {
        long lastFieldPointer = fieldPointers[0];
        for (int i=1; i<numFields; i++) {
          final long fieldPointer = fieldPointers[i];
          tvd.writeVLong(fieldPointer-lastFieldPointer);
          lastFieldPointer = fieldPointer;
        }
      }
    } else
      tvd.writeVInt(0);   // document has no term vectors
  }
  /**
   * Do a bulk copy of numDocs documents from reader to our
   * streams.  This is used to expedite merging, if the
   * field numbers are congruent.
   */
  final void addRawDocuments(TermVectorsReader reader, int[] tvdLengths, int[] tvfLengths, int numDocs) throws IOException {
    long tvdPosition = tvd.getFilePointer();
    long tvfPosition = tvf.getFilePointer();
    long tvdStart = tvdPosition;
    long tvfStart = tvfPosition;
    // Write the tvx entries first, advancing by each document's raw length...
    for(int i=0;i<numDocs;i++) {
      tvx.writeLong(tvdPosition);
      tvdPosition += tvdLengths[i];
      tvx.writeLong(tvfPosition);
      tvfPosition += tvfLengths[i];
    }
    // ...then copy the raw bytes in one shot per stream.
    tvd.copyBytes(reader.getTvdStream(), tvdPosition-tvdStart);
    tvf.copyBytes(reader.getTvfStream(), tvfPosition-tvfStart);
    assert tvd.getFilePointer() == tvdPosition;
    assert tvf.getFilePointer() == tvfPosition;
  }
  /** Close all streams, re-throwing the first exception encountered. */
  final void close() throws IOException {
    // make an effort to close all streams we can but remember and re-throw
    // the first exception encountered in this process
    IOException keep = null;
    keep = closeStream(tvx, keep);
    keep = closeStream(tvd, keep);
    keep = closeStream(tvf, keep);
    if (keep != null) throw (IOException) keep.fillInStackTrace();
  }
  /**
   * Closes {@code out} if non-null.  Returns {@code first} if it is already
   * non-null, otherwise any IOException thrown by the close (or null).
   */
  private static IOException closeStream(IndexOutput out, IOException first) {
    if (out != null) {
      try {
        out.close();
      } catch (IOException e) {
        if (first == null) {
          first = e;
        }
      }
    }
    return first;
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/TermVectorsWriter.java | Java | art | 7,985 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.io.Closeable;
/** Abstract class for enumerating terms.
  <p>Term enumerations are always ordered by Term.compareTo().  Each term in
  the enumeration is greater than all that precede it. */
public abstract class TermEnum implements Closeable {
  /** Increments the enumeration to the next element.  True if one exists.*/
  public abstract boolean next() throws IOException;
  /** Returns the current Term in the enumeration.
   * NOTE(review): implementations appear to return null when exhausted
   * (see DirectoryReader.MultiTermEnum) — confirm for all subclasses. */
  public abstract Term term();
  /** Returns the docFreq of the current Term in the enumeration.*/
  public abstract int docFreq();
  /** Closes the enumeration to further activity, freeing resources. */
  public abstract void close() throws IOException;
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/TermEnum.java | Java | art | 1,570 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.store.IndexInput;
import java.io.IOException;
final class SegmentTermPositions
extends SegmentTermDocs implements TermPositions {
private IndexInput proxStream;
private int proxCount;
private int position;
// the current payload length
private int payloadLength;
// indicates whether the payload of the current position has
// been read from the proxStream yet
private boolean needToLoadPayload;
// these variables are being used to remember information
// for a lazy skip
private long lazySkipPointer = -1;
private int lazySkipProxCount = 0;
SegmentTermPositions(SegmentReader p) {
super(p);
this.proxStream = null; // the proxStream will be cloned lazily when nextPosition() is called for the first time
}
@Override
final void seek(TermInfo ti, Term term) throws IOException {
super.seek(ti, term);
if (ti != null)
lazySkipPointer = ti.proxPointer;
lazySkipProxCount = 0;
proxCount = 0;
payloadLength = 0;
needToLoadPayload = false;
}
@Override
public final void close() throws IOException {
super.close();
if (proxStream != null) proxStream.close();
}
public final int nextPosition() throws IOException {
if (currentFieldOmitTermFreqAndPositions)
// This field does not store term freq, positions, payloads
return 0;
// perform lazy skips if necessary
lazySkip();
proxCount--;
return position += readDeltaPosition();
}
private final int readDeltaPosition() throws IOException {
int delta = proxStream.readVInt();
if (currentFieldStoresPayloads) {
// if the current field stores payloads then
// the position delta is shifted one bit to the left.
// if the LSB is set, then we have to read the current
// payload length
if ((delta & 1) != 0) {
payloadLength = proxStream.readVInt();
}
delta >>>= 1;
needToLoadPayload = true;
}
return delta;
}
@Override
protected final void skippingDoc() throws IOException {
// we remember to skip a document lazily
lazySkipProxCount += freq;
}
@Override
public final boolean next() throws IOException {
// we remember to skip the remaining positions of the current
// document lazily
lazySkipProxCount += proxCount;
if (super.next()) { // run super
proxCount = freq; // note frequency
position = 0; // reset position
return true;
}
return false;
}
@Override
public final int read(final int[] docs, final int[] freqs) {
throw new UnsupportedOperationException("TermPositions does not support processing multiple documents in one call. Use TermDocs instead.");
}
/** Called by super.skipTo(). */
@Override
protected void skipProx(long proxPointer, int payloadLength) throws IOException {
// we save the pointer, we might have to skip there lazily
lazySkipPointer = proxPointer;
lazySkipProxCount = 0;
proxCount = 0;
this.payloadLength = payloadLength;
needToLoadPayload = false;
}
private void skipPositions(int n) throws IOException {
assert !currentFieldOmitTermFreqAndPositions;
for (int f = n; f > 0; f--) { // skip unread positions
readDeltaPosition();
skipPayload();
}
}
private void skipPayload() throws IOException {
if (needToLoadPayload && payloadLength > 0) {
proxStream.seek(proxStream.getFilePointer() + payloadLength);
}
needToLoadPayload = false;
}
// It is not always necessary to move the prox pointer
// to a new document after the freq pointer has been moved.
// Consider for example a phrase query with two terms:
// the freq pointer for term 1 has to move to document x
// to answer the question if the term occurs in that document. But
// only if term 2 also matches document x, the positions have to be
// read to figure out if term 1 and term 2 appear next
// to each other in document x and thus satisfy the query.
// So we move the prox pointer lazily to the document
// as soon as positions are requested.
private void lazySkip() throws IOException {
if (proxStream == null) {
// clone lazily
proxStream = (IndexInput) parent.core.proxStream.clone();
}
// we might have to skip the current payload
// if it was not read yet
skipPayload();
if (lazySkipPointer != -1) {
proxStream.seek(lazySkipPointer);
lazySkipPointer = -1;
}
if (lazySkipProxCount != 0) {
skipPositions(lazySkipProxCount);
lazySkipProxCount = 0;
}
}
public int getPayloadLength() {
return payloadLength;
}
/**
 * Reads the payload at the current position into {@code data} starting at
 * {@code offset}, allocating a fresh array when the caller's buffer is
 * null or too small.  May be called at most once per position.
 */
public byte[] getPayload(byte[] data, int offset) throws IOException {
if (!needToLoadPayload) {
throw new IOException("Either no payload exists at this term position or an attempt was made to load it more than once.");
}
// decide where the payload bytes should land
final byte[] dest;
final int destOffset;
final boolean callerBufferUsable = data != null && data.length - offset >= payloadLength;
if (callerBufferUsable) {
dest = data;
destOffset = offset;
} else {
// caller's buffer can't hold the payload: allocate an exact-size array
dest = new byte[payloadLength];
destOffset = 0;
}
proxStream.readBytes(dest, destOffset, payloadLength);
needToLoadPayload = false;
return dest;
}
/** True if a payload exists at this position and has not been read yet. */
public boolean isPayloadAvailable() {
return needToLoadPayload && payloadLength > 0;
}
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/SegmentTermPositions.java | Java | art | 6,382 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
/**
 * Per-thread hook of the inverted-document indexing chain: one instance
 * exists per indexing thread and hands out an
 * {@link InvertedDocConsumerPerField} for each field being inverted.
 */
abstract class InvertedDocConsumerPerThread {
/** Called once before the fields of a new document are processed. */
abstract void startDocument() throws IOException;
/** Returns the per-field consumer that will receive tokens for this field. */
abstract InvertedDocConsumerPerField addField(DocInverterPerField docInverterPerField, FieldInfo fieldInfo);
/** Called after all fields of the document are processed; may return a
 *  DocWriter holding pending per-doc output, or null. */
abstract DocumentsWriter.DocWriter finishDocument() throws IOException;
/** Discards all buffered state (called on a non-recoverable error). */
abstract void abort();
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/InvertedDocConsumerPerThread.java | Java | art | 1,177 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.RAMOutputStream;
import org.apache.lucene.util.ArrayUtil;
import java.io.IOException;
import java.util.Collection;
import java.util.Map;
/**
 * Consumes per-document term-vector data and appends it to the shared
 * doc-store term-vector files: tvx (index), tvd (documents), tvf (fields).
 * One instance is shared by all indexing threads; entry points that touch
 * the output files are synchronized.  Per-document state lives in recycled
 * {@link PerDoc} instances.
 */
final class TermVectorsTermsWriter extends TermsHashConsumer {
final DocumentsWriter docWriter;
TermVectorsWriter termVectorsWriter;
// free list of recycled per-document state holders (see getPerDoc()/free())
PerDoc[] docFreeList = new PerDoc[1];
int freeCount;
IndexOutput tvx;
IndexOutput tvd;
IndexOutput tvf;
// docID (including doc-store offset) of the next document to be written
int lastDocID;
public TermVectorsTermsWriter(DocumentsWriter docWriter) {
this.docWriter = docWriter;
}
@Override
public TermsHashConsumerPerThread addThread(TermsHashPerThread termsHashPerThread) {
return new TermVectorsTermsWriterPerThread(termsHashPerThread, this);
}
@Override
void createPostings(RawPostingList[] postings, int start, int count) {
final int end = start + count;
for(int i=start;i<end;i++)
postings[i] = new PostingList();
}
/** Flushes any buffered term-vector bytes to disk and resets all
 *  per-thread / per-field hash state for the next segment. */
@Override
synchronized void flush(Map<TermsHashConsumerPerThread,Collection<TermsHashConsumerPerField>> threadsAndFields, final SegmentWriteState state) throws IOException {
if (tvx != null) {
if (state.numDocsInStore > 0)
// In case there are some final documents that we
// didn't see (because they hit a non-aborting exception):
fill(state.numDocsInStore - docWriter.getDocStoreOffset());
tvx.flush();
tvd.flush();
tvf.flush();
}
for (Map.Entry<TermsHashConsumerPerThread,Collection<TermsHashConsumerPerField>> entry : threadsAndFields.entrySet()) {
for (final TermsHashConsumerPerField field : entry.getValue() ) {
TermVectorsTermsWriterPerField perField = (TermVectorsTermsWriterPerField) field;
perField.termsHashPerField.reset();
perField.shrinkHash();
}
TermVectorsTermsWriterPerThread perThread = (TermVectorsTermsWriterPerThread) entry.getKey();
perThread.termsHashPerThread.reset(true);
}
}
/** Closes the three doc-store outputs, sanity-checking that the tvx file
 *  length matches the number of docs written, and records/unregisters the
 *  flushed file names. */
@Override
synchronized void closeDocStore(final SegmentWriteState state) throws IOException {
if (tvx != null) {
// At least one doc in this run had term vectors
// enabled
fill(state.numDocsInStore - docWriter.getDocStoreOffset());
tvx.close();
tvf.close();
tvd.close();
tvx = null;
assert state.docStoreSegmentName != null;
final String fileName = state.docStoreSegmentName + "." + IndexFileNames.VECTORS_INDEX_EXTENSION;
// tvx layout: 4-byte header + two longs (16 bytes) per document
if (4+((long) state.numDocsInStore)*16 != state.directory.fileLength(fileName))
throw new RuntimeException("after flush: tvx size mismatch: " + state.numDocsInStore + " docs vs " + state.directory.fileLength(fileName) + " length in bytes of " + fileName + " file exists?=" + state.directory.fileExists(fileName));
state.flushedFiles.add(state.docStoreSegmentName + "." + IndexFileNames.VECTORS_INDEX_EXTENSION);
state.flushedFiles.add(state.docStoreSegmentName + "." + IndexFileNames.VECTORS_FIELDS_EXTENSION);
state.flushedFiles.add(state.docStoreSegmentName + "." + IndexFileNames.VECTORS_DOCUMENTS_EXTENSION);
docWriter.removeOpenFile(state.docStoreSegmentName + "." + IndexFileNames.VECTORS_INDEX_EXTENSION);
docWriter.removeOpenFile(state.docStoreSegmentName + "." + IndexFileNames.VECTORS_FIELDS_EXTENSION);
docWriter.removeOpenFile(state.docStoreSegmentName + "." + IndexFileNames.VECTORS_DOCUMENTS_EXTENSION);
lastDocID = 0;
}
}
int allocCount;
/** Returns a recycled PerDoc, or allocates a new one, growing the free
 *  list so every outstanding instance can later be recycled. */
synchronized PerDoc getPerDoc() {
if (freeCount == 0) {
allocCount++;
if (allocCount > docFreeList.length) {
// Grow our free list up front to make sure we have
// enough space to recycle all outstanding PerDoc
// instances
assert allocCount == 1+docFreeList.length;
docFreeList = new PerDoc[ArrayUtil.getNextSize(allocCount)];
}
return new PerDoc();
} else
return docFreeList[--freeCount];
}
/** Fills in no-term-vectors for all docs we haven't seen
 * since the last doc that had term vectors. */
void fill(int docID) throws IOException {
final int docStoreOffset = docWriter.getDocStoreOffset();
final int end = docID+docStoreOffset;
if (lastDocID < end) {
final long tvfPosition = tvf.getFilePointer();
while(lastDocID < end) {
// a doc with zero vector fields still needs its tvd/tvf pointers in tvx
tvx.writeLong(tvd.getFilePointer());
tvd.writeVInt(0);
tvx.writeLong(tvfPosition);
lastDocID++;
}
}
}
/** Lazily creates the tvx/tvd/tvf outputs and writes their format headers. */
synchronized void initTermVectorsWriter() throws IOException {
if (tvx == null) {
final String docStoreSegment = docWriter.getDocStoreSegment();
if (docStoreSegment == null)
return;
assert docStoreSegment != null;
// If we hit an exception while init'ing the term
// vector output files, we must abort this segment
// because those files will be in an unknown
// state:
tvx = docWriter.directory.createOutput(docStoreSegment + "." + IndexFileNames.VECTORS_INDEX_EXTENSION);
tvd = docWriter.directory.createOutput(docStoreSegment + "." + IndexFileNames.VECTORS_DOCUMENTS_EXTENSION);
tvf = docWriter.directory.createOutput(docStoreSegment + "." + IndexFileNames.VECTORS_FIELDS_EXTENSION);
tvx.writeInt(TermVectorsReader.FORMAT_CURRENT);
tvd.writeInt(TermVectorsReader.FORMAT_CURRENT);
tvf.writeInt(TermVectorsReader.FORMAT_CURRENT);
docWriter.addOpenFile(docStoreSegment + "." + IndexFileNames.VECTORS_INDEX_EXTENSION);
docWriter.addOpenFile(docStoreSegment + "." + IndexFileNames.VECTORS_FIELDS_EXTENSION);
docWriter.addOpenFile(docStoreSegment + "." + IndexFileNames.VECTORS_DOCUMENTS_EXTENSION);
lastDocID = 0;
}
}
/** Appends one document's buffered term vectors to the real outputs,
 *  then recycles the PerDoc. */
synchronized void finishDocument(PerDoc perDoc) throws IOException {
assert docWriter.writer.testPoint("TermVectorsTermsWriter.finishDocument start");
initTermVectorsWriter();
fill(perDoc.docID);
// Append term vectors to the real outputs:
tvx.writeLong(tvd.getFilePointer());
tvx.writeLong(tvf.getFilePointer());
tvd.writeVInt(perDoc.numVectorFields);
if (perDoc.numVectorFields > 0) {
for(int i=0;i<perDoc.numVectorFields;i++)
tvd.writeVInt(perDoc.fieldNumbers[i]);
assert 0 == perDoc.fieldPointers[0];
long lastPos = perDoc.fieldPointers[0];
// per-field tvf offsets are delta-coded against the previous field
for(int i=1;i<perDoc.numVectorFields;i++) {
long pos = perDoc.fieldPointers[i];
tvd.writeVLong(pos-lastPos);
lastPos = pos;
}
perDoc.perDocTvf.writeTo(tvf);
perDoc.numVectorFields = 0;
}
assert lastDocID == perDoc.docID + docWriter.getDocStoreOffset();
lastDocID++;
perDoc.reset();
free(perDoc);
assert docWriter.writer.testPoint("TermVectorsTermsWriter.finishDocument end");
}
public boolean freeRAM() {
// We don't hold any state beyond one doc, so we don't
// free persistent RAM here
return false;
}
/** Best-effort close of the three outputs; close failures are deliberately
 *  swallowed because we are already aborting. */
@Override
public void abort() {
if (tvx != null) {
try {
tvx.close();
} catch (Throwable t) {
}
tvx = null;
}
if (tvd != null) {
try {
tvd.close();
} catch (Throwable t) {
}
tvd = null;
}
if (tvf != null) {
try {
tvf.close();
} catch (Throwable t) {
}
tvf = null;
}
lastDocID = 0;
}
/** Returns a PerDoc to the free list for reuse. */
synchronized void free(PerDoc doc) {
assert freeCount < docFreeList.length;
docFreeList[freeCount++] = doc;
}
/** Buffered term-vector bytes for one document, plus the field numbers
 *  and tvf start offsets of each vectorized field. */
class PerDoc extends DocumentsWriter.DocWriter {
final DocumentsWriter.PerDocBuffer buffer = docWriter.newPerDocBuffer();
RAMOutputStream perDocTvf = new RAMOutputStream(buffer);
int numVectorFields;
int[] fieldNumbers = new int[1];
long[] fieldPointers = new long[1];
void reset() {
perDocTvf.reset();
buffer.recycle();
numVectorFields = 0;
}
@Override
void abort() {
reset();
free(this);
}
/** Records that this field's vector data starts at the current tvf position. */
void addField(final int fieldNumber) {
if (numVectorFields == fieldNumbers.length) {
fieldNumbers = ArrayUtil.grow(fieldNumbers);
fieldPointers = ArrayUtil.grow(fieldPointers);
}
fieldNumbers[numVectorFields] = fieldNumber;
fieldPointers[numVectorFields] = perDocTvf.getFilePointer();
numVectorFields++;
}
@Override
public long sizeInBytes() {
return buffer.getSizeInBytes();
}
@Override
public void finish() throws IOException {
finishDocument(this);
}
}
static final class PostingList extends RawPostingList {
int freq; // How many times this term occurred in the current doc
int lastOffset; // Last offset we saw
int lastPosition; // Last position where this term occurred
}
@Override
int bytesPerPosting() {
// base posting size plus the three ints declared in PostingList
return RawPostingList.BYTES_SIZE + 3 * DocumentsWriter.INT_NUM_BYTE;
}
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/TermVectorsTermsWriter.java | Java | art | 9,815 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.document.Document;
import org.apache.lucene.document.FieldSelector;
import org.apache.lucene.store.Directory;
import org.apache.lucene.search.FieldCache; // not great (circular); used only to purge FieldCache entry on close
import java.io.IOException;
import java.util.Collection;
import java.util.Map;
/** A <code>FilterIndexReader</code> contains another IndexReader, which it
* uses as its basic source of data, possibly transforming the data along the
* way or providing additional functionality. The class
* <code>FilterIndexReader</code> itself simply implements all abstract methods
* of <code>IndexReader</code> with versions that pass all requests to the
* contained index reader. Subclasses of <code>FilterIndexReader</code> may
* further override some of these methods and may also provide additional
* methods and fields.
*/
public class FilterIndexReader extends IndexReader {
/** Base class for filtering {@link TermDocs} implementations. */
public static class FilterTermDocs implements TermDocs {
// the wrapped TermDocs every call is delegated to
protected TermDocs in;
public FilterTermDocs(TermDocs in) { this.in = in; }
public void seek(Term term) throws IOException { in.seek(term); }
public void seek(TermEnum termEnum) throws IOException { in.seek(termEnum); }
public int doc() { return in.doc(); }
public int freq() { return in.freq(); }
public boolean next() throws IOException { return in.next(); }
public int read(int[] docs, int[] freqs) throws IOException {
return in.read(docs, freqs);
}
public boolean skipTo(int i) throws IOException { return in.skipTo(i); }
public void close() throws IOException { in.close(); }
}
/** Base class for filtering {@link TermPositions} implementations. */
public static class FilterTermPositions
extends FilterTermDocs implements TermPositions {
public FilterTermPositions(TermPositions in) { super(in); }
public int nextPosition() throws IOException {
// 'in' is declared as TermDocs in the superclass, hence the cast
return ((TermPositions) this.in).nextPosition();
}
public int getPayloadLength() {
return ((TermPositions) this.in).getPayloadLength();
}
public byte[] getPayload(byte[] data, int offset) throws IOException {
return ((TermPositions) this.in).getPayload(data, offset);
}
// TODO: Remove warning after API has been finalized
public boolean isPayloadAvailable() {
return ((TermPositions)this.in).isPayloadAvailable();
}
}
/** Base class for filtering {@link TermEnum} implementations. */
public static class FilterTermEnum extends TermEnum {
// the wrapped TermEnum every call is delegated to
protected TermEnum in;
public FilterTermEnum(TermEnum in) { this.in = in; }
@Override
public boolean next() throws IOException { return in.next(); }
@Override
public Term term() { return in.term(); }
@Override
public int docFreq() { return in.docFreq(); }
@Override
public void close() throws IOException { in.close(); }
}
// the wrapped reader; closed when this FilterIndexReader is closed
protected IndexReader in;
/**
 * <p>Construct a FilterIndexReader based on the specified base reader.
 * Directory locking for delete, undeleteAll, and setNorm operations is
 * left to the base reader.</p>
 * <p>Note that base reader is closed if this FilterIndexReader is closed.</p>
 * @param in specified base reader.
 */
public FilterIndexReader(IndexReader in) {
super();
this.in = in;
}
@Override
public Directory directory() {
return in.directory();
}
@Override
public TermFreqVector[] getTermFreqVectors(int docNumber)
throws IOException {
ensureOpen();
return in.getTermFreqVectors(docNumber);
}
@Override
public TermFreqVector getTermFreqVector(int docNumber, String field)
throws IOException {
ensureOpen();
return in.getTermFreqVector(docNumber, field);
}
@Override
public void getTermFreqVector(int docNumber, String field, TermVectorMapper mapper) throws IOException {
ensureOpen();
in.getTermFreqVector(docNumber, field, mapper);
}
@Override
public void getTermFreqVector(int docNumber, TermVectorMapper mapper) throws IOException {
ensureOpen();
in.getTermFreqVector(docNumber, mapper);
}
@Override
public int numDocs() {
// Don't call ensureOpen() here (it could affect performance)
return in.numDocs();
}
@Override
public int maxDoc() {
// Don't call ensureOpen() here (it could affect performance)
return in.maxDoc();
}
@Override
public Document document(int n, FieldSelector fieldSelector) throws CorruptIndexException, IOException {
ensureOpen();
return in.document(n, fieldSelector);
}
@Override
public boolean isDeleted(int n) {
// Don't call ensureOpen() here (it could affect performance)
return in.isDeleted(n);
}
@Override
public boolean hasDeletions() {
// Don't call ensureOpen() here (it could affect performance)
return in.hasDeletions();
}
@Override
protected void doUndeleteAll() throws CorruptIndexException, IOException {in.undeleteAll();}
@Override
public boolean hasNorms(String field) throws IOException {
ensureOpen();
return in.hasNorms(field);
}
@Override
public byte[] norms(String f) throws IOException {
ensureOpen();
return in.norms(f);
}
@Override
public void norms(String f, byte[] bytes, int offset) throws IOException {
ensureOpen();
in.norms(f, bytes, offset);
}
@Override
protected void doSetNorm(int d, String f, byte b) throws CorruptIndexException, IOException {
in.setNorm(d, f, b);
}
@Override
public TermEnum terms() throws IOException {
ensureOpen();
return in.terms();
}
@Override
public TermEnum terms(Term t) throws IOException {
ensureOpen();
return in.terms(t);
}
@Override
public int docFreq(Term t) throws IOException {
ensureOpen();
return in.docFreq(t);
}
@Override
public TermDocs termDocs() throws IOException {
ensureOpen();
return in.termDocs();
}
@Override
public TermDocs termDocs(Term term) throws IOException {
ensureOpen();
return in.termDocs(term);
}
@Override
public TermPositions termPositions() throws IOException {
ensureOpen();
return in.termPositions();
}
@Override
protected void doDelete(int n) throws CorruptIndexException, IOException { in.deleteDocument(n); }
@Override
protected void doCommit(Map<String,String> commitUserData) throws IOException { in.commit(commitUserData); }
@Override
protected void doClose() throws IOException {
in.close();
// NOTE: only needed in case someone had asked for
// FieldCache for top-level reader (which is generally
// not a good idea):
FieldCache.DEFAULT.purge(this);
}
@Override
public Collection<String> getFieldNames(IndexReader.FieldOption fieldNames) {
ensureOpen();
return in.getFieldNames(fieldNames);
}
@Override
public long getVersion() {
ensureOpen();
return in.getVersion();
}
@Override
public boolean isCurrent() throws CorruptIndexException, IOException {
ensureOpen();
return in.isCurrent();
}
@Override
public boolean isOptimized() {
ensureOpen();
return in.isOptimized();
}
@Override
public IndexReader[] getSequentialSubReaders() {
return in.getSequentialSubReaders();
}
/** If the subclass of FilteredIndexReader modifies the
 * contents of the FieldCache, you must override this
 * method to provide a different key */
@Override
public Object getFieldCacheKey() {
return in.getFieldCacheKey();
}
/** If the subclass of FilteredIndexReader modifies the
 * deleted docs, you must override this method to provide
 * a different key */
@Override
public Object getDeletesCacheKey() {
return in.getDeletesCacheKey();
}
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/FilterIndexReader.java | Java | art | 8,668 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.Arrays;
import org.apache.lucene.store.IndexInput;
/**
* Implements the skip list reader for the default posting list format
* that stores positions and payloads.
*
*/
class DefaultSkipListReader extends MultiLevelSkipListReader {
// whether the field whose skip list is being read stores payloads;
// this changes the doc-delta encoding in readSkipData()
private boolean currentFieldStoresPayloads;
// per-level accumulated pointers/lengths, indexed by skip level
private long freqPointer[];
private long proxPointer[];
private int payloadLength[];
// values for the entry the last skipTo() landed on
private long lastFreqPointer;
private long lastProxPointer;
private int lastPayloadLength;
DefaultSkipListReader(IndexInput skipStream, int maxSkipLevels, int skipInterval) {
super(skipStream, maxSkipLevels, skipInterval);
freqPointer = new long[maxSkipLevels];
proxPointer = new long[maxSkipLevels];
payloadLength = new int[maxSkipLevels];
}
/** Re-initializes this reader for a new term's skip data. */
void init(long skipPointer, long freqBasePointer, long proxBasePointer, int df, boolean storesPayloads) {
super.init(skipPointer, df);
this.currentFieldStoresPayloads = storesPayloads;
lastFreqPointer = freqBasePointer;
lastProxPointer = proxBasePointer;
Arrays.fill(freqPointer, freqBasePointer);
Arrays.fill(proxPointer, proxBasePointer);
Arrays.fill(payloadLength, 0);
}
/** Returns the freq pointer of the doc to which the last call of
 * {@link MultiLevelSkipListReader#skipTo(int)} has skipped. */
long getFreqPointer() {
return lastFreqPointer;
}
/** Returns the prox pointer of the doc to which the last call of
 * {@link MultiLevelSkipListReader#skipTo(int)} has skipped. */
long getProxPointer() {
return lastProxPointer;
}
/** Returns the payload length of the payload stored just before
 * the doc to which the last call of {@link MultiLevelSkipListReader#skipTo(int)}
 * has skipped. */
int getPayloadLength() {
return lastPayloadLength;
}
@Override
protected void seekChild(int level) throws IOException {
super.seekChild(level);
// seed the child level with the values we had reached on this level
freqPointer[level] = lastFreqPointer;
proxPointer[level] = lastProxPointer;
payloadLength[level] = lastPayloadLength;
}
@Override
protected void setLastSkipData(int level) {
super.setLastSkipData(level);
lastFreqPointer = freqPointer[level];
lastProxPointer = proxPointer[level];
lastPayloadLength = payloadLength[level];
}
/** Decodes one skip entry; must read fields in exactly the order the
 *  writer emitted them (doc delta, then freq delta, then prox delta). */
@Override
protected int readSkipData(int level, IndexInput skipStream) throws IOException {
int delta;
if (currentFieldStoresPayloads) {
// the current field stores payloads.
// if the doc delta is odd then we have
// to read the current payload length
// because it differs from the length of the
// previous payload
delta = skipStream.readVInt();
if ((delta & 1) != 0) {
payloadLength[level] = skipStream.readVInt();
}
// strip the payload-changed flag bit to recover the real doc delta
delta >>>= 1;
} else {
delta = skipStream.readVInt();
}
freqPointer[level] += skipStream.readVInt();
proxPointer[level] += skipStream.readVInt();
return delta;
}
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/DefaultSkipListReader.java | Java | art | 3,856 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.RAMOutputStream;
/**
* This abstract class writes skip lists with multiple levels.
*
* Example for skipInterval = 3:
* c (skip level 2)
* c c c (skip level 1)
* x x x x x x x x x x (skip level 0)
* d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d d (posting list)
* 3 6 9 12 15 18 21 24 27 30 (df)
*
* d - document
* x - skip data
* c - skip data with child pointer
*
* Skip level i contains every skipInterval-th entry from skip level i-1.
* Therefore the number of entries on level i is: floor(df / ((skipInterval ^ (i + 1))).
*
* Each skip entry on a level i>0 contains a pointer to the corresponding skip entry in list i-1.
* This guarantees a logarithmic amount of skips to find the target document.
*
* While this class takes care of writing the different skip levels,
* subclasses must define the actual format of the skip data.
*
*/
abstract class MultiLevelSkipListWriter {
// number of levels in this skip list
private int numberOfSkipLevels;
// the skip interval in the list with level = 0
private int skipInterval;
// for every skip level a different buffer is used
private RAMOutputStream[] skipBuffer;
protected MultiLevelSkipListWriter(int skipInterval, int maxSkipLevels, int df) {
this.skipInterval = skipInterval;
// calculate the maximum number of skip levels for this document frequency
numberOfSkipLevels = df == 0 ? 0 : (int) Math.floor(Math.log(df) / Math.log(skipInterval));
// make sure it does not exceed maxSkipLevels
if (numberOfSkipLevels > maxSkipLevels) {
numberOfSkipLevels = maxSkipLevels;
}
}
/** Allocates one in-memory buffer per skip level. */
protected void init() {
skipBuffer = new RAMOutputStream[numberOfSkipLevels];
for (int i = 0; i < numberOfSkipLevels; i++) {
skipBuffer[i] = new RAMOutputStream();
}
}
protected void resetSkip() {
// creates new buffers or empties the existing ones
if (skipBuffer == null) {
init();
} else {
for (int i = 0; i < skipBuffer.length; i++) {
skipBuffer[i].reset();
}
}
}
/**
 * Subclasses must implement the actual skip data encoding in this method.
 *
 * @param level the level skip data shall be writing for
 * @param skipBuffer the skip buffer to write to
 */
protected abstract void writeSkipData(int level, IndexOutput skipBuffer) throws IOException;
/**
 * Writes the current skip data to the buffers. The current document frequency determines
 * the max level is skip data is to be written to.
 *
 * @param df the current document frequency
 * @throws IOException
 */
void bufferSkip(int df) throws IOException {
int numLevels;
// determine max level: level i gets an entry every skipInterval^(i+1) docs
for (numLevels = 0; (df % skipInterval) == 0 && numLevels < numberOfSkipLevels; df /= skipInterval) {
numLevels++;
}
long childPointer = 0;
for (int level = 0; level < numLevels; level++) {
writeSkipData(level, skipBuffer[level]);
long newChildPointer = skipBuffer[level].getFilePointer();
if (level != 0) {
// store child pointers for all levels except the lowest
skipBuffer[level].writeVLong(childPointer);
}
//remember the childPointer for the next level
childPointer = newChildPointer;
}
}
/**
 * Writes the buffered skip lists to the given output.
 * Levels are written top-down; each non-zero level is prefixed with its
 * length so readers can locate the next level, while level 0 (the last
 * one written) needs no prefix.
 *
 * @param output the IndexOutput the skip lists shall be written to
 * @return the pointer the skip list starts
 */
long writeSkip(IndexOutput output) throws IOException {
long skipPointer = output.getFilePointer();
if (skipBuffer == null || skipBuffer.length == 0) return skipPointer;
for (int level = numberOfSkipLevels - 1; level > 0; level--) {
long length = skipBuffer[level].getFilePointer();
if (length > 0) {
output.writeVLong(length);
skipBuffer[level].writeTo(output);
}
}
skipBuffer[0].writeTo(output);
return skipPointer;
}
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/MultiLevelSkipListWriter.java | Java | art | 5,159 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
/** <p>Expert: {@link IndexWriter} uses an instance
* implementing this interface to execute the merges
* selected by a {@link MergePolicy}. The default
* MergeScheduler is {@link ConcurrentMergeScheduler}.</p>
*
* <p><b>NOTE:</b> This API is new and still experimental
* (subject to change suddenly in the next release)</p>
*
* <p><b>NOTE</b>: This class typically requires access to
* package-private APIs (eg, SegmentInfos) to do its job;
* if you implement your own MergePolicy, you'll need to put
* it in package org.apache.lucene.index in order to use
* these APIs.
*/
public abstract class MergeScheduler {
/** Run the merges provided by {@link IndexWriter#getNextMerge()}.
 *  Implementations decide whether merges run serially or concurrently.
 *  @param writer the IndexWriter whose pending merges should be executed */
abstract void merge(IndexWriter writer)
throws CorruptIndexException, IOException;
/** Close this MergeScheduler, releasing any background resources
 *  (e.g. merge threads). */
abstract void close()
throws CorruptIndexException, IOException;
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/MergeScheduler.java | Java | art | 1,770 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import org.apache.lucene.document.Fieldable;
import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
// TODO: break into separate freq and prox writers as
// codecs; make separate container (tii/tis/skip/*) that can
// be configured as any number of files 1..N
final class FreqProxTermsWriterPerField extends TermsHashConsumerPerField implements Comparable<FreqProxTermsWriterPerField> {
final FreqProxTermsWriterPerThread perThread;
final TermsHashPerField termsHashPerField;
final FieldInfo fieldInfo;
final DocumentsWriter.DocState docState;
final FieldInvertState fieldState;
boolean omitTermFreqAndPositions;
PayloadAttribute payloadAttribute;
public FreqProxTermsWriterPerField(TermsHashPerField termsHashPerField, FreqProxTermsWriterPerThread perThread, FieldInfo fieldInfo) {
this.termsHashPerField = termsHashPerField;
this.perThread = perThread;
this.fieldInfo = fieldInfo;
docState = termsHashPerField.docState;
fieldState = termsHashPerField.fieldState;
omitTermFreqAndPositions = fieldInfo.omitTermFreqAndPositions;
}
@Override
int getStreamCount() {
if (fieldInfo.omitTermFreqAndPositions)
return 1;
else
return 2;
}
@Override
void finish() {}
boolean hasPayloads;
@Override
void skippingLongTerm() throws IOException {}
public int compareTo(FreqProxTermsWriterPerField other) {
return fieldInfo.name.compareTo(other.fieldInfo.name);
}
void reset() {
// Record, up front, whether our in-RAM format will be
// with or without term freqs:
omitTermFreqAndPositions = fieldInfo.omitTermFreqAndPositions;
payloadAttribute = null;
}
@Override
boolean start(Fieldable[] fields, int count) {
for(int i=0;i<count;i++)
if (fields[i].isIndexed())
return true;
return false;
}
@Override
void start(Fieldable f) {
if (fieldState.attributeSource.hasAttribute(PayloadAttribute.class)) {
payloadAttribute = fieldState.attributeSource.getAttribute(PayloadAttribute.class);
} else {
payloadAttribute = null;
}
}
final void writeProx(FreqProxTermsWriter.PostingList p, int proxCode) {
final Payload payload;
if (payloadAttribute == null) {
payload = null;
} else {
payload = payloadAttribute.getPayload();
}
if (payload != null && payload.length > 0) {
termsHashPerField.writeVInt(1, (proxCode<<1)|1);
termsHashPerField.writeVInt(1, payload.length);
termsHashPerField.writeBytes(1, payload.data, payload.offset, payload.length);
hasPayloads = true;
} else
termsHashPerField.writeVInt(1, proxCode<<1);
p.lastPosition = fieldState.position;
}
@Override
final void newTerm(RawPostingList p0) {
// First time we're seeing this term since the last
// flush
assert docState.testPoint("FreqProxTermsWriterPerField.newTerm start");
FreqProxTermsWriter.PostingList p = (FreqProxTermsWriter.PostingList) p0;
p.lastDocID = docState.docID;
if (omitTermFreqAndPositions) {
// Docs-only field: lastDocCode holds the raw doc delta (absolute here).
p.lastDocCode = docState.docID;
} else {
// Low bit of lastDocCode is reserved to flag docFreq==1 when flushed.
p.lastDocCode = docState.docID << 1;
p.docFreq = 1;
writeProx(p, fieldState.position);
}
}
// Called when this term was already seen since the last flush; either starts
// a new doc entry for it or bumps the freq within the current doc.
@Override
final void addTerm(RawPostingList p0) {
assert docState.testPoint("FreqProxTermsWriterPerField.addTerm start");
FreqProxTermsWriter.PostingList p = (FreqProxTermsWriter.PostingList) p0;
assert omitTermFreqAndPositions || p.docFreq > 0;
if (omitTermFreqAndPositions) {
if (docState.docID != p.lastDocID) {
assert docState.docID > p.lastDocID;
// Flush previous doc's delta, then start buffering the new one.
termsHashPerField.writeVInt(0, p.lastDocCode);
p.lastDocCode = docState.docID - p.lastDocID;
p.lastDocID = docState.docID;
}
} else {
if (docState.docID != p.lastDocID) {
assert docState.docID > p.lastDocID;
// Term not yet seen in the current doc but previously
// seen in other doc(s) since the last flush
// Now that we know doc freq for previous doc,
// write it & lastDocCode
if (1 == p.docFreq)
termsHashPerField.writeVInt(0, p.lastDocCode|1);
else {
termsHashPerField.writeVInt(0, p.lastDocCode);
termsHashPerField.writeVInt(0, p.docFreq);
}
p.docFreq = 1;
p.lastDocCode = (docState.docID - p.lastDocID) << 1;
p.lastDocID = docState.docID;
writeProx(p, fieldState.position);
} else {
p.docFreq++;
writeProx(p, fieldState.position-p.lastPosition);
}
}
}
// No per-field buffered state of our own to discard on abort.
public void abort() {}
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/FreqProxTermsWriterPerField.java | Java | art | 5,477 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.util.BitVector;
import java.io.IOException;
import java.util.List;
import java.util.Map;
import java.util.HashMap;
import java.util.ArrayList;
import java.util.Collections;
/**
* Information about a segment such as its name, directory, and files related
* to the segment.
*
* <p><b>NOTE:</b> This API is new and still experimental
* (subject to change suddenly in the next release)</p>
*/
public final class SegmentInfo {
// Sentinel values shared by delGen, normGen[] and isCompoundFile:
static final int NO = -1; // e.g. no norms; no deletes;
static final int YES = 1; // e.g. have norms; have deletes;
static final int CHECK_DIR = 0; // e.g. must check dir to see if there are norms/deletions
static final int WITHOUT_GEN = 0; // a file name that has no GEN in it.
public String name; // unique name in dir
public int docCount; // number of docs in seg
public Directory dir; // where segment resides
private boolean preLockless; // true if this is a segments file written before
// lock-less commits (2.1)
private long delGen; // current generation of del file; NO if there
// are no deletes; CHECK_DIR if it's a pre-2.1 segment
// (and we must check filesystem); YES or higher if
// there are deletes at generation N
private long[] normGen; // current generation of each field's norm file.
// If this array is null, for lockLess this means no
// separate norms. For preLockLess this means we must
// check filesystem. If this array is not null, its
// values mean: NO says this field has no separate
// norms; CHECK_DIR says it is a preLockLess segment and
// filesystem must be checked; >= YES says this field
// has separate norms with the specified generation
private byte isCompoundFile; // NO if it is not; YES if it is; CHECK_DIR if it's
// pre-2.1 (ie, must check file system to see
// if <name>.cfs and <name>.nrm exist)
private boolean hasSingleNormFile; // true if this segment maintains norms in a single file;
// false otherwise
// this is currently false for segments populated by DocumentWriter
// and true for newly created merged segments (both
// compound and non compound).
private List<String> files; // cached list of files that this segment uses
// in the Directory
long sizeInBytes = -1; // total byte size of all of our files (computed on demand)
private int docStoreOffset; // if this segment shares stored fields & vectors, this
// offset is where in that file this segment's docs begin
private String docStoreSegment; // name used to derive fields/vectors file we share with
// other segments
private boolean docStoreIsCompoundFile; // whether doc store files are stored in compound file (*.cfx)
private int delCount; // How many deleted docs in this segment, or -1 if not yet known
// (if it's an older index)
private boolean hasProx; // True if this segment has any fields with omitTermFreqAndPositions==false
// Free-form key/value details about how this segment was created (see setDiagnostics).
private Map<String,String> diagnostics;
@Override
public String toString() {
// Debug-friendly summary: directory, name, doc/del counts and the del file.
final StringBuilder buf = new StringBuilder("si: ");
buf.append(dir.toString()).append(' ').append(name);
buf.append(" docCount: ").append(docCount);
buf.append(" delCount: ").append(delCount);
buf.append(" delFileName: ").append(getDelFileName());
return buf.toString();
}
// Creates a pre-lockless-style SegmentInfo: compound-file status and norms
// must be discovered by checking the directory (CHECK_DIR defaults).
public SegmentInfo(String name, int docCount, Directory dir) {
this.name = name;
this.docCount = docCount;
this.dir = dir;
delGen = NO;
isCompoundFile = CHECK_DIR;
preLockless = true;
hasSingleNormFile = false;
docStoreOffset = -1;
// By default the segment stores its own fields/vectors under its own name.
docStoreSegment = name;
docStoreIsCompoundFile = false;
delCount = 0;
hasProx = true;
}
// Creates a lockless-format SegmentInfo without a shared doc store.
public SegmentInfo(String name, int docCount, Directory dir, boolean isCompoundFile, boolean hasSingleNormFile) {
this(name, docCount, dir, isCompoundFile, hasSingleNormFile, -1, null, false, true);
}
// Full lockless-format constructor; docStoreOffset == -1 means this segment
// has private stored fields/vectors rather than sharing docStoreSegment's.
public SegmentInfo(String name, int docCount, Directory dir, boolean isCompoundFile, boolean hasSingleNormFile,
int docStoreOffset, String docStoreSegment, boolean docStoreIsCompoundFile, boolean hasProx) {
this(name, docCount, dir);
this.isCompoundFile = (byte) (isCompoundFile ? YES : NO);
this.hasSingleNormFile = hasSingleNormFile;
preLockless = false;
this.docStoreOffset = docStoreOffset;
this.docStoreSegment = docStoreSegment;
this.docStoreIsCompoundFile = docStoreIsCompoundFile;
this.hasProx = hasProx;
delCount = 0;
assert docStoreOffset == -1 || docStoreSegment != null: "dso=" + docStoreOffset + " dss=" + docStoreSegment + " docCount=" + docCount;
}
/**
* Copy everything from src SegmentInfo into our instance.
*/
void reset(SegmentInfo src) {
// Our identity is changing, so drop the cached file list / size.
clearFiles();
name = src.name;
docCount = src.docCount;
dir = src.dir;
preLockless = src.preLockless;
delGen = src.delGen;
docStoreOffset = src.docStoreOffset;
docStoreIsCompoundFile = src.docStoreIsCompoundFile;
if (src.normGen == null) {
normGen = null;
} else {
// Deep-copy so later advanceNormGen on one instance doesn't affect the other.
normGen = new long[src.normGen.length];
System.arraycopy(src.normGen, 0, normGen, 0, src.normGen.length);
}
isCompoundFile = src.isCompoundFile;
hasSingleNormFile = src.hasSingleNormFile;
delCount = src.delCount;
// NOTE(review): despite "copy everything", docStoreSegment, hasProx and
// diagnostics are not copied here — confirm callers never rely on
// reset() transferring them.
}
// Records details about how this segment was created (merge, flush, ...).
void setDiagnostics(Map<String, String> diagnostics) {
this.diagnostics = diagnostics;
}
public Map<String, String> getDiagnostics() {
return diagnostics;
}
/**
* Construct a new SegmentInfo instance by reading a
* previously saved SegmentInfo from input.
*
* @param dir directory to load from
* @param format format of the segments info file
* @param input input handle to read segment info from
*/
SegmentInfo(Directory dir, int format, IndexInput input) throws IOException {
this.dir = dir;
name = input.readString();
docCount = input.readInt();
// Format constants are negative and decrease with newer versions, so
// "<=" means "this segments file is at least as new as that format".
if (format <= SegmentInfos.FORMAT_LOCKLESS) {
delGen = input.readLong();
if (format <= SegmentInfos.FORMAT_SHARED_DOC_STORE) {
docStoreOffset = input.readInt();
if (docStoreOffset != -1) {
docStoreSegment = input.readString();
docStoreIsCompoundFile = (1 == input.readByte());
} else {
docStoreSegment = name;
docStoreIsCompoundFile = false;
}
} else {
docStoreOffset = -1;
docStoreSegment = name;
docStoreIsCompoundFile = false;
}
if (format <= SegmentInfos.FORMAT_SINGLE_NORM_FILE) {
hasSingleNormFile = (1 == input.readByte());
} else {
hasSingleNormFile = false;
}
int numNormGen = input.readInt();
if (numNormGen == NO) {
normGen = null;
} else {
normGen = new long[numNormGen];
for(int j=0;j<numNormGen;j++) {
normGen[j] = input.readLong();
}
}
isCompoundFile = input.readByte();
// CHECK_DIR here marks a segment carried over from a pre-2.1 index.
preLockless = (isCompoundFile == CHECK_DIR);
if (format <= SegmentInfos.FORMAT_DEL_COUNT) {
delCount = input.readInt();
assert delCount <= docCount;
} else
delCount = -1;
if (format <= SegmentInfos.FORMAT_HAS_PROX)
hasProx = input.readByte() == 1;
else
hasProx = true;
if (format <= SegmentInfos.FORMAT_DIAGNOSTICS) {
diagnostics = input.readStringStringMap();
} else {
diagnostics = Collections.<String,String>emptyMap();
}
} else {
// Pre-lockless segments file: everything must be discovered by
// checking the directory later (CHECK_DIR / -1 sentinels).
delGen = CHECK_DIR;
normGen = null;
isCompoundFile = CHECK_DIR;
preLockless = true;
hasSingleNormFile = false;
docStoreOffset = -1;
docStoreIsCompoundFile = false;
docStoreSegment = null;
delCount = -1;
hasProx = true;
diagnostics = Collections.<String,String>emptyMap();
}
}
// Lazily allocates normGen once the number of fields is known, choosing
// sentinels that match this segment's format (pre-lockless vs lockless).
void setNumFields(int numFields) {
if (normGen == null) {
// normGen is null if we loaded a pre-2.1 segment
// file, or, if this segments file hasn't had any
// norms set against it yet:
normGen = new long[numFields];
if (preLockless) {
// Do nothing: thus leaving normGen[k]==CHECK_DIR (==0), so that later we know
// we have to check filesystem for norm files, because this is prelockless.
} else {
// This is a FORMAT_LOCKLESS segment, which means
// there are no separate norms:
for(int i=0;i<numFields;i++) {
normGen[i] = NO;
}
}
}
}
/** Returns total size in bytes of all of files used by
* this segment. The result is cached until clearFiles() is called. */
public long sizeInBytes() throws IOException {
if (sizeInBytes == -1) {
List<String> files = files();
final int size = files.size();
sizeInBytes = 0;
for(int i=0;i<size;i++) {
final String fileName = files.get(i);
// We don't count bytes used by a shared doc store
// against this segment:
if (docStoreOffset == -1 || !IndexFileNames.isDocStoreFile(fileName))
sizeInBytes += dir.fileLength(fileName);
}
}
return sizeInBytes;
}
// Returns true if this segment has (or may have) deleted docs; only the
// pre-lockless CHECK_DIR case requires touching the filesystem.
public boolean hasDeletions()
throws IOException {
// Cases:
//
// delGen == NO: this means this segment was written
// by the LOCKLESS code and for certain does not have
// deletions yet
//
// delGen == CHECK_DIR: this means this segment was written by
// pre-LOCKLESS code which means we must check
// directory to see if .del file exists
//
// delGen >= YES: this means this segment was written by
// the LOCKLESS code and for certain has
// deletions
//
if (delGen == NO) {
return false;
} else if (delGen >= YES) {
return true;
} else {
return dir.fileExists(getDelFileName());
}
}
// Bumps the deletions-file generation; called before writing a new .del file.
void advanceDelGen() {
// delGen 0 (CHECK_DIR) is reserved for the pre-LOCKLESS format, so the
// first lockless generation starts at YES (1).
delGen = (delGen == NO) ? YES : delGen + 1;
clearFiles();
}
// Marks this segment as definitely having no deletions file.
void clearDelGen() {
delGen = NO;
clearFiles();
}
// Returns an independent copy of this SegmentInfo; normGen and diagnostics
// are deep-copied so the clone can evolve separately.
// NOTE(review): deliberately builds a fresh instance instead of calling
// super.clone() — confirm no subclassing relies on Object.clone semantics.
@Override
public Object clone () {
SegmentInfo si = new SegmentInfo(name, docCount, dir);
si.isCompoundFile = isCompoundFile;
si.delGen = delGen;
si.delCount = delCount;
si.hasProx = hasProx;
si.preLockless = preLockless;
si.hasSingleNormFile = hasSingleNormFile;
si.diagnostics = new HashMap<String, String>(diagnostics);
if (normGen != null) {
si.normGen = (long[]) normGen.clone();
}
si.docStoreOffset = docStoreOffset;
si.docStoreSegment = docStoreSegment;
si.docStoreIsCompoundFile = docStoreIsCompoundFile;
return si;
}
// Returns the name of this segment's deletions file, or null when the
// segment is known to have no deletions.
public String getDelFileName() {
if (delGen == NO) {
// In this case we know there is no deletion filename
// against this segment
return null;
}
// If delGen is CHECK_DIR, it's the pre-lockless-commit file format;
// otherwise the name carries the current generation.
return IndexFileNames.fileNameFromGeneration(name, "." + IndexFileNames.DELETES_EXTENSION, delGen);
}
/**
* Returns true if this field for this segment has saved a separate norms file (_<segment>_N.sX).
*
* @param fieldNumber the field index to check
*/
public boolean hasSeparateNorms(int fieldNumber)
throws IOException {
// Pre-lockless segments (or CHECK_DIR entries) give no answer in RAM:
if ((normGen == null && preLockless) || (normGen != null && normGen[fieldNumber] == CHECK_DIR)) {
// Must fallback to directory file exists check:
String fileName = name + ".s" + fieldNumber;
return dir.fileExists(fileName);
} else if (normGen == null || normGen[fieldNumber] == NO) {
return false;
} else {
// normGen[fieldNumber] >= YES: separate norms definitely exist.
return true;
}
}
/**
* Returns true if any fields in this segment have separate norms.
*/
public boolean hasSeparateNorms()
throws IOException {
if (normGen == null) {
if (!preLockless) {
// This means we were created w/ LOCKLESS code and no
// norms are written yet:
return false;
} else {
// This means this segment was saved with pre-LOCKLESS
// code. So we must fallback to the original
// directory list check:
String[] result = dir.listAll();
if (result == null)
throw new IOException("cannot read directory " + dir + ": listAll() returned null");
final IndexFileNameFilter filter = IndexFileNameFilter.getFilter();
String pattern;
pattern = name + ".s";
int patternLength = pattern.length();
// Look for any "_<name>.s<digit>..." file belonging to this segment:
for(int i = 0; i < result.length; i++){
String fileName = result[i];
if (filter.accept(null, fileName) && fileName.startsWith(pattern) && Character.isDigit(fileName.charAt(patternLength)))
return true;
}
return false;
}
} else {
// This means this segment was saved with LOCKLESS
// code so we first check whether any normGen's are >= 1
// (meaning they definitely have separate norms):
for(int i=0;i<normGen.length;i++) {
if (normGen[i] >= YES) {
return true;
}
}
// Next we look for any == 0. These cases were
// pre-LOCKLESS and must be checked in directory:
for(int i=0;i<normGen.length;i++) {
if (normGen[i] == CHECK_DIR) {
if (hasSeparateNorms(i)) {
return true;
}
}
}
}
return false;
}
/**
* Increment the generation count for the norms file for
* this field.
*
* @param fieldIndex field whose norm file will be rewritten
*/
void advanceNormGen(int fieldIndex) {
// NO means no separate norms yet: jump straight to the first real
// generation (YES); otherwise just advance the existing one.
normGen[fieldIndex] = (normGen[fieldIndex] == NO) ? YES : normGen[fieldIndex] + 1;
clearFiles();
}
/**
* Get the file name for the norms file for this field.
*
* @param number field index
*/
public String getNormFileName(int number) throws IOException {
String prefix;
long gen;
if (number == CHECK_DIR && normGen == null) {
gen = CHECK_DIR;
} else {
gen = normGen[number];
}
if (hasSeparateNorms(number)) {
// case 1: separate norm
prefix = ".s";
return IndexFileNames.fileNameFromGeneration(name, prefix + number, gen);
}
if (hasSingleNormFile) {
// case 2: lockless (or nrm file exists) - single file for all norms
prefix = "." + IndexFileNames.NORMS_EXTENSION;
return IndexFileNames.fileNameFromGeneration(name, prefix, WITHOUT_GEN);
}
// case 3: norm file for each field
prefix = ".f";
return IndexFileNames.fileNameFromGeneration(name, prefix + number, WITHOUT_GEN);
}
/**
* Mark whether this segment is stored as a compound file.
*
* @param isCompoundFile true if this is a compound file;
* else, false
*/
void setUseCompoundFile(boolean isCompoundFile) {
this.isCompoundFile = (byte) (isCompoundFile ? YES : NO);
clearFiles();
}
/**
* Returns true if this segment is stored as a compound
* file; else, false.
*/
public boolean getUseCompoundFile() throws IOException {
switch (isCompoundFile) {
case NO:
return false;
case YES:
return true;
default:
// CHECK_DIR (pre-2.1 segment): the directory is the authority.
return dir.fileExists(name + "." + IndexFileNames.COMPOUND_FILE_EXTENSION);
}
}
// Returns the number of deleted docs, lazily counting the BitVector for
// older indexes where delCount was not stored (-1 sentinel).
public int getDelCount() throws IOException {
if (delCount == -1) {
if (hasDeletions()) {
final String delFileName = getDelFileName();
delCount = new BitVector(dir, delFileName).count();
} else
delCount = 0;
}
assert delCount <= docCount;
return delCount;
}
void setDelCount(int delCount) {
this.delCount = delCount;
assert delCount <= docCount;
}
// Offset of this segment's docs inside the shared doc store, or -1 when
// the segment owns its stored fields/vectors privately.
public int getDocStoreOffset() {
return docStoreOffset;
}
public boolean getDocStoreIsCompoundFile() {
return docStoreIsCompoundFile;
}
void setDocStoreIsCompoundFile(boolean v) {
docStoreIsCompoundFile = v;
clearFiles();
}
public String getDocStoreSegment() {
return docStoreSegment;
}
void setDocStoreOffset(int offset) {
docStoreOffset = offset;
clearFiles();
}
// Points this segment at a (possibly shared) doc store in one shot.
void setDocStore(int offset, String segment, boolean isCompoundFile) {
docStoreOffset = offset;
docStoreSegment = segment;
docStoreIsCompoundFile = isCompoundFile;
// NOTE(review): unlike the setters above, this does not call clearFiles();
// confirm callers invalidate the cached file list themselves.
}
/**
* Save this segment's info.
* Always writes the newest on-disk layout; the reading constructor uses the
* segments-file format version to decide which of these fields to expect.
*/
void write(IndexOutput output)
throws IOException {
output.writeString(name);
output.writeInt(docCount);
output.writeLong(delGen);
output.writeInt(docStoreOffset);
// Doc-store details are only stored when the store is shared:
if (docStoreOffset != -1) {
output.writeString(docStoreSegment);
output.writeByte((byte) (docStoreIsCompoundFile ? 1:0));
}
output.writeByte((byte) (hasSingleNormFile ? 1:0));
if (normGen == null) {
output.writeInt(NO);
} else {
output.writeInt(normGen.length);
for(int j = 0; j < normGen.length; j++) {
output.writeLong(normGen[j]);
}
}
output.writeByte(isCompoundFile);
output.writeInt(delCount);
output.writeByte((byte) (hasProx ? 1:0));
output.writeStringStringMap(diagnostics);
}
void setHasProx(boolean hasProx) {
this.hasProx = hasProx;
clearFiles();
}
public boolean getHasProx() {
return hasProx;
}
// Appends fileName to files only when it is actually present in the directory.
private void addIfExists(List<String> files, String fileName) throws IOException {
if (!dir.fileExists(fileName)) {
return;
}
files.add(fileName);
}
/*
* Return all files referenced by this SegmentInfo. The
* returns List is a locally cached List so you should not
* modify it.
*/
public List<String> files() throws IOException {
if (files != null) {
// Already cached:
return files;
}
files = new ArrayList<String>();
boolean useCompoundFile = getUseCompoundFile();
// 1) Core index files: either the single .cfs or the individual extensions.
if (useCompoundFile) {
files.add(name + "." + IndexFileNames.COMPOUND_FILE_EXTENSION);
} else {
final String[] exts = IndexFileNames.NON_STORE_INDEX_EXTENSIONS;
for(int i=0;i<exts.length;i++)
addIfExists(files, name + "." + exts[i]);
}
// 2) Stored fields / term vectors, shared or private.
if (docStoreOffset != -1) {
// We are sharing doc stores (stored fields, term
// vectors) with other segments
assert docStoreSegment != null;
if (docStoreIsCompoundFile) {
files.add(docStoreSegment + "." + IndexFileNames.COMPOUND_FILE_STORE_EXTENSION);
} else {
final String[] exts = IndexFileNames.STORE_INDEX_EXTENSIONS;
for(int i=0;i<exts.length;i++)
addIfExists(files, docStoreSegment + "." + exts[i]);
}
} else if (!useCompoundFile) {
// We are not sharing, and, these files were not
// included in the compound file
final String[] exts = IndexFileNames.STORE_INDEX_EXTENSIONS;
for(int i=0;i<exts.length;i++)
addIfExists(files, name + "." + exts[i]);
}
// 3) Deletions file, when one exists (or definitely exists: delGen >= YES).
String delFileName = IndexFileNames.fileNameFromGeneration(name, "." + IndexFileNames.DELETES_EXTENSION, delGen);
if (delFileName != null && (delGen >= YES || dir.fileExists(delFileName))) {
files.add(delFileName);
}
// Careful logic for norms files
if (normGen != null) {
for(int i=0;i<normGen.length;i++) {
long gen = normGen[i];
if (gen >= YES) {
// Definitely a separate norm file, with generation:
files.add(IndexFileNames.fileNameFromGeneration(name, "." + IndexFileNames.SEPARATE_NORMS_EXTENSION + i, gen));
} else if (NO == gen) {
// No separate norms but maybe plain norms
// in the non compound file case:
if (!hasSingleNormFile && !useCompoundFile) {
String fileName = name + "." + IndexFileNames.PLAIN_NORMS_EXTENSION + i;
if (dir.fileExists(fileName)) {
files.add(fileName);
}
}
} else if (CHECK_DIR == gen) {
// Pre-2.1: we have to check file existence
String fileName = null;
if (useCompoundFile) {
fileName = name + "." + IndexFileNames.SEPARATE_NORMS_EXTENSION + i;
} else if (!hasSingleNormFile) {
fileName = name + "." + IndexFileNames.PLAIN_NORMS_EXTENSION + i;
}
if (fileName != null && dir.fileExists(fileName)) {
files.add(fileName);
}
}
}
} else if (preLockless || (!hasSingleNormFile && !useCompoundFile)) {
// Pre-2.1: we have to scan the dir to find all
// matching _X.sN/_X.fN files for our segment:
String prefix;
if (useCompoundFile)
prefix = name + "." + IndexFileNames.SEPARATE_NORMS_EXTENSION;
else
prefix = name + "." + IndexFileNames.PLAIN_NORMS_EXTENSION;
int prefixLength = prefix.length();
String[] allFiles = dir.listAll();
final IndexFileNameFilter filter = IndexFileNameFilter.getFilter();
for(int i=0;i<allFiles.length;i++) {
String fileName = allFiles[i];
if (filter.accept(null, fileName) && fileName.length() > prefixLength && Character.isDigit(fileName.charAt(prefixLength)) && fileName.startsWith(prefix)) {
files.add(fileName);
}
}
}
return files;
}
/* Called whenever any change is made that affects which
* files this segment has. Invalidates both the cached file
* list and the cached total size. */
private void clearFiles() {
files = null;
sizeInBytes = -1;
}
/** Used for debugging. Encodes compound status as "c"/"C"/"?", appends "x"
* when this segment lives in a different directory than the given one, and
* "->segment" when stored fields/vectors are shared. */
public String segString(Directory dir) {
String cfs;
try {
if (getUseCompoundFile())
cfs = "c";
else
cfs = "C";
} catch (IOException ioe) {
// Directory check failed; status is unknown.
cfs = "?";
}
String docStore;
if (docStoreOffset != -1)
docStore = "->" + docStoreSegment;
else
docStore = "";
return name + ":" +
cfs +
(this.dir == dir ? "" : "x") +
docCount + docStore;
}
/** We consider another SegmentInfo instance equal if it
* has the same dir (by identity) and same name. */
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (!(obj instanceof SegmentInfo)) {
return false;
}
final SegmentInfo that = (SegmentInfo) obj;
return that.dir == dir && that.name.equals(name);
}
/** Consistent with equals: combines directory and name hashes. */
@Override
public int hashCode() {
return dir.hashCode() + name.hashCode();
}
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/SegmentInfo.java | Java | art | 24,326 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.DocumentsWriter.IndexingChain;
import org.apache.lucene.search.Similarity;
import org.apache.lucene.search.Query;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.Lock;
import org.apache.lucene.store.LockObtainFailedException;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.store.BufferedIndexInput;
import org.apache.lucene.util.Constants;
import org.apache.lucene.util.ThreadInterruptedException;
import java.io.IOException;
import java.io.Closeable;
import java.io.PrintStream;
import java.util.List;
import java.util.Collection;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Set;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.Iterator;
import java.util.Map;
/**
An <code>IndexWriter</code> creates and maintains an index.
<p>The <code>create</code> argument to the {@link
#IndexWriter(Directory, Analyzer, boolean, MaxFieldLength) constructor} determines
whether a new index is created, or whether an existing index is
opened. Note that you can open an index with <code>create=true</code>
even while readers are using the index. The old readers will
continue to search the "point in time" snapshot they had opened,
and won't see the newly created index until they re-open. There are
also {@link #IndexWriter(Directory, Analyzer, MaxFieldLength) constructors}
with no <code>create</code> argument which will create a new index
if there is not already an index at the provided path and otherwise
open the existing index.</p>
<p>In either case, documents are added with {@link #addDocument(Document)
addDocument} and removed with {@link #deleteDocuments(Term)} or {@link
#deleteDocuments(Query)}. A document can be updated with {@link
#updateDocument(Term, Document) updateDocument} (which just deletes
and then adds the entire document). When finished adding, deleting
and updating documents, {@link #close() close} should be called.</p>
<a name="flush"></a>
<p>These changes are buffered in memory and periodically
flushed to the {@link Directory} (during the above method
calls). A flush is triggered when there are enough
buffered deletes (see {@link #setMaxBufferedDeleteTerms})
or enough added documents since the last flush, whichever
is sooner. For the added documents, flushing is triggered
either by RAM usage of the documents (see {@link
#setRAMBufferSizeMB}) or the number of added documents.
The default is to flush when RAM usage hits 16 MB. For
best indexing speed you should flush by RAM usage with a
large RAM buffer. Note that flushing just moves the
internal buffered state in IndexWriter into the index, but
these changes are not visible to IndexReader until either
{@link #commit()} or {@link #close} is called. A flush may
also trigger one or more segment merges which by default
run with a background thread so as not to block the
addDocument calls (see <a href="#mergePolicy">below</a>
for changing the {@link MergeScheduler}).</p>
<p>If an index will not have more documents added for a while and optimal search
performance is desired, then either the full {@link #optimize() optimize}
method or partial {@link #optimize(int)} method should be
called before the index is closed.</p>
<p>Opening an <code>IndexWriter</code> creates a lock file for the directory in use. Trying to open
another <code>IndexWriter</code> on the same directory will lead to a
{@link LockObtainFailedException}. The {@link LockObtainFailedException}
is also thrown if an IndexReader on the same directory is used to delete documents
from the index.</p>
<a name="deletionPolicy"></a>
<p>Expert: <code>IndexWriter</code> allows an optional
{@link IndexDeletionPolicy} implementation to be
specified. You can use this to control when prior commits
are deleted from the index. The default policy is {@link
KeepOnlyLastCommitDeletionPolicy} which removes all prior
commits as soon as a new commit is done (this matches
behavior before 2.2). Creating your own policy can allow
you to explicitly keep previous "point in time" commits
alive in the index for some time, to allow readers to
refresh to the new commit without having the old commit
deleted out from under them. This is necessary on
filesystems like NFS that do not support "delete on last
close" semantics, which Lucene's "point in time" search
normally relies on. </p>
<a name="mergePolicy"></a> <p>Expert:
<code>IndexWriter</code> allows you to separately change
the {@link MergePolicy} and the {@link MergeScheduler}.
The {@link MergePolicy} is invoked whenever there are
changes to the segments in the index. Its role is to
select which merges to do, if any, and return a {@link
MergePolicy.MergeSpecification} describing the merges. It
also selects merges to do for optimize(). (The default is
{@link LogByteSizeMergePolicy}. Then, the {@link
MergeScheduler} is invoked with the requested merges and
it decides when and how to run the merges. The default is
{@link ConcurrentMergeScheduler}. </p>
<a name="OOME"></a><p><b>NOTE</b>: if you hit an
OutOfMemoryError then IndexWriter will quietly record this
fact and block all future segment commits. This is a
defensive measure in case any internal state (buffered
documents and deletions) were corrupted. Any subsequent
calls to {@link #commit()} will throw an
IllegalStateException. The only course of action is to
call {@link #close()}, which internally will call {@link
#rollback()}, to undo any changes to the index since the
last commit. You can also just call {@link #rollback()}
directly.</p>
<a name="thread-safety"></a><p><b>NOTE</b>:
<code>IndexWriter</code> instances are completely thread
safe, meaning multiple threads can call any of its
methods, concurrently. If your application requires
external synchronization, you should <b>not</b>
synchronize on the <code>IndexWriter</code> instance as
this may cause deadlock; use your own (non-Lucene) objects
instead. </p>
<p><b>NOTE</b>: If you call
<code>Thread.interrupt()</code> on a thread that's within
IndexWriter, IndexWriter will try to catch this (eg, if
it's in a wait() or Thread.sleep()), and will then throw
the unchecked exception {@link ThreadInterruptedException}
and <b>clear</b> the interrupt status on the thread.</p>
*/
/*
* Clarification: Check Points (and commits)
* IndexWriter writes new index files to the directory without writing a new segments_N
* file which references these new files. It also means that the state of
* the in memory SegmentInfos object is different than the most recent
* segments_N file written to the directory.
*
* Each time the SegmentInfos is changed, and matches the (possibly
* modified) directory files, we have a new "check point".
* If the modified/new SegmentInfos is written to disk - as a new
* (generation of) segments_N file - this check point is also an
* IndexCommit.
*
* A new checkpoint always replaces the previous checkpoint and
* becomes the new "front" of the index. This allows the IndexFileDeleter
* to delete files that are referenced only by stale checkpoints.
* (files that were created since the last commit, but are no longer
* referenced by the "front" of the index). For this, IndexFileDeleter
* keeps track of the last non commit checkpoint.
*/
public class IndexWriter implements Closeable {
/**
* Default value for the write lock timeout (1,000).
* @see #setDefaultWriteLockTimeout
*/
public static long WRITE_LOCK_TIMEOUT = 1000;
private long writeLockTimeout = WRITE_LOCK_TIMEOUT;
/**
* Name of the write lock in the index.
*/
/** Name of the write lock file in the index directory. */
public static final String WRITE_LOCK_NAME = "write.lock";
/**
* Value to denote a flush trigger is disabled
*/
public final static int DISABLE_AUTO_FLUSH = -1;
/**
* Disabled by default (because IndexWriter flushes by RAM usage
* by default). Change using {@link #setMaxBufferedDocs(int)}.
*/
public final static int DEFAULT_MAX_BUFFERED_DOCS = DISABLE_AUTO_FLUSH;
/**
* Default value is 16 MB (which means flush when buffered
* docs consume 16 MB RAM). Change using {@link #setRAMBufferSizeMB}.
*/
public final static double DEFAULT_RAM_BUFFER_SIZE_MB = 16.0;
/**
* Disabled by default (because IndexWriter flushes by RAM usage
* by default). Change using {@link #setMaxBufferedDeleteTerms(int)}.
*/
public final static int DEFAULT_MAX_BUFFERED_DELETE_TERMS = DISABLE_AUTO_FLUSH;
/**
* Default value is 10,000. Change using {@link #setMaxFieldLength(int)}.
*/
public final static int DEFAULT_MAX_FIELD_LENGTH = 10000;
/**
* Default value is 128. Change using {@link #setTermIndexInterval(int)}.
*/
public final static int DEFAULT_TERM_INDEX_INTERVAL = 128;
/**
* Absolute hard maximum length for a term. If a term
* arrives from the analyzer longer than this length, it
* is skipped and a message is printed to infoStream, if
* set (see {@link #setInfoStream}).
*/
public final static int MAX_TERM_LENGTH = DocumentsWriter.MAX_TERM_LENGTH;
// The normal read buffer size defaults to 1024, but
// increasing this during merging seems to yield
// performance gains. However we don't want to increase
// it too much because there are quite a few
// BufferedIndexInputs created during merging. See
// LUCENE-888 for details.
private final static int MERGE_READ_BUFFER_SIZE = 4096;
// Used for printing messages
private static Object MESSAGE_ID_LOCK = new Object();
private static int MESSAGE_ID = 0;
// Per-writer id used to prefix infoStream messages; -1 until assigned in setMessageID.
private int messageID = -1;
// Set once an OutOfMemoryError is hit (handling not visible in this chunk).
volatile private boolean hitOOM;
private Directory directory; // where this index resides
private Analyzer analyzer; // how to analyze text
private Similarity similarity = Similarity.getDefault(); // how to normalize
private volatile long changeCount; // increments every time a change is completed
private long lastCommitChangeCount; // last changeCount that was committed
private SegmentInfos rollbackSegmentInfos; // segmentInfos we will fallback to if the commit fails
private HashMap<SegmentInfo,Integer> rollbackSegments; // maps each rollback SegmentInfo to its index (built by setRollbackSegmentInfos)
volatile SegmentInfos pendingCommit; // set when a commit is pending (after prepareCommit() & before commit())
volatile long pendingCommitChangeCount;
private SegmentInfos localRollbackSegmentInfos; // segmentInfos we will fallback to if the commit fails
private int localFlushedDocCount; // saved docWriter.getFlushedDocCount during local transaction
private SegmentInfos segmentInfos = new SegmentInfos(); // the segments
private DocumentsWriter docWriter; // buffers added docs/deletes; created in init()
private IndexFileDeleter deleter; // tracks & deletes unreferenced index files; created in init()
private Set<SegmentInfo> segmentsToOptimize = new HashSet<SegmentInfo>(); // used by optimize to note those needing optimization
private Lock writeLock; // inter-process write lock on the directory, held for this writer's lifetime
private int termIndexInterval = DEFAULT_TERM_INDEX_INTERVAL;
private boolean closed; // true once close completes
private boolean closing; // true while a close is in progress
// Holds all SegmentInfo instances currently involved in
// merges
private HashSet<SegmentInfo> mergingSegments = new HashSet<SegmentInfo>();
private MergePolicy mergePolicy = new LogByteSizeMergePolicy(this);
private MergeScheduler mergeScheduler = new ConcurrentMergeScheduler();
private LinkedList<MergePolicy.OneMerge> pendingMerges = new LinkedList<MergePolicy.OneMerge>();
private Set<MergePolicy.OneMerge> runningMerges = new HashSet<MergePolicy.OneMerge>();
private List<MergePolicy.OneMerge> mergeExceptions = new ArrayList<MergePolicy.OneMerge>();
private long mergeGen;
private boolean stopMerges; // when true, no new merges may start (pending ones are aborted)
private int flushCount;
private int flushDeletesCount;
// Used to only allow one addIndexes to proceed at once
// TODO: use ReadWriteLock once we are on 5.0
private int readCount; // count of how many threads are holding read lock
private Thread writeThread; // non-null if any thread holds write lock
final ReaderPool readerPool = new ReaderPool();
private int upgradeCount; // count of readers currently blocked in upgradeReadToWrite()
private int readerTermsIndexDivisor = IndexReader.DEFAULT_TERMS_INDEX_DIVISOR;
// This is a "write once" variable (like the organic dye
// on a DVD-R that may or may not be heated by a laser and
// then cooled to permanently record the event): it's
// false, until getReader() is called for the first time,
// at which point it's switched to true and never changes
// back to false. Once this is true, we hold open and
// reuse SegmentReader instances internally for applying
// deletes, doing merges, and reopening near real-time
// readers.
private volatile boolean poolReaders;
/**
* Expert: returns a readonly reader, covering all
* committed as well as un-committed changes to the index.
* This provides "near real-time" searching, in that
* changes made during an IndexWriter session can be
* quickly made available for searching without closing
* the writer nor calling {@link #commit}.
*
* <p>Note that this is functionally equivalent to calling
* {@link #commit} and then using {@link IndexReader#open} to
* open a new reader. But the turnaround time of this
* method should be faster since it avoids the potentially
* costly {@link #commit}.</p>
*
* <p>You must close the {@link IndexReader} returned by
* this method once you are done using it.</p>
*
* <p>It's <i>near</i> real-time because there is no hard
* guarantee on how quickly you can get a new reader after
* making changes with IndexWriter. You'll have to
* experiment in your situation to determine if it's
* fast enough. As this is a new and experimental
* feature, please report back on your findings so we can
* learn, improve and iterate.</p>
*
* <p>The resulting reader supports {@link
* IndexReader#reopen}, but that call will simply forward
* back to this method (though this may change in the
* future).</p>
*
* <p>The very first time this method is called, this
* writer instance will make every effort to pool the
* readers that it opens for doing merges, applying
* deletes, etc. This means additional resources (RAM,
* file descriptors, CPU time) will be consumed.</p>
*
* <p>For lower latency on reopening a reader, you should
* call {@link #setMergedSegmentWarmer} to
* pre-warm a newly merged segment before it's committed
* to the index. This is important for minimizing
* index-to-search delay after a large merge. </p>
*
* <p>If an addIndexes* call is running in another thread,
* then this reader will only search those segments from
* the foreign index that have been successfully copied
* over, so far.</p>
*
* <p><b>NOTE</b>: Once the writer is closed, any
* outstanding readers may continue to be used. However,
* if you attempt to reopen any of those readers, you'll
* hit an {@link AlreadyClosedException}.</p>
*
* <p><b>NOTE:</b> This API is experimental and might
* change in incompatible ways in the next release.</p>
*
* @return IndexReader that covers entire index plus all
* changes made so far by this IndexWriter instance
*
* @throws IOException
*/
public IndexReader getReader() throws IOException {
// Delegate to the divisor-taking overload using this writer's
// configured terms-index divisor.
return getReader(readerTermsIndexDivisor);
}
/** Expert: like {@link #getReader}, except you can
* specify which termInfosIndexDivisor should be used for
* any newly opened readers.
* @param termInfosIndexDivisor Subsamples which indexed
* terms are loaded into RAM. This has the same effect as {@link
* IndexWriter#setTermIndexInterval} except that setting
* must be done at indexing time while this setting can be
* set per reader. When set to N, then one in every
* N*termIndexInterval terms in the index is loaded into
* memory. By setting this to a value > 1 you can reduce
* memory usage, at the expense of higher latency when
* loading a TermInfo. The default value is 1. Set this
* to -1 to skip loading the terms index entirely. */
public IndexReader getReader(int termInfosIndexDivisor) throws IOException {
ensureOpen();
if (infoStream != null) {
message("flush at getReader");
}
// Do this up front before flushing so that the readers
// obtained during this flush are pooled, the first time
// this method is called:
poolReaders = true;
// Flush buffered docs and deletes (but see applyDeletes below,
// which is done under the lock):
flush(true, true, false);
// Prevent segmentInfos from changing while opening the
// reader; in theory we could do similar retry logic,
// just like we do when loading segments_N
synchronized(this) {
applyDeletes();
// The returned reader covers all committed and uncommitted
// changes; caller is responsible for closing it.
return new ReadOnlyDirectoryReader(this, segmentInfos, termInfosIndexDivisor);
}
}
/** Holds shared SegmentReader instances. IndexWriter uses
* SegmentReaders for 1) applying deletes, 2) doing
* merges, 3) handing out a real-time reader. This pool
* reuses instances of the SegmentReaders in all these
* places if it is in "near real-time mode" (getReader()
* has been called on this instance). */
class ReaderPool {
// Maps each pooled segment to its shared SegmentReader. Guarded by
// this ReaderPool's monitor (all methods are synchronized).
private final Map<SegmentInfo,SegmentReader> readerMap = new HashMap<SegmentInfo,SegmentReader>();
/** Forcefully clear (discard) pending changes on the pooled readers
* for the specified segments (or all pooled readers when infos is
* null). This is called on successful merge.
* NOTE(review): despite the previous wording, entries are NOT
* removed from the pool here — only hasChanges is reset. */
synchronized void clear(SegmentInfos infos) throws IOException {
if (infos == null) {
for (Map.Entry<SegmentInfo,SegmentReader> ent: readerMap.entrySet()) {
ent.getValue().hasChanges = false;
}
} else {
for (final SegmentInfo info: infos) {
if (readerMap.containsKey(info)) {
readerMap.get(info).hasChanges = false;
}
}
}
}
// used only by asserts
public synchronized boolean infoIsLive(SegmentInfo info) {
int idx = segmentInfos.indexOf(info);
assert idx != -1;
assert segmentInfos.get(idx) == info;
return true;
}
// Returns the live SegmentInfo instance from segmentInfos that
// equals the given info, or the info itself if it is not live.
public synchronized SegmentInfo mapToLive(SegmentInfo info) {
int idx = segmentInfos.indexOf(info);
if (idx != -1) {
info = segmentInfos.get(idx);
}
return info;
}
/**
* Release the segment reader (i.e. decRef it and close if there
* are no more references.
* @param sr
* @throws IOException
*/
public synchronized void release(SegmentReader sr) throws IOException {
release(sr, false);
}
/**
* Release the segment reader (i.e. decRef it and close if there
* are no more references.
* @param sr
* @param drop if true, evict the reader from the pool even if pooling is on
* @throws IOException
*/
public synchronized void release(SegmentReader sr, boolean drop) throws IOException {
final boolean pooled = readerMap.containsKey(sr.getSegmentInfo());
// NOTE(review): non-short-circuit '|' — behaviorally safe here
// (left operand guards the assertion outcome) but '||' was
// almost certainly intended.
assert !pooled | readerMap.get(sr.getSegmentInfo()) == sr;
// Drop caller's ref; for an external reader (not
// pooled), this decRef will close it
sr.decRef();
if (pooled && (drop || (!poolReaders && sr.getRefCount() == 1))) {
// We are the last ref to this reader; since we're
// not pooling readers, we release it:
readerMap.remove(sr.getSegmentInfo());
assert !sr.hasChanges || Thread.holdsLock(IndexWriter.this);
// Drop our ref -- this will commit any pending
// changes to the dir
boolean success = false;
try {
sr.close();
success = true;
} finally {
if (!success && sr.hasChanges) {
// Abandon the changes & retry closing:
sr.hasChanges = false;
try {
sr.close();
} catch (Throwable ignore) {
// Keep throwing original exception
}
}
}
}
}
/** Remove all our references to readers, and commits
* any pending changes. */
synchronized void close() throws IOException {
Iterator<Map.Entry<SegmentInfo,SegmentReader>> iter = readerMap.entrySet().iterator();
while (iter.hasNext()) {
Map.Entry<SegmentInfo,SegmentReader> ent = iter.next();
SegmentReader sr = ent.getValue();
if (sr.hasChanges) {
assert infoIsLive(sr.getSegmentInfo());
sr.startCommit();
boolean success = false;
try {
sr.doCommit(null);
success = true;
} finally {
if (!success) {
sr.rollbackCommit();
}
}
}
// Remove via the iterator — removing from readerMap directly
// while iterating would throw ConcurrentModificationException.
iter.remove();
// NOTE: it is allowed that this decRef does not
// actually close the SR; this can happen when a
// near real-time reader is kept open after the
// IndexWriter instance is closed
sr.decRef();
}
}
/**
* Commit all segment reader in the pool.
* @throws IOException
*/
synchronized void commit() throws IOException {
for (Map.Entry<SegmentInfo,SegmentReader> ent : readerMap.entrySet()) {
SegmentReader sr = ent.getValue();
if (sr.hasChanges) {
assert infoIsLive(sr.getSegmentInfo());
sr.startCommit();
boolean success = false;
try {
sr.doCommit(null);
success = true;
} finally {
if (!success) {
// Undo the partial commit so the reader stays consistent.
sr.rollbackCommit();
}
}
}
}
}
/**
* Returns a ref to a clone. NOTE: this clone is not
* enrolled in the pool, so you should simply close()
* it when you're done (ie, do not call release()).
*/
public synchronized SegmentReader getReadOnlyClone(SegmentInfo info, boolean doOpenStores, int termInfosIndexDivisor) throws IOException {
SegmentReader sr = get(info, doOpenStores, BufferedIndexInput.BUFFER_SIZE, termInfosIndexDivisor);
try {
return (SegmentReader) sr.clone(true);
} finally {
// Drop the ref that get() added; the clone carries its own ref.
sr.decRef();
}
}
/**
* Obtain a SegmentReader from the readerPool. The reader
* must be returned by calling {@link #release(SegmentReader)}
* @see #release(SegmentReader)
* @param info
* @param doOpenStores
* @throws IOException
*/
public synchronized SegmentReader get(SegmentInfo info, boolean doOpenStores) throws IOException {
return get(info, doOpenStores, BufferedIndexInput.BUFFER_SIZE, readerTermsIndexDivisor);
}
/**
* Obtain a SegmentReader from the readerPool. The reader
* must be returned by calling {@link #release(SegmentReader)}
*
* @see #release(SegmentReader)
* @param info
* @param doOpenStores
* @param readBufferSize
* @param termsIndexDivisor
* @throws IOException
*/
public synchronized SegmentReader get(SegmentInfo info, boolean doOpenStores, int readBufferSize, int termsIndexDivisor) throws IOException {
if (poolReaders) {
// Pooled readers are long-lived; use the standard buffer size.
readBufferSize = BufferedIndexInput.BUFFER_SIZE;
}
SegmentReader sr = readerMap.get(info);
if (sr == null) {
// TODO: we may want to avoid doing this while
// synchronized
// Returns a ref, which we xfer to readerMap:
sr = SegmentReader.get(false, info.dir, info, readBufferSize, doOpenStores, termsIndexDivisor);
if (info.dir == directory) {
// Only pool if reader is not external
readerMap.put(info, sr);
}
} else {
if (doOpenStores) {
sr.openDocStores();
}
if (termsIndexDivisor != -1 && !sr.termsIndexLoaded()) {
// If this reader was originally opened because we
// needed to merge it, we didn't load the terms
// index. But now, if the caller wants the terms
// index (eg because it's doing deletes, or an NRT
// reader is being opened) we ask the reader to
// load its terms index.
sr.loadTermsIndex(termsIndexDivisor);
}
}
// Return a ref to our caller
if (info.dir == directory) {
// Only incRef if we pooled (reader is not external)
sr.incRef();
}
return sr;
}
// Returns a ref (caller must release), or null if the segment is
// not currently pooled.
public synchronized SegmentReader getIfExists(SegmentInfo info) throws IOException {
SegmentReader sr = readerMap.get(info);
if (sr != null) {
sr.incRef();
}
return sr;
}
}
/**
* Obtain the number of deleted docs for a pooled reader.
* If the reader isn't being pooled, the segmentInfo's
* delCount is returned.
*/
public int numDeletedDocs(SegmentInfo info) throws IOException {
// Prefer the live count from a pooled reader; fall back to the
// count recorded on the SegmentInfo when the segment isn't pooled.
final SegmentReader pooled = readerPool.getIfExists(info);
if (pooled == null) {
return info.getDelCount();
}
try {
return pooled.numDeletedDocs();
} finally {
// getIfExists incRef'd the reader; give that ref back.
readerPool.release(pooled);
}
}
// Blocks until this thread exclusively holds the (in-process) write
// lock: no other writer and no readers. Not re-entrant.
synchronized void acquireWrite() {
assert writeThread != Thread.currentThread();
// Wait until the write lock is free and no read locks are held.
while(writeThread != null || readCount > 0)
doWait();
// We could have been closed while we were waiting:
ensureOpen();
writeThread = Thread.currentThread();
}
synchronized void releaseWrite() {
final Thread current = Thread.currentThread();
// Only the thread that acquired the write lock may release it.
assert current == writeThread;
writeThread = null;
// Wake threads blocked in acquireWrite()/acquireRead().
notifyAll();
}
synchronized void acquireRead() {
// Block while another thread holds the write lock; the write-lock
// owner itself may also take read locks (re-entrant for it).
final Thread me = Thread.currentThread();
while (writeThread != null && writeThread != me) {
doWait();
}
readCount++;
}
// Allows one readLock to upgrade to a writeLock even if
// there are other readLocks as long as all other
// readLocks are also blocked in this method:
synchronized void upgradeReadToWrite() {
assert readCount > 0;
// Register ourselves as an upgrader so other upgraders' read
// locks don't count against us below.
upgradeCount++;
// Proceed once every remaining read lock belongs to a blocked
// upgrader and no thread holds the write lock.
while(readCount > upgradeCount || writeThread != null) {
doWait();
}
// Trade our read lock for the write lock.
writeThread = Thread.currentThread();
readCount--;
upgradeCount--;
}
synchronized void releaseRead() {
--readCount;
// Every release must pair with a prior acquireRead().
assert readCount >= 0;
// A waiting writer (or upgrader) may now be able to proceed.
notifyAll();
}
synchronized final boolean isOpen(boolean includePendingClose) {
// Fully closed always means "not open"; an in-progress close
// counts only when the caller asked to include it.
if (closed) {
return false;
}
return !(includePendingClose && closing);
}
/**
* Used internally to throw an {@link
* AlreadyClosedException} if this IndexWriter has been
* closed.
* @throws AlreadyClosedException if this IndexWriter is
* closed, or is in the process of closing and
* includePendingClose is true
*/
protected synchronized final void ensureOpen(boolean includePendingClose) throws AlreadyClosedException {
if (!isOpen(includePendingClose)) {
throw new AlreadyClosedException("this IndexWriter is closed");
}
}
// Convenience overload: treats an in-progress close as closed.
protected synchronized final void ensureOpen() throws AlreadyClosedException {
ensureOpen(true);
}
/**
* Prints a message to the infoStream (if non-null),
* prefixed with the identifying information for this
* writer and the thread that's calling it.
*/
public void message(String message) {
// messageID is assigned once in setMessageID, when infoStream is set.
if (infoStream != null)
infoStream.println("IW " + messageID + " [" + Thread.currentThread().getName() + "]: " + message);
}
// Installs the infoStream and, the first time a non-null stream is
// set, assigns this writer a unique id from the static counter.
private synchronized void setMessageID(PrintStream infoStream) {
if (infoStream != null && messageID == -1) {
// MESSAGE_ID is shared across all writers; guard it separately.
synchronized(MESSAGE_ID_LOCK) {
messageID = MESSAGE_ID++;
}
}
this.infoStream = infoStream;
}
/**
* Returns the current merge policy downcast to {@link LogMergePolicy},
* throwing if some other MergePolicy implementation is installed.
*/
private LogMergePolicy getLogMergePolicy() {
if (!(mergePolicy instanceof LogMergePolicy)) {
throw new IllegalArgumentException("this method can only be called when the merge policy is the default LogMergePolicy");
}
return (LogMergePolicy) mergePolicy;
}
/** <p>Get the current setting of whether newly flushed
* segments will use the compound file format. Note that
* this just returns the value previously set with
* setUseCompoundFile(boolean), or the default value
* (true). You cannot use this to query the status of
* previously flushed segments.</p>
*
* <p>Note that this method is a convenience method: it
* just calls mergePolicy.getUseCompoundFile as long as
* mergePolicy is an instance of {@link LogMergePolicy}.
* Otherwise an IllegalArgumentException is thrown.</p>
*
* @see #setUseCompoundFile(boolean)
*/
public boolean getUseCompoundFile() {
return getLogMergePolicy().getUseCompoundFile();
}
/** <p>Setting to turn on usage of a compound file. When on,
* multiple files for each segment are merged into a
* single file when a new segment is flushed.</p>
*
* <p>Note that this method is a convenience method: it
* just calls mergePolicy.setUseCompoundFile as long as
* mergePolicy is an instance of {@link LogMergePolicy}.
* Otherwise an IllegalArgumentException is thrown.</p>
*/
public void setUseCompoundFile(boolean value) {
getLogMergePolicy().setUseCompoundFile(value);
// Also toggle the compound doc-store setting in lock-step.
getLogMergePolicy().setUseCompoundDocStore(value);
}
/** Expert: Set the Similarity implementation used by this IndexWriter.
*
* @see Similarity#setDefault(Similarity)
*/
public void setSimilarity(Similarity similarity) {
ensureOpen();
this.similarity = similarity;
// Keep the DocumentsWriter in sync so newly indexed docs use it.
docWriter.setSimilarity(similarity);
}
/** Expert: Return the Similarity implementation used by this IndexWriter.
*
* <p>This defaults to the current value of {@link Similarity#getDefault()}.
*/
public Similarity getSimilarity() {
ensureOpen();
return this.similarity;
}
/** Expert: Set the interval between indexed terms. Large values cause less
* memory to be used by IndexReader, but slow random-access to terms. Small
* values cause more memory to be used by an IndexReader, and speed
* random-access to terms.
*
* This parameter determines the amount of computation required per query
* term, regardless of the number of documents that contain that term. In
* particular, it is the maximum number of other terms that must be
* scanned before a term is located and its frequency and position information
* may be processed. In a large index with user-entered query terms, query
* processing time is likely to be dominated not by term lookup but rather
* by the processing of frequency and positional data. In a small index
* or when many uncommon query terms are generated (e.g., by wildcard
* queries) term lookup may become a dominant cost.
*
* In particular, <code>numUniqueTerms/interval</code> terms are read into
* memory by an IndexReader, and, on average, <code>interval/2</code> terms
* must be scanned for each random term access.
*
* @see #DEFAULT_TERM_INDEX_INTERVAL
*/
public void setTermIndexInterval(int interval) {
ensureOpen();
this.termIndexInterval = interval;
}
/** Expert: Return the interval between indexed terms.
*
* @see #setTermIndexInterval(int)
*/
public int getTermIndexInterval() {
// We pass false because this method is called by SegmentMerger while we are in the process of closing
ensureOpen(false);
return termIndexInterval;
}
/**
* Constructs an IndexWriter for the index in <code>d</code>.
* Text will be analyzed with <code>a</code>. If <code>create</code>
* is true, then a new, empty index will be created in
* <code>d</code>, replacing the index already there, if any.
*
* @param d the index directory
* @param a the analyzer to use
* @param create <code>true</code> to create the index or overwrite
* the existing one; <code>false</code> to append to the existing
* index
* @param mfl Maximum field length in number of terms/tokens: LIMITED, UNLIMITED, or user-specified
* via the MaxFieldLength constructor.
* @throws CorruptIndexException if the index is corrupt
* @throws LockObtainFailedException if another writer
* has this index open (<code>write.lock</code> could not
* be obtained)
* @throws IOException if the directory cannot be read/written to, or
* if it does not exist and <code>create</code> is
* <code>false</code> or if there is any other low-level
* IO error
*/
public IndexWriter(Directory d, Analyzer a, boolean create, MaxFieldLength mfl)
throws CorruptIndexException, LockObtainFailedException, IOException {
// Default deletion policy, indexing chain and commit point.
init(d, a, create, null, mfl.getLimit(), null, null);
}
/**
* Constructs an IndexWriter for the index in
* <code>d</code>, first creating it if it does not
* already exist. Text will be analyzed with
* <code>a</code>.
*
* @param d the index directory
* @param a the analyzer to use
* @param mfl Maximum field length in number of terms/tokens: LIMITED, UNLIMITED, or user-specified
* via the MaxFieldLength constructor.
* @throws CorruptIndexException if the index is corrupt
* @throws LockObtainFailedException if another writer
* has this index open (<code>write.lock</code> could not
* be obtained)
* @throws IOException if the directory cannot be
* read/written to or if there is any other low-level
* IO error
*/
public IndexWriter(Directory d, Analyzer a, MaxFieldLength mfl)
throws CorruptIndexException, LockObtainFailedException, IOException {
// No explicit create flag: init() auto-detects whether an index exists.
init(d, a, null, mfl.getLimit(), null, null);
}
/**
* Expert: constructs an IndexWriter with a custom {@link
* IndexDeletionPolicy}, for the index in <code>d</code>,
* first creating it if it does not already exist. Text
* will be analyzed with <code>a</code>.
*
* @param d the index directory
* @param a the analyzer to use
* @param deletionPolicy see <a href="#deletionPolicy">above</a>
* @param mfl whether or not to limit field lengths
* @throws CorruptIndexException if the index is corrupt
* @throws LockObtainFailedException if another writer
* has this index open (<code>write.lock</code> could not
* be obtained)
* @throws IOException if the directory cannot be
* read/written to or if there is any other low-level
* IO error
*/
public IndexWriter(Directory d, Analyzer a, IndexDeletionPolicy deletionPolicy, MaxFieldLength mfl)
throws CorruptIndexException, LockObtainFailedException, IOException {
// No explicit create flag: init() auto-detects whether an index exists.
init(d, a, deletionPolicy, mfl.getLimit(), null, null);
}
/**
* Expert: constructs an IndexWriter with a custom {@link
* IndexDeletionPolicy}, for the index in <code>d</code>.
* Text will be analyzed with <code>a</code>. If
* <code>create</code> is true, then a new, empty index
* will be created in <code>d</code>, replacing the index
* already there, if any.
*
* @param d the index directory
* @param a the analyzer to use
* @param create <code>true</code> to create the index or overwrite
* the existing one; <code>false</code> to append to the existing
* index
* @param deletionPolicy see <a href="#deletionPolicy">above</a>
* @param mfl {@link org.apache.lucene.index.IndexWriter.MaxFieldLength}, whether or not to limit field lengths. Value is in number of terms/tokens
* @throws CorruptIndexException if the index is corrupt
* @throws LockObtainFailedException if another writer
* has this index open (<code>write.lock</code> could not
* be obtained)
* @throws IOException if the directory cannot be read/written to, or
* if it does not exist and <code>create</code> is
* <code>false</code> or if there is any other low-level
* IO error
*/
public IndexWriter(Directory d, Analyzer a, boolean create, IndexDeletionPolicy deletionPolicy, MaxFieldLength mfl)
throws CorruptIndexException, LockObtainFailedException, IOException {
// Default indexing chain; no explicit commit point.
init(d, a, create, deletionPolicy, mfl.getLimit(), null, null);
}
/**
* Expert: constructs an IndexWriter with a custom {@link
* IndexDeletionPolicy} and {@link IndexingChain},
* for the index in <code>d</code>.
* Text will be analyzed with <code>a</code>. If
* <code>create</code> is true, then a new, empty index
* will be created in <code>d</code>, replacing the index
* already there, if any.
*
* @param d the index directory
* @param a the analyzer to use
* @param create <code>true</code> to create the index or overwrite
* the existing one; <code>false</code> to append to the existing
* index
* @param deletionPolicy see <a href="#deletionPolicy">above</a>
* @param mfl whether or not to limit field lengths, value is in number of terms/tokens. See {@link org.apache.lucene.index.IndexWriter.MaxFieldLength}.
* @param indexingChain the {@link DocConsumer} chain to be used to
* process documents
* @param commit which commit to open
* @throws CorruptIndexException if the index is corrupt
* @throws LockObtainFailedException if another writer
* has this index open (<code>write.lock</code> could not
* be obtained)
* @throws IOException if the directory cannot be read/written to, or
* if it does not exist and <code>create</code> is
* <code>false</code> or if there is any other low-level
* IO error
*/
// Package-private: allows tests/internal callers to supply a custom
// indexing chain and commit point.
IndexWriter(Directory d, Analyzer a, boolean create, IndexDeletionPolicy deletionPolicy, MaxFieldLength mfl, IndexingChain indexingChain, IndexCommit commit)
throws CorruptIndexException, LockObtainFailedException, IOException {
init(d, a, create, deletionPolicy, mfl.getLimit(), indexingChain, commit);
}
/**
* Expert: constructs an IndexWriter on specific commit
* point, with a custom {@link IndexDeletionPolicy}, for
* the index in <code>d</code>. Text will be analyzed
* with <code>a</code>.
*
* <p> This is only meaningful if you've used a {@link
* IndexDeletionPolicy} in that past that keeps more than
* just the last commit.
*
* <p>This operation is similar to {@link #rollback()},
* except that method can only rollback what's been done
* with the current instance of IndexWriter since its last
* commit, whereas this method can rollback to an
* arbitrary commit point from the past, assuming the
* {@link IndexDeletionPolicy} has preserved past
* commits.
*
* @param d the index directory
* @param a the analyzer to use
* @param deletionPolicy see <a href="#deletionPolicy">above</a>
* @param mfl whether or not to limit field lengths, value is in number of terms/tokens. See {@link org.apache.lucene.index.IndexWriter.MaxFieldLength}.
* @param commit which commit to open
* @throws CorruptIndexException if the index is corrupt
* @throws LockObtainFailedException if another writer
* has this index open (<code>write.lock</code> could not
* be obtained)
* @throws IOException if the directory cannot be read/written to, or
* if it does not exist and <code>create</code> is
* <code>false</code> or if there is any other low-level
* IO error
*/
public IndexWriter(Directory d, Analyzer a, IndexDeletionPolicy deletionPolicy, MaxFieldLength mfl, IndexCommit commit)
throws CorruptIndexException, LockObtainFailedException, IOException {
// Opening a specific commit implies the index exists: create=false.
init(d, a, false, deletionPolicy, mfl.getLimit(), null, commit);
}
private void init(Directory d, Analyzer a, IndexDeletionPolicy deletionPolicy,
int maxFieldLength, IndexingChain indexingChain, IndexCommit commit)
throws CorruptIndexException, LockObtainFailedException, IOException {
// Create a fresh index only when none exists yet; otherwise append.
final boolean doCreate = !IndexReader.indexExists(d);
init(d, a, doCreate, deletionPolicy, maxFieldLength, indexingChain, commit);
}
/**
* Shared initialization for all constructors: records the directory
* and analyzer, obtains the inter-process write lock, reads or
* creates the segments file (optionally from a specific commit
* point), and wires up the DocumentsWriter and IndexFileDeleter.
* On any failure the write lock is released and the field cleared.
*/
private void init(Directory d, Analyzer a, final boolean create,
IndexDeletionPolicy deletionPolicy, int maxFieldLength,
IndexingChain indexingChain, IndexCommit commit)
throws CorruptIndexException, LockObtainFailedException, IOException {
directory = d;
analyzer = a;
setMessageID(defaultInfoStream);
this.maxFieldLength = maxFieldLength;
if (indexingChain == null)
indexingChain = DocumentsWriter.DefaultIndexingChain;
if (create) {
// Clear the write lock in case it's leftover:
directory.clearLock(WRITE_LOCK_NAME);
}
Lock writeLock = directory.makeLock(WRITE_LOCK_NAME);
if (!writeLock.obtain(writeLockTimeout)) // obtain write lock
throw new LockObtainFailedException("Index locked for write: " + writeLock);
this.writeLock = writeLock; // save it
boolean success = false;
try {
if (create) {
// Try to read first. This is to allow create
// against an index that's currently open for
// searching. In this case we write the next
// segments_N file with no segments:
boolean doCommit;
try {
segmentInfos.read(directory);
segmentInfos.clear();
doCommit = false;
} catch (IOException e) {
// Likely this means it's a fresh directory
doCommit = true;
}
if (doCommit) {
// Only commit if there is no segments file in
// this dir already.
segmentInfos.commit(directory);
synced.addAll(segmentInfos.files(directory, true));
} else {
// Record that we have a change (zero out all
// segments) pending:
changeCount++;
}
} else {
segmentInfos.read(directory);
if (commit != null) {
// Swap out all segments, but, keep metadata in
// SegmentInfos, like version & generation, to
// preserve write-once. This is important if
// readers are open against the future commit
// points.
if (commit.getDirectory() != directory)
throw new IllegalArgumentException("IndexCommit's directory doesn't match my directory");
SegmentInfos oldInfos = new SegmentInfos();
oldInfos.read(directory, commit.getSegmentsFileName());
segmentInfos.replace(oldInfos);
changeCount++;
if (infoStream != null)
message("init: loaded commit \"" + commit.getSegmentsFileName() + "\"");
}
// We assume that this segments_N was previously
// properly sync'd:
synced.addAll(segmentInfos.files(directory, true));
}
setRollbackSegmentInfos(segmentInfos);
docWriter = new DocumentsWriter(directory, this, indexingChain);
docWriter.setInfoStream(infoStream);
docWriter.setMaxFieldLength(maxFieldLength);
// Default deleter (for backwards compatibility) is
// KeepOnlyLastCommitDeleter:
deleter = new IndexFileDeleter(directory,
deletionPolicy == null ? new KeepOnlyLastCommitDeletionPolicy() : deletionPolicy,
segmentInfos, infoStream, docWriter);
if (deleter.startingCommitDeleted)
// Deletion policy deleted the "head" commit point.
// We have to mark ourself as changed so that if we
// are closed w/o any further changes we write a new
// segments_N file.
changeCount++;
pushMaxBufferedDocs();
if (infoStream != null) {
message("init: create=" + create);
messageState();
}
success = true;
} finally {
if (!success) {
if (infoStream != null) {
message("init: hit exception on init; releasing write lock");
}
try {
writeLock.release();
} catch (Throwable t) {
// don't mask the original exception
}
// BUGFIX: the local 'writeLock' shadows the field; clear the
// *field* here, else a later close/rollback could release the
// already-released lock a second time.
this.writeLock = null;
}
}
}
private synchronized void setRollbackSegmentInfos(SegmentInfos infos) {
// Snapshot the incoming infos so later changes to the live
// SegmentInfos cannot disturb the rollback point.
rollbackSegmentInfos = (SegmentInfos) infos.clone();
assert !rollbackSegmentInfos.hasExternalSegments(directory);
// Record each rolled-back SegmentInfo's position for fast lookup.
rollbackSegments = new HashMap<SegmentInfo,Integer>();
final int numSegments = rollbackSegmentInfos.size();
for (int idx = 0; idx < numSegments; idx++) {
rollbackSegments.put(rollbackSegmentInfos.info(idx), Integer.valueOf(idx));
}
}
/**
* Expert: set the merge policy used by this writer.
*/
public void setMergePolicy(MergePolicy mp) {
ensureOpen();
if (mp == null)
throw new NullPointerException("MergePolicy must be non-null");
// Close the previous policy unless the caller re-installed the
// same instance.
if (mergePolicy != mp)
mergePolicy.close();
mergePolicy = mp;
pushMaxBufferedDocs();
if (infoStream != null)
message("setMergePolicy " + mp);
}
/**
* Expert: returns the current MergePolicy in use by this writer.
* @see #setMergePolicy
*/
public MergePolicy getMergePolicy() {
ensureOpen();
return mergePolicy;
}
/**
* Expert: set the merge scheduler used by this writer.
*/
synchronized public void setMergeScheduler(MergeScheduler mergeScheduler) throws CorruptIndexException, IOException {
ensureOpen();
if (mergeScheduler == null)
throw new NullPointerException("MergeScheduler must be non-null");
if (this.mergeScheduler != mergeScheduler) {
// Wait for in-flight merges to finish, then close the old
// scheduler before swapping in the new one.
finishMerges(true);
this.mergeScheduler.close();
}
this.mergeScheduler = mergeScheduler;
if (infoStream != null)
message("setMergeScheduler " + mergeScheduler);
}
/**
* Expert: returns the current MergeScheduler in use by this
* writer.
* @see #setMergeScheduler
*/
public MergeScheduler getMergeScheduler() {
ensureOpen();
return mergeScheduler;
}
/** <p>Determines the largest segment (measured by
 * document count) that may be merged with other segments.
 * Small values (e.g., less than 10,000) are best for
 * interactive indexing, as this limits the length of
 * pauses while indexing to a few seconds. Larger values
 * are best for batched indexing and speedier
 * searches.</p>
 *
 * <p>The default value is {@link Integer#MAX_VALUE}.</p>
 *
 * <p>Note that this method is a convenience method: it
 * just calls mergePolicy.setMaxMergeDocs as long as
 * mergePolicy is an instance of {@link LogMergePolicy}.
 * Otherwise an IllegalArgumentException is thrown.</p>
 *
 * <p>The default merge policy ({@link
 * LogByteSizeMergePolicy}) also allows you to set this
 * limit by net size (in MB) of the segment, using {@link
 * LogByteSizeMergePolicy#setMaxMergeMB}.</p>
 *
 * @param maxMergeDocs largest segment, by document count, eligible for merging
 */
public void setMaxMergeDocs(int maxMergeDocs) {
getLogMergePolicy().setMaxMergeDocs(maxMergeDocs);
}
/**
 * <p>Returns the largest segment (measured by document
 * count) that may be merged with other segments.</p>
 *
 * <p>Note that this method is a convenience method: it
 * just calls mergePolicy.getMaxMergeDocs as long as
 * mergePolicy is an instance of {@link LogMergePolicy}.
 * Otherwise an IllegalArgumentException is thrown.</p>
 *
 * @return the current maxMergeDocs limit
 * @see #setMaxMergeDocs
 */
public int getMaxMergeDocs() {
return getLogMergePolicy().getMaxMergeDocs();
}
/**
 * Sets the maximum number of terms that will be indexed for a single
 * field in a document. This bounds the memory needed to index a single
 * document, so that very large documents cannot exhaust memory; it counts
 * running terms, not distinct terms.<p/>
 * <strong>Note:</strong> documents exceeding the limit are silently
 * truncated — terms past the limit are excluded from the index. If your
 * source documents are large, set this value high enough to accommodate
 * the expected size; Integer.MAX_VALUE removes the limit entirely (at the
 * risk of an OutOfMemoryError).<p/>
 * By default, no more than {@link #DEFAULT_MAX_FIELD_LENGTH} terms
 * will be indexed for a field.
 *
 * @param maxFieldLength maximum number of terms indexed per field
 */
public void setMaxFieldLength(int maxFieldLength) {
  ensureOpen();
  this.maxFieldLength = maxFieldLength;
  // Propagate the new limit to the in-memory indexing chain.
  docWriter.setMaxFieldLength(maxFieldLength);
  if (infoStream != null) {
    message("setMaxFieldLength " + maxFieldLength);
  }
}
/**
 * Returns the maximum number of terms that will be
 * indexed for a single field in a document.
 *
 * @return the current per-field term limit
 * @see #setMaxFieldLength
 */
public int getMaxFieldLength() {
ensureOpen();
return maxFieldLength;
}
/** Sets the termsIndexDivisor passed to any readers that
 * IndexWriter opens, for example when applying deletes
 * or creating a near-real-time reader in {@link
 * IndexWriter#getReader}. Default value is {@link
 * IndexReader#DEFAULT_TERMS_INDEX_DIVISOR}.
 *
 * @param divisor the divisor; must be at least 1
 * @throws IllegalArgumentException if {@code divisor} is not positive
 */
public void setReaderTermsIndexDivisor(int divisor) {
ensureOpen();
if (divisor <= 0) {
throw new IllegalArgumentException("divisor must be >= 1 (got " + divisor + ")");
}
readerTermsIndexDivisor = divisor;
if (infoStream != null) {
message("setReaderTermsIndexDivisor " + readerTermsIndexDivisor);
}
}
/**
 * Returns the termsIndexDivisor passed to readers this writer opens.
 * @see #setReaderTermsIndexDivisor(int)
 */
public int getReaderTermsIndexDivisor() {
ensureOpen();
return readerTermsIndexDivisor;
}
/** Determines the minimal number of documents required
 * before the buffered in-memory documents are flushed as
 * a new Segment. Large values generally gives faster
 * indexing.
 *
 * <p>When this is set, the writer flushes every
 * maxBufferedDocs added documents. Pass {@link
 * #DISABLE_AUTO_FLUSH} to disable flushing by document
 * count; if flushing by RAM usage is also enabled, the
 * flush triggers on whichever limit is hit first.</p>
 *
 * <p>Disabled by default (writer flushes by RAM usage).</p>
 *
 * @param maxBufferedDocs doc-count flush trigger, or {@link #DISABLE_AUTO_FLUSH}
 * @throws IllegalArgumentException if maxBufferedDocs is
 * enabled but smaller than 2, or it disables maxBufferedDocs
 * when ramBufferSize is already disabled
 * @see #setRAMBufferSizeMB
 */
public void setMaxBufferedDocs(int maxBufferedDocs) {
  ensureOpen();
  final boolean disabled = maxBufferedDocs == DISABLE_AUTO_FLUSH;
  if (!disabled && maxBufferedDocs < 2) {
    throw new IllegalArgumentException(
        "maxBufferedDocs must at least be 2 when enabled");
  }
  // At least one auto-flush trigger (doc count or RAM) must remain active.
  if (disabled && getRAMBufferSizeMB() == DISABLE_AUTO_FLUSH) {
    throw new IllegalArgumentException(
        "at least one of ramBufferSize and maxBufferedDocs must be enabled");
  }
  docWriter.setMaxBufferedDocs(maxBufferedDocs);
  pushMaxBufferedDocs();
  if (infoStream != null) {
    message("setMaxBufferedDocs " + maxBufferedDocs);
  }
}
/**
 * If we are flushing by doc count (not by RAM usage), and
 * using LogDocMergePolicy then push maxBufferedDocs down
 * as its minMergeDocs, to keep backwards compatibility.
 */
private void pushMaxBufferedDocs() {
  final int maxBufferedDocs = docWriter.getMaxBufferedDocs();
  if (maxBufferedDocs == DISABLE_AUTO_FLUSH) {
    // Flushing by RAM usage only; nothing to propagate.
    return;
  }
  final MergePolicy mp = mergePolicy;
  if (mp instanceof LogDocMergePolicy) {
    final LogDocMergePolicy lmp = (LogDocMergePolicy) mp;
    if (lmp.getMinMergeDocs() != maxBufferedDocs) {
      if (infoStream != null) {
        message("now push maxBufferedDocs " + maxBufferedDocs + " to LogDocMergePolicy");
      }
      lmp.setMinMergeDocs(maxBufferedDocs);
    }
  }
}
/**
 * Returns the number of buffered added documents that will
 * trigger a flush if enabled.
 *
 * @return the doc-count flush trigger, or {@link #DISABLE_AUTO_FLUSH}
 * @see #setMaxBufferedDocs
 */
public int getMaxBufferedDocs() {
ensureOpen();
return docWriter.getMaxBufferedDocs();
}
/** Determines the amount of RAM that may be used for
 * buffering added documents and deletions before they are
 * flushed to the Directory. Generally for faster
 * indexing performance it's best to flush by RAM usage
 * instead of document count and use as large a RAM buffer
 * as you can.
 *
 * <p>When set, the writer flushes whenever buffered
 * documents and deletions use this much RAM. Pass {@link
 * #DISABLE_AUTO_FLUSH} to disable flushing by RAM; if
 * flushing by document count is also enabled, the flush
 * triggers on whichever limit is hit first.</p>
 *
 * <p> <b>NOTE</b>: the accounting of RAM usage for pending
 * deletions is only approximate. In particular, Lucene
 * cannot measure the RAM usage of individual delete-by-Query
 * entries, so the estimate will be low; compensate by
 * calling commit() periodically yourself, or by using
 * {@link #setMaxBufferedDeleteTerms} to flush by count
 * instead of RAM usage (each buffered delete Query counts
 * as one).
 *
 * <p> <b>NOTE</b>: because IndexWriter uses
 * <code>int</code>s when managing its internal storage,
 * the absolute maximum value for this setting is somewhat
 * less than 2048 MB; set it comfortably under 2048.</p>
 *
 * <p> The default value is {@link #DEFAULT_RAM_BUFFER_SIZE_MB}.</p>
 *
 * @param mb buffer size in MB, or {@link #DISABLE_AUTO_FLUSH}
 * @throws IllegalArgumentException if ramBufferSize is
 * enabled but non-positive, or it disables ramBufferSize
 * when maxBufferedDocs is already disabled
 */
public void setRAMBufferSizeMB(double mb) {
  // Internal storage is int-addressed, so anywhere near 2048 MB overflows.
  if (mb > 2048.0) {
    throw new IllegalArgumentException("ramBufferSize " + mb + " is too large; should be comfortably less than 2048");
  }
  if (mb != DISABLE_AUTO_FLUSH && mb <= 0.0) {
    throw new IllegalArgumentException(
        "ramBufferSize should be > 0.0 MB when enabled");
  }
  // At least one auto-flush trigger (RAM or doc count) must remain active.
  if (mb == DISABLE_AUTO_FLUSH && getMaxBufferedDocs() == DISABLE_AUTO_FLUSH) {
    throw new IllegalArgumentException(
        "at least one of ramBufferSize and maxBufferedDocs must be enabled");
  }
  docWriter.setRAMBufferSizeMB(mb);
  if (infoStream != null) {
    message("setRAMBufferSizeMB " + mb);
  }
}
/**
 * Returns the value set by {@link #setRAMBufferSizeMB} if enabled.
 *
 * @return the RAM buffer size in MB, or {@link #DISABLE_AUTO_FLUSH}
 */
public double getRAMBufferSizeMB() {
return docWriter.getRAMBufferSizeMB();
}
/**
 * <p>Determines the minimal number of delete terms required before the buffered
 * in-memory delete terms are applied and flushed. If there are documents
 * buffered in memory at the time, they are merged and a new segment is
 * created.</p>
 * <p>Disabled by default (writer flushes by RAM usage).</p>
 *
 * @param maxBufferedDeleteTerms delete-term flush trigger, or {@link #DISABLE_AUTO_FLUSH}
 * @throws IllegalArgumentException if maxBufferedDeleteTerms
 * is enabled but smaller than 1
 * @see #setRAMBufferSizeMB
 */
public void setMaxBufferedDeleteTerms(int maxBufferedDeleteTerms) {
  ensureOpen();
  final boolean disabled = maxBufferedDeleteTerms == DISABLE_AUTO_FLUSH;
  if (!disabled && maxBufferedDeleteTerms < 1) {
    throw new IllegalArgumentException(
        "maxBufferedDeleteTerms must at least be 1 when enabled");
  }
  docWriter.setMaxBufferedDeleteTerms(maxBufferedDeleteTerms);
  if (infoStream != null) {
    message("setMaxBufferedDeleteTerms " + maxBufferedDeleteTerms);
  }
}
/**
 * Returns the number of buffered deleted terms that will
 * trigger a flush if enabled.
 *
 * @return the delete-term flush trigger, or {@link #DISABLE_AUTO_FLUSH}
 * @see #setMaxBufferedDeleteTerms
 */
public int getMaxBufferedDeleteTerms() {
ensureOpen();
return docWriter.getMaxBufferedDeleteTerms();
}
/** Determines how often segment indices are merged by addDocument(). With
 * smaller values, less RAM is used while indexing, and searches on
 * unoptimized indices are faster, but indexing speed is slower. With larger
 * values, more RAM is used during indexing, and while searches on unoptimized
 * indices are slower, indexing is faster. Thus larger values (&gt; 10) are best
 * for batch index creation, and smaller values (&lt; 10) for indices that are
 * interactively maintained.
 *
 * <p>Note that this method is a convenience method: it
 * just calls mergePolicy.setMergeFactor as long as
 * mergePolicy is an instance of {@link LogMergePolicy}.
 * Otherwise an IllegalArgumentException is thrown.</p>
 *
 * <p>This must never be less than 2. The default value is 10.
 *
 * @param mergeFactor the number of segments merged at once
 */
public void setMergeFactor(int mergeFactor) {
getLogMergePolicy().setMergeFactor(mergeFactor);
}
/**
 * <p>Returns the number of segments that are merged at
 * once and also controls the total number of segments
 * allowed to accumulate in the index.</p>
 *
 * <p>Note that this method is a convenience method: it
 * just calls mergePolicy.getMergeFactor as long as
 * mergePolicy is an instance of {@link LogMergePolicy}.
 * Otherwise an IllegalArgumentException is thrown.</p>
 *
 * @return the current merge factor
 * @see #setMergeFactor
 */
public int getMergeFactor() {
return getLogMergePolicy().getMergeFactor();
}
/** If non-null, this will be the default infoStream used
 * by a newly instantiated IndexWriter.
 *
 * @param infoStream stream for diagnostics, or null to disable by default
 * @see #setInfoStream
 */
public static void setDefaultInfoStream(PrintStream infoStream) {
IndexWriter.defaultInfoStream = infoStream;
}
/**
 * Returns the current default infoStream for newly
 * instantiated IndexWriters.
 *
 * @return the default diagnostics stream, or null if disabled
 * @see #setDefaultInfoStream
 */
public static PrintStream getDefaultInfoStream() {
return IndexWriter.defaultInfoStream;
}
/** If non-null, information about merges, deletes and a
 * message when maxFieldLength is reached will be printed
 * to this.
 *
 * @param infoStream stream for diagnostics, or null to disable
 */
public void setInfoStream(PrintStream infoStream) {
  ensureOpen();
  setMessageID(infoStream);
  // Fan the stream out to the components that also log diagnostics.
  docWriter.setInfoStream(infoStream);
  deleter.setInfoStream(infoStream);
  if (infoStream != null) {
    // Dump the current configuration so the log is self-describing.
    messageState();
  }
}
/** Logs the writer's current configuration to the infoStream;
 * called when a new infoStream is installed. */
private void messageState() {
  // Fixed typo in the emitted key: was "maxBuffereDeleteTerms".
  message("setInfoStream: dir=" + directory +
          " mergePolicy=" + mergePolicy +
          " mergeScheduler=" + mergeScheduler +
          " ramBufferSizeMB=" + docWriter.getRAMBufferSizeMB() +
          " maxBufferedDocs=" + docWriter.getMaxBufferedDocs() +
          " maxBufferedDeleteTerms=" + docWriter.getMaxBufferedDeleteTerms() +
          " maxFieldLength=" + maxFieldLength +
          " index=" + segString());
}
/**
 * Returns the current infoStream in use by this writer.
 *
 * @return the diagnostics stream, or null if disabled
 * @see #setInfoStream
 */
public PrintStream getInfoStream() {
ensureOpen();
return infoStream;
}
/** Returns true if verbosing is enabled (i.e., infoStream != null). */
public boolean verbose() {
return infoStream != null;
}
/**
 * Sets the maximum time to wait for a write lock (in milliseconds)
 * for this instance of IndexWriter.
 *
 * @param writeLockTimeout timeout in milliseconds
 * @see #setDefaultWriteLockTimeout to change the default value for all instances of IndexWriter
 */
public void setWriteLockTimeout(long writeLockTimeout) {
ensureOpen();
this.writeLockTimeout = writeLockTimeout;
}
/**
 * Returns allowed timeout when acquiring the write lock.
 *
 * @return timeout in milliseconds
 * @see #setWriteLockTimeout
 */
public long getWriteLockTimeout() {
ensureOpen();
return writeLockTimeout;
}
/**
 * Sets the default (for any instance of IndexWriter) maximum time to wait for a write lock (in
 * milliseconds).
 *
 * @param writeLockTimeout default timeout in milliseconds
 */
public static void setDefaultWriteLockTimeout(long writeLockTimeout) {
IndexWriter.WRITE_LOCK_TIMEOUT = writeLockTimeout;
}
/**
 * Returns default write lock timeout for newly
 * instantiated IndexWriters.
 *
 * @return default timeout in milliseconds
 * @see #setDefaultWriteLockTimeout
 */
public static long getDefaultWriteLockTimeout() {
return IndexWriter.WRITE_LOCK_TIMEOUT;
}
/**
 * Commits all changes to an index and closes all
 * associated files. Note that this may be a costly
 * operation, so, try to re-use a single writer instead of
 * closing and opening a new one. See {@link #commit()} for
 * caveats about write caching done by some IO devices.
 *
 * <p> If an Exception is hit during close, e.g. due to disk
 * full or some other reason, then both the on-disk index
 * and the internal state of the IndexWriter instance will
 * be consistent. However, the close will not be complete
 * even though part of it (flushing buffered documents)
 * may have succeeded, so the write lock will still be
 * held.</p>
 *
 * <p> If you can correct the underlying cause (e.g. free up
 * some disk space) then you can call close() again.
 * Failing that, if you want to force the write lock to be
 * released (dangerous, because you may then lose buffered
 * docs in the IndexWriter instance) then you can do
 * something like this:</p>
 *
 * <pre>
 * try {
 *   writer.close();
 * } finally {
 *   if (IndexWriter.isLocked(directory)) {
 *     IndexWriter.unlock(directory);
 *   }
 * }
 * </pre>
 *
 * after which, you must be certain not to use the writer
 * instance anymore.</p>
 *
 * <p><b>NOTE</b>: if this method hits an OutOfMemoryError
 * you should immediately close the writer, again. See <a
 * href="#OOME">above</a> for details.</p>
 *
 * @throws CorruptIndexException if the index is corrupt
 * @throws IOException if there is a low-level IO error
 */
public void close() throws CorruptIndexException, IOException {
// Waits for any background merges to finish before closing.
close(true);
}
/**
 * Closes the index with or without waiting for currently
 * running merges to finish. This is only meaningful when
 * using a MergeScheduler that runs merges in background
 * threads.
 *
 * <p><b>NOTE</b>: if this method hits an OutOfMemoryError
 * you should immediately close the writer, again. See <a
 * href="#OOME">above</a> for details.</p>
 *
 * <p><b>NOTE</b>: it is dangerous to always call
 * close(false), especially when IndexWriter is not open
 * for very long, because this can result in "merge
 * starvation" whereby long merges will never have a
 * chance to finish. This will cause too many segments in
 * your index over time.</p>
 *
 * @param waitForMerges if true, this call will block
 * until all merges complete; else, it will ask all
 * running merges to abort, wait until those merges have
 * finished (which should be at most a few seconds), and
 * then return.
 */
public void close(boolean waitForMerges) throws CorruptIndexException, IOException {
  // Only the first thread to arrive performs the close; others
  // wait for it or find the writer already closed.
  if (!shouldClose()) {
    return;
  }
  if (hitOOM) {
    // A prior OutOfMemoryError may have corrupted internal state
    // (IndexWriter or DocumentsWriter), so abort instead of committing.
    rollbackInternal();
  } else {
    closeInternal(waitForMerges);
  }
}
// Returns true if this thread should attempt to close, or
// false if IndexWriter is now closed; else, waits until
// another thread finishes closing
synchronized private boolean shouldClose() {
while(true) {
if (!closed) {
if (!closing) {
// We win: mark the close as in progress and do it ourselves.
closing = true;
return true;
} else {
// Another thread is presently trying to close;
// wait until it finishes one way (closes
// successfully) or another (fails to close)
doWait();
}
} else
// Already closed (possibly by the thread we waited on).
return false;
}
}
// Performs the actual close: flushes buffered docs, gives merges a
// final chance to run, commits, releases the write lock, and marks
// the writer closed. On failure, "closing" is reset so close() can
// be retried. Precondition: this thread won shouldClose().
private void closeInternal(boolean waitForMerges) throws CorruptIndexException, IOException {
docWriter.pauseAllThreads();
try {
if (infoStream != null)
message("now flush at close");
docWriter.close();
// Only allow a new merge to be triggered if we are
// going to wait for merges:
if (!hitOOM) {
flush(waitForMerges, true, true);
}
if (waitForMerges)
// Give merge scheduler last chance to run, in case
// any pending merges are waiting:
mergeScheduler.merge(this);
mergePolicy.close();
finishMerges(waitForMerges);
stopMerges = true;
mergeScheduler.close();
if (infoStream != null)
message("now call final commit()");
if (!hitOOM) {
commit(0);
}
if (infoStream != null)
message("at close: " + segString());
synchronized(this) {
readerPool.close();
docWriter = null;
deleter.close();
}
if (writeLock != null) {
writeLock.release(); // release write lock
writeLock = null;
}
synchronized(this) {
closed = true;
}
} catch (OutOfMemoryError oom) {
handleOOM(oom, "closeInternal");
} finally {
synchronized(this) {
// Always clear "closing" and wake threads blocked in shouldClose().
closing = false;
notifyAll();
if (!closed) {
// Close did not complete: resume indexing threads so the
// writer remains usable for a retry.
if (docWriter != null)
docWriter.resumeAllThreads();
if (infoStream != null)
message("hit exception while closing");
}
}
}
}
/** Tells the docWriter to close its currently open shared
 * doc stores (stored fields &amp; vectors files).
 * Return value specifies whether new doc store files are compound or not.
 */
private synchronized boolean flushDocStores() throws IOException {
boolean useCompoundDocStore = false;
String docStoreSegment;
boolean success = false;
try {
docStoreSegment = docWriter.closeDocStore();
success = true;
} finally {
if (!success && infoStream != null) {
message("hit exception closing doc store segment");
}
}
// Ask the merge policy whether the newly closed doc store
// should be packed into a single compound (.cfx) file.
useCompoundDocStore = mergePolicy.useCompoundDocStore(segmentInfos);
if (useCompoundDocStore && docStoreSegment != null && docWriter.closedFiles().size() != 0) {
// Now build compound doc store file
if (infoStream != null) {
message("create compound file " + docStoreSegment + "." + IndexFileNames.COMPOUND_FILE_STORE_EXTENSION);
}
success = false;
final int numSegments = segmentInfos.size();
final String compoundFileName = docStoreSegment + "." + IndexFileNames.COMPOUND_FILE_STORE_EXTENSION;
try {
CompoundFileWriter cfsWriter = new CompoundFileWriter(directory, compoundFileName);
for (final String file : docWriter.closedFiles() ) {
cfsWriter.addFile(file);
}
// Perform the merge
cfsWriter.close();
success = true;
} finally {
if (!success) {
// Building the CFS failed: remove the partial file and abort
// the doc writer so its state is discarded.
if (infoStream != null)
message("hit exception building compound file doc store for segment " + docStoreSegment);
deleter.deleteFile(compoundFileName);
docWriter.abort();
}
}
// Flag every segment sharing this doc store as now compound.
for(int i=0;i<numSegments;i++) {
SegmentInfo si = segmentInfos.info(i);
if (si.getDocStoreOffset() != -1 &&
si.getDocStoreSegment().equals(docStoreSegment))
si.setDocStoreIsCompoundFile(true);
}
checkpoint();
// In case the files we just merged into a CFS were
// not previously checkpointed:
deleter.deleteNewFiles(docWriter.closedFiles());
}
return useCompoundDocStore;
}
/** Returns the Directory used by this index.
 * @return the underlying {@link Directory}
 */
public Directory getDirectory() {
// Pass false because the flush during closing calls getDirectory
ensureOpen(false);
return directory;
}
/** Returns the analyzer used by this index.
 * @return the default {@link Analyzer} for this writer
 */
public Analyzer getAnalyzer() {
ensureOpen();
return analyzer;
}
/** Returns total number of docs in this index, including
 * docs not yet flushed (still in the RAM buffer),
 * not counting deletions.
 * @see #numDocs */
public synchronized int maxDoc() {
  // Docs still buffered in RAM count toward the total;
  // docWriter is null once the writer has been closed.
  int count = docWriter != null ? docWriter.getNumDocsInRAM() : 0;
  final int numSegments = segmentInfos.size();
  for (int i = 0; i < numSegments; i++) {
    count += segmentInfos.info(i).docCount;
  }
  return count;
}
/** Returns total number of docs in this index, including
 * docs not yet flushed (still in the RAM buffer), and
 * including deletions. <b>NOTE:</b> buffered deletions
 * are not counted. If you really need these to be
 * counted you should call {@link #commit()} first.
 * @see #numDocs */
public synchronized int numDocs() throws IOException {
  // Docs still buffered in RAM count toward the total;
  // docWriter is null once the writer has been closed.
  int count = docWriter != null ? docWriter.getNumDocsInRAM() : 0;
  final int numSegments = segmentInfos.size();
  for (int i = 0; i < numSegments; i++) {
    final SegmentInfo info = segmentInfos.info(i);
    // Subtract already-committed deletions from each segment.
    count += info.docCount - info.getDelCount();
  }
  return count;
}
/** Returns true if this index has any deletions, either
 * buffered in RAM or committed in a segment. */
public synchronized boolean hasDeletions() throws IOException {
  ensureOpen();
  if (docWriter.hasDeletes()) {
    return true;
  }
  final int numSegments = segmentInfos.size();
  for (int i = 0; i < numSegments; i++) {
    if (segmentInfos.info(i).hasDeletions()) {
      return true;
    }
  }
  return false;
}
/**
 * The maximum number of terms that will be indexed for a single field in a
 * document. This limits the amount of memory required for indexing, so that
 * collections with very large files will not crash the indexing process by
 * running out of memory.<p/>
 * Note that this effectively truncates large documents, excluding from the
 * index terms that occur further in the document. If you know your source
 * documents are large, be sure to set this value high enough to accommodate
 * the expected size. If you set it to Integer.MAX_VALUE, then the only limit
 * is your memory, but you should anticipate an OutOfMemoryError.<p/>
 * By default, no more than 10,000 terms will be indexed for a field.
 *
 * @see MaxFieldLength
 */
private int maxFieldLength;
/**
 * Adds a document to this index. If the document contains more than
 * {@link #setMaxFieldLength(int)} terms for a given field, the remainder are
 * discarded.
 *
 * <p> Note that if an Exception is hit (for example disk full)
 * then the index will be consistent, but this document
 * may not have been added. Furthermore, it's possible
 * the index will have one segment in non-compound format
 * even when using compound files (when a merge has
 * partially succeeded).</p>
 *
 * <p> This method periodically flushes pending documents
 * to the Directory (see <a href="#flush">above</a>), and
 * also periodically triggers segment merges in the index
 * according to the {@link MergePolicy} in use.</p>
 *
 * <p>Merges temporarily consume space in the
 * directory. The amount of space required is up to 1X the
 * size of all segments being merged, when no
 * readers/searchers are open against the index, and up to
 * 2X the size of all segments being merged when
 * readers/searchers are open against the index (see
 * {@link #optimize()} for details). The sequence of
 * primitive merge operations performed is governed by the
 * merge policy.
 *
 * <p>Note that each term in the document can be no longer
 * than 16383 characters, otherwise an
 * IllegalArgumentException will be thrown.</p>
 *
 * <p>Note that it's possible to create an invalid Unicode
 * string in java if a UTF16 surrogate pair is malformed.
 * In this case, the invalid characters are silently
 * replaced with the Unicode replacement character
 * U+FFFD.</p>
 *
 * <p><b>NOTE</b>: if this method hits an OutOfMemoryError
 * you should immediately close the writer. See <a
 * href="#OOME">above</a> for details.</p>
 *
 * @param doc the document to add
 * @throws CorruptIndexException if the index is corrupt
 * @throws IOException if there is a low-level IO error
 */
public void addDocument(Document doc) throws CorruptIndexException, IOException {
// Delegates to the analyzer configured on this writer.
addDocument(doc, analyzer);
}
/**
 * Adds a document to this index, using the provided analyzer instead of the
 * value of {@link #getAnalyzer()}. If the document contains more than
 * {@link #setMaxFieldLength(int)} terms for a given field, the remainder are
 * discarded.
 *
 * <p>See {@link #addDocument(Document)} for details on
 * index and IndexWriter state after an Exception, and
 * flushing/merging temporary free space requirements.</p>
 *
 * <p><b>NOTE</b>: if this method hits an OutOfMemoryError
 * you should immediately close the writer. See <a
 * href="#OOME">above</a> for details.</p>
 *
 * @param doc the document to add
 * @param analyzer the analyzer to use when analyzing the document
 * @throws CorruptIndexException if the index is corrupt
 * @throws IOException if there is a low-level IO error
 */
public void addDocument(Document doc, Analyzer analyzer) throws CorruptIndexException, IOException {
ensureOpen();
boolean doFlush = false;
boolean success = false;
try {
try {
doFlush = docWriter.addDocument(doc, analyzer);
success = true;
} finally {
if (!success) {
if (infoStream != null)
message("hit exception adding document");
synchronized (this) {
// If docWriter has some aborted files that were
// never incref'd, then we clean them up here
// (docWriter may be null if close() raced with us).
if (docWriter != null) {
final Collection<String> files = docWriter.abortedFiles();
if (files != null)
deleter.deleteNewFiles(files);
}
}
}
}
// docWriter signals when a RAM/doc-count threshold was crossed.
if (doFlush)
flush(true, false, false);
} catch (OutOfMemoryError oom) {
handleOOM(oom, "addDocument");
}
}
/**
 * Deletes the document(s) containing <code>term</code>.
 *
 * <p><b>NOTE</b>: if this method hits an OutOfMemoryError
 * you should immediately close the writer. See <a
 * href="#OOME">above</a> for details.</p>
 *
 * @param term the term to identify the documents to be deleted
 * @throws CorruptIndexException if the index is corrupt
 * @throws IOException if there is a low-level IO error
 */
public void deleteDocuments(Term term) throws CorruptIndexException, IOException {
  ensureOpen();
  try {
    // Buffer the delete; flush if a threshold was crossed.
    if (docWriter.bufferDeleteTerm(term)) {
      flush(true, false, false);
    }
  } catch (OutOfMemoryError oom) {
    handleOOM(oom, "deleteDocuments(Term)");
  }
}
/**
 * Deletes the document(s) containing any of the
 * terms. All deletes are flushed at the same time.
 *
 * <p><b>NOTE</b>: if this method hits an OutOfMemoryError
 * you should immediately close the writer. See <a
 * href="#OOME">above</a> for details.</p>
 *
 * @param terms array of terms to identify the documents
 * to be deleted
 * @throws CorruptIndexException if the index is corrupt
 * @throws IOException if there is a low-level IO error
 */
public void deleteDocuments(Term... terms) throws CorruptIndexException, IOException {
  ensureOpen();
  try {
    // Buffer all deletes atomically; flush if a threshold was crossed.
    if (docWriter.bufferDeleteTerms(terms)) {
      flush(true, false, false);
    }
  } catch (OutOfMemoryError oom) {
    handleOOM(oom, "deleteDocuments(Term..)");
  }
}
/**
 * Deletes the document(s) matching the provided query.
 *
 * <p><b>NOTE</b>: if this method hits an OutOfMemoryError
 * you should immediately close the writer. See <a
 * href="#OOME">above</a> for details.</p>
 *
 * @param query the query to identify the documents to be deleted
 * @throws CorruptIndexException if the index is corrupt
 * @throws IOException if there is a low-level IO error
 */
public void deleteDocuments(Query query) throws CorruptIndexException, IOException {
  ensureOpen();
  // Wrap in the same OutOfMemoryError handling as the Term-based
  // deleteDocuments variants, as promised by the javadoc NOTE above.
  try {
    boolean doFlush = docWriter.bufferDeleteQuery(query);
    if (doFlush)
      flush(true, false, false);
  } catch (OutOfMemoryError oom) {
    handleOOM(oom, "deleteDocuments(Query)");
  }
}
/**
 * Deletes the document(s) matching any of the provided queries.
 * All deletes are flushed at the same time.
 *
 * <p><b>NOTE</b>: if this method hits an OutOfMemoryError
 * you should immediately close the writer. See <a
 * href="#OOME">above</a> for details.</p>
 *
 * @param queries array of queries to identify the documents
 * to be deleted
 * @throws CorruptIndexException if the index is corrupt
 * @throws IOException if there is a low-level IO error
 */
public void deleteDocuments(Query... queries) throws CorruptIndexException, IOException {
  ensureOpen();
  // Wrap in the same OutOfMemoryError handling as the Term-based
  // deleteDocuments variants, as promised by the javadoc NOTE above.
  try {
    boolean doFlush = docWriter.bufferDeleteQueries(queries);
    if (doFlush)
      flush(true, false, false);
  } catch (OutOfMemoryError oom) {
    handleOOM(oom, "deleteDocuments(Query..)");
  }
}
/**
 * Updates a document by first deleting the document(s)
 * containing <code>term</code> and then adding the new
 * document. The delete and then add are atomic as seen
 * by a reader on the same index (flush may happen only after
 * the add).
 *
 * <p><b>NOTE</b>: if this method hits an OutOfMemoryError
 * you should immediately close the writer. See <a
 * href="#OOME">above</a> for details.</p>
 *
 * @param term the term to identify the document(s) to be
 * deleted
 * @param doc the document to be added
 * @throws CorruptIndexException if the index is corrupt
 * @throws IOException if there is a low-level IO error
 */
public void updateDocument(Term term, Document doc) throws CorruptIndexException, IOException {
ensureOpen();
// Delegates to the analyzer configured on this writer.
updateDocument(term, doc, getAnalyzer());
}
/**
 * Updates a document by first deleting the document(s)
 * containing <code>term</code> and then adding the new
 * document. The delete and then add are atomic as seen
 * by a reader on the same index (flush may happen only after
 * the add).
 *
 * <p><b>NOTE</b>: if this method hits an OutOfMemoryError
 * you should immediately close the writer. See <a
 * href="#OOME">above</a> for details.</p>
 *
 * @param term the term to identify the document(s) to be
 * deleted
 * @param doc the document to be added
 * @param analyzer the analyzer to use when analyzing the document
 * @throws CorruptIndexException if the index is corrupt
 * @throws IOException if there is a low-level IO error
 */
public void updateDocument(Term term, Document doc, Analyzer analyzer)
    throws CorruptIndexException, IOException {
  ensureOpen();
  try {
    boolean doFlush = false;
    boolean success = false;
    try {
      doFlush = docWriter.updateDocument(term, doc, analyzer);
      success = true;
    } finally {
      if (!success) {
        if (infoStream != null)
          message("hit exception updating document");
        synchronized (this) {
          // If docWriter has some aborted files that were
          // never incref'd, then we clean them up here.
          // Guard against docWriter having been nulled by a
          // concurrent close, matching addDocument's cleanup path.
          if (docWriter != null) {
            final Collection<String> files = docWriter.abortedFiles();
            if (files != null)
              deleter.deleteNewFiles(files);
          }
        }
      }
    }
    // docWriter signals when a RAM/doc-count threshold was crossed.
    if (doFlush)
      flush(true, false, false);
  } catch (OutOfMemoryError oom) {
    handleOOM(oom, "updateDocument");
  }
}
// for test purpose: number of segments currently in the index
final synchronized int getSegmentCount(){
return segmentInfos.size();
}
// for test purpose: number of docs buffered in RAM, not yet flushed
final synchronized int getNumBufferedDocuments(){
return docWriter.getNumDocsInRAM();
}
// for test purpose: doc count of segment i, or -1 if i is out of range
final synchronized int getDocCount(int i) {
  if (i < 0 || i >= segmentInfos.size()) {
    return -1;
  }
  return segmentInfos.info(i).docCount;
}
// for test purpose: number of flushes performed so far
final synchronized int getFlushCount() {
return flushCount;
}
// for test purpose: number of times buffered deletes were flushed
final synchronized int getFlushDeletesCount() {
return flushDeletesCount;
}
// Allocates the next unique segment name ("_" + base-36 counter).
final String newSegmentName() {
// Cannot synchronize on IndexWriter because that causes
// deadlock
synchronized(segmentInfos) {
// Important to increment changeCount so that the
// segmentInfos is written on close. Otherwise we
// could close, re-open and re-return the same segment
// name that was previously returned which can cause
// problems at least with ConcurrentMergeScheduler.
changeCount++;
return "_" + Integer.toString(segmentInfos.counter++, Character.MAX_RADIX);
}
}
/** If non-null, information about merges will be printed to this.
 */
private PrintStream infoStream = null;
// Default diagnostics stream applied to newly created IndexWriters.
private static PrintStream defaultInfoStream = null;
/**
* Requests an "optimize" operation on an index, priming the index
* for the fastest available search. Traditionally this has meant
* merging all segments into a single segment as is done in the
* default merge policy, but individual merge policies may implement
* optimize in different ways.
*
* <p>It is recommended that this method be called upon completion of indexing. In
* environments with frequent updates, optimize is best done during low volume times, if at all.
*
* </p>
* <p>See http://www.gossamer-threads.com/lists/lucene/java-dev/47895 for more discussion. </p>
*
* <p>Note that optimize requires 2X the index size free
* space in your Directory. For example, if your index
* size is 10 MB then you need 20 MB free for optimize to
* complete.</p>
*
* <p>If some but not all readers re-open while an
* optimize is underway, this will cause > 2X temporary
* space to be consumed as those new readers will then
* hold open the partially optimized segments at that
* time. It is best not to re-open readers while optimize
* is running.</p>
*
* <p>The actual temporary usage could be much less than
* these figures (it depends on many factors).</p>
*
* <p>In general, once the optimize completes, the total size of the
* index will be less than the size of the starting index.
* It could be quite a bit smaller (if there were many
* pending deletes) or just slightly smaller.</p>
*
* <p>If an Exception is hit during optimize(), for example
* due to disk full, the index will not be corrupt and no
* documents will have been lost. However, it may have
* been partially optimized (some segments were merged but
* not all), and it's possible that one of the segments in
* the index will be in non-compound format even when
* using compound file format. This will occur when the
* Exception is hit during conversion of the segment into
* compound format.</p>
*
* <p>This call will optimize those segments present in
* the index when the call started. If other threads are
* still adding documents and flushing segments, those
* newly created segments will not be optimized unless you
* call optimize again.</p>
*
* <p><b>NOTE</b>: if this method hits an OutOfMemoryError
* you should immediately close the writer. See <a
* href="#OOME">above</a> for details.</p>
*
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
* @see LogMergePolicy#findMergesForOptimize
*/
  public void optimize() throws CorruptIndexException, IOException {
    // Blocking form: delegates with doWait=true so we do not
    // return until the optimize completes.
    optimize(true);
  }
/**
   * Optimize the index down to &lt;= maxNumSegments.  If
* maxNumSegments==1 then this is the same as {@link
* #optimize()}.
*
* <p><b>NOTE</b>: if this method hits an OutOfMemoryError
* you should immediately close the writer. See <a
* href="#OOME">above</a> for details.</p>
*
* @param maxNumSegments maximum number of segments left
* in the index after optimization finishes
*/
  public void optimize(int maxNumSegments) throws CorruptIndexException, IOException {
    // Blocking form: waits for the requested merges to finish.
    optimize(maxNumSegments, true);
  }
/** Just like {@link #optimize()}, except you can specify
* whether the call should block until the optimize
* completes. This is only meaningful with a
* {@link MergeScheduler} that is able to run merges in
* background threads.
*
* <p><b>NOTE</b>: if this method hits an OutOfMemoryError
* you should immediately close the writer. See <a
* href="#OOME">above</a> for details.</p>
*/
  public void optimize(boolean doWait) throws CorruptIndexException, IOException {
    // Full optimize: reduce the index down to a single segment.
    optimize(1, doWait);
  }
/** Just like {@link #optimize(int)}, except you can
* specify whether the call should block until the
* optimize completes. This is only meaningful with a
* {@link MergeScheduler} that is able to run merges in
* background threads.
*
* <p><b>NOTE</b>: if this method hits an OutOfMemoryError
* you should immediately close the writer. See <a
* href="#OOME">above</a> for details.</p>
*/
  public void optimize(int maxNumSegments, boolean doWait) throws CorruptIndexException, IOException {
    ensureOpen();

    if (maxNumSegments < 1)
      throw new IllegalArgumentException("maxNumSegments must be >= 1; got " + maxNumSegments);

    if (infoStream != null)
      message("optimize: index now " + segString());

    // Flush any buffered docs/deletes first so the set of
    // segments to optimize reflects everything added so far:
    flush(true, false, true);

    synchronized(this) {
      resetMergeExceptions();
      // Snapshot the current segments; only these are required
      // to be merged down (segments flushed later are not):
      segmentsToOptimize = new HashSet<SegmentInfo>();
      final int numSegments = segmentInfos.size();
      for(int i=0;i<numSegments;i++)
        segmentsToOptimize.add(segmentInfos.info(i));

      // Now mark all pending & running merges as optimize
      // merge:
      for(final MergePolicy.OneMerge merge : pendingMerges) {
        merge.optimize = true;
        merge.maxNumSegmentsOptimize = maxNumSegments;
      }

      for ( final MergePolicy.OneMerge merge: runningMerges ) {
        merge.optimize = true;
        merge.maxNumSegmentsOptimize = maxNumSegments;
      }
    }

    // Ask the merge policy for the optimize merges and hand
    // them to the scheduler:
    maybeMerge(maxNumSegments, true);

    if (doWait) {
      synchronized(this) {
        while(true) {
          if (hitOOM) {
            throw new IllegalStateException("this writer hit an OutOfMemoryError; cannot complete optimize");
          }

          if (mergeExceptions.size() > 0) {
            // Forward any exceptions in background merge
            // threads to the current thread:
            final int size = mergeExceptions.size();
            for(int i=0;i<size;i++) {
              final MergePolicy.OneMerge merge = mergeExceptions.get(i);
              if (merge.optimize) {
                IOException err = new IOException("background merge hit exception: " + merge.segString(directory));
                final Throwable t = merge.getException();
                if (t != null)
                  err.initCause(t);
                throw err;
              }
            }
          }

          // Wait on this monitor; finishing merges notifyAll():
          if (optimizeMergesPending())
            doWait();
          else
            break;
        }
      }

      // If close is called while we are still
      // running, throw an exception so the calling
      // thread will know the optimize did not
      // complete
      ensureOpen();
    }

    // NOTE: in the ConcurrentMergeScheduler case, when
    // doWait is false, we can return immediately while
    // background threads accomplish the optimization
  }
/** Returns true if any merges in pendingMerges or
* runningMerges are optimization merges. */
private synchronized boolean optimizeMergesPending() {
for (final MergePolicy.OneMerge merge : pendingMerges) {
if (merge.optimize)
return true;
}
for (final MergePolicy.OneMerge merge : runningMerges) {
if (merge.optimize)
return true;
}
return false;
}
/** Just like {@link #expungeDeletes()}, except you can
* specify whether the call should block until the
* operation completes. This is only meaningful with a
* {@link MergeScheduler} that is able to run merges in
* background threads.
*
* <p><b>NOTE</b>: if this method hits an OutOfMemoryError
* you should immediately close the writer. See <a
* href="#OOME">above</a> for details.</p>
*/
  public void expungeDeletes(boolean doWait)
    throws CorruptIndexException, IOException {
    ensureOpen();

    if (infoStream != null)
      message("expungeDeletes: index now " + segString());

    MergePolicy.MergeSpecification spec;

    synchronized(this) {
      // Ask the merge policy which merges would purge deletes,
      // then queue each one as pending:
      spec = mergePolicy.findMergesToExpungeDeletes(segmentInfos);
      if (spec != null) {
        final int numMerges = spec.merges.size();
        for(int i=0;i<numMerges;i++)
          registerMerge(spec.merges.get(i));
      }
    }

    // Kick off the registered merges (possibly in background
    // threads, depending on the MergeScheduler):
    mergeScheduler.merge(this);

    if (spec != null && doWait) {
      final int numMerges = spec.merges.size();
      synchronized(this) {
        boolean running = true;
        while(running) {

          if (hitOOM) {
            throw new IllegalStateException("this writer hit an OutOfMemoryError; cannot complete expungeDeletes");
          }

          // Check each merge that MergePolicy asked us to
          // do, to see if any of them are still running and
          // if any of them have hit an exception.
          running = false;
          for(int i=0;i<numMerges;i++) {
            final MergePolicy.OneMerge merge = spec.merges.get(i);
            if (pendingMerges.contains(merge) || runningMerges.contains(merge))
              running = true;
            Throwable t = merge.getException();
            if (t != null) {
              IOException ioe = new IOException("background merge hit exception: " + merge.segString(directory));
              ioe.initCause(t);
              throw ioe;
            }
          }

          // If any of our merges are still running, wait:
          if (running)
            doWait();
        }
      }
    }

    // NOTE: in the ConcurrentMergeScheduler case, when
    // doWait is false, we can return immediately while
    // background threads accomplish the optimization
  }
/** Expunges all deletes from the index. When an index
* has many document deletions (or updates to existing
* documents), it's best to either call optimize or
* expungeDeletes to remove all unused data in the index
* associated with the deleted documents. To see how
* many deletions you have pending in your index, call
* {@link IndexReader#numDeletedDocs}
* This saves disk space and memory usage while
* searching. expungeDeletes should be somewhat faster
* than optimize since it does not insist on reducing the
* index to a single segment (though, this depends on the
* {@link MergePolicy}; see {@link
* MergePolicy#findMergesToExpungeDeletes}.). Note that
* this call does not first commit any buffered
* documents, so you must do so yourself if necessary.
* See also {@link #expungeDeletes(boolean)}
*
* <p><b>NOTE</b>: if this method hits an OutOfMemoryError
* you should immediately close the writer. See <a
* href="#OOME">above</a> for details.</p>
*/
  public void expungeDeletes() throws CorruptIndexException, IOException {
    // Blocking form: waits for the expunge merges to finish.
    expungeDeletes(true);
  }
/**
* Expert: asks the mergePolicy whether any merges are
* necessary now and if so, runs the requested merges and
* then iterate (test again if merges are needed) until no
* more merges are returned by the mergePolicy.
*
* Explicit calls to maybeMerge() are usually not
* necessary. The most common case is when merge policy
* parameters have changed.
*
* <p><b>NOTE</b>: if this method hits an OutOfMemoryError
* you should immediately close the writer. See <a
* href="#OOME">above</a> for details.</p>
*/
  public final void maybeMerge() throws CorruptIndexException, IOException {
    // Ordinary (non-optimize) merge check.
    maybeMerge(false);
  }
  private final void maybeMerge(boolean optimize) throws CorruptIndexException, IOException {
    // maxNumSegmentsOptimize=1 is only consulted downstream when
    // optimize is true.
    maybeMerge(1, optimize);
  }
  private final void maybeMerge(int maxNumSegmentsOptimize, boolean optimize) throws CorruptIndexException, IOException {
    // First compute & register any needed merges, then hand the
    // pending queue to the scheduler (which may run them in
    // background threads):
    updatePendingMerges(maxNumSegmentsOptimize, optimize);
    mergeScheduler.merge(this);
  }
private synchronized void updatePendingMerges(int maxNumSegmentsOptimize, boolean optimize)
throws CorruptIndexException, IOException {
assert !optimize || maxNumSegmentsOptimize > 0;
if (stopMerges)
return;
// Do not start new merges if we've hit OOME
if (hitOOM) {
return;
}
final MergePolicy.MergeSpecification spec;
if (optimize) {
spec = mergePolicy.findMergesForOptimize(segmentInfos, maxNumSegmentsOptimize, segmentsToOptimize);
if (spec != null) {
final int numMerges = spec.merges.size();
for(int i=0;i<numMerges;i++) {
final MergePolicy.OneMerge merge = ( spec.merges.get(i));
merge.optimize = true;
merge.maxNumSegmentsOptimize = maxNumSegmentsOptimize;
}
}
} else
spec = mergePolicy.findMerges(segmentInfos);
if (spec != null) {
final int numMerges = spec.merges.size();
for(int i=0;i<numMerges;i++)
registerMerge(spec.merges.get(i));
}
}
/** Expert: the {@link MergeScheduler} calls this method
* to retrieve the next merge requested by the
* MergePolicy */
synchronized MergePolicy.OneMerge getNextMerge() {
if (pendingMerges.size() == 0)
return null;
else {
// Advance the merge from pending to running
MergePolicy.OneMerge merge = pendingMerges.removeFirst();
runningMerges.add(merge);
return merge;
}
}
/** Like getNextMerge() except only returns a merge if it's
* external. */
private synchronized MergePolicy.OneMerge getNextExternalMerge() {
if (pendingMerges.size() == 0)
return null;
else {
Iterator<MergePolicy.OneMerge> it = pendingMerges.iterator();
while(it.hasNext()) {
MergePolicy.OneMerge merge = it.next();
if (merge.isExternal) {
// Advance the merge from pending to running
it.remove();
runningMerges.add(merge);
return merge;
}
}
// All existing merges do not involve external segments
return null;
}
}
/*
* Begin a transaction. During a transaction, any segment
* merges that happen (or ram segments flushed) will not
* write a new segments file and will not remove any files
* that were present at the start of the transaction. You
* must make a matched (try/finally) call to
* commitTransaction() or rollbackTransaction() to finish
* the transaction.
*
* Note that buffered documents and delete terms are not handled
* within the transactions, so they must be flushed before the
* transaction is started.
*/
  private synchronized void startTransaction(boolean haveReadLock) throws IOException {
    // Begins an internal transaction (see comment block above).
    // haveReadLock says the caller already holds the read lock,
    // which is upgraded to the write lock here (or released if
    // we hit an exception first).  On normal return the write
    // lock is held; commitTransaction()/rollbackTransaction()
    // release it via finishAddIndexes().

    boolean success = false;
    try {
      if (infoStream != null)
        message("now start transaction");

      assert docWriter.getNumBufferedDeleteTerms() == 0 :
      "calling startTransaction with buffered delete terms not supported: numBufferedDeleteTerms=" + docWriter.getNumBufferedDeleteTerms();
      assert docWriter.getNumDocsInRAM() == 0 :
      "calling startTransaction with buffered documents not supported: numDocsInRAM=" + docWriter.getNumDocsInRAM();

      ensureOpen();

      // If a transaction is trying to roll back (because
      // addIndexes hit an exception) then wait here until
      // that's done:
      synchronized(this) {
        while(stopMerges)
          doWait();
      }
      success = true;
    } finally {
      // Release the read lock if our caller held it, on
      // hitting an exception
      if (!success && haveReadLock)
        releaseRead();
    }

    if (haveReadLock) {
      upgradeReadToWrite();
    } else {
      acquireWrite();
    }

    success = false;
    try {
      // Snapshot the current segments so rollbackTransaction can
      // restore them:
      localRollbackSegmentInfos = (SegmentInfos) segmentInfos.clone();

      assert !hasExternalSegments();

      localFlushedDocCount = docWriter.getFlushedDocCount();

      // We must "protect" our files at this point from
      // deletion in case we need to rollback:
      deleter.incRef(segmentInfos, false);

      success = true;
    } finally {
      if (!success)
        finishAddIndexes();
    }
  }
/*
* Rolls back the transaction and restores state to where
* we were at the start.
*/
  private synchronized void rollbackTransaction() throws IOException {
    // Restores segmentInfos, the flushed doc count and the
    // deleter's ref counts to the state captured by
    // startTransaction, then releases the write lock.

    if (infoStream != null)
      message("now rollback transaction");

    if (docWriter != null) {
      docWriter.setFlushedDocCount(localFlushedDocCount);
    }

    // Must finish merges before rolling back segmentInfos
    // so merges don't hit exceptions on trying to commit
    // themselves, don't get files deleted out from under
    // them, etc:
    finishMerges(false);

    // Keep the same segmentInfos instance but replace all
    // of its SegmentInfo instances.  This is so the next
    // attempt to commit using this instance of IndexWriter
    // will always write to a new generation ("write once").
    segmentInfos.clear();
    segmentInfos.addAll(localRollbackSegmentInfos);
    localRollbackSegmentInfos = null;

    // This must come after we rollback segmentInfos, so
    // that if a commit() kicks off it does not see the
    // segmentInfos with external segments
    finishAddIndexes();

    // Ask deleter to locate unreferenced files we had
    // created & remove them:
    deleter.checkpoint(segmentInfos, false);

    // Remove the incRef we did in startTransaction:
    deleter.decRef(segmentInfos);

    // Also ask deleter to remove any newly created files
    // that were never incref'd; this "garbage" is created
    // when a merge kicks off but aborts part way through
    // before it had a chance to incRef the files it had
    // partially created
    deleter.refresh();

    // Wake up any threads waiting on this monitor (e.g. in
    // startTransaction waiting for stopMerges to clear):
    notifyAll();

    assert !hasExternalSegments();
  }
/*
* Commits the transaction. This will write the new
* segments file and remove and pending deletions we have
* accumulated during the transaction
*/
  private synchronized void commitTransaction() throws IOException {
    // Makes the changes accumulated during the transaction
    // permanent in segmentInfos, drops the protective file refs
    // taken in startTransaction, and releases the write lock.

    if (infoStream != null)
      message("now commit transaction");

    // Give deleter a chance to remove files now:
    checkpoint();

    // Remove the incRef we did in startTransaction.
    deleter.decRef(localRollbackSegmentInfos);
    localRollbackSegmentInfos = null;

    assert !hasExternalSegments();

    finishAddIndexes();
  }
/**
* Close the <code>IndexWriter</code> without committing
* any changes that have occurred since the last commit
* (or since it was opened, if commit hasn't been called).
* This removes any temporary files that had been created,
* after which the state of the index will be the same as
* it was when commit() was last called or when this
* writer was first opened. This also clears a previous
* call to {@link #prepareCommit}.
* @throws IOException if there is a low-level IO error
*/
  public void rollback() throws IOException {
    ensureOpen();

    // Ensure that only one thread actually gets to do the closing:
    if (shouldClose())
      rollbackInternal();
  }
  private void rollbackInternal() throws IOException {
    // Discards all changes since the last commit and closes the
    // writer.  Only reached once shouldClose() has elected this
    // thread to perform the close.

    boolean success = false;

    docWriter.pauseAllThreads();

    try {
      finishMerges(false);

      // Must pre-close these two, in case they increment
      // changeCount so that we can then set it to false
      // before calling closeInternal
      mergePolicy.close();
      mergeScheduler.close();

      synchronized(this) {

        if (pendingCommit != null) {
          // A prepareCommit() is outstanding: abandon it.
          pendingCommit.rollbackCommit(directory);
          deleter.decRef(pendingCommit);
          pendingCommit = null;
          notifyAll();
        }

        // Keep the same segmentInfos instance but replace all
        // of its SegmentInfo instances.  This is so the next
        // attempt to commit using this instance of IndexWriter
        // will always write to a new generation ("write
        // once").
        segmentInfos.clear();
        segmentInfos.addAll(rollbackSegmentInfos);

        assert !hasExternalSegments();

        // Drop all buffered docs & deletes:
        docWriter.abort();

        assert testPoint("rollback before checkpoint");

        // Ask deleter to locate unreferenced files & remove
        // them:
        deleter.checkpoint(segmentInfos, false);
        deleter.refresh();
      }

      // Don't bother saving any changes in our segmentInfos
      readerPool.clear(null);

      lastCommitChangeCount = changeCount;

      success = true;
    } catch (OutOfMemoryError oom) {
      handleOOM(oom, "rollbackInternal");
    } finally {
      synchronized(this) {
        if (!success) {
          // Leave the writer in a consistent, re-closeable state
          // after a failed rollback attempt:
          docWriter.resumeAllThreads();
          closing = false;
          notifyAll();
          if (infoStream != null)
            message("hit exception during rollback");
        }
      }
    }

    closeInternal(false);
  }
/**
* Delete all documents in the index.
*
* <p>This method will drop all buffered documents and will
* remove all segments from the index. This change will not be
* visible until a {@link #commit()} has been called. This method
* can be rolled back using {@link #rollback()}.</p>
*
* <p>NOTE: this method is much faster than using deleteDocuments( new MatchAllDocsQuery() ).</p>
*
* <p>NOTE: this method will forcefully abort all merges
* in progress. If other threads are running {@link
* #optimize()} or any of the addIndexes methods, they
* will receive {@link MergePolicy.MergeAbortedException}s.
*/
public synchronized void deleteAll() throws IOException {
docWriter.pauseAllThreads();
try {
// Abort any running merges
finishMerges(false);
// Remove any buffered docs
docWriter.abort();
docWriter.setFlushedDocCount(0);
// Remove all segments
segmentInfos.clear();
// Ask deleter to locate unreferenced files & remove them:
deleter.checkpoint(segmentInfos, false);
deleter.refresh();
// Don't bother saving any changes in our segmentInfos
readerPool.clear(null);
// Mark that the index has changed
++changeCount;
} catch (OutOfMemoryError oom) {
handleOOM(oom, "deleteAll");
} finally {
docWriter.resumeAllThreads();
if (infoStream != null) {
message("hit exception during deleteAll");
}
}
}
  private synchronized void finishMerges(boolean waitForMerges) throws IOException {
    // If waitForMerges is false, aborts every pending & running
    // merge; otherwise blocks until all merges finish normally.

    if (!waitForMerges) {

      stopMerges = true;

      // Abort all pending & running merges:
      for (final MergePolicy.OneMerge merge : pendingMerges) {
        if (infoStream != null)
          message("now abort pending merge " + merge.segString(directory));
        merge.abort();
        mergeFinish(merge);
      }
      pendingMerges.clear();

      for (final MergePolicy.OneMerge merge : runningMerges) {
        if (infoStream != null)
          message("now abort running merge " + merge.segString(directory));
        merge.abort();
      }

      // Ensure any running addIndexes finishes.  It's fine
      // if a new one attempts to start because its merges
      // will quickly see the stopMerges == true and abort.
      acquireRead();
      releaseRead();

      // These merges periodically check whether they have
      // been aborted, and stop if so.  We wait here to make
      // sure they all stop.  It should not take very long
      // because the merge threads periodically check if
      // they are aborted.
      while(runningMerges.size() > 0) {
        if (infoStream != null)
          message("now wait for " + runningMerges.size() + " running merge to abort");
        doWait();
      }

      stopMerges = false;
      notifyAll();

      assert 0 == mergingSegments.size();

      if (infoStream != null)
        message("all running merges have aborted");

    } else {
      // waitForMerges() will ensure any running addIndexes finishes.
      // It's fine if a new one attempts to start because from our
      // caller above the call will see that we are in the
      // process of closing, and will throw an
      // AlreadyClosedException.
      waitForMerges();
    }
  }
/**
* Wait for any currently outstanding merges to finish.
*
* <p>It is guaranteed that any merges started prior to calling this method
* will have completed once this method completes.</p>
*/
public synchronized void waitForMerges() {
// Ensure any running addIndexes finishes.
acquireRead();
releaseRead();
while(pendingMerges.size() > 0 || runningMerges.size() > 0) {
doWait();
}
// sanity check
assert 0 == mergingSegments.size();
}
/*
* Called whenever the SegmentInfos has been updated and
* the index files referenced exist (correctly) in the
* index directory.
*/
  private synchronized void checkpoint() throws IOException {
    // Record that the in-memory index state changed, then let
    // the deleter reconcile file refs against the new state
    // (possibly deleting now-unreferenced files):
    changeCount++;
    deleter.checkpoint(segmentInfos, false);
  }
  private void finishAddIndexes() {
    // Releases the write lock taken by startTransaction so other
    // addIndexes/transactions may proceed.
    releaseWrite();
  }
  private void blockAddIndexes(boolean includePendingClose) {
    // Takes the read lock, which blocks any concurrent
    // addIndexes (those require the write lock).

    acquireRead();

    boolean success = false;
    try {

      // Make sure we are still open since we could have
      // waited quite a while for last addIndexes to finish
      ensureOpen(includePendingClose);
      success = true;
    } finally {
      // On failure, drop the read lock we just acquired so we
      // don't block future writers:
      if (!success)
        releaseRead();
    }
  }
  private void resumeAddIndexes() {
    // Counterpart to blockAddIndexes: releases the read lock.
    releaseRead();
  }
  private synchronized void resetMergeExceptions() {
    // Drop previously recorded merge failures and bump the
    // generation so in-flight merges report into the new list.
    mergeExceptions = new ArrayList<MergePolicy.OneMerge>();
    mergeGen++;
  }
private void noDupDirs(Directory... dirs) {
HashSet<Directory> dups = new HashSet<Directory>();
for(int i=0;i<dirs.length;i++) {
if (dups.contains(dirs[i]))
throw new IllegalArgumentException("Directory " + dirs[i] + " appears more than once");
if (dirs[i] == directory)
throw new IllegalArgumentException("Cannot add directory to itself");
dups.add(dirs[i]);
}
}
/**
* Merges all segments from an array of indexes into this
* index.
*
* <p>This may be used to parallelize batch indexing. A large document
* collection can be broken into sub-collections. Each sub-collection can be
* indexed in parallel, on a different thread, process or machine. The
* complete index can then be created by merging sub-collection indexes
* with this method.
*
* <p><b>NOTE:</b> the index in each Directory must not be
* changed (opened by a writer) while this method is
* running. This method does not acquire a write lock in
* each input Directory, so it is up to the caller to
* enforce this.
*
* <p><b>NOTE:</b> while this is running, any attempts to
* add or delete documents (with another thread) will be
* paused until this method completes.
*
* <p>This method is transactional in how Exceptions are
* handled: it does not commit a new segments_N file until
* all indexes are added. This means if an Exception
* occurs (for example disk full), then either no indexes
* will have been added or they all will have been.</p>
*
* <p>Note that this requires temporary free space in the
* Directory up to 2X the sum of all input indexes
* (including the starting index). If readers/searchers
* are open against the starting index, then temporary
* free space required will be higher by the size of the
* starting index (see {@link #optimize()} for details).
* </p>
*
* <p>Once this completes, the final size of the index
* will be less than the sum of all input index sizes
* (including the starting index). It could be quite a
* bit smaller (if there were many pending deletes) or
* just slightly smaller.</p>
*
* <p>
* This requires this index not be among those to be added.
*
* <p><b>NOTE</b>: if this method hits an OutOfMemoryError
* you should immediately close the writer. See <a
* href="#OOME">above</a> for details.</p>
*
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
*/
  public void addIndexesNoOptimize(Directory... dirs)
      throws CorruptIndexException, IOException {

    ensureOpen();

    noDupDirs(dirs);

    // Do not allow add docs or deletes while we are running:
    docWriter.pauseAllThreads();

    try {
      if (infoStream != null)
        message("flush at addIndexesNoOptimize");
      flush(true, false, true);

      boolean success = false;

      // All changes below happen transactionally: either all
      // external segments are added & resolved, or none are.
      startTransaction(false);

      try {

        int docCount = 0;
        synchronized(this) {
          ensureOpen();

          for (int i = 0; i < dirs.length; i++) {
            if (directory == dirs[i]) {
              // cannot add this index: segments may be deleted in merge before added
              throw new IllegalArgumentException("Cannot add this index to itself");
            }

            SegmentInfos sis = new SegmentInfos(); // read infos from dir
            sis.read(dirs[i]);
            for (int j = 0; j < sis.size(); j++) {
              SegmentInfo info = sis.info(j);
              assert !segmentInfos.contains(info): "dup info dir=" + info.dir + " name=" + info.name;
              docCount += info.docCount;
              segmentInfos.add(info); // add each info
            }
          }
        }

        // Notify DocumentsWriter that the flushed count just increased
        docWriter.updateFlushedDocCount(docCount);

        maybeMerge();

        ensureOpen();

        // If after merging there remain segments in the index
        // that are in a different directory, just copy these
        // over into our index.  This is necessary (before
        // finishing the transaction) to avoid leaving the
        // index in an unusable (inconsistent) state.
        resolveExternalSegments();

        ensureOpen();

        success = true;

      } finally {
        if (success) {
          commitTransaction();
        } else {
          rollbackTransaction();
        }
      }
    } catch (OutOfMemoryError oom) {
      handleOOM(oom, "addIndexesNoOptimize");
    } finally {
      if (docWriter != null) {
        docWriter.resumeAllThreads();
      }
    }
  }
  private boolean hasExternalSegments() {
    // True if any SegmentInfo lives in a Directory other than
    // ours; this can only happen transiently during addIndexes*.
    return segmentInfos.hasExternalSegments(directory);
  }
/* If any of our segments are using a directory != ours
* then we have to either copy them over one by one, merge
* them (if merge policy has chosen to) or wait until
* currently running merges (in the background) complete.
* We don't return until the SegmentInfos has no more
* external segments. Currently this is only used by
* addIndexesNoOptimize(). */
  private void resolveExternalSegments() throws CorruptIndexException, IOException {

    boolean any = false;

    boolean done = false;

    // Loop until no segment in segmentInfos references an
    // external Directory; each pass either starts/claims a merge
    // covering an external segment or waits for one to finish.
    while(!done) {
      SegmentInfo info = null;
      MergePolicy.OneMerge merge = null;
      synchronized(this) {

        if (stopMerges)
          throw new MergePolicy.MergeAbortedException("rollback() was called or addIndexes* hit an unhandled exception");

        final int numSegments = segmentInfos.size();

        done = true;
        for(int i=0;i<numSegments;i++) {
          info = segmentInfos.info(i);
          if (info.dir != directory) {
            done = false;
            // A singleton merge copies this one external segment
            // into our Directory:
            final MergePolicy.OneMerge newMerge = new MergePolicy.OneMerge(segmentInfos.range(i, 1+i), mergePolicy instanceof LogMergePolicy && getUseCompoundFile());

            // Returns true if no running merge conflicts
            // with this one (and, records this merge as
            // pending), ie, this segment is not currently
            // being merged:
            if (registerMerge(newMerge)) {
              merge = newMerge;

              // If this segment is not currently being
              // merged, then advance it to running & run
              // the merge ourself (below):
              pendingMerges.remove(merge);
              runningMerges.add(merge);
              break;
            }
          }
        }

        if (!done && merge == null)
          // We are not yet done (external segments still
          // exist in segmentInfos), yet, all such segments
          // are currently "covered" by a pending or running
          // merge.  We now try to grab any pending merge
          // that involves external segments:
          merge = getNextExternalMerge();

        if (!done && merge == null)
          // We are not yet done, and, all external segments
          // fall under merges that the merge scheduler is
          // currently running.  So, we now wait and check
          // back to see if the merge has completed.
          doWait();
      }

      if (merge != null) {
        any = true;
        // Run the claimed merge on this thread:
        merge(merge);
      }
    }

    if (any)
      // Sometimes, on copying an external segment over,
      // more merges may become necessary:
      mergeScheduler.merge(this);
  }
/** Merges the provided indexes into this index.
* <p>After this completes, the index is optimized. </p>
* <p>The provided IndexReaders are not closed.</p>
*
* <p><b>NOTE:</b> while this is running, any attempts to
* add or delete documents (with another thread) will be
* paused until this method completes.
*
* <p>See {@link #addIndexesNoOptimize} for
* details on transactional semantics, temporary free
* space required in the Directory, and non-CFS segments
* on an Exception.</p>
*
* <p><b>NOTE</b>: if this method hits an OutOfMemoryError
* you should immediately close the writer. See <a
* href="#OOME">above</a> for details.</p>
*
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
*/
  public void addIndexes(IndexReader... readers)
      throws CorruptIndexException, IOException {

    ensureOpen();

    // Do not allow add docs or deletes while we are running:
    docWriter.pauseAllThreads();

    // We must pre-acquire a read lock here (and upgrade to
    // write lock in startTransaction below) so that no
    // other addIndexes is allowed to start up after we have
    // flushed & optimized but before we then start our
    // transaction.  This is because the merging below
    // requires that only one segment is present in the
    // index:
    acquireRead();

    try {

      SegmentInfo info = null;
      String mergedName = null;
      SegmentMerger merger = null;

      boolean success = false;

      try {
        flush(true, false, true);
        optimize(); // start with zero or 1 seg
        success = true;
      } finally {
        // Take care to release the read lock if we hit an
        // exception before starting the transaction
        if (!success)
          releaseRead();
      }

      // true means we already have a read lock; if this
      // call hits an exception it will release the write
      // lock:
      startTransaction(true);

      try {
        mergedName = newSegmentName();
        merger = new SegmentMerger(this, mergedName, null);

        SegmentReader sReader = null;
        synchronized(this) {
          // Fold our single existing segment (if any) into the
          // merge alongside the provided readers:
          if (segmentInfos.size() == 1) { // add existing index, if any
            sReader = readerPool.get(segmentInfos.info(0), true, BufferedIndexInput.BUFFER_SIZE, -1);
          }
        }

        success = false;

        try {
          if (sReader != null)
            merger.add(sReader);

          for (int i = 0; i < readers.length; i++) // add new indexes
            merger.add(readers[i]);

          int docCount = merger.merge(); // merge 'em

          synchronized(this) {
            segmentInfos.clear(); // pop old infos & add new
            info = new SegmentInfo(mergedName, docCount, directory, false, true,
                                   -1, null, false, merger.hasProx());
            setDiagnostics(info, "addIndexes(IndexReader...)");
            segmentInfos.add(info);
          }

          // Notify DocumentsWriter that the flushed count just increased
          docWriter.updateFlushedDocCount(docCount);

          success = true;

        } finally {
          if (sReader != null) {
            readerPool.release(sReader);
          }
        }
      } finally {
        if (!success) {
          if (infoStream != null)
            message("hit exception in addIndexes during merge");
          rollbackTransaction();
        } else {
          commitTransaction();
        }
      }

      if (mergePolicy instanceof LogMergePolicy && getUseCompoundFile()) {

        List<String> files = null;

        synchronized(this) {
          // Must incRef our files so that if another thread
          // is running merge/optimize, it doesn't delete our
          // segment's files before we have a chance to
          // finish making the compound file.
          if (segmentInfos.contains(info)) {
            files = info.files();
            deleter.incRef(files);
          }
        }

        if (files != null) {

          success = false;

          // Converting to compound format is its own
          // transaction, so a failure here leaves the (already
          // committed) non-compound segment intact:
          startTransaction(false);

          try {
            merger.createCompoundFile(mergedName + ".cfs");
            synchronized(this) {
              info.setUseCompoundFile(true);
            }

            success = true;

          } finally {

            deleter.decRef(files);

            if (!success) {
              if (infoStream != null)
                message("hit exception building compound file in addIndexes during merge");

              rollbackTransaction();
            } else {
              commitTransaction();
            }
          }
        }
      }
    } catch (OutOfMemoryError oom) {
      handleOOM(oom, "addIndexes(IndexReader...)");
    } finally {
      if (docWriter != null) {
        docWriter.resumeAllThreads();
      }
    }
  }
/**
* A hook for extending classes to execute operations after pending added and
* deleted documents have been flushed to the Directory but before the change
* is committed (new segments_N file written).
*/
  protected void doAfterFlush() throws IOException {} // default: no-op hook
/**
* A hook for extending classes to execute operations before pending added and
* deleted documents are flushed to the Directory.
*/
  protected void doBeforeFlush() throws IOException {} // default: no-op hook
/** Expert: prepare for commit.
*
* <p><b>NOTE</b>: if this method hits an OutOfMemoryError
* you should immediately close the writer. See <a
* href="#OOME">above</a> for details.</p>
*
* @see #prepareCommit(Map) */
  public final void prepareCommit() throws CorruptIndexException, IOException {
    ensureOpen();
    // null commitUserData keeps the prior commit's user data.
    prepareCommit(null);
  }
/** <p>Expert: prepare for commit, specifying
* commitUserData Map (String -> String). This does the
* first phase of 2-phase commit. This method does all
* steps necessary to commit changes since this writer
* was opened: flushes pending added and deleted docs,
* syncs the index files, writes most of next segments_N
* file. After calling this you must call either {@link
* #commit()} to finish the commit, or {@link
* #rollback()} to revert the commit and undo all changes
* done since the writer was opened.</p>
*
* You can also just call {@link #commit(Map)} directly
* without prepareCommit first in which case that method
* will internally call prepareCommit.
*
* <p><b>NOTE</b>: if this method hits an OutOfMemoryError
* you should immediately close the writer. See <a
* href="#OOME">above</a> for details.</p>
*
* @param commitUserData Opaque Map (String->String)
* that's recorded into the segments file in the index,
* and retrievable by {@link
* IndexReader#getCommitUserData}. Note that when
* IndexWriter commits itself during {@link #close}, the
* commitUserData is unchanged (just carried over from
* the prior commit). If this is null then the previous
* commitUserData is kept. Also, the commitUserData will
* only "stick" if there are actually changes in the
* index to commit.
*/
  public final void prepareCommit(Map<String,String> commitUserData) throws CorruptIndexException, IOException {

    if (hitOOM) {
      throw new IllegalStateException("this writer hit an OutOfMemoryError; cannot commit");
    }

    // Two-phase commit: a second prepareCommit is not allowed
    // until the first is completed (commit) or abandoned
    // (rollback):
    if (pendingCommit != null)
      throw new IllegalStateException("prepareCommit was already called with no corresponding call to commit");

    if (infoStream != null)
      message("prepareCommit: flush");

    flush(true, true, true);

    // Phase 1: write (but do not yet make visible) the next
    // segments_N:
    startCommit(0, commitUserData);
  }
// Used only by commit, below; lock order is commitLock -> IW
private final Object commitLock = new Object();
  // Internal commit path; unlike the public commit() it does not call
  // ensureOpen().  commitLock serializes concurrent committers (lock
  // order: commitLock -> IW); startCommit syncs files and writes the
  // new segments_N, finishCommit makes it visible.
  private void commit(long sizeInBytes) throws IOException {
    synchronized(commitLock) {
      startCommit(sizeInBytes, null);
      finishCommit();
    }
  }
  /**
   * <p>Commits all pending changes (added & deleted
   * documents, optimizations, segment merges, added
   * indexes, etc.) to the index, and syncs all referenced
   * index files, such that a reader will see the changes
   * and the index updates will survive an OS or machine
   * crash or power loss.  Note that this does not wait for
   * any running background merges to finish.  This may be a
   * costly operation, so you should test the cost in your
   * application and do it only when really necessary.</p>
   *
   * <p> Note that this operation calls Directory.sync on
   * the index files.  That call should not return until the
   * file contents & metadata are on stable storage.  For
   * FSDirectory, this calls the OS's fsync.  But, beware:
   * some hardware devices may in fact cache writes even
   * during fsync, and return before the bits are actually
   * on stable storage, to give the appearance of faster
   * performance.  If you have such a device, and it does
   * not have a battery backup (for example) then on power
   * loss it may still lose data.  Lucene cannot guarantee
   * consistency on such devices.  </p>
   *
   * <p><b>NOTE</b>: if this method hits an OutOfMemoryError
   * you should immediately close the writer.  See <a
   * href="#OOME">above</a> for details.</p>
   *
   * @see #prepareCommit
   * @see #commit(Map)
   */
  public final void commit() throws CorruptIndexException, IOException {
    // null commitUserData: keep whatever userData the prior commit recorded
    commit(null);
  }
  /** Commits all changes to the index, specifying a
   *  commitUserData Map (String -> String).  This just
   *  calls {@link #prepareCommit(Map)} (if you didn't
   *  already call it) and then {@link #finishCommit}.
   *
   * <p><b>NOTE</b>: if this method hits an OutOfMemoryError
   * you should immediately close the writer.  See <a
   * href="#OOME">above</a> for details.</p>
   */
  public final void commit(Map<String,String> commitUserData) throws CorruptIndexException, IOException {

    ensureOpen();

    if (infoStream != null) {
      message("commit: start");
    }

    // Lock order: commitLock -> IW (finishCommit below synchronizes
    // on this writer while we hold commitLock)
    synchronized(commitLock) {
      if (infoStream != null) {
        message("commit: enter lock");
      }

      // If the caller already issued prepareCommit, just finish it;
      // otherwise run the full two-phase commit here:
      if (pendingCommit == null) {
        if (infoStream != null) {
          message("commit: now prepare");
        }
        prepareCommit(commitUserData);
      } else if (infoStream != null) {
        message("commit: already prepared");
      }

      finishCommit();
    }
  }
  // Phase two of the commit: makes the segments_N prepared by
  // startCommit visible, records the new generation/userData, and
  // releases the pending commit's file references.  A no-op when
  // nothing was prepared (pendingCommit == null).
  private synchronized final void finishCommit() throws CorruptIndexException, IOException {
    if (pendingCommit != null) {
      try {
        if (infoStream != null)
    	  message("commit: pendingCommit != null");
        pendingCommit.finishCommit(directory);
        if (infoStream != null)
          message("commit: wrote segments file \"" + pendingCommit.getCurrentSegmentFileName() + "\"");
        lastCommitChangeCount = pendingCommitChangeCount;
        segmentInfos.updateGeneration(pendingCommit);
        segmentInfos.setUserData(pendingCommit.getUserData());
        setRollbackSegmentInfos(pendingCommit);
        deleter.checkpoint(pendingCommit, true);
      } finally {
        // Always release the pending commit and wake any thread
        // blocked waiting for it, even if finishing failed:
        deleter.decRef(pendingCommit);
        pendingCommit = null;
        notifyAll();
      }

    } else if (infoStream != null)
      message("commit: pendingCommit == null; skip");

    if (infoStream != null)
      message("commit: done");
  }
/**
* Flush all in-memory buffered udpates (adds and deletes)
* to the Directory.
* @param triggerMerge if true, we may merge segments (if
* deletes or docs were flushed) if necessary
* @param flushDocStores if false we are allowed to keep
* doc stores open to share with the next segment
* @param flushDeletes whether pending deletes should also
* be flushed
*/
protected final void flush(boolean triggerMerge, boolean flushDocStores, boolean flushDeletes) throws CorruptIndexException, IOException {
// We can be called during close, when closing==true, so we must pass false to ensureOpen:
ensureOpen(false);
if (doFlush(flushDocStores, flushDeletes) && triggerMerge)
maybeMerge();
}
  // TODO: this method should not have to be entirely
  // synchronized, ie, merges should be allowed to commit
  // even while a flush is happening
  //
  // Runs the actual flush, then (in reverse order of the nested
  // finally blocks) rebalances the doc writer's RAM buffers and
  // finally clears the flush-pending flag, regardless of whether the
  // flush itself threw.  Returns true if any docs were flushed.
  private synchronized final boolean doFlush(boolean flushDocStores, boolean flushDeletes) throws CorruptIndexException, IOException {
    try {
      try {
        return doFlushInternal(flushDocStores, flushDeletes);
      } finally {
        if (docWriter.doBalanceRAM()) {
          docWriter.balanceRAM();
        }
      }
    } finally {
      docWriter.clearFlushPending();
    }
  }
  // TODO: this method should not have to be entirely
  // synchronized, ie, merges should be allowed to commit
  // even while a flush is happening
  //
  // Flushes buffered docs (and, as requested, doc stores and
  // deletes) into a new on-disk segment, registering it in
  // segmentInfos.  Indexing threads are paused for the duration; the
  // outer finally always resumes them.  Returns true iff docs were
  // flushed.
  private synchronized final boolean doFlushInternal(boolean flushDocStores, boolean flushDeletes) throws CorruptIndexException, IOException {
    if (hitOOM) {
      throw new IllegalStateException("this writer hit an OutOfMemoryError; cannot flush");
    }
    ensureOpen(false);
    assert testPoint("startDoFlush");
    doBeforeFlush();
    flushCount++;
    // If we are flushing because too many deletes
    // accumulated, then we should apply the deletes to free
    // RAM:
    flushDeletes |= docWriter.doApplyDeletes();
    // Make sure no threads are actively adding a document.
    // Returns true if docWriter is currently aborting, in
    // which case we skip flushing this segment
    if (infoStream != null) {
      message("flush: now pause all indexing threads");
    }
    if (docWriter.pauseAllThreads()) {
      docWriter.resumeAllThreads();
      return false;
    }
    try {
      SegmentInfo newSegment = null;
      final int numDocs = docWriter.getNumDocsInRAM();
      // Always flush docs if there are any
      boolean flushDocs = numDocs > 0;
      String docStoreSegment = docWriter.getDocStoreSegment();
      assert docStoreSegment != null || numDocs == 0: "dss=" + docStoreSegment + " numDocs=" + numDocs;
      if (docStoreSegment == null)
        flushDocStores = false;
      int docStoreOffset = docWriter.getDocStoreOffset();
      boolean docStoreIsCompoundFile = false;
      if (infoStream != null) {
        message(" flush: segment=" + docWriter.getSegment() +
            " docStoreSegment=" + docWriter.getDocStoreSegment() +
            " docStoreOffset=" + docStoreOffset +
            " flushDocs=" + flushDocs +
            " flushDeletes=" + flushDeletes +
            " flushDocStores=" + flushDocStores +
            " numDocs=" + numDocs +
            " numBufDelTerms=" + docWriter.getNumBufferedDeleteTerms());
        message(" index before flush " + segString());
      }
      // Check if the doc stores must be separately flushed
      // because other segments, besides the one we are about
      // to flush, reference it
      if (flushDocStores && (!flushDocs || !docWriter.getSegment().equals(docWriter.getDocStoreSegment()))) {
        // We must separately flush the doc store
        if (infoStream != null)
          message(" flush shared docStore segment " + docStoreSegment);
        docStoreIsCompoundFile = flushDocStores();
        flushDocStores = false;
      }
      String segment = docWriter.getSegment();
      // If we are flushing docs, segment must not be null:
      assert segment != null || !flushDocs;
      if (flushDocs) {
        boolean success = false;
        final int flushedDocCount;
        try {
          flushedDocCount = docWriter.flush(flushDocStores);
          success = true;
        } finally {
          if (!success) {
            if (infoStream != null)
              message("hit exception flushing segment " + segment);
            // remove any partially-written files for this segment
            deleter.refresh(segment);
          }
        }
        if (0 == docStoreOffset && flushDocStores) {
          // This means we are flushing private doc stores
          // with this segment, so it will not be shared
          // with other segments
          assert docStoreSegment != null;
          assert docStoreSegment.equals(segment);
          docStoreOffset = -1;
          docStoreIsCompoundFile = false;
          docStoreSegment = null;
        }
        // Create new SegmentInfo, but do not add to our
        // segmentInfos until deletes are flushed
        // successfully.
        newSegment = new SegmentInfo(segment,
            flushedDocCount,
            directory, false, true,
            docStoreOffset, docStoreSegment,
            docStoreIsCompoundFile,
            docWriter.hasProx());
        setDiagnostics(newSegment, "flush");
      }
      docWriter.pushDeletes();
      if (flushDocs) {
        segmentInfos.add(newSegment);
        checkpoint();
      }
      if (flushDocs && mergePolicy.useCompoundFile(segmentInfos, newSegment)) {
        // Now build compound file
        boolean success = false;
        try {
          docWriter.createCompoundFile(segment);
          success = true;
        } finally {
          if (!success) {
            if (infoStream != null)
              message("hit exception creating compound file for newly flushed segment " + segment);
            deleter.deleteFile(segment + "." + IndexFileNames.COMPOUND_FILE_EXTENSION);
          }
        }
        newSegment.setUseCompoundFile(true);
        checkpoint();
      }
      if (flushDeletes) {
        applyDeletes();
      }
      if (flushDocs)
        checkpoint();
      doAfterFlush();
      return flushDocs;
    } catch (OutOfMemoryError oom) {
      handleOOM(oom, "doFlush");
      // never hit
      return false;
    } finally {
      docWriter.resumeAllThreads();
    }
  }
/** Expert: Return the total size of all index files currently cached in memory.
* Useful for size management with flushRamDocs()
*/
public final long ramSizeInBytes() {
ensureOpen();
return docWriter.getRAMUsed();
}
/** Expert: Return the number of documents currently
* buffered in RAM. */
public final synchronized int numRamDocs() {
ensureOpen();
return docWriter.getNumDocsInRAM();
}
private int ensureContiguousMerge(MergePolicy.OneMerge merge) {
int first = segmentInfos.indexOf(merge.segments.info(0));
if (first == -1)
throw new MergePolicy.MergeException("could not find segment " + merge.segments.info(0).name + " in current index " + segString(), directory);
final int numSegments = segmentInfos.size();
final int numSegmentsToMerge = merge.segments.size();
for(int i=0;i<numSegmentsToMerge;i++) {
final SegmentInfo info = merge.segments.info(i);
if (first + i >= numSegments || !segmentInfos.info(first+i).equals(info)) {
if (segmentInfos.indexOf(info) == -1)
throw new MergePolicy.MergeException("MergePolicy selected a segment (" + info.name + ") that is not in the current index " + segString(), directory);
else
throw new MergePolicy.MergeException("MergePolicy selected non-contiguous segments to merge (" + merge.segString(directory) + " vs " + segString() + "), which IndexWriter (currently) cannot handle",
directory);
}
}
return first;
}
/** Carefully merges deletes for the segments we just
* merged. This is tricky because, although merging will
* clear all deletes (compacts the documents), new
* deletes may have been flushed to the segments since
* the merge was started. This method "carries over"
* such new deletes onto the newly merged segment, and
* saves the resulting deletes file (incrementing the
* delete generation for merge.info). If no deletes were
* flushed, no new deletes file is saved. */
synchronized private void commitMergedDeletes(MergePolicy.OneMerge merge, SegmentReader mergeReader) throws IOException {
assert testPoint("startCommitMergeDeletes");
final SegmentInfos sourceSegments = merge.segments;
if (infoStream != null)
message("commitMergeDeletes " + merge.segString(directory));
// Carefully merge deletes that occurred after we
// started merging:
int docUpto = 0;
int delCount = 0;
for(int i=0; i < sourceSegments.size(); i++) {
SegmentInfo info = sourceSegments.info(i);
int docCount = info.docCount;
SegmentReader previousReader = merge.readersClone[i];
SegmentReader currentReader = merge.readers[i];
if (previousReader.hasDeletions()) {
// There were deletes on this segment when the merge
// started. The merge has collapsed away those
// deletes, but, if new deletes were flushed since
// the merge started, we must now carefully keep any
// newly flushed deletes but mapping them to the new
// docIDs.
if (currentReader.numDeletedDocs() > previousReader.numDeletedDocs()) {
// This means this segment has had new deletes
// committed since we started the merge, so we
// must merge them:
for(int j=0;j<docCount;j++) {
if (previousReader.isDeleted(j))
assert currentReader.isDeleted(j);
else {
if (currentReader.isDeleted(j)) {
mergeReader.doDelete(docUpto);
delCount++;
}
docUpto++;
}
}
} else {
docUpto += docCount - previousReader.numDeletedDocs();
}
} else if (currentReader.hasDeletions()) {
// This segment had no deletes before but now it
// does:
for(int j=0; j<docCount; j++) {
if (currentReader.isDeleted(j)) {
mergeReader.doDelete(docUpto);
delCount++;
}
docUpto++;
}
} else
// No deletes before or after
docUpto += info.docCount;
}
assert mergeReader.numDeletedDocs() == delCount;
mergeReader.hasChanges = delCount >= 0;
}
  /* FIXME if we want to support non-contiguous segment merges */
  // Atomically (under the IW lock) replaces the merged source
  // segments in segmentInfos with the single new merge.info,
  // carrying over any deletes that arrived while the merge ran.
  // Returns false (after cleaning up the partial merge files) if the
  // merge was aborted.
  synchronized private boolean commitMerge(MergePolicy.OneMerge merge, SegmentMerger merger, int mergedDocCount, SegmentReader mergedReader) throws IOException {

    assert testPoint("startCommitMerge");

    if (hitOOM) {
      throw new IllegalStateException("this writer hit an OutOfMemoryError; cannot complete merge");
    }

    if (infoStream != null)
      message("commitMerge: " + merge.segString(directory) + " index=" + segString());

    assert merge.registerDone;

    // If merge was explicitly aborted, or, if rollback() or
    // rollbackTransaction() had been called since our merge
    // started (which results in an unqualified
    // deleter.refresh() call that will remove any index
    // file that current segments does not reference), we
    // abort this merge
    if (merge.isAborted()) {
      if (infoStream != null)
        message("commitMerge: skipping merge " + merge.segString(directory) + ": it was aborted");

      deleter.refresh(merge.info.name);
      return false;
    }

    final int start = ensureContiguousMerge(merge);

    commitMergedDeletes(merge, mergedReader);
    docWriter.remapDeletes(segmentInfos, merger.getDocMaps(), merger.getDelCounts(), merge, mergedDocCount);

    setMergeDocStoreIsCompoundFile(merge);
    merge.info.setHasProx(merger.hasProx());

    // Swap the source segments out for the merged one:
    segmentInfos.subList(start, start + merge.segments.size()).clear();
    assert !segmentInfos.contains(merge.info);
    segmentInfos.add(start, merge.info);

    // Must note the change to segmentInfos so any commits
    // in-flight don't lose it:
    checkpoint();

    // If the merged segments had pending changes, clear
    // them so that they don't bother writing them to
    // disk, updating SegmentInfo, etc.:
    readerPool.clear(merge.segments);

    if (merge.optimize)
      segmentsToOptimize.add(merge.info);
    return true;
  }
  // Clears the merge's incref flag once its source segments no
  // longer need protecting.  NOTE(review): despite the name, no
  // reference counts are decremented here any more — only the flag
  // is reset; the deleter decRefs happen in mergeFinish.
  private synchronized void decrefMergeSegments(MergePolicy.OneMerge merge) throws IOException {
    assert merge.increfDone;
    merge.increfDone = false;
  }
final private void handleMergeException(Throwable t, MergePolicy.OneMerge merge) throws IOException {
if (infoStream != null) {
message("handleMergeException: merge=" + merge.segString(directory) + " exc=" + t);
}
// Set the exception on the merge, so if
// optimize() is waiting on us it sees the root
// cause exception:
merge.setException(t);
addMergeException(merge);
if (t instanceof MergePolicy.MergeAbortedException) {
// We can ignore this exception (it happens when
// close(false) or rollback is called), unless the
// merge involves segments from external directories,
// in which case we must throw it so, for example, the
// rollbackTransaction code in addIndexes* is
// executed.
if (merge.isExternal)
throw (MergePolicy.MergeAbortedException) t;
} else if (t instanceof IOException)
throw (IOException) t;
else if (t instanceof RuntimeException)
throw (RuntimeException) t;
else if (t instanceof Error)
throw (Error) t;
else
// Should not get here
throw new RuntimeException(t);
}
  /**
   * Merges the indicated segments, replacing them in the stack with a
   * single segment.
   */
  final void merge(MergePolicy.OneMerge merge)
    throws CorruptIndexException, IOException {

    boolean success = false;

    try {
      try {
        try {
          mergeInit(merge);

          if (infoStream != null)
            message("now merge\n  merge=" + merge.segString(directory) + "\n  merge=" + merge + "\n  index=" + segString());

          mergeMiddle(merge);
          mergeSuccess(merge);
          success = true;
        } catch (Throwable t) {
          // records the exception on the merge and rethrows (or
          // swallows benign aborts):
          handleMergeException(t, merge);
        }
      } finally {
        synchronized(this) {
          // always unregister the merge, even on failure:
          mergeFinish(merge);

          if (!success) {
            if (infoStream != null)
              message("hit exception during merge");
            if (merge.info != null && !segmentInfos.contains(merge.info))
              // the half-built merged segment never made it into the
              // index; remove its files
              deleter.refresh(merge.info.name);
          }

          // This merge (and, generally, any change to the
          // segments) may now enable new merges, so we call
          // merge policy & update pending merges.
          if (success && !merge.isAborted() && !closed && !closing)
            updatePendingMerges(merge.maxNumSegmentsOptimize, merge.optimize);
        }
      }
    } catch (OutOfMemoryError oom) {
      handleOOM(oom, "merge");
    }
  }
  /** Hook that's called when the specified merge is complete.
   *  Intentionally a no-op here; subclasses may override. */
  void mergeSuccess(MergePolicy.OneMerge merge) {
  }
  /** Checks whether this merge involves any segments
   *  already participating in a merge.  If not, this merge
   *  is "registered", meaning we record that its segments
   *  are now participating in a merge, and true is
   *  returned.  Else (the merge conflicts) false is
   *  returned. */
  final synchronized boolean registerMerge(MergePolicy.OneMerge merge) throws MergePolicy.MergeAbortedException {

    if (merge.registerDone)
      // idempotent: already registered
      return true;

    if (stopMerges) {
      merge.abort();
      throw new MergePolicy.MergeAbortedException("merge is aborted: " + merge.segString(directory));
    }

    final int count = merge.segments.size();
    boolean isExternal = false;
    for(int i=0;i<count;i++) {
      final SegmentInfo info = merge.segments.info(i);
      if (mergingSegments.contains(info))
        // conflicts with a merge already running
        return false;
      if (segmentInfos.indexOf(info) == -1)
        // segment no longer in the index (e.g. merged away)
        return false;
      if (info.dir != directory)
        isExternal = true;
    }

    ensureContiguousMerge(merge);

    pendingMerges.add(merge);

    if (infoStream != null)
      message("add merge to pendingMerges: " + merge.segString(directory) + " [total " + pendingMerges.size() + " pending]");

    merge.mergeGen = mergeGen;
    merge.isExternal = isExternal;

    // OK it does not conflict; now record that this merge
    // is running (while synchronized) to avoid race
    // condition where two conflicting merges from different
    // threads, start
    for(int i=0;i<count;i++)
      mergingSegments.add(merge.segments.info(i));

    // Merge is now registered
    merge.registerDone = true;
    return true;
  }
  /** Does initial setup for a merge, which is fast but holds
   *  the synchronized lock on IndexWriter instance.  If the
   *  setup fails, mergeFinish is invoked to unregister the
   *  merge before the exception propagates. */
  final synchronized void mergeInit(MergePolicy.OneMerge merge) throws IOException {
    boolean success = false;
    try {
      _mergeInit(merge);
      success = true;
    } finally {
      if (!success) {
        mergeFinish(merge);
      }
    }
  }
  // The real work behind mergeInit: applies buffered deletes,
  // decides whether the doc stores (stored fields & vectors) must be
  // merged or can be shared, flushes the live doc store if needed,
  // and allocates the new merged SegmentInfo (merge.info).
  final synchronized private void _mergeInit(MergePolicy.OneMerge merge) throws IOException {

    assert testPoint("startMergeInit");

    assert merge.registerDone;
    assert !merge.optimize || merge.maxNumSegmentsOptimize > 0;

    if (hitOOM) {
      throw new IllegalStateException("this writer hit an OutOfMemoryError; cannot merge");
    }

    if (merge.info != null)
      // mergeInit already done
      return;

    if (merge.isAborted())
      return;

    applyDeletes();

    final SegmentInfos sourceSegments = merge.segments;
    final int end = sourceSegments.size();

    // Check whether this merge will allow us to skip
    // merging the doc stores (stored field & vectors).
    // This is a very substantial optimization (saves tons
    // of IO).

    Directory lastDir = directory;
    String lastDocStoreSegment = null;
    int next = -1;

    boolean mergeDocStores = false;
    boolean doFlushDocStore = false;
    final String currentDocStoreSegment = docWriter.getDocStoreSegment();

    // Test each segment to be merged: check if we need to
    // flush/merge doc stores
    for (int i = 0; i < end; i++) {
      SegmentInfo si = sourceSegments.info(i);

      // If it has deletions we must merge the doc stores
      if (si.hasDeletions())
        mergeDocStores = true;

      // If it has its own (private) doc stores we must
      // merge the doc stores
      if (-1 == si.getDocStoreOffset())
        mergeDocStores = true;

      // If it has a different doc store segment than
      // previous segments, we must merge the doc stores
      String docStoreSegment = si.getDocStoreSegment();
      if (docStoreSegment == null)
        mergeDocStores = true;
      else if (lastDocStoreSegment == null)
        lastDocStoreSegment = docStoreSegment;
      else if (!lastDocStoreSegment.equals(docStoreSegment))
        mergeDocStores = true;

      // Segments' docScoreOffsets must be in-order,
      // contiguous.  For the default merge policy now
      // this will always be the case but for an arbitrary
      // merge policy this may not be the case
      if (-1 == next)
        next = si.getDocStoreOffset() + si.docCount;
      else if (next != si.getDocStoreOffset())
        mergeDocStores = true;
      else
        next = si.getDocStoreOffset() + si.docCount;

      // If the segment comes from a different directory
      // we must merge
      if (lastDir != si.dir)
        mergeDocStores = true;

      // If the segment is referencing the current "live"
      // doc store outputs then we must merge
      if (si.getDocStoreOffset() != -1 && currentDocStoreSegment != null && si.getDocStoreSegment().equals(currentDocStoreSegment)) {
        doFlushDocStore = true;
      }
    }

    final int docStoreOffset;
    final String docStoreSegment;
    final boolean docStoreIsCompoundFile;

    if (mergeDocStores) {
      // merged segment gets brand-new private doc stores
      docStoreOffset = -1;
      docStoreSegment = null;
      docStoreIsCompoundFile = false;
    } else {
      // share the (common) doc store of the source segments
      SegmentInfo si = sourceSegments.info(0);
      docStoreOffset = si.getDocStoreOffset();
      docStoreSegment = si.getDocStoreSegment();
      docStoreIsCompoundFile = si.getDocStoreIsCompoundFile();
    }

    if (mergeDocStores && doFlushDocStore) {
      // SegmentMerger intends to merge the doc stores
      // (stored fields, vectors), and at least one of the
      // segments to be merged refers to the currently
      // live doc stores.

      // TODO: if we know we are about to merge away these
      // newly flushed doc store files then we should not
      // make compound file out of them...
      if (infoStream != null)
        message("now flush at merge");
      doFlush(true, false);
    }

    merge.increfDone = true;

    merge.mergeDocStores = mergeDocStores;

    // Bind a new segment name here so even with
    // ConcurrentMergePolicy we keep deterministic segment
    // names.
    merge.info = new SegmentInfo(newSegmentName(), 0,
                                 directory, false, true,
                                 docStoreOffset,
                                 docStoreSegment,
                                 docStoreIsCompoundFile,
                                 false);

    Map<String,String> details = new HashMap<String,String>();
    details.put("optimize", merge.optimize+"");
    details.put("mergeFactor", end+"");
    details.put("mergeDocStores", mergeDocStores+"");
    setDiagnostics(merge.info, "merge", details);

    // Also enroll the merged segment into mergingSegments;
    // this prevents it from getting selected for a merge
    // after our merge is done but while we are building the
    // CFS:
    mergingSegments.add(merge.info);
  }
  // Convenience overload: record diagnostics with no extra details.
  private void setDiagnostics(SegmentInfo info, String source) {
    setDiagnostics(info, source, null);
  }
private void setDiagnostics(SegmentInfo info, String source, Map<String,String> details) {
Map<String,String> diagnostics = new HashMap<String,String>();
diagnostics.put("source", source);
diagnostics.put("lucene.version", Constants.LUCENE_VERSION);
diagnostics.put("os", Constants.OS_NAME+"");
diagnostics.put("os.arch", Constants.OS_ARCH+"");
diagnostics.put("os.version", Constants.OS_VERSION+"");
diagnostics.put("java.version", Constants.JAVA_VERSION+"");
diagnostics.put("java.vendor", Constants.JAVA_VENDOR+"");
if (details != null) {
diagnostics.putAll(details);
}
info.setDiagnostics(diagnostics);
}
  /** Does finishing for a merge, which is fast but holds
   *  the synchronized lock on IndexWriter instance.  Safe to
   *  call more than once for the same merge (e.g. after an
   *  exception inside mergeInit). */
  final synchronized void mergeFinish(MergePolicy.OneMerge merge) throws IOException {

    // Optimize, addIndexes or finishMerges may be waiting
    // on merges to finish.
    notifyAll();

    if (merge.increfDone)
      decrefMergeSegments(merge);

    if (merge.mergeFiles != null) {
      // release the file refs taken in mergeMiddle
      deleter.decRef(merge.mergeFiles);
      merge.mergeFiles = null;
    }

    // It's possible we are called twice, eg if there was an
    // exception inside mergeInit
    if (merge.registerDone) {
      final SegmentInfos sourceSegments = merge.segments;
      final int end = sourceSegments.size();
      for(int i=0;i<end;i++)
        mergingSegments.remove(sourceSegments.info(i));
      mergingSegments.remove(merge.info);
      merge.registerDone = false;
    }

    runningMerges.remove(merge);
  }
private synchronized void setMergeDocStoreIsCompoundFile(MergePolicy.OneMerge merge) {
final String mergeDocStoreSegment = merge.info.getDocStoreSegment();
if (mergeDocStoreSegment != null && !merge.info.getDocStoreIsCompoundFile()) {
final int size = segmentInfos.size();
for(int i=0;i<size;i++) {
final SegmentInfo info = segmentInfos.info(i);
final String docStoreSegment = info.getDocStoreSegment();
if (docStoreSegment != null &&
docStoreSegment.equals(mergeDocStoreSegment) &&
info.getDocStoreIsCompoundFile()) {
merge.info.setDocStoreIsCompoundFile(true);
break;
}
}
}
}
  /** Does the actual (time-consuming) work of the merge,
   *  but without holding synchronized lock on IndexWriter
   *  instance.  Opens (and clones) readers for every source
   *  segment, runs SegmentMerger, commits the merge under
   *  the IW lock, then optionally wraps the result in a
   *  compound file.  Returns the merged doc count, or 0 if
   *  the merge was aborted. */
  final private int mergeMiddle(MergePolicy.OneMerge merge)
    throws CorruptIndexException, IOException {

    merge.checkAborted(directory);

    final String mergedName = merge.info.name;

    SegmentMerger merger = null;

    int mergedDocCount = 0;

    SegmentInfos sourceSegments = merge.segments;
    final int numSegments = sourceSegments.size();

    if (infoStream != null)
      message("merging " + merge.segString(directory));

    merger = new SegmentMerger(this, mergedName, merge);

    merge.readers = new SegmentReader[numSegments];
    merge.readersClone = new SegmentReader[numSegments];

    boolean mergeDocStores = false;

    // doc store segments referenced by the segments being merged
    final Set<String> dss = new HashSet<String>();

    // This is try/finally to make sure merger's readers are
    // closed:
    boolean success = false;
    try {
      int totDocCount = 0;

      for (int i = 0; i < numSegments; i++) {

        final SegmentInfo info = sourceSegments.info(i);

        // Hold onto the "live" reader; we will use this to
        // commit merged deletes
        SegmentReader reader = merge.readers[i] = readerPool.get(info, merge.mergeDocStores,
                                                                 MERGE_READ_BUFFER_SIZE,
                                                                 -1);

        // We clone the segment readers because other
        // deletes may come in while we're merging so we
        // need readers that will not change
        SegmentReader clone = merge.readersClone[i] = (SegmentReader) reader.clone(true);
        merger.add(clone);

        if (clone.hasDeletions()) {
          mergeDocStores = true;
        }

        if (info.getDocStoreOffset() != -1) {
          dss.add(info.getDocStoreSegment());
        }

        totDocCount += clone.numDocs();
      }

      if (infoStream != null) {
        message("merge: total "+totDocCount+" docs");
      }

      merge.checkAborted(directory);

      // If deletions have arrived and it has now become
      // necessary to merge doc stores, go and open them:
      if (mergeDocStores && !merge.mergeDocStores) {
        merge.mergeDocStores = true;
        synchronized(this) {
          if (dss.contains(docWriter.getDocStoreSegment())) {
            if (infoStream != null)
              message("now flush at mergeMiddle");
            doFlush(true, false);
          }
        }

        for(int i=0;i<numSegments;i++) {
          merge.readersClone[i].openDocStores();
        }

        // Clear DSS
        synchronized(this) {
          merge.info.setDocStore(-1, null, false);
        }
      }

      // This is where all the work happens:
      mergedDocCount = merge.info.docCount = merger.merge(merge.mergeDocStores);

      assert mergedDocCount == totDocCount;

      // TODO: in the non-realtime case, we may want to only
      // keep deletes (it's costly to open entire reader
      // when we just need deletes)

      final int termsIndexDivisor;
      final boolean loadDocStores;

      synchronized(this) {
        // If the doc store we are using has been closed and
        // is in now compound format (but wasn't when we
        // started), then we will switch to the compound
        // format as well:
        setMergeDocStoreIsCompoundFile(merge);

        assert merge.mergeFiles == null;
        merge.mergeFiles = merge.info.files();
        // protect the merge output files from deletion until mergeFinish
        deleter.incRef(merge.mergeFiles);
      }

      if (poolReaders && mergedSegmentWarmer != null) {
        // Load terms index & doc stores so the segment
        // warmer can run searches, load documents/term
        // vectors
        termsIndexDivisor = readerTermsIndexDivisor;
        loadDocStores = true;
      } else {
        termsIndexDivisor = -1;
        loadDocStores = false;
      }

      final SegmentReader mergedReader = readerPool.get(merge.info, loadDocStores, BufferedIndexInput.BUFFER_SIZE, termsIndexDivisor);

      try {
        if (poolReaders && mergedSegmentWarmer != null) {
          mergedSegmentWarmer.warm(mergedReader);
        }
        if (!commitMerge(merge, merger, mergedDocCount, mergedReader))
          // commitMerge will return false if this merge was aborted
          return 0;
      } finally {
        synchronized(this) {
          readerPool.release(mergedReader);
        }
      }

      success = true;
    } finally {
      synchronized(this) {
        if (!success) {
          // Suppress any new exceptions so we throw the
          // original cause
          for (int i=0;i<numSegments;i++) {
            if (merge.readers[i] != null) {
              try {
                readerPool.release(merge.readers[i], true);
              } catch (Throwable t) {
              }
            }

            if (merge.readersClone[i] != null) {
              try {
                merge.readersClone[i].close();
              } catch (Throwable t) {
              }
              // This was a private clone and we had the only reference
              assert merge.readersClone[i].getRefCount() == 0;
            }
          }
        } else {
          for (int i=0;i<numSegments;i++) {
            if (merge.readers[i] != null) {
              readerPool.release(merge.readers[i], true);
            }

            if (merge.readersClone[i] != null) {
              merge.readersClone[i].close();
              // This was a private clone and we had the only reference
              assert merge.readersClone[i].getRefCount() == 0;
            }
          }
        }
      }
    }

    // Must checkpoint before decrefing so any newly
    // referenced files in the new merge.info are incref'd
    // first:
    synchronized(this) {
      deleter.checkpoint(segmentInfos, false);
    }
    decrefMergeSegments(merge);

    if (merge.useCompoundFile) {

      success = false;
      final String compoundFileName = mergedName + "." + IndexFileNames.COMPOUND_FILE_EXTENSION;

      try {
        merger.createCompoundFile(compoundFileName);
        success = true;
      } catch (IOException ioe) {
        synchronized(this) {
          if (merge.isAborted()) {
            // This can happen if rollback or close(false)
            // is called -- fall through to logic below to
            // remove the partially created CFS:
            success = true;
          } else
            handleMergeException(ioe, merge);
        }
      } catch (Throwable t) {
        handleMergeException(t, merge);
      } finally {
        if (!success) {
          if (infoStream != null)
            message("hit exception creating compound file during merge");
          synchronized(this) {
            deleter.deleteFile(compoundFileName);
          }
        }
      }

      if (merge.isAborted()) {
        if (infoStream != null)
          message("abort merge after building CFS");
        deleter.deleteFile(compoundFileName);
        return 0;
      }

      synchronized(this) {
        if (segmentInfos.indexOf(merge.info) == -1 || merge.isAborted()) {
          // Our segment (committed in non-compound
          // format) got merged away while we were
          // building the compound format.
          deleter.deleteFile(compoundFileName);
        } else {
          merge.info.setUseCompoundFile(true);
          checkpoint();
        }
      }
    }

    return mergedDocCount;
  }
synchronized void addMergeException(MergePolicy.OneMerge merge) {
assert merge.getException() != null;
if (!mergeExceptions.contains(merge) && mergeGen == merge.mergeGen)
mergeExceptions.add(merge);
}
  // Apply buffered deletes to all segments.  On failure, rolls
  // segmentInfos back to its pre-call state and removes any
  // partially-written .del files.  Returns true if anything changed
  // (in which case a checkpoint is taken).
  private final synchronized boolean applyDeletes() throws CorruptIndexException, IOException {
    assert testPoint("startApplyDeletes");
    flushDeletesCount++;
    // snapshot so we can roll back on exception below
    SegmentInfos rollback = (SegmentInfos) segmentInfos.clone();
    boolean success = false;
    boolean changed;
    try {
      changed = docWriter.applyDeletes(segmentInfos);
      success = true;
    } finally {
      if (!success) {
        if (infoStream != null)
          message("hit exception flushing deletes");

        // Carefully remove any partially written .del
        // files
        final int size = rollback.size();
        for(int i=0;i<size;i++) {
          final String newDelFileName = segmentInfos.info(i).getDelFileName();
          final String delFileName = rollback.info(i).getDelFileName();
          if (newDelFileName != null && !newDelFileName.equals(delFileName))
            deleter.deleteFile(newDelFileName);
        }

        // Fully replace the segmentInfos since flushed
        // deletes could have changed any of the
        // SegmentInfo instances:
        segmentInfos.clear();
        segmentInfos.addAll(rollback);
      }
    }

    if (changed)
      checkpoint();
    return changed;
  }
// For test purposes.
final synchronized int getBufferedDeleteTermsSize() {
return docWriter.getBufferedDeleteTerms().size();
}
// For test purposes.
final synchronized int getNumBufferedDeleteTerms() {
return docWriter.getNumBufferedDeleteTerms();
}
// utility routines for tests
SegmentInfo newestSegment() {
return segmentInfos.info(segmentInfos.size()-1);
}
  // Human-readable summary of the current segments (for diagnostics).
  public synchronized String segString() {
    return segString(segmentInfos);
  }
private synchronized String segString(SegmentInfos infos) {
StringBuilder buffer = new StringBuilder();
final int count = infos.size();
for(int i = 0; i < count; i++) {
if (i > 0) {
buffer.append(' ');
}
final SegmentInfo info = infos.info(i);
buffer.append(info.segString(directory));
if (info.dir != directory)
buffer.append("**");
}
return buffer.toString();
}
  // Files that have been sync'd already.  Also serves as the monitor
  // guarding both sets (see startSync/finishSync/waitForAllSynced).
  private HashSet<String> synced = new HashSet<String>();
  // Files that are now being sync'd by some thread; guarded by synced.
  private HashSet<String> syncing = new HashSet<String>();
private boolean startSync(String fileName, Collection<String> pending) {
synchronized(synced) {
if (!synced.contains(fileName)) {
if (!syncing.contains(fileName)) {
syncing.add(fileName);
return true;
} else {
pending.add(fileName);
return false;
}
} else
return false;
}
}
/** Releases the sync claim on {@code fileName} taken via startSync.
 *  On success the file is recorded in "synced"; either way, all
 *  threads waiting in waitForAllSynced are woken to re-check. */
private void finishSync(String fileName, boolean success) {
  synchronized(synced) {
    // The caller must have claimed this file via startSync; remove()
    // returning true proves the claim existed.
    final boolean wasSyncing = syncing.remove(fileName);
    assert wasSyncing;
    if (success) {
      synced.add(fileName);
    }
    synced.notifyAll();
  }
}
/** Blocks until every file in {@code syncing} appears in "synced".
 *  Returns false if a file vanishes from the in-progress set without
 *  ever reaching "synced" — i.e. another thread's sync of it failed —
 *  in which case the caller should retry the whole sync pass. */
private boolean waitForAllSynced(Collection<String> syncing) throws IOException {
  synchronized(synced) {
    for (final String fileName : syncing) {
      while (!synced.contains(fileName)) {
        if (!syncing.contains(fileName)) {
          // There was an error because a file that was
          // previously syncing failed to appear in synced
          return false;
        }
        try {
          // Guarded wait; finishSync() notifies on every completion.
          synced.wait();
        } catch (InterruptedException ie) {
          throw new ThreadInterruptedException(ie);
        }
      }
    }
    return true;
  }
}
/** Waits (up to 1 second) on this writer's monitor; used by callers
 *  polling for a state change (e.g. waiting for a pending commit). */
private synchronized void doWait() {
  // NOTE: the callers of this method should in theory
  // be able to do simply wait(), but, as a defense
  // against thread timing hazards where notifyAll()
  // falls to be called, we wait for at most 1 second
  // and then return so caller can check if wait
  // conditions are satisfied:
  try {
    wait(1000);
  } catch (InterruptedException ie) {
    throw new ThreadInterruptedException(ie);
  }
}
/** Walk through all files referenced by the current
 * segmentInfos and ask the Directory to sync each file,
 * if it wasn't already. If that succeeds, then we
 * prepare a new segments_N file but do not fully commit
 * it.
 *
 * This is phase one of the two-phase commit: on success,
 * {@code pendingCommit} holds the prepared SegmentInfos and
 * {@code pendingCommitChangeCount} the change count it covers;
 * the actual segments_N finish/rollback happens elsewhere.
 *
 * NOTE: sizeInBytes is only used in the log message below. */
private void startCommit(long sizeInBytes, Map<String,String> commitUserData) throws IOException {
  assert testPoint("startStartCommit");
  // TODO: as of LUCENE-2095, we can simplify this method,
  // since only 1 thread can be in here at once
  if (hitOOM) {
    throw new IllegalStateException("this writer hit an OutOfMemoryError; cannot commit");
  }
  try {
    if (infoStream != null)
      message("startCommit(): start sizeInBytes=" + sizeInBytes);
    SegmentInfos toSync = null;
    final long myChangeCount;
    synchronized(this) {
      // Wait for any running addIndexes to complete
      // first, then block any from running until we've
      // copied the segmentInfos we intend to sync:
      blockAddIndexes(false);
      // On commit the segmentInfos must never
      // reference a segment in another directory:
      assert !hasExternalSegments();
      try {
        assert lastCommitChangeCount <= changeCount;
        if (changeCount == lastCommitChangeCount) {
          // Nothing changed since the last commit; skip entirely.
          if (infoStream != null)
            message(" skip startCommit(): no changes pending");
          return;
        }
        // First, we clone & incref the segmentInfos we intend
        // to sync, then, without locking, we sync() each file
        // referenced by toSync, in the background. Multiple
        // threads can be doing this at once, if say a large
        // merge and a small merge finish at the same time:
        if (infoStream != null)
          message("startCommit index=" + segString(segmentInfos) + " changeCount=" + changeCount);
        readerPool.commit();
        toSync = (SegmentInfos) segmentInfos.clone();
        if (commitUserData != null)
          toSync.setUserData(commitUserData);
        // incRef keeps the deleter from removing any file in this
        // commit point while we sync; decRef'd in the finally below
        // if we never install it as pendingCommit.
        deleter.incRef(toSync, false);
        myChangeCount = changeCount;
        Collection<String> files = toSync.files(directory, false);
        for(final String fileName: files) {
          assert directory.fileExists(fileName): "file " + fileName + " does not exist";
        }
      } finally {
        resumeAddIndexes();
      }
    }
    assert testPoint("midStartCommit");
    // Tracks whether toSync was installed as pendingCommit; if not,
    // we must release our incRef in the finally block at the bottom.
    boolean setPending = false;
    try {
      // Loop until all files toSync references are sync'd:
      while(true) {
        final Collection<String> pending = new ArrayList<String>();
        Iterator<String> it = toSync.files(directory, false).iterator();
        while(it.hasNext()) {
          final String fileName = it.next();
          if (startSync(fileName, pending)) {
            boolean success = false;
            try {
              // Because we incRef'd this commit point, above,
              // the file had better exist:
              assert directory.fileExists(fileName): "file '" + fileName + "' does not exist dir=" + directory;
              if (infoStream != null)
                message("now sync " + fileName);
              directory.sync(fileName);
              success = true;
            } finally {
              finishSync(fileName, success);
            }
          }
        }
        // All files that I require are either synced or being
        // synced by other threads. If they are being synced,
        // we must at this point block until they are done.
        // If this returns false, that means an error in
        // another thread resulted in failing to actually
        // sync one of our files, so we repeat:
        if (waitForAllSynced(pending))
          break;
      }
      assert testPoint("midStartCommit2");
      synchronized(this) {
        // If someone saved a newer version of segments file
        // since I first started syncing my version, I can
        // safely skip saving myself since I've been
        // superseded:
        while(true) {
          if (myChangeCount <= lastCommitChangeCount) {
            if (infoStream != null) {
              message("sync superseded by newer infos");
            }
            break;
          } else if (pendingCommit == null) {
            // My turn to commit
            if (segmentInfos.getGeneration() > toSync.getGeneration())
              toSync.updateGeneration(segmentInfos);
            boolean success = false;
            try {
              // Exception here means nothing is prepared
              // (this method unwinds everything it did on
              // an exception)
              try {
                toSync.prepareCommit(directory);
              } finally {
                // Have our master segmentInfos record the
                // generations we just prepared. We do this
                // on error or success so we don't
                // double-write a segments_N file.
                segmentInfos.updateGeneration(toSync);
              }
              assert pendingCommit == null;
              setPending = true;
              pendingCommit = toSync;
              pendingCommitChangeCount = myChangeCount;
              success = true;
            } finally {
              if (!success && infoStream != null)
                message("hit exception committing segments file");
            }
            break;
          } else {
            // Must wait for other commit to complete
            doWait();
          }
        }
      }
      if (infoStream != null)
        message("done all syncs");
      assert testPoint("midStartCommitSuccess");
    } finally {
      synchronized(this) {
        if (!setPending)
          deleter.decRef(toSync);
      }
    }
  } catch (OutOfMemoryError oom) {
    handleOOM(oom, "startCommit");
  }
  assert testPoint("finishStartCommit");
}
/**
 * Returns <code>true</code> iff the index in the named directory is
 * currently locked.
 * @param directory the directory to check for a lock
 * @return <code>true</code> if the write lock is currently held
 * @throws IOException if there is a low-level IO error
 */
public static boolean isLocked(Directory directory) throws IOException {
  // makeLock only constructs the Lock object; isLocked probes it
  // without acquiring.
  return directory.makeLock(WRITE_LOCK_NAME).isLocked();
}
/**
 * Forcibly unlocks the index in the named directory.
 * <P>
 * Caution: this should only be used by failure recovery code,
 * when it is known that no other process nor thread is in fact
 * currently accessing this index.
 * @param directory the directory whose write lock should be released
 * @throws IOException if there is a low-level IO error
 */
public static void unlock(Directory directory) throws IOException {
  // Consistency fix: use the unqualified WRITE_LOCK_NAME, matching the
  // sibling isLocked(Directory); it is a constant of this same class,
  // so the IndexWriter. qualifier was redundant.
  directory.makeLock(WRITE_LOCK_NAME).release();
}
/**
 * Specifies maximum field length (in number of tokens/terms) in {@link IndexWriter} constructors.
 * {@link #setMaxFieldLength(int)} overrides the value set by
 * the constructor.
 *
 * Instances are immutable: both fields are assigned exactly once in the
 * private constructor, so they are declared {@code final}.
 */
public static final class MaxFieldLength {

  private final int limit;
  private final String name;

  /**
   * Private type-safe-enum-pattern constructor.
   *
   * @param name instance name
   * @param limit maximum field length
   */
  private MaxFieldLength(String name, int limit) {
    this.name = name;
    this.limit = limit;
  }

  /**
   * Public constructor to allow users to specify the maximum field size limit.
   *
   * @param limit The maximum field length
   */
  public MaxFieldLength(int limit) {
    this("User-specified", limit);
  }

  /** Returns the maximum field length (number of tokens/terms). */
  public int getLimit() {
    return limit;
  }

  /** Returns "name:limit", e.g. "UNLIMITED:2147483647". */
  @Override
  public String toString() {
    return name + ":" + limit;
  }

  /** Sets the maximum field length to {@link Integer#MAX_VALUE}. */
  public static final MaxFieldLength UNLIMITED
    = new MaxFieldLength("UNLIMITED", Integer.MAX_VALUE);

  /**
   * Sets the maximum field length to
   * {@link #DEFAULT_MAX_FIELD_LENGTH}
   * */
  public static final MaxFieldLength LIMITED
    = new MaxFieldLength("LIMITED", DEFAULT_MAX_FIELD_LENGTH);
}
/** If {@link #getReader} has been called (ie, this writer
 * is in near real-time mode), then after a merge
 * completes, this class can be invoked to warm the
 * reader on the newly merged segment, before the merge
 * commits. This is not required for near real-time
 * search, but will reduce search latency on opening a
 * new near real-time reader after a merge completes.
 *
 * <p><b>NOTE:</b> This API is experimental and might
 * change in incompatible ways in the next release.</p>
 *
 * <p><b>NOTE</b>: warm is called before any deletes have
 * been carried over to the merged segment. */
public static abstract class IndexReaderWarmer {
  /** Invoked on a reader over the newly merged (not yet committed)
   *  segment; implementations typically run warm-up queries here. */
  public abstract void warm(IndexReader reader) throws IOException;
}
// Optional warmer invoked on newly merged segments in NRT mode; null
// (the default) means no warming is performed.
private IndexReaderWarmer mergedSegmentWarmer;
/** Set the merged segment warmer. See {@link
 * IndexReaderWarmer}. Pass null to disable warming. */
public void setMergedSegmentWarmer(IndexReaderWarmer warmer) {
  mergedSegmentWarmer = warmer;
}
/** Returns the current merged segment warmer. See {@link
 * IndexReaderWarmer}. May return null if none was set. */
public IndexReaderWarmer getMergedSegmentWarmer() {
  return mergedSegmentWarmer;
}
/** Records that an OutOfMemoryError occurred (permanently poisoning
 *  this writer — see the hitOOM checks elsewhere, e.g. startCommit)
 *  and rethrows the original error. */
private void handleOOM(OutOfMemoryError oom, String location) {
  if (infoStream != null) {
    message("hit OutOfMemoryError inside " + location);
  }
  // Once set, hitOOM is never cleared: subsequent commits will throw
  // IllegalStateException rather than risk corrupting the index.
  hitOOM = true;
  throw oom;
}
// Used only by assert for testing. Tests may subclass IndexWriter and
// override this hook to observe/interfere at specific points; the
// production implementation always returns true so the enclosing
// assert never fires. Current points:
// startDoFlush
// startCommitMerge
// startStartCommit
// midStartCommit
// midStartCommit2
// midStartCommitSuccess
// finishStartCommit
// startCommitMergeDeletes
// startMergeInit
// startApplyDeletes
// DocumentsWriter.ThreadState.init start
boolean testPoint(String name) {
  return true;
}
/** Returns true if an NRT reader opened on {@code infos} is still
 *  current: the segment structure matches this writer's and the
 *  DocumentsWriter has no uncommitted buffered changes. */
synchronized boolean nrtIsCurrent(SegmentInfos infos) {
  if (infos.equals(segmentInfos)) {
    // Same segment structure; current only if nothing is buffered.
    return !docWriter.anyChanges();
  }
  // Structural changes (new segments) mean the reader is stale.
  return false;
}
/** Returns true if this writer has been fully closed. */
synchronized boolean isClosed() {
  return closed;
}
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/IndexWriter.java | Java | art | 171,610 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.io.Closeable;
/** TermDocs provides an interface for enumerating &lt;document, frequency&gt;
 pairs for a term. <p> The document portion names each document containing
 the term. Documents are indicated by number. The frequency portion gives
 the number of times the term occurred in each document. <p> The pairs are
 ordered by document number.
 @see IndexReader#termDocs()
 */
public interface TermDocs extends Closeable {
  /** Sets this to the data for a term.
   * The enumeration is reset to the start of the data for this term.
   */
  void seek(Term term) throws IOException;

  /** Sets this to the data for the current term in a {@link TermEnum}.
   * This may be optimized in some implementations.
   */
  void seek(TermEnum termEnum) throws IOException;

  /** Returns the current document number. <p> This is invalid until {@link
   #next()} is called for the first time.*/
  int doc();

  /** Returns the frequency of the term within the current document. <p> This
   is invalid until {@link #next()} is called for the first time.*/
  int freq();

  /** Moves to the next pair in the enumeration. <p> Returns true iff there is
   such a next pair in the enumeration. */
  boolean next() throws IOException;

  /** Attempts to read multiple entries from the enumeration, up to length of
   * <i>docs</i>. Document numbers are stored in <i>docs</i>, and term
   * frequencies are stored in <i>freqs</i> (the two arrays are parallel:
   * freqs[i] is the frequency for docs[i]). The <i>freqs</i> array must be as
   * long as the <i>docs</i> array.
   *
   * <p>Returns the number of entries read. Zero is only returned when the
   * stream has been exhausted. */
  int read(int[] docs, int[] freqs) throws IOException;

  /** Skips entries to the first beyond the current whose document number is
   * greater than or equal to <i>target</i>. <p>Returns true iff there is such
   * an entry. <p>Behaves as if written: <pre>
   *   boolean skipTo(int target) {
   *     do {
   *       if (!next())
   *         return false;
   *     } while (target > doc());
   *     return true;
   *   }
   * </pre>
   * Some implementations are considerably more efficient than that.
   */
  boolean skipTo(int target) throws IOException;

  /** Frees associated resources. Callers must close the enumeration
   * when done with it (this interface extends {@link Closeable}). */
  void close() throws IOException;
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/TermDocs.java | Java | art | 3,132 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.util.HashSet;
import java.util.Collection;
import org.apache.lucene.store.Directory;
/** Mutable holder for the per-segment state used while flushing a new
 *  segment: the target directory, segment/doc-store names, doc counts,
 *  the term index interval, and the set of files written so far. */
class SegmentWriteState {
  DocumentsWriter docWriter;
  Directory directory;
  String segmentName;
  String docStoreSegmentName;
  int numDocs;
  int termIndexInterval;
  int numDocsInStore;
  // Names of the files written for this segment; starts empty and is
  // populated by the consumers during the flush.
  Collection<String> flushedFiles;

  public SegmentWriteState(DocumentsWriter docWriter, Directory directory, String segmentName, String docStoreSegmentName, int numDocs,
                           int numDocsInStore, int termIndexInterval) {
    // Plain field copies; no validation or defensive copying is done here.
    this.docWriter = docWriter;
    this.directory = directory;
    this.segmentName = segmentName;
    this.docStoreSegmentName = docStoreSegmentName;
    this.termIndexInterval = termIndexInterval;
    this.numDocs = numDocs;
    this.numDocsInStore = numDocsInStore;
    this.flushedFiles = new HashSet<String>();
  }

  /** Returns the file name for this segment with the given extension,
   *  i.e. {@code segmentName + "." + ext}. */
  public String segmentFileName(String ext) {
    return segmentName + "." + ext;
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/SegmentWriteState.java | Java | art | 1,778 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * Useful constants representing filenames and extensions used by lucene.
 * All members are package-private constants plus a few small static
 * helpers for composing file names; this class is never instantiated.
 */
final class IndexFileNames {

  /** Name of the index segment file */
  static final String SEGMENTS = "segments";

  /** Name of the generation reference file name */
  static final String SEGMENTS_GEN = "segments.gen";

  /** Name of the index deletable file (only used in
   * pre-lockless indices) */
  static final String DELETABLE = "deletable";

  /** Extension of norms file */
  static final String NORMS_EXTENSION = "nrm";

  /** Extension of freq postings file */
  static final String FREQ_EXTENSION = "frq";

  /** Extension of prox postings file */
  static final String PROX_EXTENSION = "prx";

  /** Extension of terms file */
  static final String TERMS_EXTENSION = "tis";

  /** Extension of terms index file */
  static final String TERMS_INDEX_EXTENSION = "tii";

  /** Extension of stored fields index file */
  static final String FIELDS_INDEX_EXTENSION = "fdx";

  /** Extension of stored fields file */
  static final String FIELDS_EXTENSION = "fdt";

  /** Extension of vectors fields file */
  static final String VECTORS_FIELDS_EXTENSION = "tvf";

  /** Extension of vectors documents file */
  static final String VECTORS_DOCUMENTS_EXTENSION = "tvd";

  /** Extension of vectors index file */
  static final String VECTORS_INDEX_EXTENSION = "tvx";

  /** Extension of compound file */
  static final String COMPOUND_FILE_EXTENSION = "cfs";

  /** Extension of compound file for doc store files*/
  static final String COMPOUND_FILE_STORE_EXTENSION = "cfx";

  /** Extension of deletes */
  static final String DELETES_EXTENSION = "del";

  /** Extension of field infos */
  static final String FIELD_INFOS_EXTENSION = "fnm";

  /** Extension of plain norms */
  static final String PLAIN_NORMS_EXTENSION = "f";

  /** Extension of separate norms */
  static final String SEPARATE_NORMS_EXTENSION = "s";

  /** Extension of gen file */
  static final String GEN_EXTENSION = "gen";

  /**
   * This array contains all filename extensions used by
   * Lucene's index files, with two exceptions, namely the
   * extension made up from <code>.f</code> + a number and
   * from <code>.s</code> + a number. Also note that
   * Lucene's <code>segments_N</code> files do not have any
   * filename extension.
   */
  static final String INDEX_EXTENSIONS[] = new String[] {
    COMPOUND_FILE_EXTENSION,
    FIELD_INFOS_EXTENSION,
    FIELDS_INDEX_EXTENSION,
    FIELDS_EXTENSION,
    TERMS_INDEX_EXTENSION,
    TERMS_EXTENSION,
    FREQ_EXTENSION,
    PROX_EXTENSION,
    DELETES_EXTENSION,
    VECTORS_INDEX_EXTENSION,
    VECTORS_DOCUMENTS_EXTENSION,
    VECTORS_FIELDS_EXTENSION,
    GEN_EXTENSION,
    NORMS_EXTENSION,
    COMPOUND_FILE_STORE_EXTENSION,
  };

  /** File extensions that are added to a compound file
   * (same as above, minus "del", "gen", "cfs"). */
  static final String[] INDEX_EXTENSIONS_IN_COMPOUND_FILE = new String[] {
    FIELD_INFOS_EXTENSION,
    FIELDS_INDEX_EXTENSION,
    FIELDS_EXTENSION,
    TERMS_INDEX_EXTENSION,
    TERMS_EXTENSION,
    FREQ_EXTENSION,
    PROX_EXTENSION,
    VECTORS_INDEX_EXTENSION,
    VECTORS_DOCUMENTS_EXTENSION,
    VECTORS_FIELDS_EXTENSION,
    NORMS_EXTENSION
  };

  /** Extensions of the files that make up a doc store. */
  static final String[] STORE_INDEX_EXTENSIONS = new String[] {
    VECTORS_INDEX_EXTENSION,
    VECTORS_FIELDS_EXTENSION,
    VECTORS_DOCUMENTS_EXTENSION,
    FIELDS_INDEX_EXTENSION,
    FIELDS_EXTENSION
  };

  /** Extensions of the per-segment files that are not doc store files. */
  static final String[] NON_STORE_INDEX_EXTENSIONS = new String[] {
    FIELD_INFOS_EXTENSION,
    FREQ_EXTENSION,
    PROX_EXTENSION,
    TERMS_EXTENSION,
    TERMS_INDEX_EXTENSION,
    NORMS_EXTENSION
  };

  /** File extensions of old-style index files */
  static final String COMPOUND_EXTENSIONS[] = new String[] {
    FIELD_INFOS_EXTENSION,
    FREQ_EXTENSION,
    PROX_EXTENSION,
    FIELDS_INDEX_EXTENSION,
    FIELDS_EXTENSION,
    TERMS_INDEX_EXTENSION,
    TERMS_EXTENSION
  };

  /** File extensions for term vector support */
  static final String VECTOR_EXTENSIONS[] = new String[] {
    VECTORS_INDEX_EXTENSION,
    VECTORS_DOCUMENTS_EXTENSION,
    VECTORS_FIELDS_EXTENSION
  };

  /**
   * Computes the full file name from base, extension and
   * generation. If the generation is -1, the file name is
   * null. If it's 0, the file name is &lt;base&gt;&lt;extension&gt;.
   * If it's > 0, the file name is &lt;base&gt;_&lt;generation&gt;&lt;extension&gt;
   * (the generation is rendered in base 36).
   *
   * @param base -- main part of the file name
   * @param extension -- extension of the filename (including .)
   * @param gen -- generation
   */
  static final String fileNameFromGeneration(String base, String extension, long gen) {
    if (gen == SegmentInfo.NO) {
      return null;
    } else if (gen == SegmentInfo.WITHOUT_GEN) {
      return base + extension;
    } else {
      return base + "_" + Long.toString(gen, Character.MAX_RADIX) + extension;
    }
  }

  /**
   * Returns true if the provided filename is one of the doc
   * store files (ends with an extension in
   * STORE_INDEX_EXTENSIONS).
   */
  static final boolean isDocStoreFile(String fileName) {
    if (fileName.endsWith(COMPOUND_FILE_STORE_EXTENSION))
      return true;
    // Idiom: enhanced-for replaces the original index-based loop;
    // behavior is identical.
    for (final String ext : STORE_INDEX_EXTENSIONS)
      if (fileName.endsWith(ext))
        return true;
    return false;
  }

  /** Returns segmentName + "." + ext (same convention as
   *  SegmentWriteState.segmentFileName). */
  static String segmentFileName(String segmentName, String ext) {
    return segmentName + "." + ext;
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/IndexFileNames.java | Java | art | 6,251 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.ThreadInterruptedException;
import java.io.IOException;
import java.util.List;
import java.util.ArrayList;
/** A {@link MergeScheduler} that runs each merge using a
 * separate thread, up until a maximum number of threads
 * ({@link #setMaxThreadCount}) at which when a merge is
 * needed, the thread(s) that are updating the index will
 * pause until one or more merges completes. This is a
 * simple way to use concurrency in the indexing process
 * without having to create and manage application level
 * threads. */
public class ConcurrentMergeScheduler extends MergeScheduler {

  // Priority merge threads run at; -1 means "not yet set" and is
  // lazily defaulted in initMergeThreadPriority().
  private int mergeThreadPriority = -1;

  // Merge threads launched by this scheduler; each thread removes
  // itself in MergeThread.run's finally block. Guarded by "this".
  protected List<MergeThread> mergeThreads = new ArrayList<MergeThread>();

  // Max number of threads allowed to be merging at once
  private int maxThreadCount = 1;

  // Directory of the index being merged; used only for log messages here.
  protected Directory dir;

  private boolean closed;
  protected IndexWriter writer;

  // Monotonic counter used to name merge threads.
  protected int mergeThreadCount;

  public ConcurrentMergeScheduler() {
    if (allInstances != null) {
      // Only for testing
      addMyself();
    }
  }

  /** Sets the max # simultaneous threads that may be
   * running. If a merge is necessary yet we already have
   * this many threads running, the incoming thread (that
   * is calling add/updateDocument) will block until
   * a merge thread has completed. */
  public void setMaxThreadCount(int count) {
    if (count < 1)
      throw new IllegalArgumentException("count should be at least 1");
    maxThreadCount = count;
  }

  /** Get the max # simultaneous threads that may be
   * running. @see #setMaxThreadCount. */
  public int getMaxThreadCount() {
    return maxThreadCount;
  }

  /** Return the priority that merge threads run at. By
   * default the priority is 1 plus the priority of (ie,
   * slightly higher priority than) the first thread that
   * calls merge. */
  public synchronized int getMergeThreadPriority() {
    initMergeThreadPriority();
    return mergeThreadPriority;
  }

  /** Set the priority that merge threads run at; also applied
   * to all merge threads that are already running. */
  public synchronized void setMergeThreadPriority(int pri) {
    if (pri > Thread.MAX_PRIORITY || pri < Thread.MIN_PRIORITY)
      throw new IllegalArgumentException("priority must be in range " + Thread.MIN_PRIORITY + " .. " + Thread.MAX_PRIORITY + " inclusive");
    mergeThreadPriority = pri;

    final int numThreads = mergeThreadCount();
    for(int i=0;i<numThreads;i++) {
      MergeThread merge = mergeThreads.get(i);
      merge.setThreadPriority(pri);
    }
  }

  // True if the writer has logging enabled.
  private boolean verbose() {
    return writer != null && writer.verbose();
  }

  // Routes a log message through the writer, prefixed "CMS: ".
  private void message(String message) {
    if (verbose())
      writer.message("CMS: " + message);
  }

  private synchronized void initMergeThreadPriority() {
    if (mergeThreadPriority == -1) {
      // Default to slightly higher priority than our
      // calling thread
      mergeThreadPriority = 1+Thread.currentThread().getPriority();
      if (mergeThreadPriority > Thread.MAX_PRIORITY)
        mergeThreadPriority = Thread.MAX_PRIORITY;
    }
  }

  @Override
  public void close() {
    // NOTE: does not wait for running merge threads; use sync() for that.
    closed = true;
  }

  /** Blocks until all merge threads spawned by this scheduler have
   *  finished; woken by each exiting thread's notifyAll(). */
  public synchronized void sync() {
    while(mergeThreadCount() > 0) {
      if (verbose())
        message("now wait for threads; currently " + mergeThreads.size() + " still running");
      final int count = mergeThreads.size();
      if (verbose()) {
        for(int i=0;i<count;i++)
          message(" " + i + ": " + mergeThreads.get(i));
      }
      try {
        wait();
      } catch (InterruptedException ie) {
        throw new ThreadInterruptedException(ie);
      }
    }
  }

  // Counts only threads that are still alive (entries may linger
  // briefly in mergeThreads after a thread dies).
  private synchronized int mergeThreadCount() {
    int count = 0;
    final int numThreads = mergeThreads.size();
    for(int i=0;i<numThreads;i++)
      if (mergeThreads.get(i).isAlive())
        count++;
    return count;
  }

  @Override
  public void merge(IndexWriter writer)
    throws CorruptIndexException, IOException {

    // Caller must not hold the writer's lock, or the spawned merge
    // threads could deadlock against it.
    assert !Thread.holdsLock(writer);

    this.writer = writer;

    initMergeThreadPriority();

    dir = writer.getDirectory();

    // First, quickly run through the newly proposed merges
    // and add any orthogonal merges (ie a merge not
    // involving segments already pending to be merged) to
    // the queue. If we are way behind on merging, many of
    // these newly proposed merges will likely already be
    // registered.

    if (verbose()) {
      message("now merge");
      message(" index: " + writer.segString());
    }

    // Iterate, pulling from the IndexWriter's queue of
    // pending merges, until it's empty:
    while(true) {

      // TODO: we could be careful about which merges to do in
      // the BG (eg maybe the "biggest" ones) vs FG, which
      // merges to do first (the easiest ones?), etc.

      MergePolicy.OneMerge merge = writer.getNextMerge();
      if (merge == null) {
        if (verbose())
          message(" no more merges pending; now return");
        return;
      }

      // We do this w/ the primary thread to keep
      // deterministic assignment of segment names
      writer.mergeInit(merge);

      boolean success = false;
      try {
        synchronized(this) {
          final MergeThread merger;
          // Throttle: block the indexing thread until a merge slot frees up.
          while (mergeThreadCount() >= maxThreadCount) {
            if (verbose())
              message(" too many merge threads running; stalling...");
            try {
              wait();
            } catch (InterruptedException ie) {
              throw new ThreadInterruptedException(ie);
            }
          }

          if (verbose())
            message(" consider merge " + merge.segString(dir));

          assert mergeThreadCount() < maxThreadCount;

          // OK to spawn a new merge thread to handle this
          // merge:
          merger = getMergeThread(writer, merge);
          mergeThreads.add(merger);
          if (verbose())
            message(" launch new thread [" + merger.getName() + "]");

          merger.start();
          success = true;
        }
      } finally {
        if (!success) {
          // Thread was never started; unwind the writer-side merge state.
          writer.mergeFinish(merge);
        }
      }
    }
  }

  /** Does the actual merge, by calling {@link IndexWriter#merge} */
  protected void doMerge(MergePolicy.OneMerge merge)
    throws IOException {
    writer.merge(merge);
  }

  /** Create and return a new MergeThread */
  protected synchronized MergeThread getMergeThread(IndexWriter writer, MergePolicy.OneMerge merge) throws IOException {
    final MergeThread thread = new MergeThread(writer, merge);
    thread.setThreadPriority(mergeThreadPriority);
    thread.setDaemon(true);
    thread.setName("Lucene Merge Thread #" + mergeThreadCount++);
    return thread;
  }

  /** Background thread that runs its starting merge, then keeps pulling
   *  further pending merges from the writer until none remain. */
  protected class MergeThread extends Thread {

    IndexWriter writer;
    MergePolicy.OneMerge startMerge;
    MergePolicy.OneMerge runningMerge;

    public MergeThread(IndexWriter writer, MergePolicy.OneMerge startMerge) throws IOException {
      this.writer = writer;
      this.startMerge = startMerge;
    }

    public synchronized void setRunningMerge(MergePolicy.OneMerge merge) {
      runningMerge = merge;
    }

    public synchronized MergePolicy.OneMerge getRunningMerge() {
      return runningMerge;
    }

    public void setThreadPriority(int pri) {
      try {
        setPriority(pri);
      } catch (NullPointerException npe) {
        // Strangely, Sun's JDK 1.5 on Linux sometimes
        // throws NPE out of here...
      } catch (SecurityException se) {
        // Ignore this because we will still run fine with
        // normal thread priority
      }
    }

    @Override
    public void run() {

      // First time through the while loop we do the merge
      // that we were started with:
      MergePolicy.OneMerge merge = this.startMerge;

      try {

        if (verbose())
          message(" merge thread: start");

        while(true) {
          setRunningMerge(merge);
          doMerge(merge);

          // Subsequent times through the loop we do any new
          // merge that writer says is necessary:
          merge = writer.getNextMerge();
          if (merge != null) {
            writer.mergeInit(merge);
            if (verbose())
              message(" merge thread: do another merge " + merge.segString(dir));
          } else
            break;
        }

        if (verbose())
          message(" merge thread: done");

      } catch (Throwable exc) {

        // Ignore the exception if it was due to abort:
        if (!(exc instanceof MergePolicy.MergeAbortedException)) {
          if (!suppressExceptions) {
            // suppressExceptions is normally only set during
            // testing.
            anyExceptions = true;
            handleMergeException(exc);
          }
        }

      } finally {
        synchronized(ConcurrentMergeScheduler.this) {
          // Wake anyone blocked in sync() or in merge()'s throttle loop,
          // then deregister this thread.
          ConcurrentMergeScheduler.this.notifyAll();
          boolean removed = mergeThreads.remove(this);
          assert removed;
        }
      }
    }

    @Override
    public String toString() {
      MergePolicy.OneMerge merge = getRunningMerge();
      if (merge == null)
        merge = startMerge;
      return "merge thread: " + merge.segString(dir);
    }
  }

  /** Called when an exception is hit in a background merge
   * thread */
  protected void handleMergeException(Throwable exc) {
    try {
      // When an exception is hit during merge, IndexWriter
      // removes any partial files and then allows another
      // merge to run. If whatever caused the error is not
      // transient then the exception will keep happening,
      // so, we sleep here to avoid saturating CPU in such
      // cases:
      Thread.sleep(250);
    } catch (InterruptedException ie) {
      throw new ThreadInterruptedException(ie);
    }
    throw new MergePolicy.MergeException(exc, dir);
  }

  // Set (never cleared by merge threads themselves) when any merge
  // thread hit an unsuppressed exception; inspected by tests.
  static boolean anyExceptions = false;

  /** Used for testing */
  public static boolean anyUnhandledExceptions() {
    if (allInstances == null) {
      throw new RuntimeException("setTestMode() was not called; often this is because your test case's setUp method fails to call super.setUp in LuceneTestCase");
    }
    synchronized(allInstances) {
      final int count = allInstances.size();
      // Make sure all outstanding threads are done so we see
      // any exceptions they may produce:
      for(int i=0;i<count;i++)
        allInstances.get(i).sync();
      boolean v = anyExceptions;
      anyExceptions = false;
      return v;
    }
  }

  /** Used for testing.
   *  NOTE(review): unlike anyUnhandledExceptions(), this does not
   *  null-check allInstances, so calling it before setTestMode()
   *  throws NullPointerException — confirm whether that is intended. */
  public static void clearUnhandledExceptions() {
    synchronized(allInstances) {
      anyExceptions = false;
    }
  }

  /** Used for testing */
  private void addMyself() {
    synchronized(allInstances) {
      final int size = allInstances.size();
      int upto = 0;
      // Compact the list in place, dropping schedulers that are closed
      // and have no live threads, then append this instance.
      for(int i=0;i<size;i++) {
        final ConcurrentMergeScheduler other = allInstances.get(i);
        if (!(other.closed && 0 == other.mergeThreadCount()))
          // Keep this one for now: it still has threads or
          // may spawn new threads
          allInstances.set(upto++, other);
      }
      allInstances.subList(upto, allInstances.size()).clear();
      allInstances.add(this);
    }
  }

  // When true, merge-thread exceptions are swallowed (test-only).
  private boolean suppressExceptions;

  /** Used for testing */
  void setSuppressExceptions() {
    suppressExceptions = true;
  }

  /** Used for testing */
  void clearSuppressExceptions() {
    suppressExceptions = false;
  }

  /** Used for testing; null unless setTestMode() has been called. */
  private static List<ConcurrentMergeScheduler> allInstances;
  public static void setTestMode() {
    allInstances = new ArrayList<ConcurrentMergeScheduler>();
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/ConcurrentMergeScheduler.java | Java | art | 12,527 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.document.AbstractField;
import org.apache.lucene.document.CompressionTools;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldSelector;
import org.apache.lucene.document.FieldSelectorResult;
import org.apache.lucene.document.Fieldable;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.store.BufferedIndexInput;
import org.apache.lucene.util.CloseableThreadLocal;
import java.io.IOException;
import java.io.Reader;
import java.util.zip.DataFormatException;
/**
 * Class responsible for access to stored document fields.
 * <p/>
 * It uses <segment>.fdt (field data) and <segment>.fdx
 * (field index: one 8-byte offset per document) files.
 */
final class FieldsReader implements Cloneable {
private final FieldInfos fieldInfos;
// The main fieldStream, used only for cloning.
private final IndexInput cloneableFieldsStream;
// This is a clone of cloneableFieldsStream used for reading documents.
// It should not be cloned outside of a synchronized context.
private final IndexInput fieldsStream;
// Stream over the .fdx index file, used only for cloning.
private final IndexInput cloneableIndexStream;
// Clone of cloneableIndexStream used to look up per-document offsets.
private final IndexInput indexStream;
// Total number of docs in the (possibly shared) fields file.
private int numTotalDocs;
// Number of docs visible through this reader (may be a slice of a
// shared doc store).
private int size;
private boolean closed;
// Format version read from the .fdx header (0 for pre-header indexes).
private final int format;
// Number of header bytes to skip at the start of the .fdx file (0 or 4).
private final int formatSize;
// The docID offset where our docs begin in the index
// file. This will be 0 if we have our own private file.
private int docStoreOffset;
// Per-thread clone of the fields stream; used by LazyField so that
// concurrent lazy loads do not disturb each other's file position.
private CloseableThreadLocal<IndexInput> fieldsStreamTL = new CloseableThreadLocal<IndexInput>();
// True only for the reader that opened the files; clones must not
// close the shared cloneable streams (see close()).
private boolean isOriginal = false;
/** Returns a cloned FieldsReader that shares open
 * IndexInputs with the original one. It is the caller's
 * job not to close the original FieldsReader until all
 * clones are called (eg, currently SegmentReader manages
 * this logic). */
@Override
public Object clone() {
ensureOpen();
return new FieldsReader(fieldInfos, numTotalDocs, size, format, formatSize, docStoreOffset, cloneableFieldsStream, cloneableIndexStream);
}
// Used only by clone
private FieldsReader(FieldInfos fieldInfos, int numTotalDocs, int size, int format, int formatSize,
int docStoreOffset, IndexInput cloneableFieldsStream, IndexInput cloneableIndexStream) {
this.fieldInfos = fieldInfos;
this.numTotalDocs = numTotalDocs;
this.size = size;
this.format = format;
this.formatSize = formatSize;
this.docStoreOffset = docStoreOffset;
this.cloneableFieldsStream = cloneableFieldsStream;
this.cloneableIndexStream = cloneableIndexStream;
// The clone gets private stream positions but shares the underlying
// open files (isOriginal stays false, so close() won't close them).
fieldsStream = (IndexInput) cloneableFieldsStream.clone();
indexStream = (IndexInput) cloneableIndexStream.clone();
}
FieldsReader(Directory d, String segment, FieldInfos fn) throws IOException {
this(d, segment, fn, BufferedIndexInput.BUFFER_SIZE, -1, 0);
}
FieldsReader(Directory d, String segment, FieldInfos fn, int readBufferSize) throws IOException {
this(d, segment, fn, readBufferSize, -1, 0);
}
// Primary constructor. docStoreOffset == -1 means this segment owns a
// private fields file; otherwise we expose only a [docStoreOffset,
// docStoreOffset+size) slice of a shared doc store.
FieldsReader(Directory d, String segment, FieldInfos fn, int readBufferSize, int docStoreOffset, int size) throws IOException {
boolean success = false;
isOriginal = true;
try {
fieldInfos = fn;
cloneableFieldsStream = d.openInput(segment + "." + IndexFileNames.FIELDS_EXTENSION, readBufferSize);
cloneableIndexStream = d.openInput(segment + "." + IndexFileNames.FIELDS_INDEX_EXTENSION, readBufferSize);
// First version of fdx did not include a format
// header, but, the first int will always be 0 in that
// case
int firstInt = cloneableIndexStream.readInt();
if (firstInt == 0)
format = 0;
else
format = firstInt;
if (format > FieldsWriter.FORMAT_CURRENT)
throw new CorruptIndexException("Incompatible format version: " + format + " expected "
+ FieldsWriter.FORMAT_CURRENT + " or lower");
if (format > FieldsWriter.FORMAT)
formatSize = 4;
else
formatSize = 0;
// Pre-UTF8 formats stored strings as modified UTF-8 (java
// writeUTF-style); tell the stream so readString() decodes them.
if (format < FieldsWriter.FORMAT_VERSION_UTF8_LENGTH_IN_BYTES)
cloneableFieldsStream.setModifiedUTF8StringsMode();
fieldsStream = (IndexInput) cloneableFieldsStream.clone();
// indexSize excludes the header; each doc contributes one long (8 bytes).
final long indexSize = cloneableIndexStream.length()-formatSize;
if (docStoreOffset != -1) {
// We read only a slice out of this shared fields file
this.docStoreOffset = docStoreOffset;
this.size = size;
// Verify the file is long enough to hold all of our
// docs
assert ((int) (indexSize / 8)) >= size + this.docStoreOffset: "indexSize=" + indexSize + " size=" + size + " docStoreOffset=" + docStoreOffset;
} else {
this.docStoreOffset = 0;
this.size = (int) (indexSize >> 3);
}
indexStream = (IndexInput) cloneableIndexStream.clone();
numTotalDocs = (int) (indexSize >> 3);
success = true;
} finally {
// With lock-less commits, it's entirely possible (and
// fine) to hit a FileNotFound exception above. In
// this case, we want to explicitly close any subset
// of things that were opened so that we don't have to
// wait for a GC to do so.
if (!success) {
close();
}
}
}
/**
 * @throws AlreadyClosedException if this FieldsReader is closed
 */
protected final void ensureOpen() throws AlreadyClosedException {
if (closed) {
throw new AlreadyClosedException("this FieldsReader is closed");
}
}
/**
 * Closes the underlying {@link org.apache.lucene.store.IndexInput} streams, including any ones associated with a
 * lazy implementation of a Field. This means that the Fields values will not be accessible.
 *
 * @throws IOException
 */
final void close() throws IOException {
if (!closed) {
if (fieldsStream != null) {
fieldsStream.close();
}
// Only the original reader owns the shared cloneable streams;
// clones must leave them open for their siblings.
if (isOriginal) {
if (cloneableFieldsStream != null) {
cloneableFieldsStream.close();
}
if (cloneableIndexStream != null) {
cloneableIndexStream.close();
}
}
if (indexStream != null) {
indexStream.close();
}
// Also closes any per-thread stream clones created for lazy fields.
fieldsStreamTL.close();
closed = true;
}
}
/** Number of documents visible through this reader. */
final int size() {
return size;
}
// Positions indexStream on the 8-byte offset entry for docID,
// accounting for the format header and the doc-store offset.
private final void seekIndex(int docID) throws IOException {
indexStream.seek(formatSize + (docID + docStoreOffset) * 8L);
}
boolean canReadRawDocs() {
// Disable reading raw docs in 2.x format, because of the removal of compressed
// fields in 3.0. We don't want rawDocs() to decode field bits to figure out
// if a field was compressed, hence we enforce ordinary (non-raw) stored field merges
// for <3.0 indexes.
return format >= FieldsWriter.FORMAT_LUCENE_3_0_NO_COMPRESSED_FIELDS;
}
/**
 * Loads the stored fields of document {@code n}, consulting the given
 * {@link FieldSelector} per field to decide whether to load eagerly,
 * load lazily, record only the size, skip it, or stop reading further
 * fields. A null selector loads every field eagerly.
 */
final Document doc(int n, FieldSelector fieldSelector) throws CorruptIndexException, IOException {
seekIndex(n);
long position = indexStream.readLong();
fieldsStream.seek(position);
Document doc = new Document();
int numFields = fieldsStream.readVInt();
for (int i = 0; i < numFields; i++) {
int fieldNumber = fieldsStream.readVInt();
FieldInfo fi = fieldInfos.fieldInfo(fieldNumber);
FieldSelectorResult acceptField = fieldSelector == null ? FieldSelectorResult.LOAD : fieldSelector.accept(fi.name);
// One bit-flag byte per field: compressed / tokenized / binary.
byte bits = fieldsStream.readByte();
assert bits <= FieldsWriter.FIELD_IS_COMPRESSED + FieldsWriter.FIELD_IS_TOKENIZED + FieldsWriter.FIELD_IS_BINARY;
boolean compressed = (bits & FieldsWriter.FIELD_IS_COMPRESSED) != 0;
assert (compressed ? (format < FieldsWriter.FORMAT_LUCENE_3_0_NO_COMPRESSED_FIELDS) : true)
: "compressed fields are only allowed in indexes of version <= 2.9";
boolean tokenize = (bits & FieldsWriter.FIELD_IS_TOKENIZED) != 0;
boolean binary = (bits & FieldsWriter.FIELD_IS_BINARY) != 0;
//TODO: Find an alternative approach here if this list continues to grow beyond the
//list of 5 or 6 currently here. See Lucene 762 for discussion
if (acceptField.equals(FieldSelectorResult.LOAD)) {
addField(doc, fi, binary, compressed, tokenize);
}
else if (acceptField.equals(FieldSelectorResult.LOAD_AND_BREAK)){
addField(doc, fi, binary, compressed, tokenize);
break;//Get out of this loop
}
else if (acceptField.equals(FieldSelectorResult.LAZY_LOAD)) {
addFieldLazy(doc, fi, binary, compressed, tokenize);
}
else if (acceptField.equals(FieldSelectorResult.SIZE)){
// addFieldSize reads only the length; skip the content explicitly.
skipField(binary, compressed, addFieldSize(doc, fi, binary, compressed));
}
else if (acceptField.equals(FieldSelectorResult.SIZE_AND_BREAK)){
addFieldSize(doc, fi, binary, compressed);
break;
}
else {
skipField(binary, compressed);
}
}
return doc;
}
/** Returns the length in bytes of each raw document in a
 * contiguous range of length numDocs starting with
 * startDocID. Returns the IndexInput (the fieldStream),
 * already seeked to the starting point for startDocID.*/
final IndexInput rawDocs(int[] lengths, int startDocID, int numDocs) throws IOException {
seekIndex(startDocID);
long startOffset = indexStream.readLong();
long lastOffset = startOffset;
int count = 0;
while (count < numDocs) {
final long offset;
// +1: a doc's length is the delta to the NEXT doc's start offset;
// the last doc in the file ends at the end of the fields file.
final int docID = docStoreOffset + startDocID + count + 1;
assert docID <= numTotalDocs;
if (docID < numTotalDocs)
offset = indexStream.readLong();
else
offset = fieldsStream.length();
lengths[count++] = (int) (offset-lastOffset);
lastOffset = offset;
}
fieldsStream.seek(startOffset);
return fieldsStream;
}
/**
 * Skip the field. We still have to read some of the information about the field, but can skip past the actual content.
 * This will have the most payoff on large fields.
 */
private void skipField(boolean binary, boolean compressed) throws IOException {
skipField(binary, compressed, fieldsStream.readVInt());
}
// Skips toRead units of field content: bytes for binary/compressed
// fields and for the UTF-8-byte-length format, chars for older formats.
private void skipField(boolean binary, boolean compressed, int toRead) throws IOException {
if (format >= FieldsWriter.FORMAT_VERSION_UTF8_LENGTH_IN_BYTES || binary || compressed) {
fieldsStream.seek(fieldsStream.getFilePointer() + toRead);
} else {
// We need to skip chars. This will slow us down, but still better
fieldsStream.skipChars(toRead);
}
}
// Adds the field as a LazyField that records only its length and file
// pointer now; the actual value is read from disk on first access.
private void addFieldLazy(Document doc, FieldInfo fi, boolean binary, boolean compressed, boolean tokenize) throws IOException {
if (binary) {
int toRead = fieldsStream.readVInt();
long pointer = fieldsStream.getFilePointer();
//was: doc.add(new Fieldable(fi.name, b, Fieldable.Store.YES));
doc.add(new LazyField(fi.name, Field.Store.YES, toRead, pointer, binary, compressed));
//Need to move the pointer ahead by toRead positions
fieldsStream.seek(pointer + toRead);
} else {
Field.Store store = Field.Store.YES;
Field.Index index = Field.Index.toIndex(fi.isIndexed, tokenize);
Field.TermVector termVector = Field.TermVector.toTermVector(fi.storeTermVector, fi.storeOffsetWithTermVector, fi.storePositionWithTermVector);
AbstractField f;
if (compressed) {
int toRead = fieldsStream.readVInt();
long pointer = fieldsStream.getFilePointer();
f = new LazyField(fi.name, store, toRead, pointer, binary, compressed);
//skip over the part that we aren't loading
fieldsStream.seek(pointer + toRead);
f.setOmitNorms(fi.omitNorms);
f.setOmitTermFreqAndPositions(fi.omitTermFreqAndPositions);
} else {
int length = fieldsStream.readVInt();
long pointer = fieldsStream.getFilePointer();
//Skip ahead of where we are by the length of what is stored
if (format >= FieldsWriter.FORMAT_VERSION_UTF8_LENGTH_IN_BYTES) {
fieldsStream.seek(pointer+length);
} else {
fieldsStream.skipChars(length);
}
f = new LazyField(fi.name, store, index, termVector, length, pointer, binary, compressed);
f.setOmitNorms(fi.omitNorms);
f.setOmitTermFreqAndPositions(fi.omitTermFreqAndPositions);
}
doc.add(f);
}
}
// Eagerly reads the field value (uncompressing it if needed) and adds
// it to the document.
private void addField(Document doc, FieldInfo fi, boolean binary, boolean compressed, boolean tokenize) throws CorruptIndexException, IOException {
//we have a binary stored field, and it may be compressed
if (binary) {
int toRead = fieldsStream.readVInt();
final byte[] b = new byte[toRead];
fieldsStream.readBytes(b, 0, b.length);
if (compressed) {
doc.add(new Field(fi.name, uncompress(b), Field.Store.YES));
} else {
doc.add(new Field(fi.name, b, Field.Store.YES));
}
} else {
Field.Store store = Field.Store.YES;
Field.Index index = Field.Index.toIndex(fi.isIndexed, tokenize);
Field.TermVector termVector = Field.TermVector.toTermVector(fi.storeTermVector, fi.storeOffsetWithTermVector, fi.storePositionWithTermVector);
AbstractField f;
if (compressed) {
int toRead = fieldsStream.readVInt();
final byte[] b = new byte[toRead];
fieldsStream.readBytes(b, 0, b.length);
f = new Field(fi.name, // field name
false,
new String(uncompress(b), "UTF-8"), // uncompress the value and add as string
store,
index,
termVector);
f.setOmitTermFreqAndPositions(fi.omitTermFreqAndPositions);
f.setOmitNorms(fi.omitNorms);
} else {
f = new Field(fi.name, // name
false,
fieldsStream.readString(), // read value
store,
index,
termVector);
f.setOmitTermFreqAndPositions(fi.omitTermFreqAndPositions);
f.setOmitNorms(fi.omitNorms);
}
doc.add(f);
}
}
// Add the size of field as a byte[] containing the 4 bytes of the integer byte size (high order byte first; char = 2 bytes)
// Read just the size -- caller must skip the field content to continue reading fields
// Return the size in bytes or chars, depending on field type
private int addFieldSize(Document doc, FieldInfo fi, boolean binary, boolean compressed) throws IOException {
int size = fieldsStream.readVInt(), bytesize = binary || compressed ? size : 2*size;
byte[] sizebytes = new byte[4];
sizebytes[0] = (byte) (bytesize>>>24);
sizebytes[1] = (byte) (bytesize>>>16);
sizebytes[2] = (byte) (bytesize>>> 8);
sizebytes[3] = (byte) bytesize ;
doc.add(new Field(fi.name, sizebytes, Field.Store.YES));
return size;
}
/**
 * A Lazy implementation of Fieldable that defers loading of fields until asked for, instead of when the Document is
 * loaded.
 */
private class LazyField extends AbstractField implements Fieldable {
// Length of the stored value (bytes, or chars for old string formats).
private int toRead;
// Absolute file position of the value in the .fdt file.
private long pointer;
/** @deprecated Only kept for backward-compatibility with <3.0 indexes. Will be removed in 4.0. */
private boolean isCompressed;
// Constructor used for binary and compressed fields (index/termVector
// are irrelevant for those).
public LazyField(String name, Field.Store store, int toRead, long pointer, boolean isBinary, boolean isCompressed) {
super(name, store, Field.Index.NO, Field.TermVector.NO);
this.toRead = toRead;
this.pointer = pointer;
this.isBinary = isBinary;
if (isBinary)
binaryLength = toRead;
lazy = true;
this.isCompressed = isCompressed;
}
// Constructor used for plain stored strings.
public LazyField(String name, Field.Store store, Field.Index index, Field.TermVector termVector, int toRead, long pointer, boolean isBinary, boolean isCompressed) {
super(name, store, index, termVector);
this.toRead = toRead;
this.pointer = pointer;
this.isBinary = isBinary;
if (isBinary)
binaryLength = toRead;
lazy = true;
this.isCompressed = isCompressed;
}
// Returns this thread's private clone of the fields stream, creating
// it on first use, so lazy loads are safe across threads.
private IndexInput getFieldStream() {
IndexInput localFieldsStream = fieldsStreamTL.get();
if (localFieldsStream == null) {
localFieldsStream = (IndexInput) cloneableFieldsStream.clone();
fieldsStreamTL.set(localFieldsStream);
}
return localFieldsStream;
}
/** The value of the field as a Reader, or null. If null, the String value,
 * binary value, or TokenStream value is used. Exactly one of stringValue(),
 * readerValue(), getBinaryValue(), and tokenStreamValue() must be set. */
public Reader readerValue() {
ensureOpen();
return null;
}
/** The value of the field as a TokenStream, or null. If null, the Reader value,
 * String value, or binary value is used. Exactly one of stringValue(),
 * readerValue(), getBinaryValue(), and tokenStreamValue() must be set. */
public TokenStream tokenStreamValue() {
ensureOpen();
return null;
}
/** The value of the field as a String, or null. If null, the Reader value,
 * binary value, or TokenStream value is used. Exactly one of stringValue(),
 * readerValue(), getBinaryValue(), and tokenStreamValue() must be set. */
public String stringValue() {
ensureOpen();
if (isBinary)
return null;
else {
// Load and cache the value on first access.
if (fieldsData == null) {
IndexInput localFieldsStream = getFieldStream();
try {
localFieldsStream.seek(pointer);
if (isCompressed) {
final byte[] b = new byte[toRead];
localFieldsStream.readBytes(b, 0, b.length);
fieldsData = new String(uncompress(b), "UTF-8");
} else {
if (format >= FieldsWriter.FORMAT_VERSION_UTF8_LENGTH_IN_BYTES) {
byte[] bytes = new byte[toRead];
localFieldsStream.readBytes(bytes, 0, toRead);
fieldsData = new String(bytes, "UTF-8");
} else {
//read in chars b/c we already know the length we need to read
char[] chars = new char[toRead];
localFieldsStream.readChars(chars, 0, toRead);
fieldsData = new String(chars);
}
}
} catch (IOException e) {
throw new FieldReaderException(e);
}
}
return (String) fieldsData;
}
}
public long getPointer() {
ensureOpen();
return pointer;
}
public void setPointer(long pointer) {
ensureOpen();
this.pointer = pointer;
}
public int getToRead() {
ensureOpen();
return toRead;
}
public void setToRead(int toRead) {
ensureOpen();
this.toRead = toRead;
}
@Override
public byte[] getBinaryValue(byte[] result) {
ensureOpen();
if (isBinary) {
if (fieldsData == null) {
// Allocate new buffer if result is null or too small
final byte[] b;
if (result == null || result.length < toRead)
b = new byte[toRead];
else
b = result;
IndexInput localFieldsStream = getFieldStream();
// Throw this IOException since IndexReader.document does so anyway, so probably not that big of a change for people
// since they are already handling this exception when getting the document
try {
localFieldsStream.seek(pointer);
localFieldsStream.readBytes(b, 0, toRead);
if (isCompressed == true) {
fieldsData = uncompress(b);
} else {
fieldsData = b;
}
} catch (IOException e) {
throw new FieldReaderException(e);
}
binaryOffset = 0;
binaryLength = toRead;
}
return (byte[]) fieldsData;
} else
// Not a binary field: caller should use stringValue() instead.
return null;
}
}
// Decompresses a stored field value; wraps format errors (e.g. the
// data was not actually compressed) as CorruptIndexException.
private byte[] uncompress(byte[] b)
throws CorruptIndexException {
try {
return CompressionTools.decompress(b);
} catch (DataFormatException e) {
// this will happen if the field is not compressed
CorruptIndexException newException = new CorruptIndexException("field data are in wrong format: " + e.toString());
newException.initCause(e);
throw newException;
}
}
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/FieldsReader.java | Java | art | 21,400 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
/**
 * Per-thread view of a terms-hash consumer in the indexing chain.
 * Implementations are notified of document boundaries and create a
 * per-field consumer for each field handed to them.
 */
abstract class TermsHashConsumerPerThread {
/** Called before each document is processed. */
abstract void startDocument() throws IOException;
/** Called after each document; returns a DocWriter for any deferred
 * per-document work (contract defined by DocumentsWriter). */
abstract DocumentsWriter.DocWriter finishDocument() throws IOException;
/** Creates the per-field consumer paired with the given TermsHashPerField. */
abstract public TermsHashConsumerPerField addField(TermsHashPerField termsHashPerField, FieldInfo fieldInfo);
/** Called when an aborting exception is hit; discard buffered state. */
abstract public void abort();
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/TermsHashConsumerPerThread.java | Java | art | 1,183 |
<!doctype html public "-//w3c//dtd html 4.0 transitional//en">
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
</head>
<body>
Code to maintain and access indices.
</body>
</html>
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/package.html | HTML | art | 997 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.util.PriorityQueue;
import java.io.IOException;
import java.util.Arrays;
import java.util.LinkedList;
import java.util.List;
/**
 * Allows you to iterate over the {@link TermPositions} for multiple {@link Term}s as
 * a single {@link TermPositions}: the union of the terms' docs, with the
 * positions of all terms on each doc merged and sorted.
 */
public class MultipleTermPositions implements TermPositions {
// Min-heap of the underlying enumerators, ordered by current doc.
private static final class TermPositionsQueue extends PriorityQueue<TermPositions> {
TermPositionsQueue(List<TermPositions> termPositions) throws IOException {
initialize(termPositions.size());
// Only enqueue enumerators that are positioned on at least one doc.
for (TermPositions tp : termPositions) {
if (tp.next())
add(tp);
}
}
final TermPositions peek() {
return top();
}
@Override
public final boolean lessThan(TermPositions a, TermPositions b) {
return a.doc() < b.doc();
}
}
// Simple growable int FIFO used to collect (and sort in place) the
// merged positions for the current doc.
private static final class IntQueue {
private int _arraySize = 16;
private int _index = 0;      // next element to hand out via next()
private int _lastIndex = 0;  // next free slot for add()
private int[] _array = new int[_arraySize];
final void add(int i) {
if (_lastIndex == _arraySize)
growArray();
_array[_lastIndex++] = i;
}
final int next() {
return _array[_index++];
}
// Sorts only the pending (un-consumed) portion.
final void sort() {
Arrays.sort(_array, _index, _lastIndex);
}
final void clear() {
_index = 0;
_lastIndex = 0;
}
final int size() {
return (_lastIndex - _index);
}
// Doubles the backing array, preserving existing contents.
private void growArray() {
int[] newArray = new int[_arraySize * 2];
System.arraycopy(_array, 0, newArray, 0, _arraySize);
_array = newArray;
_arraySize *= 2;
}
}
private int _doc;   // current doc id
private int _freq;  // merged position count for the current doc
private TermPositionsQueue _termPositionsQueue;
private IntQueue _posList;
/**
 * Creates a new <code>MultipleTermPositions</code> instance over the
 * given terms of the given reader.
 *
 * @exception IOException
 */
public MultipleTermPositions(IndexReader indexReader, Term[] terms) throws IOException {
List<TermPositions> termPositions = new LinkedList<TermPositions>();
for (int i = 0; i < terms.length; i++)
termPositions.add(indexReader.termPositions(terms[i]));
_termPositionsQueue = new TermPositionsQueue(termPositions);
_posList = new IntQueue();
}
// Advances to the next doc matched by any term, merging and sorting
// the positions of every enumerator currently on that doc.
public final boolean next() throws IOException {
if (_termPositionsQueue.size() == 0)
return false;
_posList.clear();
_doc = _termPositionsQueue.peek().doc();
TermPositions tp;
do {
tp = _termPositionsQueue.peek();
// Collect all positions of this enumerator for the current doc.
for (int i = 0; i < tp.freq(); i++)
_posList.add(tp.nextPosition());
if (tp.next())
_termPositionsQueue.updateTop();
else {
// Exhausted: remove from the queue and release its resources.
_termPositionsQueue.pop();
tp.close();
}
} while (_termPositionsQueue.size() > 0 && _termPositionsQueue.peek().doc() == _doc);
_posList.sort();
_freq = _posList.size();
return true;
}
public final int nextPosition() {
return _posList.next();
}
public final boolean skipTo(int target) throws IOException {
// Advance every enumerator that is behind target (dropping the ones
// that run out), then settle on the first remaining doc via next().
while (_termPositionsQueue.peek() != null && target > _termPositionsQueue.peek().doc()) {
TermPositions tp = _termPositionsQueue.pop();
if (tp.skipTo(target))
_termPositionsQueue.add(tp);
else
tp.close();
}
return next();
}
public final int doc() {
return _doc;
}
public final int freq() {
return _freq;
}
public final void close() throws IOException {
while (_termPositionsQueue.size() > 0)
_termPositionsQueue.pop().close();
}
/**
 * Not implemented.
 * @throws UnsupportedOperationException
 */
public void seek(Term arg0) throws IOException {
throw new UnsupportedOperationException();
}
/**
 * Not implemented.
 * @throws UnsupportedOperationException
 */
public void seek(TermEnum termEnum) throws IOException {
throw new UnsupportedOperationException();
}
/**
 * Not implemented.
 * @throws UnsupportedOperationException
 */
public int read(int[] arg0, int[] arg1) throws IOException {
throw new UnsupportedOperationException();
}
/**
 * Not implemented.
 * @throws UnsupportedOperationException
 */
public int getPayloadLength() {
throw new UnsupportedOperationException();
}
/**
 * Not implemented.
 * @throws UnsupportedOperationException
 */
public byte[] getPayload(byte[] data, int offset) throws IOException {
throw new UnsupportedOperationException();
}
/**
 * Payloads are never exposed by this merged view.
 * @return false
 */
// TODO: Remove warning after API has been finalized
public boolean isPayloadAvailable() {
return false;
}
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/MultipleTermPositions.java | Java | art | 5,439 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.Collection;
import java.util.Map;
/**
 * Abstract stage of the indexing chain that consumes per-field data;
 * per-thread instances are created via {@link #addThread}.
 */
abstract class DocFieldConsumer {
// Shared field metadata, injected by the owner via setFieldInfos.
FieldInfos fieldInfos;
/** Called when DocumentsWriter decides to create a new
* segment */
abstract void flush(Map<DocFieldConsumerPerThread,Collection<DocFieldConsumerPerField>> threadsAndFields, SegmentWriteState state) throws IOException;
/** Called when DocumentsWriter decides to close the doc
* stores */
abstract void closeDocStore(SegmentWriteState state) throws IOException;
/** Called when an aborting exception is hit */
abstract void abort();
/** Add a new thread */
abstract DocFieldConsumerPerThread addThread(DocFieldProcessorPerThread docFieldProcessorPerThread) throws IOException;
/** Called when DocumentsWriter is using too much RAM.
* The consumer should free RAM, if possible, returning
* true if any RAM was in fact freed. */
abstract boolean freeRAM();
void setFieldInfos(FieldInfos fieldInfos) {
this.fieldInfos = fieldInfos;
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/DocFieldConsumer.java | Java | art | 1,863 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* Class that Posting and PostingVector use to write byte
* streams into shared fixed-size byte[] arrays. The idea
* is to allocate slices of increasing lengths For
* example, the first slice is 5 bytes, the next slice is
* 14, etc. We start by writing our bytes into the first
* 5 bytes. When we hit the end of the slice, we allocate
* the next slice and then write the address of the new
* slice into the last 4 bytes of the previous slice (the
* "forwarding address").
*
* Each slice is filled with 0's initially, and we mark
* the end with a non-zero byte. This way the methods
* that are writing into the slice don't need to record
* its length and instead allocate a new slice once they
* hit a non-zero byte. */
import java.util.Arrays;
import java.util.List;
final class ByteBlockPool {
// Source of byte[] blocks; lets the owner pool and recycle buffers
// instead of allocating fresh arrays each time.
abstract static class Allocator {
abstract void recycleByteBlocks(byte[][] blocks, int start, int end);
abstract void recycleByteBlocks(List<byte[]> blocks);
abstract byte[] getByteBlock(boolean trackAllocations);
}
public byte[][] buffers = new byte[10][];
int bufferUpto = -1; // Which buffer we are upto
public int byteUpto = DocumentsWriter.BYTE_BLOCK_SIZE; // Where we are in head buffer
public byte[] buffer; // Current head buffer
public int byteOffset = -DocumentsWriter.BYTE_BLOCK_SIZE; // Current head offset
private final boolean trackAllocations;
private final Allocator allocator;
public ByteBlockPool(Allocator allocator, boolean trackAllocations) {
this.allocator = allocator;
this.trackAllocations = trackAllocations;
}
// Zeroes all written bytes (slices rely on 0-fill -- see class header
// comment), recycles all but the first buffer, and rewinds the pool.
public void reset() {
if (bufferUpto != -1) {
// We allocated at least one buffer
for(int i=0;i<bufferUpto;i++)
// Fully zero fill buffers that we fully used
Arrays.fill(buffers[i], (byte) 0);
// Partial zero fill the final buffer
Arrays.fill(buffers[bufferUpto], 0, byteUpto, (byte) 0);
if (bufferUpto > 0)
// Recycle all but the first buffer
allocator.recycleByteBlocks(buffers, 1, 1+bufferUpto);
// Re-use the first buffer
bufferUpto = 0;
byteUpto = 0;
byteOffset = 0;
buffer = buffers[0];
}
}
// Advances to a fresh head buffer, growing the buffers array by 1.5x
// when it is full.
public void nextBuffer() {
if (1+bufferUpto == buffers.length) {
byte[][] newBuffers = new byte[(int) (buffers.length*1.5)][];
System.arraycopy(buffers, 0, newBuffers, 0, buffers.length);
buffers = newBuffers;
}
buffer = buffers[1+bufferUpto] = allocator.getByteBlock(trackAllocations);
bufferUpto++;
byteUpto = 0;
byteOffset += DocumentsWriter.BYTE_BLOCK_SIZE;
}
// Allocates a fresh first-level slice of the given size in the head
// buffer and returns its start offset there. The slice's final byte is
// set to 16 (non-zero end marker encoding level 0); writers detect it
// and call allocSlice to continue.
public int newSlice(final int size) {
if (byteUpto > DocumentsWriter.BYTE_BLOCK_SIZE-size)
nextBuffer();
final int upto = byteUpto;
byteUpto += size;
buffer[byteUpto-1] = 16;
return upto;
}
// Size of each slice. These arrays should be at most 16
// elements (index is encoded with 4 bits). First array
// is just a compact way to encode X+1 with a max. Second
// array is the length of each slice, ie first slice is 5
// bytes, next slice is 14 bytes, etc.
final static int[] nextLevelArray = {1, 2, 3, 4, 5, 6, 7, 8, 9, 9};
final static int[] levelSizeArray = {5, 14, 20, 30, 40, 40, 80, 80, 120, 200};
final static int FIRST_LEVEL_SIZE = levelSizeArray[0];
// Called when a writer hits the end-of-slice marker at slice[upto]:
// allocates the next (larger-level) slice, copies the last 3 payload
// bytes forward into it, and overwrites them plus the marker with the
// 4-byte forwarding address of the new slice. Returns the offset in
// the head buffer at which writing should resume.
public int allocSlice(final byte[] slice, final int upto) {
// Low 4 bits of the marker encode the current slice level.
final int level = slice[upto] & 15;
final int newLevel = nextLevelArray[level];
final int newSize = levelSizeArray[newLevel];
// Maybe allocate another block
if (byteUpto > DocumentsWriter.BYTE_BLOCK_SIZE-newSize)
nextBuffer();
final int newUpto = byteUpto;
final int offset = newUpto + byteOffset;
byteUpto += newSize;
// Copy forward the past 3 bytes (which we are about
// to overwrite with the forwarding address):
buffer[newUpto] = slice[upto-3];
buffer[newUpto+1] = slice[upto-2];
buffer[newUpto+2] = slice[upto-1];
// Write forwarding address at end of last slice:
slice[upto-3] = (byte) (offset >>> 24);
slice[upto-2] = (byte) (offset >>> 16);
slice[upto-1] = (byte) (offset >>> 8);
slice[upto] = (byte) offset;
// Write new level:
buffer[byteUpto-1] = (byte) (16|newLevel);
return newUpto+3;
}
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/ByteBlockPool.java | Java | art | 5,225 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.Arrays;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.lucene.document.Fieldable;
import org.apache.lucene.util.UnicodeUtil;
final class TermsHashPerField extends InvertedDocConsumerPerField {
final TermsHashConsumerPerField consumer;
final TermsHashPerField nextPerField;
final TermsHashPerThread perThread;
final DocumentsWriter.DocState docState;
final FieldInvertState fieldState;
TermAttribute termAtt;
// Copied from our perThread
final CharBlockPool charPool;
final IntBlockPool intPool;
final ByteBlockPool bytePool;
final int streamCount;
final int numPostingInt;
final FieldInfo fieldInfo;
boolean postingsCompacted;
int numPostings;
private int postingsHashSize = 4;
private int postingsHashHalfSize = postingsHashSize/2;
private int postingsHashMask = postingsHashSize-1;
private RawPostingList[] postingsHash = new RawPostingList[postingsHashSize];
private RawPostingList p;
public TermsHashPerField(DocInverterPerField docInverterPerField, final TermsHashPerThread perThread, final TermsHashPerThread nextPerThread, final FieldInfo fieldInfo) {
this.perThread = perThread;
intPool = perThread.intPool;
charPool = perThread.charPool;
bytePool = perThread.bytePool;
docState = perThread.docState;
fieldState = docInverterPerField.fieldState;
this.consumer = perThread.consumer.addField(this, fieldInfo);
streamCount = consumer.getStreamCount();
numPostingInt = 2*streamCount;
this.fieldInfo = fieldInfo;
if (nextPerThread != null)
nextPerField = (TermsHashPerField) nextPerThread.addField(docInverterPerField, fieldInfo);
else
nextPerField = null;
}
void shrinkHash(int targetSize) {
assert postingsCompacted || numPostings == 0;
final int newSize = 4;
if (newSize != postingsHash.length) {
postingsHash = new RawPostingList[newSize];
postingsHashSize = newSize;
postingsHashHalfSize = newSize/2;
postingsHashMask = newSize-1;
}
Arrays.fill(postingsHash, null);
}
public void reset() {
if (!postingsCompacted)
compactPostings();
assert numPostings <= postingsHash.length;
if (numPostings > 0) {
perThread.termsHash.recyclePostings(postingsHash, numPostings);
Arrays.fill(postingsHash, 0, numPostings, null);
numPostings = 0;
}
postingsCompacted = false;
if (nextPerField != null)
nextPerField.reset();
}
@Override
synchronized public void abort() {
reset();
if (nextPerField != null)
nextPerField.abort();
}
public void initReader(ByteSliceReader reader, RawPostingList p, int stream) {
assert stream < streamCount;
final int[] ints = intPool.buffers[p.intStart >> DocumentsWriter.INT_BLOCK_SHIFT];
final int upto = p.intStart & DocumentsWriter.INT_BLOCK_MASK;
reader.init(bytePool,
p.byteStart+stream*ByteBlockPool.FIRST_LEVEL_SIZE,
ints[upto+stream]);
}
private synchronized void compactPostings() {
int upto = 0;
for(int i=0;i<postingsHashSize;i++) {
if (postingsHash[i] != null) {
if (upto < i) {
postingsHash[upto] = postingsHash[i];
postingsHash[i] = null;
}
upto++;
}
}
assert upto == numPostings;
postingsCompacted = true;
}
/** Collapse the hash table & sort in-place. */
public RawPostingList[] sortPostings() {
compactPostings();
quickSort(postingsHash, 0, numPostings-1);
return postingsHash;
}
void quickSort(RawPostingList[] postings, int lo, int hi) {
if (lo >= hi)
return;
else if (hi == 1+lo) {
if (comparePostings(postings[lo], postings[hi]) > 0) {
final RawPostingList tmp = postings[lo];
postings[lo] = postings[hi];
postings[hi] = tmp;
}
return;
}
int mid = (lo + hi) >>> 1;
if (comparePostings(postings[lo], postings[mid]) > 0) {
RawPostingList tmp = postings[lo];
postings[lo] = postings[mid];
postings[mid] = tmp;
}
if (comparePostings(postings[mid], postings[hi]) > 0) {
RawPostingList tmp = postings[mid];
postings[mid] = postings[hi];
postings[hi] = tmp;
if (comparePostings(postings[lo], postings[mid]) > 0) {
RawPostingList tmp2 = postings[lo];
postings[lo] = postings[mid];
postings[mid] = tmp2;
}
}
int left = lo + 1;
int right = hi - 1;
if (left >= right)
return;
RawPostingList partition = postings[mid];
for (; ;) {
while (comparePostings(postings[right], partition) > 0)
--right;
while (left < right && comparePostings(postings[left], partition) <= 0)
++left;
if (left < right) {
RawPostingList tmp = postings[left];
postings[left] = postings[right];
postings[right] = tmp;
--right;
} else {
break;
}
}
quickSort(postings, lo, left);
quickSort(postings, left + 1, hi);
}
/** Compares term text for two Posting instance and
* returns -1 if p1 < p2; 1 if p1 > p2; else 0. */
int comparePostings(RawPostingList p1, RawPostingList p2) {
if (p1 == p2)
return 0;
final char[] text1 = charPool.buffers[p1.textStart >> DocumentsWriter.CHAR_BLOCK_SHIFT];
int pos1 = p1.textStart & DocumentsWriter.CHAR_BLOCK_MASK;
final char[] text2 = charPool.buffers[p2.textStart >> DocumentsWriter.CHAR_BLOCK_SHIFT];
int pos2 = p2.textStart & DocumentsWriter.CHAR_BLOCK_MASK;
assert text1 != text2 || pos1 != pos2;
while(true) {
final char c1 = text1[pos1++];
final char c2 = text2[pos2++];
if (c1 != c2) {
if (0xffff == c2)
return 1;
else if (0xffff == c1)
return -1;
else
return c1-c2;
} else
// This method should never compare equal postings
// unless p1==p2
assert c1 != 0xffff;
}
}
/** Test whether the text for current RawPostingList p equals
* current tokenText. */
private boolean postingEquals(final char[] tokenText, final int tokenTextLen) {
final char[] text = perThread.charPool.buffers[p.textStart >> DocumentsWriter.CHAR_BLOCK_SHIFT];
assert text != null;
int pos = p.textStart & DocumentsWriter.CHAR_BLOCK_MASK;
int tokenPos = 0;
for(;tokenPos<tokenTextLen;pos++,tokenPos++)
if (tokenText[tokenPos] != text[pos])
return false;
return 0xffff == text[pos];
}
private boolean doCall;
private boolean doNextCall;
@Override
void start(Fieldable f) {
termAtt = fieldState.attributeSource.addAttribute(TermAttribute.class);
consumer.start(f);
if (nextPerField != null) {
nextPerField.start(f);
}
}
@Override
boolean start(Fieldable[] fields, int count) throws IOException {
doCall = consumer.start(fields, count);
if (nextPerField != null)
doNextCall = nextPerField.start(fields, count);
return doCall || doNextCall;
}
// Secondary entry point (for 2nd & subsequent TermsHash),
// because token text has already been "interned" into
// textStart, so we hash by textStart
public void add(int textStart) throws IOException {
int code = textStart;
int hashPos = code & postingsHashMask;
assert !postingsCompacted;
// Locate RawPostingList in hash
p = postingsHash[hashPos];
if (p != null && p.textStart != textStart) {
// Conflict: keep searching different locations in
// the hash table.
final int inc = ((code>>8)+code)|1;
do {
code += inc;
hashPos = code & postingsHashMask;
p = postingsHash[hashPos];
} while (p != null && p.textStart != textStart);
}
if (p == null) {
// First time we are seeing this token since we last
// flushed the hash.
// Refill?
if (0 == perThread.freePostingsCount)
perThread.morePostings();
// Pull next free RawPostingList from free list
p = perThread.freePostings[--perThread.freePostingsCount];
assert p != null;
p.textStart = textStart;
assert postingsHash[hashPos] == null;
postingsHash[hashPos] = p;
numPostings++;
if (numPostings == postingsHashHalfSize)
rehashPostings(2*postingsHashSize);
// Init stream slices
if (numPostingInt + intPool.intUpto > DocumentsWriter.INT_BLOCK_SIZE)
intPool.nextBuffer();
if (DocumentsWriter.BYTE_BLOCK_SIZE - bytePool.byteUpto < numPostingInt*ByteBlockPool.FIRST_LEVEL_SIZE)
bytePool.nextBuffer();
intUptos = intPool.buffer;
intUptoStart = intPool.intUpto;
intPool.intUpto += streamCount;
p.intStart = intUptoStart + intPool.intOffset;
for(int i=0;i<streamCount;i++) {
final int upto = bytePool.newSlice(ByteBlockPool.FIRST_LEVEL_SIZE);
intUptos[intUptoStart+i] = upto + bytePool.byteOffset;
}
p.byteStart = intUptos[intUptoStart];
consumer.newTerm(p);
} else {
intUptos = intPool.buffers[p.intStart >> DocumentsWriter.INT_BLOCK_SHIFT];
intUptoStart = p.intStart & DocumentsWriter.INT_BLOCK_MASK;
consumer.addTerm(p);
}
}
// Primary entry point (for first TermsHash)
@Override
void add() throws IOException {
assert !postingsCompacted;
// We are first in the chain so we must "intern" the
// term text into textStart address
// Get the text of this term.
final char[] tokenText = termAtt.termBuffer();;
final int tokenTextLen = termAtt.termLength();
// Compute hashcode & replace any invalid UTF16 sequences
int downto = tokenTextLen;
int code = 0;
while (downto > 0) {
char ch = tokenText[--downto];
if (ch >= UnicodeUtil.UNI_SUR_LOW_START && ch <= UnicodeUtil.UNI_SUR_LOW_END) {
if (0 == downto) {
// Unpaired
ch = tokenText[downto] = UnicodeUtil.UNI_REPLACEMENT_CHAR;
} else {
final char ch2 = tokenText[downto-1];
if (ch2 >= UnicodeUtil.UNI_SUR_HIGH_START && ch2 <= UnicodeUtil.UNI_SUR_HIGH_END) {
// OK: high followed by low. This is a valid
// surrogate pair.
code = ((code*31) + ch)*31+ch2;
downto--;
continue;
} else {
// Unpaired
ch = tokenText[downto] = UnicodeUtil.UNI_REPLACEMENT_CHAR;
}
}
} else if (ch >= UnicodeUtil.UNI_SUR_HIGH_START && (ch <= UnicodeUtil.UNI_SUR_HIGH_END ||
ch == 0xffff)) {
// Unpaired or 0xffff
ch = tokenText[downto] = UnicodeUtil.UNI_REPLACEMENT_CHAR;
}
code = (code*31) + ch;
}
int hashPos = code & postingsHashMask;
// Locate RawPostingList in hash
p = postingsHash[hashPos];
if (p != null && !postingEquals(tokenText, tokenTextLen)) {
// Conflict: keep searching different locations in
// the hash table.
final int inc = ((code>>8)+code)|1;
do {
code += inc;
hashPos = code & postingsHashMask;
p = postingsHash[hashPos];
} while (p != null && !postingEquals(tokenText, tokenTextLen));
}
if (p == null) {
// First time we are seeing this token since we last
// flushed the hash.
final int textLen1 = 1+tokenTextLen;
if (textLen1 + charPool.charUpto > DocumentsWriter.CHAR_BLOCK_SIZE) {
if (textLen1 > DocumentsWriter.CHAR_BLOCK_SIZE) {
// Just skip this term, to remain as robust as
// possible during indexing. A TokenFilter
// can be inserted into the analyzer chain if
// other behavior is wanted (pruning the term
// to a prefix, throwing an exception, etc).
if (docState.maxTermPrefix == null)
docState.maxTermPrefix = new String(tokenText, 0, 30);
consumer.skippingLongTerm();
return;
}
charPool.nextBuffer();
}
// Refill?
if (0 == perThread.freePostingsCount)
perThread.morePostings();
// Pull next free RawPostingList from free list
p = perThread.freePostings[--perThread.freePostingsCount];
assert p != null;
final char[] text = charPool.buffer;
final int textUpto = charPool.charUpto;
p.textStart = textUpto + charPool.charOffset;
charPool.charUpto += textLen1;
System.arraycopy(tokenText, 0, text, textUpto, tokenTextLen);
text[textUpto+tokenTextLen] = 0xffff;
assert postingsHash[hashPos] == null;
postingsHash[hashPos] = p;
numPostings++;
if (numPostings == postingsHashHalfSize)
rehashPostings(2*postingsHashSize);
// Init stream slices
if (numPostingInt + intPool.intUpto > DocumentsWriter.INT_BLOCK_SIZE)
intPool.nextBuffer();
if (DocumentsWriter.BYTE_BLOCK_SIZE - bytePool.byteUpto < numPostingInt*ByteBlockPool.FIRST_LEVEL_SIZE)
bytePool.nextBuffer();
intUptos = intPool.buffer;
intUptoStart = intPool.intUpto;
intPool.intUpto += streamCount;
p.intStart = intUptoStart + intPool.intOffset;
for(int i=0;i<streamCount;i++) {
final int upto = bytePool.newSlice(ByteBlockPool.FIRST_LEVEL_SIZE);
intUptos[intUptoStart+i] = upto + bytePool.byteOffset;
}
p.byteStart = intUptos[intUptoStart];
consumer.newTerm(p);
} else {
intUptos = intPool.buffers[p.intStart >> DocumentsWriter.INT_BLOCK_SHIFT];
intUptoStart = p.intStart & DocumentsWriter.INT_BLOCK_MASK;
consumer.addTerm(p);
}
if (doNextCall)
nextPerField.add(p.textStart);
}
int[] intUptos;
int intUptoStart;
void writeByte(int stream, byte b) {
int upto = intUptos[intUptoStart+stream];
byte[] bytes = bytePool.buffers[upto >> DocumentsWriter.BYTE_BLOCK_SHIFT];
assert bytes != null;
int offset = upto & DocumentsWriter.BYTE_BLOCK_MASK;
if (bytes[offset] != 0) {
// End of slice; allocate a new one
offset = bytePool.allocSlice(bytes, offset);
bytes = bytePool.buffer;
intUptos[intUptoStart+stream] = offset + bytePool.byteOffset;
}
bytes[offset] = b;
(intUptos[intUptoStart+stream])++;
}
public void writeBytes(int stream, byte[] b, int offset, int len) {
// TODO: optimize
final int end = offset + len;
for(int i=offset;i<end;i++)
writeByte(stream, b[i]);
}
void writeVInt(int stream, int i) {
assert stream < streamCount;
while ((i & ~0x7F) != 0) {
writeByte(stream, (byte)((i & 0x7f) | 0x80));
i >>>= 7;
}
writeByte(stream, (byte) i);
}
@Override
void finish() throws IOException {
consumer.finish();
if (nextPerField != null)
nextPerField.finish();
}
/** Called when postings hash is too small (> 50%
* occupied) or too large (< 20% occupied). */
void rehashPostings(final int newSize) {
final int newMask = newSize-1;
RawPostingList[] newHash = new RawPostingList[newSize];
for(int i=0;i<postingsHashSize;i++) {
RawPostingList p0 = postingsHash[i];
if (p0 != null) {
int code;
if (perThread.primary) {
final int start = p0.textStart & DocumentsWriter.CHAR_BLOCK_MASK;
final char[] text = charPool.buffers[p0.textStart >> DocumentsWriter.CHAR_BLOCK_SHIFT];
int pos = start;
while(text[pos] != 0xffff)
pos++;
code = 0;
while (pos > start)
code = (code*31) + text[--pos];
} else
code = p0.textStart;
int hashPos = code & newMask;
assert hashPos >= 0;
if (newHash[hashPos] != null) {
final int inc = ((code>>8)+code)|1;
do {
code += inc;
hashPos = code & newMask;
} while (newHash[hashPos] != null);
}
newHash[hashPos] = p0;
}
}
postingsHashMask = newMask;
postingsHash = newHash;
postingsHashSize = newSize;
postingsHashHalfSize = newSize >> 1;
}
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/TermsHashPerField.java | Java | art | 17,039 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Fieldable;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.util.StringHelper;
import java.io.IOException;
import java.util.*;
/** Access to the Fieldable Info file that describes document fields and whether or
* not they are indexed. Each segment has a separate Fieldable Info file. Objects
* of this class are thread-safe for multiple readers, but only one thread can
* be adding documents at a time, with no other reader or writer threads
* accessing this object.
*/
final class FieldInfos {
// Used internally (ie not written to *.fnm files) for pre-2.9 files
public static final int FORMAT_PRE = -1;
// First used in 2.9; prior to 2.9 there was no format header
public static final int FORMAT_START = -2;
static final int CURRENT_FORMAT = FORMAT_START;
static final byte IS_INDEXED = 0x1;
static final byte STORE_TERMVECTOR = 0x2;
static final byte STORE_POSITIONS_WITH_TERMVECTOR = 0x4;
static final byte STORE_OFFSET_WITH_TERMVECTOR = 0x8;
static final byte OMIT_NORMS = 0x10;
static final byte STORE_PAYLOADS = 0x20;
static final byte OMIT_TERM_FREQ_AND_POSITIONS = 0x40;
private final ArrayList<FieldInfo> byNumber = new ArrayList<FieldInfo>();
private final HashMap<String,FieldInfo> byName = new HashMap<String,FieldInfo>();
private int format;
FieldInfos() { }
/**
* Construct a FieldInfos object using the directory and the name of the file
* IndexInput
* @param d The directory to open the IndexInput from
* @param name The name of the file to open the IndexInput from in the Directory
* @throws IOException
*/
FieldInfos(Directory d, String name) throws IOException {
IndexInput input = d.openInput(name);
try {
try {
read(input, name);
} catch (IOException ioe) {
if (format == FORMAT_PRE) {
// LUCENE-1623: FORMAT_PRE (before there was a
// format) may be 2.3.2 (pre-utf8) or 2.4.x (utf8)
// encoding; retry with input set to pre-utf8
input.seek(0);
input.setModifiedUTF8StringsMode();
byNumber.clear();
byName.clear();
try {
read(input, name);
} catch (Throwable t) {
// Ignore any new exception & throw original IOE
throw ioe;
}
} else {
// The IOException cannot be caused by
// LUCENE-1623, so re-throw it
throw ioe;
}
}
} finally {
input.close();
}
}
/**
* Returns a deep clone of this FieldInfos instance.
*/
@Override
synchronized public Object clone() {
FieldInfos fis = new FieldInfos();
final int numField = byNumber.size();
for(int i=0;i<numField;i++) {
FieldInfo fi = (FieldInfo) ( byNumber.get(i)).clone();
fis.byNumber.add(fi);
fis.byName.put(fi.name, fi);
}
return fis;
}
/** Adds field info for a Document. */
synchronized public void add(Document doc) {
List<Fieldable> fields = doc.getFields();
for (Fieldable field : fields) {
add(field.name(), field.isIndexed(), field.isTermVectorStored(), field.isStorePositionWithTermVector(),
field.isStoreOffsetWithTermVector(), field.getOmitNorms(), false, field.getOmitTermFreqAndPositions());
}
}
/** Returns true if any fields do not omitTermFreqAndPositions */
boolean hasProx() {
final int numFields = byNumber.size();
for(int i=0;i<numFields;i++) {
final FieldInfo fi = fieldInfo(i);
if (fi.isIndexed && !fi.omitTermFreqAndPositions) {
return true;
}
}
return false;
}
/**
* Add fields that are indexed. Whether they have termvectors has to be specified.
*
* @param names The names of the fields
* @param storeTermVectors Whether the fields store term vectors or not
* @param storePositionWithTermVector true if positions should be stored.
* @param storeOffsetWithTermVector true if offsets should be stored
*/
synchronized public void addIndexed(Collection<String> names, boolean storeTermVectors, boolean storePositionWithTermVector,
boolean storeOffsetWithTermVector) {
for (String name : names) {
add(name, true, storeTermVectors, storePositionWithTermVector, storeOffsetWithTermVector);
}
}
/**
* Assumes the fields are not storing term vectors.
*
* @param names The names of the fields
* @param isIndexed Whether the fields are indexed or not
*
* @see #add(String, boolean)
*/
synchronized public void add(Collection<String> names, boolean isIndexed) {
for (String name : names) {
add(name, isIndexed);
}
}
/**
* Calls 5 parameter add with false for all TermVector parameters.
*
* @param name The name of the Fieldable
* @param isIndexed true if the field is indexed
* @see #add(String, boolean, boolean, boolean, boolean)
*/
synchronized public void add(String name, boolean isIndexed) {
add(name, isIndexed, false, false, false, false);
}
/**
* Calls 5 parameter add with false for term vector positions and offsets.
*
* @param name The name of the field
* @param isIndexed true if the field is indexed
* @param storeTermVector true if the term vector should be stored
*/
synchronized public void add(String name, boolean isIndexed, boolean storeTermVector){
add(name, isIndexed, storeTermVector, false, false, false);
}
/** If the field is not yet known, adds it. If it is known, checks to make
* sure that the isIndexed flag is the same as was given previously for this
* field. If not - marks it as being indexed. Same goes for the TermVector
* parameters.
*
* @param name The name of the field
* @param isIndexed true if the field is indexed
* @param storeTermVector true if the term vector should be stored
* @param storePositionWithTermVector true if the term vector with positions should be stored
* @param storeOffsetWithTermVector true if the term vector with offsets should be stored
*/
synchronized public void add(String name, boolean isIndexed, boolean storeTermVector,
boolean storePositionWithTermVector, boolean storeOffsetWithTermVector) {
add(name, isIndexed, storeTermVector, storePositionWithTermVector, storeOffsetWithTermVector, false);
}
/** If the field is not yet known, adds it. If it is known, checks to make
* sure that the isIndexed flag is the same as was given previously for this
* field. If not - marks it as being indexed. Same goes for the TermVector
* parameters.
*
* @param name The name of the field
* @param isIndexed true if the field is indexed
* @param storeTermVector true if the term vector should be stored
* @param storePositionWithTermVector true if the term vector with positions should be stored
* @param storeOffsetWithTermVector true if the term vector with offsets should be stored
* @param omitNorms true if the norms for the indexed field should be omitted
*/
synchronized public void add(String name, boolean isIndexed, boolean storeTermVector,
boolean storePositionWithTermVector, boolean storeOffsetWithTermVector, boolean omitNorms) {
add(name, isIndexed, storeTermVector, storePositionWithTermVector,
storeOffsetWithTermVector, omitNorms, false, false);
}
/** If the field is not yet known, adds it. If it is known, checks to make
* sure that the isIndexed flag is the same as was given previously for this
* field. If not - marks it as being indexed. Same goes for the TermVector
* parameters.
*
* @param name The name of the field
* @param isIndexed true if the field is indexed
* @param storeTermVector true if the term vector should be stored
* @param storePositionWithTermVector true if the term vector with positions should be stored
* @param storeOffsetWithTermVector true if the term vector with offsets should be stored
* @param omitNorms true if the norms for the indexed field should be omitted
* @param storePayloads true if payloads should be stored for this field
* @param omitTermFreqAndPositions true if term freqs should be omitted for this field
*/
synchronized public FieldInfo add(String name, boolean isIndexed, boolean storeTermVector,
boolean storePositionWithTermVector, boolean storeOffsetWithTermVector,
boolean omitNorms, boolean storePayloads, boolean omitTermFreqAndPositions) {
FieldInfo fi = fieldInfo(name);
if (fi == null) {
return addInternal(name, isIndexed, storeTermVector, storePositionWithTermVector, storeOffsetWithTermVector, omitNorms, storePayloads, omitTermFreqAndPositions);
} else {
fi.update(isIndexed, storeTermVector, storePositionWithTermVector, storeOffsetWithTermVector, omitNorms, storePayloads, omitTermFreqAndPositions);
}
return fi;
}
private FieldInfo addInternal(String name, boolean isIndexed,
boolean storeTermVector, boolean storePositionWithTermVector,
boolean storeOffsetWithTermVector, boolean omitNorms, boolean storePayloads, boolean omitTermFreqAndPositions) {
name = StringHelper.intern(name);
FieldInfo fi = new FieldInfo(name, isIndexed, byNumber.size(), storeTermVector, storePositionWithTermVector,
storeOffsetWithTermVector, omitNorms, storePayloads, omitTermFreqAndPositions);
byNumber.add(fi);
byName.put(name, fi);
return fi;
}
public int fieldNumber(String fieldName) {
FieldInfo fi = fieldInfo(fieldName);
return (fi != null) ? fi.number : -1;
}
public FieldInfo fieldInfo(String fieldName) {
return byName.get(fieldName);
}
/**
* Return the fieldName identified by its number.
*
* @param fieldNumber
* @return the fieldName or an empty string when the field
* with the given number doesn't exist.
*/
public String fieldName(int fieldNumber) {
FieldInfo fi = fieldInfo(fieldNumber);
return (fi != null) ? fi.name : "";
}
/**
* Return the fieldinfo object referenced by the fieldNumber.
* @param fieldNumber
* @return the FieldInfo object or null when the given fieldNumber
* doesn't exist.
*/
public FieldInfo fieldInfo(int fieldNumber) {
return (fieldNumber >= 0) ? byNumber.get(fieldNumber) : null;
}
public int size() {
return byNumber.size();
}
public boolean hasVectors() {
boolean hasVectors = false;
for (int i = 0; i < size(); i++) {
if (fieldInfo(i).storeTermVector) {
hasVectors = true;
break;
}
}
return hasVectors;
}
public void write(Directory d, String name) throws IOException {
IndexOutput output = d.createOutput(name);
try {
write(output);
} finally {
output.close();
}
}
public void write(IndexOutput output) throws IOException {
output.writeVInt(CURRENT_FORMAT);
output.writeVInt(size());
for (int i = 0; i < size(); i++) {
FieldInfo fi = fieldInfo(i);
byte bits = 0x0;
if (fi.isIndexed) bits |= IS_INDEXED;
if (fi.storeTermVector) bits |= STORE_TERMVECTOR;
if (fi.storePositionWithTermVector) bits |= STORE_POSITIONS_WITH_TERMVECTOR;
if (fi.storeOffsetWithTermVector) bits |= STORE_OFFSET_WITH_TERMVECTOR;
if (fi.omitNorms) bits |= OMIT_NORMS;
if (fi.storePayloads) bits |= STORE_PAYLOADS;
if (fi.omitTermFreqAndPositions) bits |= OMIT_TERM_FREQ_AND_POSITIONS;
output.writeString(fi.name);
output.writeByte(bits);
}
}
private void read(IndexInput input, String fileName) throws IOException {
int firstInt = input.readVInt();
if (firstInt < 0) {
// This is a real format
format = firstInt;
} else {
format = FORMAT_PRE;
}
if (format != FORMAT_PRE & format != FORMAT_START) {
throw new CorruptIndexException("unrecognized format " + format + " in file \"" + fileName + "\"");
}
int size;
if (format == FORMAT_PRE) {
size = firstInt;
} else {
size = input.readVInt(); //read in the size
}
for (int i = 0; i < size; i++) {
String name = StringHelper.intern(input.readString());
byte bits = input.readByte();
boolean isIndexed = (bits & IS_INDEXED) != 0;
boolean storeTermVector = (bits & STORE_TERMVECTOR) != 0;
boolean storePositionsWithTermVector = (bits & STORE_POSITIONS_WITH_TERMVECTOR) != 0;
boolean storeOffsetWithTermVector = (bits & STORE_OFFSET_WITH_TERMVECTOR) != 0;
boolean omitNorms = (bits & OMIT_NORMS) != 0;
boolean storePayloads = (bits & STORE_PAYLOADS) != 0;
boolean omitTermFreqAndPositions = (bits & OMIT_TERM_FREQ_AND_POSITIONS) != 0;
addInternal(name, isIndexed, storeTermVector, storePositionsWithTermVector, storeOffsetWithTermVector, omitNorms, storePayloads, omitTermFreqAndPositions);
}
if (input.getFilePointer() != input.length()) {
throw new CorruptIndexException("did not read all bytes from file \"" + fileName + "\": read " + input.getFilePointer() + " vs size " + input.length());
}
}
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/FieldInfos.java | Java | art | 14,256 |
package org.apache.lucene.index;
/**
* Copyright 2007 The Apache Software Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
* For each Field, store position by position information. It ignores frequency information
* <p/>
* This is not thread-safe.
*/
public class PositionBasedTermVectorMapper extends TermVectorMapper{
private Map<String, Map<Integer,TVPositionInfo>> fieldToTerms;
private String currentField;
/**
* A Map of Integer and TVPositionInfo
*/
private Map<Integer,TVPositionInfo> currentPositions;
private boolean storeOffsets;
/**
*
*
*/
public PositionBasedTermVectorMapper() {
super(false, false);
}
public PositionBasedTermVectorMapper(boolean ignoringOffsets)
{
super(false, ignoringOffsets);
}
/**
* Never ignores positions. This mapper doesn't make much sense unless there are positions
* @return false
*/
@Override
public boolean isIgnoringPositions() {
return false;
}
/**
* Callback for the TermVectorReader.
* @param term
* @param frequency
* @param offsets
* @param positions
*/
@Override
public void map(String term, int frequency, TermVectorOffsetInfo[] offsets, int[] positions) {
for (int i = 0; i < positions.length; i++) {
Integer posVal = Integer.valueOf(positions[i]);
TVPositionInfo pos = currentPositions.get(posVal);
if (pos == null) {
pos = new TVPositionInfo(positions[i], storeOffsets);
currentPositions.put(posVal, pos);
}
pos.addTerm(term, offsets != null ? offsets[i] : null);
}
}
/**
* Callback mechanism used by the TermVectorReader
* @param field The field being read
* @param numTerms The number of terms in the vector
* @param storeOffsets Whether offsets are available
* @param storePositions Whether positions are available
*/
@Override
public void setExpectations(String field, int numTerms, boolean storeOffsets, boolean storePositions) {
if (storePositions == false)
{
throw new RuntimeException("You must store positions in order to use this Mapper");
}
if (storeOffsets == true)
{
//ignoring offsets
}
fieldToTerms = new HashMap<String,Map<Integer,TVPositionInfo>>(numTerms);
this.storeOffsets = storeOffsets;
currentField = field;
currentPositions = new HashMap<Integer,TVPositionInfo>();
fieldToTerms.put(currentField, currentPositions);
}
/**
* Get the mapping between fields and terms, sorted by the comparator
*
* @return A map between field names and a Map. The sub-Map key is the position as the integer, the value is {@link org.apache.lucene.index.PositionBasedTermVectorMapper.TVPositionInfo}.
*/
public Map<String, Map<Integer, TVPositionInfo>> getFieldToTerms() {
return fieldToTerms;
}
/**
* Container for a term at a position
*/
public static class TVPositionInfo{
private int position;
private List<String> terms;
private List<TermVectorOffsetInfo> offsets;
public TVPositionInfo(int position, boolean storeOffsets) {
this.position = position;
terms = new ArrayList<String>();
if (storeOffsets) {
offsets = new ArrayList<TermVectorOffsetInfo>();
}
}
void addTerm(String term, TermVectorOffsetInfo info)
{
terms.add(term);
if (offsets != null) {
offsets.add(info);
}
}
/**
*
* @return The position of the term
*/
public int getPosition() {
return position;
}
/**
* Note, there may be multiple terms at the same position
* @return A List of Strings
*/
public List<String> getTerms() {
return terms;
}
/**
* Parallel list (to {@link #getTerms()}) of TermVectorOffsetInfo objects. There may be multiple entries since there may be multiple terms at a position
* @return A List of TermVectorOffsetInfo objects, if offsets are stored.
*/
public List<TermVectorOffsetInfo> getOffsets() {
return offsets;
}
}
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/PositionBasedTermVectorMapper.java | Java | art | 4,686 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.store.IndexOutput;
import java.io.IOException;
/**
 * Consumes positions &amp; payloads for one term, writing them to the
 * prox (.prx) file in the current index file format.
 */
final class FormatPostingsPositionsWriter extends FormatPostingsPositionsConsumer {

  final FormatPostingsDocsWriter parent;
  final IndexOutput out;

  boolean omitTermFreqAndPositions;
  boolean storePayloads;
  // Last payload length written; -1 forces the first payload length to be written.
  int lastPayloadLength = -1;

  FormatPostingsPositionsWriter(SegmentWriteState state, FormatPostingsDocsWriter parent) throws IOException {
    this.parent = parent;
    omitTermFreqAndPositions = parent.omitTermFreqAndPositions;
    if (parent.parent.parent.fieldInfos.hasProx()) {
      // At least one field does not omit TF, so create the
      // prox file
      final String fileName = IndexFileNames.segmentFileName(parent.parent.parent.segment, IndexFileNames.PROX_EXTENSION);
      state.flushedFiles.add(fileName);
      out = parent.parent.parent.dir.createOutput(fileName);
      parent.skipListWriter.setProxOutput(out);
    } else {
      // Every field omits TF so we will write no prox file
      out = null;
    }
  }

  int lastPosition;

  /** Add a new position &amp; payload */
  @Override
  void addPosition(int position, byte[] payload, int payloadOffset, int payloadLength) throws IOException {
    assert !omitTermFreqAndPositions: "omitTermFreqAndPositions is true";
    assert out != null;

    // Positions are delta-coded against the previous position in this doc.
    final int delta = position - lastPosition;
    lastPosition = position;

    if (storePayloads) {
      if (payloadLength != lastPayloadLength) {
        // Low bit set signals that a new payload length follows the delta.
        lastPayloadLength = payloadLength;
        out.writeVInt((delta<<1)|1);
        out.writeVInt(payloadLength);
      } else {
        out.writeVInt(delta << 1);
      }
      if (payloadLength > 0) {
        // Bug fix: honor payloadOffset rather than always reading the
        // payload from the start of the array (previously the offset
        // parameter was silently ignored).
        out.writeBytes(payload, payloadOffset, payloadLength);
      }
    } else {
      out.writeVInt(delta);
    }
  }

  /** Switches to a new field; payloads are only stored when positions are kept. */
  void setField(FieldInfo fieldInfo) {
    omitTermFreqAndPositions = fieldInfo.omitTermFreqAndPositions;
    storePayloads = omitTermFreqAndPositions ? false : fieldInfo.storePayloads;
  }

  /** Called when we are done adding positions &amp; payloads for one term. */
  @Override
  void finish() {
    // Reset per-term delta state.
    lastPosition = 0;
    lastPayloadLength = -1;
  }

  /** Closes the prox output, if one was created. */
  void close() throws IOException {
    if (out != null) {
      out.close();
    }
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/FormatPostingsPositionsWriter.java | Java | art | 2,997 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.util.List;
/**
* This {@link IndexDeletionPolicy} implementation that
* keeps only the most recent commit and immediately removes
* all prior commits after a new commit is done. This is
* the default deletion policy.
*/
public final class KeepOnlyLastCommitDeletionPolicy implements IndexDeletionPolicy {

  /**
   * Deletes all commits except the most recent one.
   */
  public void onInit(List<? extends IndexCommit> commits) {
    // At init time there is normally exactly one commit; the logic is
    // the same as for a fresh commit, so just delegate.
    onCommit(commits);
  }

  /**
   * Deletes all commits except the most recent one.
   */
  public void onCommit(List<? extends IndexCommit> commits) {
    // The list is ordered oldest-to-newest; remove everything but the
    // final entry (normally there are just two entries here, unless
    // we were called from onInit above).
    final int newest = commits.size() - 1;
    for (int i = 0; i < newest; i++) {
      commits.get(i).delete();
    }
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/KeepOnlyLastCommitDeletionPolicy.java | Java | art | 1,697 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** Consumes doc & freq, writing them using the current
* index file format */
import java.io.IOException;
import org.apache.lucene.util.UnicodeUtil;
import org.apache.lucene.store.IndexOutput;
/**
 * Consumes doc &amp; freq data for one term, writing it to the freq
 * (.frq) file in the current index file format, and delegating
 * positions/payloads to a {@link FormatPostingsPositionsWriter}.
 */
final class FormatPostingsDocsWriter extends FormatPostingsDocsConsumer {
final IndexOutput out;
final FormatPostingsTermsWriter parent;
final FormatPostingsPositionsWriter posWriter;
final DefaultSkipListWriter skipListWriter;
final int skipInterval;
final int totalNumDocs;
boolean omitTermFreqAndPositions;
boolean storePayloads;
long freqStart;
FieldInfo fieldInfo;
// Creates the .frq output for this segment and wires up the shared
// skip-list writer plus the downstream positions writer.
FormatPostingsDocsWriter(SegmentWriteState state, FormatPostingsTermsWriter parent) throws IOException {
super();
this.parent = parent;
final String fileName = IndexFileNames.segmentFileName(parent.parent.segment, IndexFileNames.FREQ_EXTENSION);
state.flushedFiles.add(fileName);
out = parent.parent.dir.createOutput(fileName);
totalNumDocs = parent.parent.totalNumDocs;
// TODO: abstraction violation
skipInterval = parent.parent.termsOut.skipInterval;
skipListWriter = parent.parent.skipListWriter;
skipListWriter.setFreqOutput(out);
posWriter = new FormatPostingsPositionsWriter(state, this);
}
// Switches to a new field, propagating the per-field flags downstream.
void setField(FieldInfo fieldInfo) {
this.fieldInfo = fieldInfo;
omitTermFreqAndPositions = fieldInfo.omitTermFreqAndPositions;
storePayloads = fieldInfo.storePayloads;
posWriter.setField(fieldInfo);
}
int lastDocID;
int df;
/** Adds a new doc in this term. If this returns null
* then we just skip consuming positions/payloads. */
@Override
FormatPostingsPositionsConsumer addDoc(int docID, int termDocFreq) throws IOException {
// Doc IDs are delta-coded against the previous doc in this term.
final int delta = docID - lastDocID;
if (docID < 0 || (df > 0 && delta <= 0))
throw new CorruptIndexException("docs out of order (" + docID + " <= " + lastDocID + " )");
if ((++df % skipInterval) == 0) {
// TODO: abstraction violation
skipListWriter.setSkipData(lastDocID, storePayloads, posWriter.lastPayloadLength);
skipListWriter.bufferSkip(df);
}
assert docID < totalNumDocs: "docID=" + docID + " totalNumDocs=" + totalNumDocs;
lastDocID = docID;
if (omitTermFreqAndPositions)
out.writeVInt(delta);
else if (1 == termDocFreq)
// Low bit set means freq == 1, so no explicit freq follows.
out.writeVInt((delta<<1) | 1);
else {
out.writeVInt(delta<<1);
out.writeVInt(termDocFreq);
}
return posWriter;
}
private final TermInfo termInfo = new TermInfo(); // minimize consing
final UnicodeUtil.UTF8Result utf8 = new UnicodeUtil.UTF8Result();
/** Called when we are done adding docs to this term */
@Override
void finish() throws IOException {
long skipPointer = skipListWriter.writeSkip(out);
// TODO: this is abstraction violation -- we should not
// peek up into parents terms encoding format
termInfo.set(df, parent.freqStart, parent.proxStart, (int) (skipPointer - parent.freqStart));
// TODO: we could do this incrementally
UnicodeUtil.UTF16toUTF8(parent.currentTerm, parent.currentTermStart, utf8);
// Only record the term if it actually occurred in at least one doc.
if (df > 0) {
parent.termsOut.add(fieldInfo.number,
utf8.result,
utf8.length,
termInfo);
}
// Reset per-term state for the next term.
lastDocID = 0;
df = 0;
}
// Closes the freq output and the downstream positions writer.
void close() throws IOException {
out.close();
posWriter.close();
}
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/FormatPostingsDocsWriter.java | Java | art | 4,230 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.store.Directory;
import java.io.IOException;
import java.io.FileNotFoundException;
import java.io.PrintStream;
import java.util.Map;
import java.util.HashMap;
import java.util.List;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Collection;
/*
* This class keeps track of each SegmentInfos instance that
* is still "live", either because it corresponds to a
* segments_N file in the Directory (a "commit", i.e. a
* committed SegmentInfos) or because it's an in-memory
* SegmentInfos that a writer is actively updating but has
* not yet committed. This class uses simple reference
* counting to map the live SegmentInfos instances to
* individual files in the Directory.
*
* The same directory file may be referenced by more than
* one IndexCommit, i.e. more than one SegmentInfos.
* Therefore we count how many commits reference each file.
* When all the commits referencing a certain file have been
* deleted, the refcount for that file becomes zero, and the
* file is deleted.
*
* A separate deletion policy interface
* (IndexDeletionPolicy) is consulted on creation (onInit)
* and once per commit (onCommit), to decide when a commit
* should be removed.
*
* It is the business of the IndexDeletionPolicy to choose
* when to delete commit points. The actual mechanics of
* file deletion, retrying, etc, derived from the deletion
* of commit points is the business of the IndexFileDeleter.
*
* The current default deletion policy is {@link
* KeepOnlyLastCommitDeletionPolicy}, which removes all
* prior commits when a new commit has completed. This
* matches the behavior before 2.2.
*
* Note that you must hold the write.lock before
* instantiating this class. It opens segments_N file(s)
* directly with no retry logic.
*/
final class IndexFileDeleter {
/* Files that we tried to delete but failed (likely
* because they are open and we are running on Windows),
* so we will retry them again later: */
private List<String> deletable;
/* Reference count for all files in the index.
* Counts how many existing commits reference a file.
**/
private Map<String, RefCount> refCounts = new HashMap<String, RefCount>();
/* Holds all commits (segments_N) currently in the index.
* This will have just 1 commit if you are using the
* default delete policy (KeepOnlyLastCommitDeletionPolicy).
* Other policies may leave commit points live for longer
* in which case this list would be longer than 1: */
private List<CommitPoint> commits = new ArrayList<CommitPoint>();
/* Holds files we had incref'd from the previous
* non-commit checkpoint: */
private List<Collection<String>> lastFiles = new ArrayList<Collection<String>>();
/* Commits that the IndexDeletionPolicy have decided to delete: */
private List<CommitPoint> commitsToDelete = new ArrayList<CommitPoint>();
private PrintStream infoStream;
private Directory directory;
private IndexDeletionPolicy policy;
private DocumentsWriter docWriter;
final boolean startingCommitDeleted;
/** Change to true to see details of reference counts when
* infoStream != null */
public static boolean VERBOSE_REF_COUNTS = false;
// Sets the stream used for debug messages; null disables messaging.
void setInfoStream(PrintStream infoStream) {
this.infoStream = infoStream;
if (infoStream != null)
message("setInfoStream deletionPolicy=" + policy);
}
// NOTE: callers must check infoStream != null before calling.
private void message(String message) {
infoStream.println("IFD [" + Thread.currentThread().getName() + "]: " + message);
}
/**
* Initialize the deleter: find all previous commits in
* the Directory, incref the files they reference, call
* the policy to let it delete commits. This will remove
* any files not referenced by any of the commits.
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
*/
public IndexFileDeleter(Directory directory, IndexDeletionPolicy policy, SegmentInfos segmentInfos, PrintStream infoStream, DocumentsWriter docWriter)
throws CorruptIndexException, IOException {
this.docWriter = docWriter;
this.infoStream = infoStream;
if (infoStream != null)
message("init: current segments file is \"" + segmentInfos.getCurrentSegmentFileName() + "\"; deletionPolicy=" + policy);
this.policy = policy;
this.directory = directory;
// First pass: walk the files and initialize our ref
// counts:
long currentGen = segmentInfos.getGeneration();
IndexFileNameFilter filter = IndexFileNameFilter.getFilter();
String[] files = directory.listAll();
CommitPoint currentCommitPoint = null;
for(int i=0;i<files.length;i++) {
String fileName = files[i];
if (filter.accept(null, fileName) && !fileName.equals(IndexFileNames.SEGMENTS_GEN)) {
// Add this file to refCounts with initial count 0:
getRefCount(fileName);
if (fileName.startsWith(IndexFileNames.SEGMENTS)) {
// This is a commit (segments or segments_N), and
// it's valid (<= the max gen). Load it, then
// incref all files it refers to:
if (SegmentInfos.generationFromSegmentsFileName(fileName) <= currentGen) {
if (infoStream != null) {
message("init: load commit \"" + fileName + "\"");
}
SegmentInfos sis = new SegmentInfos();
try {
sis.read(directory, fileName);
} catch (FileNotFoundException e) {
// LUCENE-948: on NFS (and maybe others), if
// you have writers switching back and forth
// between machines, it's very likely that the
// dir listing will be stale and will claim a
// file segments_X exists when in fact it
// doesn't. So, we catch this and handle it
// as if the file does not exist
if (infoStream != null) {
message("init: hit FileNotFoundException when loading commit \"" + fileName + "\"; skipping this commit point");
}
sis = null;
}
if (sis != null) {
CommitPoint commitPoint = new CommitPoint(commitsToDelete, directory, sis);
if (sis.getGeneration() == segmentInfos.getGeneration()) {
currentCommitPoint = commitPoint;
}
commits.add(commitPoint);
incRef(sis, true);
}
}
}
}
}
if (currentCommitPoint == null) {
// We did not in fact see the segments_N file
// corresponding to the segmentInfos that was passed
// in. Yet, it must exist, because our caller holds
// the write lock. This can happen when the directory
// listing was stale (eg when index accessed via NFS
// client with stale directory listing cache). So we
// try now to explicitly open this commit point:
SegmentInfos sis = new SegmentInfos();
try {
sis.read(directory, segmentInfos.getCurrentSegmentFileName());
} catch (IOException e) {
throw new CorruptIndexException("failed to locate current segments_N file");
}
if (infoStream != null)
message("forced open of current segments file " + segmentInfos.getCurrentSegmentFileName());
currentCommitPoint = new CommitPoint(commitsToDelete, directory, sis);
commits.add(currentCommitPoint);
incRef(sis, true);
}
// We keep commits list in sorted order (oldest to newest):
Collections.sort(commits);
// Now delete anything with ref count at 0. These are
// presumably abandoned files eg due to crash of
// IndexWriter.
for(Map.Entry<String, RefCount> entry : refCounts.entrySet() ) {
RefCount rc = entry.getValue();
final String fileName = entry.getKey();
if (0 == rc.count) {
if (infoStream != null) {
message("init: removing unreferenced file \"" + fileName + "\"");
}
deleteFile(fileName);
}
}
// Finally, give policy a chance to remove things on
// startup:
policy.onInit(commits);
// Always protect the incoming segmentInfos since
// sometime it may not be the most recent commit
checkpoint(segmentInfos, false);
startingCommitDeleted = currentCommitPoint.isDeleted();
deleteCommits();
}
/**
* Remove the CommitPoints in the commitsToDelete List by
* DecRef'ing all files from each SegmentInfos.
*/
private void deleteCommits() throws IOException {
int size = commitsToDelete.size();
if (size > 0) {
// First decref all files that had been referred to by
// the now-deleted commits:
for(int i=0;i<size;i++) {
CommitPoint commit = commitsToDelete.get(i);
if (infoStream != null) {
message("deleteCommits: now decRef commit \"" + commit.getSegmentsFileName() + "\"");
}
for (final String file : commit.files) {
decRef(file);
}
}
commitsToDelete.clear();
// Now compact commits to remove deleted ones (preserving the sort):
size = commits.size();
int readFrom = 0;
int writeTo = 0;
while(readFrom < size) {
CommitPoint commit = commits.get(readFrom);
if (!commit.deleted) {
if (writeTo != readFrom) {
commits.set(writeTo, commits.get(readFrom));
}
writeTo++;
}
readFrom++;
}
// Trim the now-unused tail left over from compaction:
while(size > writeTo) {
commits.remove(size-1);
size--;
}
}
}
/**
* Writer calls this when it has hit an error and had to
* roll back, to tell us that there may now be
* unreferenced files in the filesystem. So we re-list
* the filesystem and delete such files. If segmentName
* is non-null, we will only delete files corresponding to
* that segment.
*/
public void refresh(String segmentName) throws IOException {
String[] files = directory.listAll();
IndexFileNameFilter filter = IndexFileNameFilter.getFilter();
String segmentPrefix1;
String segmentPrefix2;
if (segmentName != null) {
segmentPrefix1 = segmentName + ".";
segmentPrefix2 = segmentName + "_";
} else {
segmentPrefix1 = null;
segmentPrefix2 = null;
}
for(int i=0;i<files.length;i++) {
String fileName = files[i];
if (filter.accept(null, fileName) &&
(segmentName == null || fileName.startsWith(segmentPrefix1) || fileName.startsWith(segmentPrefix2)) &&
!refCounts.containsKey(fileName) &&
!fileName.equals(IndexFileNames.SEGMENTS_GEN)) {
// Unreferenced file, so remove it
if (infoStream != null) {
message("refresh [prefix=" + segmentName + "]: removing newly created unreferenced file \"" + fileName + "\"");
}
deleteFile(fileName);
}
}
}
/** Re-lists the directory and removes all unreferenced index files. */
public void refresh() throws IOException {
refresh(null);
}
/** Releases the files of the last non-commit checkpoint and retries pending deletes. */
public void close() throws IOException {
// DecRef old files from the last checkpoint, if any:
int size = lastFiles.size();
if (size > 0) {
for(int i=0;i<size;i++)
decRef(lastFiles.get(i));
lastFiles.clear();
}
deletePendingFiles();
}
// Retries files whose earlier deletion failed (e.g. held open on Windows).
private void deletePendingFiles() throws IOException {
if (deletable != null) {
List<String> oldDeletable = deletable;
deletable = null;
int size = oldDeletable.size();
for(int i=0;i<size;i++) {
if (infoStream != null)
message("delete pending file " + oldDeletable.get(i));
deleteFile(oldDeletable.get(i));
}
}
}
/**
* For definition of "check point" see IndexWriter comments:
* "Clarification: Check Points (and commits)".
*
* Writer calls this when it has made a "consistent
* change" to the index, meaning new files are written to
* the index and the in-memory SegmentInfos have been
* modified to point to those files.
*
* This may or may not be a commit (segments_N may or may
* not have been written).
*
* We simply incref the files referenced by the new
* SegmentInfos and decref the files we had previously
* seen (if any).
*
* If this is a commit, we also call the policy to give it
* a chance to remove other commits. If any commits are
* removed, we decref their files as well.
*/
public void checkpoint(SegmentInfos segmentInfos, boolean isCommit) throws IOException {
if (infoStream != null) {
message("now checkpoint \"" + segmentInfos.getCurrentSegmentFileName() + "\" [" + segmentInfos.size() + " segments " + "; isCommit = " + isCommit + "]");
}
// Try again now to delete any previously un-deletable
// files (because they were in use, on Windows):
deletePendingFiles();
// Incref the files:
incRef(segmentInfos, isCommit);
if (isCommit) {
// Append to our commits list:
commits.add(new CommitPoint(commitsToDelete, directory, segmentInfos));
// Tell policy so it can remove commits:
policy.onCommit(commits);
// Decref files for commits that were deleted by the policy:
deleteCommits();
} else {
final List<String> docWriterFiles;
if (docWriter != null) {
docWriterFiles = docWriter.openFiles();
if (docWriterFiles != null)
// We must incRef these files before decRef'ing
// last files to make sure we don't accidentally
// delete them:
incRef(docWriterFiles);
} else
docWriterFiles = null;
// DecRef old files from the last checkpoint, if any:
int size = lastFiles.size();
if (size > 0) {
for(int i=0;i<size;i++)
decRef(lastFiles.get(i));
lastFiles.clear();
}
// Save files so we can decr on next checkpoint/commit:
lastFiles.add(segmentInfos.files(directory, false));
if (docWriterFiles != null)
lastFiles.add(docWriterFiles);
}
}
// Increments the ref count of every file the SegmentInfos references.
void incRef(SegmentInfos segmentInfos, boolean isCommit) throws IOException {
// If this is a commit point, also incRef the
// segments_N file:
for( final String fileName: segmentInfos.files(directory, isCommit) ) {
incRef(fileName);
}
}
// Increments the ref count of each named file.
void incRef(Collection<String> files) throws IOException {
for(final String file : files) {
incRef(file);
}
}
// Increments the ref count of a single file (creating the count if new).
void incRef(String fileName) throws IOException {
RefCount rc = getRefCount(fileName);
if (infoStream != null && VERBOSE_REF_COUNTS) {
message(" IncRef \"" + fileName + "\": pre-incr count is " + rc.count);
}
rc.IncRef();
}
// Decrements the ref count of each named file, deleting any that reach 0.
void decRef(Collection<String> files) throws IOException {
for(final String file : files) {
decRef(file);
}
}
// Decrements the ref count of a single file; deletes it when it reaches 0.
void decRef(String fileName) throws IOException {
RefCount rc = getRefCount(fileName);
if (infoStream != null && VERBOSE_REF_COUNTS) {
message(" DecRef \"" + fileName + "\": pre-decr count is " + rc.count);
}
if (0 == rc.DecRef()) {
// This file is no longer referenced by any past
// commit points nor by the in-memory SegmentInfos:
deleteFile(fileName);
refCounts.remove(fileName);
}
}
// Decrements the ref count of every (non-commit) file the SegmentInfos references.
void decRef(SegmentInfos segmentInfos) throws IOException {
for (final String file : segmentInfos.files(directory, false)) {
decRef(file);
}
}
// Returns the RefCount for fileName, creating it (count 0) on first use.
private RefCount getRefCount(String fileName) {
RefCount rc;
if (!refCounts.containsKey(fileName)) {
rc = new RefCount(fileName);
refCounts.put(fileName, rc);
} else {
rc = refCounts.get(fileName);
}
return rc;
}
// Deletes each named file unconditionally (no ref counting).
void deleteFiles(List<String> files) throws IOException {
for(final String file: files)
deleteFile(file);
}
/** Deletes the specified files, but only if they are new
* (have not yet been incref'd). */
void deleteNewFiles(Collection<String> files) throws IOException {
for (final String fileName: files) {
if (!refCounts.containsKey(fileName))
deleteFile(fileName);
}
}
// Deletes one file; if the delete fails but the file still exists,
// queues it for a later retry instead of throwing.
void deleteFile(String fileName)
throws IOException {
try {
if (infoStream != null) {
message("delete \"" + fileName + "\"");
}
directory.deleteFile(fileName);
} catch (IOException e) { // if delete fails
if (directory.fileExists(fileName)) {
// Some operating systems (e.g. Windows) don't
// permit a file to be deleted while it is opened
// for read (e.g. by another process or thread). So
// we assume that when a delete fails it is because
// the file is open in another process, and queue
// the file for subsequent deletion.
if (infoStream != null) {
message("IndexFileDeleter: unable to remove file \"" + fileName + "\": " + e.toString() + "; Will re-try later.");
}
if (deletable == null) {
deletable = new ArrayList<String>();
}
deletable.add(fileName); // add to deletable
}
}
}
/**
* Tracks the reference count for a single index file:
*/
final private static class RefCount {
// fileName used only for better assert error messages
final String fileName;
boolean initDone;
RefCount(String fileName) {
this.fileName = fileName;
}
int count;
// Increments and returns the new count; asserts the count was not
// already released (except on the very first increment).
public int IncRef() {
if (!initDone) {
initDone = true;
} else {
assert count > 0: "RefCount is 0 pre-increment for file \"" + fileName + "\"";
}
return ++count;
}
// Decrements and returns the new count; must not go below zero.
public int DecRef() {
assert count > 0: "RefCount is 0 pre-decrement for file \"" + fileName + "\"";
return --count;
}
}
/**
* Holds details for each commit point. This class is
* also passed to the deletion policy. Note: this class
* has a natural ordering that is inconsistent with
* equals.
*/
final private static class CommitPoint extends IndexCommit implements Comparable<CommitPoint> {
long gen;
Collection<String> files;
String segmentsFileName;
boolean deleted;
Directory directory;
Collection<CommitPoint> commitsToDelete;
long version;
long generation;
final boolean isOptimized;
final Map<String,String> userData;
// Snapshots the state of segmentInfos (file names, version, generation,
// user data) at construction time.
public CommitPoint(Collection<CommitPoint> commitsToDelete, Directory directory, SegmentInfos segmentInfos) throws IOException {
this.directory = directory;
this.commitsToDelete = commitsToDelete;
userData = segmentInfos.getUserData();
segmentsFileName = segmentInfos.getCurrentSegmentFileName();
version = segmentInfos.getVersion();
generation = segmentInfos.getGeneration();
files = Collections.unmodifiableCollection(segmentInfos.files(directory, true));
gen = segmentInfos.getGeneration();
isOptimized = segmentInfos.size() == 1 && !segmentInfos.info(0).hasDeletions();
assert !segmentInfos.hasExternalSegments(directory);
}
@Override
public boolean isOptimized() {
return isOptimized;
}
@Override
public String getSegmentsFileName() {
return segmentsFileName;
}
@Override
public Collection<String> getFileNames() throws IOException {
return files;
}
@Override
public Directory getDirectory() {
return directory;
}
@Override
public long getVersion() {
return version;
}
@Override
public long getGeneration() {
return generation;
}
@Override
public Map<String,String> getUserData() {
return userData;
}
/**
* Called only be the deletion policy, to remove this
* commit point from the index.
*/
@Override
public void delete() {
if (!deleted) {
deleted = true;
commitsToDelete.add(this);
}
}
@Override
public boolean isDeleted() {
return deleted;
}
// Orders commit points oldest-to-newest by generation.
public int compareTo(CommitPoint commit) {
if (gen < commit.gen) {
return -1;
} else if (gen > commit.gen) {
return 1;
} else {
return 0;
}
}
}
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/IndexFileDeleter.java | Java | art | 20,975 |
package org.apache.lucene.index;
/**
* Copyright 2004 The Apache Software Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
*
*
**/
/**
 * Thrown by field readers when a stored field cannot be read.
 */
public class FieldReaderException extends RuntimeException {

  // RuntimeException is Serializable; declare an explicit version id so
  // serialized form stays stable across recompiles.
  private static final long serialVersionUID = 1L;

  /**
   * Constructs a new runtime exception with <code>null</code> as its
   * detail message. The cause is not initialized, and may subsequently be
   * initialized by a call to {@link #initCause}.
   */
  public FieldReaderException() {
  }

  /**
   * Constructs a new runtime exception with the specified cause and a
   * detail message of <tt>(cause==null ? null : cause.toString())</tt>
   * (which typically contains the class and detail message of
   * <tt>cause</tt>).
   *
   * @param cause the cause (which is saved for later retrieval by the
   *        {@link #getCause()} method). (A <tt>null</tt> value is
   *        permitted, and indicates that the cause is nonexistent or
   *        unknown.)
   */
  public FieldReaderException(Throwable cause) {
    super(cause);
  }

  /**
   * Constructs a new runtime exception with the specified detail message.
   * The cause is not initialized, and may subsequently be initialized by a
   * call to {@link #initCause}.
   *
   * @param message the detail message. The detail message is saved for
   *        later retrieval by the {@link #getMessage()} method.
   */
  public FieldReaderException(String message) {
    super(message);
  }

  /**
   * Constructs a new runtime exception with the specified detail message and
   * cause. <p>Note that the detail message associated with
   * <code>cause</code> is <i>not</i> automatically incorporated in
   * this runtime exception's detail message.
   *
   * @param message the detail message (which is saved for later retrieval
   *        by the {@link #getMessage()} method).
   * @param cause the cause (which is saved for later retrieval by the
   *        {@link #getCause()} method). (A <tt>null</tt> value is
   *        permitted, and indicates that the cause is nonexistent or
   *        unknown.)
   */
  public FieldReaderException(String message, Throwable cause) {
    super(message, cause);
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/FieldReaderException.java | Java | art | 2,878 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.IndexReader.FieldOption;
import org.apache.lucene.index.MergePolicy.MergeAbortedException;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;
/**
* The SegmentMerger class combines two or more Segments, represented by an IndexReader ({@link #add},
* into a single Segment. After adding the appropriate readers, call the merge method to combine the
* segments.
*<P>
* If the compoundFile flag is set, then the segments will be merged into a compound file.
*
*
* @see #merge
* @see #add
*/
final class SegmentMerger {
/** norms header placeholder */
static final byte[] NORMS_HEADER = new byte[]{'N','R','M',-1};
private Directory directory;
private String segment;
private int termIndexInterval = IndexWriter.DEFAULT_TERM_INDEX_INTERVAL;
private List<IndexReader> readers = new ArrayList<IndexReader>();
private FieldInfos fieldInfos;
private int mergedDocs;
private final CheckAbort checkAbort;
// Whether we should merge doc stores (stored fields and
// vectors files). When all segments we are merging
// already share the same doc store files, we don't need
// to merge the doc stores.
private boolean mergeDocStores;
/** Maximum number of contiguous documents to bulk-copy
when merging stored fields */
private final static int MAX_RAW_MERGE_DOCS = 4192;
/** This ctor used only by test code.
*
* @param dir The Directory to merge the other segments into
* @param name The name of the new segment
*/
SegmentMerger(Directory dir, String name) {
directory = dir;
segment = name;
// Tests never abort a merge, so install a no-op abort check.
checkAbort = new CheckAbort(null, null) {
@Override
public void work(double units) throws MergeAbortedException {
// do nothing
}
};
}
// Creates a merger for the given writer; if a OneMerge is supplied,
// the abort check honors merge aborts, otherwise it is a no-op.
SegmentMerger(IndexWriter writer, String name, MergePolicy.OneMerge merge) {
directory = writer.getDirectory();
segment = name;
if (merge != null) {
checkAbort = new CheckAbort(merge, directory);
} else {
checkAbort = new CheckAbort(null, null) {
@Override
public void work(double units) throws MergeAbortedException {
// do nothing
}
};
}
termIndexInterval = writer.getTermIndexInterval();
}
// True if at least one merged field stores term freq/positions (prox data).
boolean hasProx() {
return fieldInfos.hasProx();
}
/**
* Add an IndexReader to the collection of readers that are to be merged
* @param reader the reader to include in the merge
*/
final void add(IndexReader reader) {
readers.add(reader);
}
/**
*
* @param i The index of the reader to return
* @return The ith reader to be merged
*/
final IndexReader segmentReader(int i) {
return readers.get(i);
}
/**
* Merges the readers specified by the {@link #add} method into the directory passed to the constructor.
* Doc stores (stored fields and vectors) are merged as well.
* @return The number of documents that were merged
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
*/
final int merge() throws CorruptIndexException, IOException {
return merge(true);
}
/**
* Merges the readers specified by the {@link #add} method
* into the directory passed to the constructor.
* @param mergeDocStores if false, we will not merge the
* stored fields nor vectors files
* @return The number of documents that were merged
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
*/
final int merge(boolean mergeDocStores) throws CorruptIndexException, IOException {
this.mergeDocStores = mergeDocStores;
// NOTE: it's important to add calls to
// checkAbort.work(...) if you make any changes to this
// method that will spend alot of time. The frequency
// of this check impacts how long
// IndexWriter.close(false) takes to actually stop the
// threads.
// Fields must be merged first: it builds fieldInfos, which
// the term/norm/vector merges below rely on.
mergedDocs = mergeFields();
mergeTerms();
mergeNorms();
if (mergeDocStores && fieldInfos.hasVectors())
mergeVectors();
return mergedDocs;
}
/**
* close all IndexReaders that have been added.
* Should not be called before merge().
* @throws IOException
*/
final void closeReaders() throws IOException {
for (final IndexReader reader : readers) {
reader.close();
}
}
  /**
   * Bundles the files of the just-merged segment into a single compound file.
   *
   * @param fileName the name of the compound file to create
   * @return the names of the segment files that were added to the compound file
   * @throws IOException if there is a low-level IO error
   */
  final List<String> createCompoundFile(String fileName)
          throws IOException {
    CompoundFileWriter cfsWriter =
      new CompoundFileWriter(directory, fileName, checkAbort);
    List<String> files =
      new ArrayList<String>(IndexFileNames.COMPOUND_EXTENSIONS.length + 1);
    // Basic files
    for (int i = 0; i < IndexFileNames.COMPOUND_EXTENSIONS.length; i++) {
      String ext = IndexFileNames.COMPOUND_EXTENSIONS[i];
      // No .prx file exists when no field stores positions.
      if (ext.equals(IndexFileNames.PROX_EXTENSION) && !hasProx())
        continue;
      // Stored-fields files only exist if the doc stores were merged.
      if (mergeDocStores || (!ext.equals(IndexFileNames.FIELDS_EXTENSION) &&
                            !ext.equals(IndexFileNames.FIELDS_INDEX_EXTENSION)))
        files.add(segment + "." + ext);
    }
    // Fieldable norm files: one shared .nrm file exists if any field has norms.
    for (int i = 0; i < fieldInfos.size(); i++) {
      FieldInfo fi = fieldInfos.fieldInfo(i);
      if (fi.isIndexed && !fi.omitNorms) {
        files.add(segment + "." + IndexFileNames.NORMS_EXTENSION);
        break;
      }
    }
    // Vector files
    if (fieldInfos.hasVectors() && mergeDocStores) {
      for (int i = 0; i < IndexFileNames.VECTOR_EXTENSIONS.length; i++) {
        files.add(segment + "." + IndexFileNames.VECTOR_EXTENSIONS[i]);
      }
    }
    // Now merge all added files
    for (String file : files) {
      cfsWriter.addFile(file);
    }
    // Perform the merge: close() is what actually copies the bytes.
    // NOTE(review): close() is intentionally not in a finally block here —
    // on abort the half-written CFS is cleaned up elsewhere; confirm before changing.
    cfsWriter.close();
    return files;
  }
private void addIndexed(IndexReader reader, FieldInfos fInfos,
Collection<String> names, boolean storeTermVectors,
boolean storePositionWithTermVector, boolean storeOffsetWithTermVector,
boolean storePayloads, boolean omitTFAndPositions)
throws IOException {
for (String field : names) {
fInfos.add(field, true, storeTermVectors,
storePositionWithTermVector, storeOffsetWithTermVector, !reader
.hasNorms(field), storePayloads, omitTFAndPositions);
}
}
  // matchingSegmentReaders[i] is non-null iff readers[i] is a SegmentReader whose
  // field-name -> number mapping matches the merged fieldInfos (bulk copy eligible).
  private SegmentReader[] matchingSegmentReaders;
  // Scratch arrays (length MAX_RAW_MERGE_DOCS) for raw-doc bulk copying.
  private int[] rawDocLengths;
  private int[] rawDocLengths2;
private void setMatchingSegmentReaders() {
// If the i'th reader is a SegmentReader and has
// identical fieldName -> number mapping, then this
// array will be non-null at position i:
int numReaders = readers.size();
matchingSegmentReaders = new SegmentReader[numReaders];
// If this reader is a SegmentReader, and all of its
// field name -> number mappings match the "merged"
// FieldInfos, then we can do a bulk copy of the
// stored fields:
for (int i = 0; i < numReaders; i++) {
IndexReader reader = readers.get(i);
if (reader instanceof SegmentReader) {
SegmentReader segmentReader = (SegmentReader) reader;
boolean same = true;
FieldInfos segmentFieldInfos = segmentReader.fieldInfos();
int numFieldInfos = segmentFieldInfos.size();
for (int j = 0; same && j < numFieldInfos; j++) {
same = fieldInfos.fieldName(j).equals(segmentFieldInfos.fieldName(j));
}
if (same) {
matchingSegmentReaders[i] = segmentReader;
}
}
}
// Used for bulk-reading raw bytes for stored fields
rawDocLengths = new int[MAX_RAW_MERGE_DOCS];
rawDocLengths2 = new int[MAX_RAW_MERGE_DOCS];
}
  /**
   * Merges the field definitions of all readers into {@link #fieldInfos}, writes
   * the .fnm file, and (when {@code mergeDocStores} is true) merges the stored
   * field values.
   *
   * @return The number of documents in all of the readers
   * @throws CorruptIndexException if the index is corrupt
   * @throws IOException if there is a low-level IO error
   */
  private final int mergeFields() throws CorruptIndexException, IOException {
    if (!mergeDocStores) {
      // When we are not merging by doc stores, their field
      // name -> number mapping are the same.  So, we start
      // with the fieldInfos of the last segment in this
      // case, to keep that numbering.
      final SegmentReader sr = (SegmentReader) readers.get(readers.size()-1);
      fieldInfos = (FieldInfos) sr.core.fieldInfos.clone();
    } else {
      fieldInfos = new FieldInfos();		  // merge field names
    }
    for (IndexReader reader : readers) {
      if (reader instanceof SegmentReader) {
        // Fast path: copy each FieldInfo's flags directly.
        SegmentReader segmentReader = (SegmentReader) reader;
        FieldInfos readerFieldInfos = segmentReader.fieldInfos();
        int numReaderFieldInfos = readerFieldInfos.size();
        for (int j = 0; j < numReaderFieldInfos; j++) {
          FieldInfo fi = readerFieldInfos.fieldInfo(j);
          fieldInfos.add(fi.name, fi.isIndexed, fi.storeTermVector,
              fi.storePositionWithTermVector, fi.storeOffsetWithTermVector,
              !reader.hasNorms(fi.name), fi.storePayloads,
              fi.omitTermFreqAndPositions);
        }
      } else {
        // Generic reader: reconstruct flags from the field-name queries.
        // Order matters — more specific term-vector options are added first.
        addIndexed(reader, fieldInfos, reader.getFieldNames(FieldOption.TERMVECTOR_WITH_POSITION_OFFSET), true, true, true, false, false);
        addIndexed(reader, fieldInfos, reader.getFieldNames(FieldOption.TERMVECTOR_WITH_POSITION), true, true, false, false, false);
        addIndexed(reader, fieldInfos, reader.getFieldNames(FieldOption.TERMVECTOR_WITH_OFFSET), true, false, true, false, false);
        addIndexed(reader, fieldInfos, reader.getFieldNames(FieldOption.TERMVECTOR), true, false, false, false, false);
        addIndexed(reader, fieldInfos, reader.getFieldNames(FieldOption.OMIT_TERM_FREQ_AND_POSITIONS), false, false, false, false, true);
        addIndexed(reader, fieldInfos, reader.getFieldNames(FieldOption.STORES_PAYLOADS), false, false, false, true, false);
        addIndexed(reader, fieldInfos, reader.getFieldNames(FieldOption.INDEXED), false, false, false, false, false);
        fieldInfos.add(reader.getFieldNames(FieldOption.UNINDEXED), false);
      }
    }
    fieldInfos.write(directory, segment + ".fnm");
    int docCount = 0;
    setMatchingSegmentReaders();
    if (mergeDocStores) {
      // merge field values
      final FieldsWriter fieldsWriter = new FieldsWriter(directory, segment, fieldInfos);
      try {
        int idx = 0;
        for (IndexReader reader : readers) {
          final SegmentReader matchingSegmentReader = matchingSegmentReaders[idx++];
          FieldsReader matchingFieldsReader = null;
          if (matchingSegmentReader != null) {
            final FieldsReader fieldsReader = matchingSegmentReader.getFieldsReader();
            // Older file formats cannot be bulk-copied as raw bytes.
            if (fieldsReader != null && fieldsReader.canReadRawDocs()) {
              matchingFieldsReader = fieldsReader;
            }
          }
          if (reader.hasDeletions()) {
            docCount += copyFieldsWithDeletions(fieldsWriter,
                                                reader, matchingFieldsReader);
          } else {
            docCount += copyFieldsNoDeletions(fieldsWriter,
                                              reader, matchingFieldsReader);
          }
        }
      } finally {
        fieldsWriter.close();
      }
      // Sanity check: the .fdx file must be exactly header (4 bytes) + 8 bytes/doc.
      final String fileName = segment + "." + IndexFileNames.FIELDS_INDEX_EXTENSION;
      final long fdxFileLength = directory.fileLength(fileName);
      if (4+((long) docCount)*8 != fdxFileLength)
        // This is most likely a bug in Sun JRE 1.6.0_04/_05;
        // we detect that the bug has struck, here, and
        // throw an exception to prevent the corruption from
        // entering the index.  See LUCENE-1282 for
        // details.
        throw new RuntimeException("mergeFields produced an invalid result: docCount is " + docCount + " but fdx file size is " + fdxFileLength + " file=" + fileName + " file exists?=" + directory.fileExists(fileName) + "; now aborting this merge to prevent index corruption");
    } else
      // If we are skipping the doc stores, that means there
      // are no deletions in any of these segments, so we
      // just sum numDocs() of each segment to get total docCount
      for (final IndexReader reader : readers) {
        docCount += reader.numDocs();
      }
    return docCount;
  }
  /**
   * Copies the stored fields of all live (non-deleted) docs from one reader.
   * Uses a raw byte bulk copy over runs of consecutive live docs when a
   * congruent FieldsReader is available; otherwise falls back to doc-by-doc copy.
   *
   * @return number of documents copied
   */
  private int copyFieldsWithDeletions(final FieldsWriter fieldsWriter, final IndexReader reader,
                                      final FieldsReader matchingFieldsReader)
    throws IOException, MergeAbortedException, CorruptIndexException {
    int docCount = 0;
    final int maxDoc = reader.maxDoc();
    if (matchingFieldsReader != null) {
      // We can bulk-copy because the fieldInfos are "congruent"
      for (int j = 0; j < maxDoc;) {
        if (reader.isDeleted(j)) {
          // skip deleted docs
          ++j;
          continue;
        }
        // We can optimize this case (doing a bulk byte copy) since the field
        // numbers are identical
        // Find the run of consecutive live docs starting at j, capped at
        // MAX_RAW_MERGE_DOCS so the rawDocLengths scratch array fits.
        int start = j, numDocs = 0;
        do {
          j++;
          numDocs++;
          if (j >= maxDoc) break;
          if (reader.isDeleted(j)) {
            j++;
            break;
          }
        } while(numDocs < MAX_RAW_MERGE_DOCS);
        IndexInput stream = matchingFieldsReader.rawDocs(rawDocLengths, start, numDocs);
        fieldsWriter.addRawDocuments(stream, rawDocLengths, numDocs);
        docCount += numDocs;
        checkAbort.work(300 * numDocs);
      }
    } else {
      for (int j = 0; j < maxDoc; j++) {
        if (reader.isDeleted(j)) {
          // skip deleted docs
          continue;
        }
        // NOTE: it's very important to first assign to doc then pass it to
        // termVectorsWriter.addAllDocVectors; see LUCENE-1282
        Document doc = reader.document(j);
        fieldsWriter.addDocument(doc);
        docCount++;
        checkAbort.work(300);
      }
    }
    return docCount;
  }
  /**
   * Copies the stored fields of all docs from a reader that has no deletions.
   * Bulk-copies raw bytes in chunks of MAX_RAW_MERGE_DOCS when a congruent
   * FieldsReader is available; otherwise copies doc-by-doc.
   *
   * @return number of documents copied (== reader.maxDoc())
   */
  private int copyFieldsNoDeletions(final FieldsWriter fieldsWriter, final IndexReader reader,
                                    final FieldsReader matchingFieldsReader)
    throws IOException, MergeAbortedException, CorruptIndexException {
    final int maxDoc = reader.maxDoc();
    int docCount = 0;
    if (matchingFieldsReader != null) {
      // We can bulk-copy because the fieldInfos are "congruent"
      while (docCount < maxDoc) {
        int len = Math.min(MAX_RAW_MERGE_DOCS, maxDoc - docCount);
        IndexInput stream = matchingFieldsReader.rawDocs(rawDocLengths, docCount, len);
        fieldsWriter.addRawDocuments(stream, rawDocLengths, len);
        docCount += len;
        checkAbort.work(300 * len);
      }
    } else {
      for (; docCount < maxDoc; docCount++) {
        // NOTE: it's very important to first assign to doc then pass it to
        // termVectorsWriter.addAllDocVectors; see LUCENE-1282
        Document doc = reader.document(docCount);
        fieldsWriter.addDocument(doc);
        checkAbort.work(300);
      }
    }
    return docCount;
  }
  /**
   * Merge the TermVectors from each of the segments into the new one.
   * Afterwards validates the .tvx file length against the merged doc count
   * to detect silent corruption (LUCENE-1282).
   * @throws IOException if there is a low-level IO error
   */
  private final void mergeVectors() throws IOException {
    TermVectorsWriter termVectorsWriter =
      new TermVectorsWriter(directory, segment, fieldInfos);
    try {
      int idx = 0;
      for (final IndexReader reader : readers) {
        final SegmentReader matchingSegmentReader = matchingSegmentReaders[idx++];
        TermVectorsReader matchingVectorsReader = null;
        if (matchingSegmentReader != null) {
          TermVectorsReader vectorsReader = matchingSegmentReader.getTermVectorsReaderOrig();
          // If the TV* files are an older format then they cannot read raw docs:
          if (vectorsReader != null && vectorsReader.canReadRawDocs()) {
            matchingVectorsReader = vectorsReader;
          }
        }
        if (reader.hasDeletions()) {
          copyVectorsWithDeletions(termVectorsWriter, matchingVectorsReader, reader);
        } else {
          copyVectorsNoDeletions(termVectorsWriter, matchingVectorsReader, reader);
        }
      }
    } finally {
      termVectorsWriter.close();
    }
    // Sanity check: the .tvx file must be exactly header (4 bytes) + 16 bytes/doc.
    final String fileName = segment + "." + IndexFileNames.VECTORS_INDEX_EXTENSION;
    final long tvxSize = directory.fileLength(fileName);
    if (4+((long) mergedDocs)*16 != tvxSize)
      // This is most likely a bug in Sun JRE 1.6.0_04/_05;
      // we detect that the bug has struck, here, and
      // throw an exception to prevent the corruption from
      // entering the index.  See LUCENE-1282 for
      // details.
      throw new RuntimeException("mergeVectors produced an invalid result: mergedDocs is " + mergedDocs + " but tvx size is " + tvxSize + " file=" + fileName + " file exists?=" + directory.fileExists(fileName) + "; now aborting this merge to prevent index corruption");
  }
  /**
   * Copies term vectors for all live docs of one reader, bulk-copying runs of
   * consecutive live docs (capped at MAX_RAW_MERGE_DOCS) when a congruent
   * TermVectorsReader is available; otherwise copies vectors doc-by-doc.
   */
  private void copyVectorsWithDeletions(final TermVectorsWriter termVectorsWriter,
                                        final TermVectorsReader matchingVectorsReader,
                                        final IndexReader reader)
    throws IOException, MergeAbortedException {
    final int maxDoc = reader.maxDoc();
    if (matchingVectorsReader != null) {
      // We can bulk-copy because the fieldInfos are "congruent"
      for (int docNum = 0; docNum < maxDoc;) {
        if (reader.isDeleted(docNum)) {
          // skip deleted docs
          ++docNum;
          continue;
        }
        // We can optimize this case (doing a bulk byte copy) since the field
        // numbers are identical
        // Find the run of consecutive live docs starting here.
        int start = docNum, numDocs = 0;
        do {
          docNum++;
          numDocs++;
          if (docNum >= maxDoc) break;
          if (reader.isDeleted(docNum)) {
            docNum++;
            break;
          }
        } while(numDocs < MAX_RAW_MERGE_DOCS);
        matchingVectorsReader.rawDocs(rawDocLengths, rawDocLengths2, start, numDocs);
        termVectorsWriter.addRawDocuments(matchingVectorsReader, rawDocLengths, rawDocLengths2, numDocs);
        checkAbort.work(300 * numDocs);
      }
    } else {
      for (int docNum = 0; docNum < maxDoc; docNum++) {
        if (reader.isDeleted(docNum)) {
          // skip deleted docs
          continue;
        }
        // NOTE: it's very important to first assign to vectors then pass it to
        // termVectorsWriter.addAllDocVectors; see LUCENE-1282
        TermFreqVector[] vectors = reader.getTermFreqVectors(docNum);
        termVectorsWriter.addAllDocVectors(vectors);
        checkAbort.work(300);
      }
    }
  }
  /**
   * Copies term vectors for all docs of a reader with no deletions.
   * Bulk-copies raw bytes in MAX_RAW_MERGE_DOCS chunks when a congruent
   * TermVectorsReader is available; otherwise copies vectors doc-by-doc.
   */
  private void copyVectorsNoDeletions(final TermVectorsWriter termVectorsWriter,
                                      final TermVectorsReader matchingVectorsReader,
                                      final IndexReader reader)
      throws IOException, MergeAbortedException {
    final int maxDoc = reader.maxDoc();
    if (matchingVectorsReader != null) {
      // We can bulk-copy because the fieldInfos are "congruent"
      int docCount = 0;
      while (docCount < maxDoc) {
        int len = Math.min(MAX_RAW_MERGE_DOCS, maxDoc - docCount);
        matchingVectorsReader.rawDocs(rawDocLengths, rawDocLengths2, docCount, len);
        termVectorsWriter.addRawDocuments(matchingVectorsReader, rawDocLengths, rawDocLengths2, len);
        docCount += len;
        checkAbort.work(300 * len);
      }
    } else {
      for (int docNum = 0; docNum < maxDoc; docNum++) {
        // NOTE: it's very important to first assign to vectors then pass it to
        // termVectorsWriter.addAllDocVectors; see LUCENE-1282
        TermFreqVector[] vectors = reader.getTermFreqVectors(docNum);
        termVectorsWriter.addAllDocVectors(vectors);
        checkAbort.work(300);
      }
    }
  }
  // Priority queue of per-segment term enumerators, ordered so that the
  // smallest current term is always on top; built lazily by mergeTerms().
  private SegmentMergeQueue queue = null;
  /**
   * Merges the term dictionaries and postings of all readers into the new
   * segment, driving the FormatPostings* consumer chain.
   */
  private final void mergeTerms() throws CorruptIndexException, IOException {
    SegmentWriteState state = new SegmentWriteState(null, directory, segment, null, mergedDocs, 0, termIndexInterval);
    final FormatPostingsFieldsConsumer consumer = new FormatPostingsFieldsWriter(state, fieldInfos);
    try {
      queue = new SegmentMergeQueue(readers.size());
      mergeTermInfos(consumer);
    } finally {
      // Always finish the consumer and release the queue, even on abort/error.
      consumer.finish();
      if (queue != null) queue.close();
    }
  }
  // Whether the field currently being merged omits term freq/positions;
  // set per-field in mergeTermInfos and read by appendPostings.
  boolean omitTermFreqAndPositions;
  /**
   * K-way merge of all readers' term enumerations. For each distinct term,
   * pops every segment positioned on that term off the queue, appends their
   * postings via {@link #appendPostings}, then re-inserts the advanced
   * enumerators. Also records per-reader doc maps / delete counts.
   */
  private final void mergeTermInfos(final FormatPostingsFieldsConsumer consumer) throws CorruptIndexException, IOException {
    int base = 0;
    final int readerCount = readers.size();
    for (int i = 0; i < readerCount; i++) {
      IndexReader reader = readers.get(i);
      TermEnum termEnum = reader.terms();
      SegmentMergeInfo smi = new SegmentMergeInfo(base, termEnum, reader);
      int[] docMap  = smi.getDocMap();
      if (docMap != null) {
        // This reader has deletions: lazily allocate the per-reader maps.
        if (docMaps == null) {
          docMaps = new int[readerCount][];
          delCounts = new int[readerCount];
        }
        docMaps[i] = docMap;
        delCounts[i] = smi.reader.maxDoc() - smi.reader.numDocs();
      }
      // Each reader's docs are appended after all previous readers' live docs.
      base += reader.numDocs();
      assert reader.numDocs() == reader.maxDoc() - smi.delCount;
      if (smi.next())
        queue.add(smi);				  // initialize queue
      else
        smi.close();
    }
    SegmentMergeInfo[] match = new SegmentMergeInfo[readers.size()];
    String currentField = null;
    FormatPostingsTermsConsumer termsConsumer = null;
    while (queue.size() > 0) {
      int matchSize = 0;			  // pop matching terms
      match[matchSize++] = queue.pop();
      Term term = match[0].term;
      SegmentMergeInfo top = queue.top();
      // Gather every segment currently positioned on the same term.
      while (top != null && term.compareTo(top.term) == 0) {
        match[matchSize++] = queue.pop();
        top = queue.top();
      }
      // NOTE(review): reference comparison on field names assumes Term fields
      // are interned (as elsewhere in this codebase) — confirm before changing.
      if (currentField != term.field) {
        currentField = term.field;
        if (termsConsumer != null)
          termsConsumer.finish();
        final FieldInfo fieldInfo = fieldInfos.fieldInfo(currentField);
        termsConsumer = consumer.addField(fieldInfo);
        omitTermFreqAndPositions = fieldInfo.omitTermFreqAndPositions;
      }
      int df = appendPostings(termsConsumer, match, matchSize);		  // add new TermInfo
      checkAbort.work(df/3.0);
      while (matchSize > 0) {
        SegmentMergeInfo smi = match[--matchSize];
        if (smi.next())
          queue.add(smi);			  // restore queue
        else
          smi.close();				  // done with a segment
      }
    }
  }
  // Reused scratch buffer for copying payloads in appendPostings; grows as needed.
  private byte[] payloadBuffer;
  // Per-reader old-docID -> new-docID maps; entry is null for readers without deletions.
  private int[][] docMaps;
  /** Returns the per-reader doc maps built during the merge (null entries = no deletions). */
  int[][] getDocMaps() {
    return docMaps;
  }
  // Per-reader deleted-doc counts, parallel to docMaps.
  private int[] delCounts;
  /** Returns the per-reader deleted-document counts built during the merge. */
  int[] getDelCounts() {
    return delCounts;
  }
  /** Process postings from multiple segments all positioned on the
   *  same term. Writes out merged entries into freqOutput and
   *  the proxOutput streams.
   *
   * @param smis array of segments
   * @param n number of cells in the array actually occupied
   * @return number of documents across all segments where this term was found
   * @throws CorruptIndexException if the index is corrupt
   * @throws IOException if there is a low-level IO error
   */
  private final int appendPostings(final FormatPostingsTermsConsumer termsConsumer, SegmentMergeInfo[] smis, int n)
        throws CorruptIndexException, IOException {
    final FormatPostingsDocsConsumer docConsumer = termsConsumer.addTerm(smis[0].term.text);
    int df = 0;
    for (int i = 0; i < n; i++) {
      SegmentMergeInfo smi = smis[i];
      TermPositions postings = smi.getPositions();
      assert postings != null;
      int base = smi.base;
      int[] docMap = smi.getDocMap();
      postings.seek(smi.termEnum);
      while (postings.next()) {
        df++;
        int doc = postings.doc();
        if (docMap != null)
          doc = docMap[doc];                      // map around deletions
        doc += base;                              // convert to merged space
        final int freq = postings.freq();
        final FormatPostingsPositionsConsumer posConsumer = docConsumer.addDoc(doc, freq);
        if (!omitTermFreqAndPositions) {
          for (int j = 0; j < freq; j++) {
            final int position = postings.nextPosition();
            final int payloadLength = postings.getPayloadLength();
            if (payloadLength > 0) {
              // Grow (never shrink) the shared payload scratch buffer.
              if (payloadBuffer == null || payloadBuffer.length < payloadLength)
                payloadBuffer = new byte[payloadLength];
              postings.getPayload(payloadBuffer, 0);
            }
            // When payloadLength == 0 the buffer contents are ignored
            // (and payloadBuffer may still be null on the first position).
            posConsumer.addPosition(position, payloadBuffer, 0, payloadLength);
          }
          posConsumer.finish();
        }
      }
    }
    docConsumer.finish();
    return df;
  }
  /**
   * Merges the norms of every indexed, norm-bearing field into a single .nrm
   * file. The output file (and its header) is created lazily on the first
   * such field; deleted docs' norms are filtered out byte-by-byte.
   */
  private void mergeNorms() throws IOException {
    // Shared scratch buffer, grown to the largest reader's maxDoc.
    byte[] normBuffer = null;
    IndexOutput output = null;
    try {
      int numFieldInfos = fieldInfos.size();
      for (int i = 0; i < numFieldInfos; i++) {
        FieldInfo fi = fieldInfos.fieldInfo(i);
        if (fi.isIndexed && !fi.omitNorms) {
          if (output == null) {
            output = directory.createOutput(segment + "." + IndexFileNames.NORMS_EXTENSION);
            output.writeBytes(NORMS_HEADER,NORMS_HEADER.length);
          }
          for ( IndexReader reader : readers) {
            int maxDoc = reader.maxDoc();
            if (normBuffer == null || normBuffer.length < maxDoc) {
              // the buffer is too small for the current segment
              normBuffer = new byte[maxDoc];
            }
            reader.norms(fi.name, normBuffer, 0);
            if (!reader.hasDeletions()) {
              //optimized case for segments without deleted docs
              output.writeBytes(normBuffer, maxDoc);
            } else {
              // this segment has deleted docs, so we have to
              // check for every doc if it is deleted or not
              for (int k = 0; k < maxDoc; k++) {
                if (!reader.isDeleted(k)) {
                  output.writeByte(normBuffer[k]);
                }
              }
            }
            checkAbort.work(maxDoc);
          }
        }
      }
    } finally {
      if (output != null) {
        output.close();
      }
    }
  }
static class CheckAbort {
private double workCount;
private MergePolicy.OneMerge merge;
private Directory dir;
public CheckAbort(MergePolicy.OneMerge merge, Directory dir) {
this.merge = merge;
this.dir = dir;
}
/**
* Records the fact that roughly units amount of work
* have been done since this method was last called.
* When adding time-consuming code into SegmentMerger,
* you should test different values for units to ensure
* that the time in between calls to merge.checkAborted
* is up to ~ 1 second.
*/
public void work(double units) throws MergePolicy.MergeAbortedException {
workCount += units;
if (workCount >= 10000.0) {
merge.checkAborted(dir);
workCount = 0;
}
}
}
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/SegmentMerger.java | Java | art | 27,968 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import org.apache.lucene.store.IndexInput;
/**
 * A {@link TermEnum} over a segment's term dictionary (.tis/.tii file).
 * Understands every on-disk format back to the original pre-versioned layout
 * and exposes the current term, its {@link TermInfo}, and (for index enums)
 * the running index pointer.
 */
final class SegmentTermEnum extends TermEnum implements Cloneable {
  private IndexInput input;
  FieldInfos fieldInfos;
  long size;                         // total number of terms in the file
  long position = -1;                // ordinal of the current term, -1 before first next()
  private TermBuffer termBuffer = new TermBuffer();
  private TermBuffer prevBuffer = new TermBuffer();
  private TermBuffer scanBuffer = new TermBuffer(); // used for scanning
  private TermInfo termInfo = new TermInfo();
  private int format;                // on-disk format version (0 or negative)
  private boolean isIndex = false;   // true when reading the .tii (index) file
  long indexPointer = 0;
  int indexInterval;
  int skipInterval;
  int maxSkipLevels;
  private int formatM1SkipInterval;  // skip interval stored by format -1 files only
  /**
   * Opens a term enum over {@code i}.
   *
   * @param i the input positioned at the start of a .tis/.tii file
   * @param fis field infos used to resolve field numbers to names
   * @param isi true if this is the term-index (.tii) file
   * @throws CorruptIndexException if the file declares an unsupported format
   * @throws IOException if there is a low-level IO error
   */
  SegmentTermEnum(IndexInput i, FieldInfos fis, boolean isi)
          throws CorruptIndexException, IOException {
    input = i;
    fieldInfos = fis;
    isIndex = isi;
    maxSkipLevels = 1; // use single-level skip lists for formats > -3
    int firstInt = input.readInt();
    if (firstInt >= 0) {
      // original-format file, without explicit format version number
      format = 0;
      size = firstInt;
      // back-compatible settings
      indexInterval = 128;
      skipInterval = Integer.MAX_VALUE; // switch off skipTo optimization
    } else {
      // we have a format version number
      format = firstInt;
      // check that it is a format we can understand
      if (format < TermInfosWriter.FORMAT_CURRENT)
        throw new CorruptIndexException("Unknown format version:" + format + " expected " + TermInfosWriter.FORMAT_CURRENT + " or higher");
      size = input.readLong();                    // read the size
      if(format == -1){
        if (!isIndex) {
          indexInterval = input.readInt();
          formatM1SkipInterval = input.readInt();
        }
        // switch off skipTo optimization for file format prior to 1.4rc2 in order to avoid a bug in
        // skipTo implementation of these versions
        skipInterval = Integer.MAX_VALUE;
      } else {
        indexInterval = input.readInt();
        skipInterval = input.readInt();
        if (format <= TermInfosWriter.FORMAT) {
          // this new format introduces multi-level skipping
          maxSkipLevels = input.readInt();
        }
      }
      assert indexInterval > 0: "indexInterval=" + indexInterval + " is negative; must be > 0";
      assert skipInterval > 0: "skipInterval=" + skipInterval + " is negative; must be > 0";
    }
    if (format > TermInfosWriter.FORMAT_VERSION_UTF8_LENGTH_IN_BYTES) {
      // Pre-UTF8 files store term text as modified UTF-16 char counts.
      termBuffer.setPreUTF8Strings();
      scanBuffer.setPreUTF8Strings();
      prevBuffer.setPreUTF8Strings();
    }
  }
  @Override
  protected Object clone() {
    SegmentTermEnum clone;
    try {
      clone = (SegmentTermEnum) super.clone();
    } catch (CloneNotSupportedException e) {
      // Cannot happen: this class implements Cloneable. Previously this
      // exception was silently swallowed, which would have produced an NPE
      // below had it ever fired; surface it instead.
      throw new RuntimeException(e);
    }
    // Deep-copy the mutable state so the clone enumerates independently.
    clone.input = (IndexInput) input.clone();
    clone.termInfo = new TermInfo(termInfo);
    clone.termBuffer = (TermBuffer)termBuffer.clone();
    clone.prevBuffer = (TermBuffer)prevBuffer.clone();
    clone.scanBuffer = new TermBuffer();
    return clone;
  }
  /** Repositions this enum at an absolute file pointer / term ordinal. */
  final void seek(long pointer, long p, Term t, TermInfo ti)
          throws IOException {
    input.seek(pointer);
    position = p;
    termBuffer.set(t);
    prevBuffer.reset();
    termInfo.set(ti);
  }
  /** Increments the enumeration to the next element.  True if one exists.*/
  @Override
  public final boolean next() throws IOException {
    if (position++ >= size - 1) {
      // Exhausted: remember the last term as prev, clear current.
      prevBuffer.set(termBuffer);
      termBuffer.reset();
      return false;
    }
    prevBuffer.set(termBuffer);
    termBuffer.read(input, fieldInfos);
    termInfo.docFreq = input.readVInt();	  // read doc freq
    // Freq/prox pointers are delta-encoded relative to the previous term.
    termInfo.freqPointer += input.readVLong();	  // read freq pointer
    termInfo.proxPointer += input.readVLong();	  // read prox pointer
    if(format == -1){
      //  just read skipOffset in order to increment  file pointer;
      // value is never used since skipTo is switched off
      if (!isIndex) {
        if (termInfo.docFreq > formatM1SkipInterval) {
          termInfo.skipOffset = input.readVInt();
        }
      }
    }
    else{
      if (termInfo.docFreq >= skipInterval)
        termInfo.skipOffset = input.readVInt();
    }
    if (isIndex)
      indexPointer += input.readVLong();	  // read index pointer
    return true;
  }
  /** Optimized scan, without allocating new terms.
   *  Return number of invocations to next(). */
  final int scanTo(Term term) throws IOException {
    scanBuffer.set(term);
    int count = 0;
    // Advance while the current term is still less than the target.
    while (scanBuffer.compareTo(termBuffer) > 0 && next()) {
      count++;
    }
    return count;
  }
  /** Returns the current Term in the enumeration.
   Initially invalid, valid after next() called for the first time.*/
  @Override
  public final Term term() {
    return termBuffer.toTerm();
  }
  /** Returns the previous Term enumerated. Initially null.*/
  final Term prev() {
    return prevBuffer.toTerm();
  }
  /** Returns the current TermInfo in the enumeration.
   Initially invalid, valid after next() called for the first time.*/
  final TermInfo termInfo() {
    // Defensive copy: callers may hold onto the returned TermInfo.
    return new TermInfo(termInfo);
  }
  /** Sets the argument to the current TermInfo in the enumeration.
   Initially invalid, valid after next() called for the first time.*/
  final void termInfo(TermInfo ti) {
    ti.set(termInfo);
  }
  /** Returns the docFreq from the current TermInfo in the enumeration.
   Initially invalid, valid after next() called for the first time.*/
  @Override
  public final int docFreq() {
    return termInfo.docFreq;
  }
  /* Returns the freqPointer from the current TermInfo in the enumeration.
    Initially invalid, valid after next() called for the first time.*/
  final long freqPointer() {
    return termInfo.freqPointer;
  }
  /* Returns the proxPointer from the current TermInfo in the enumeration.
    Initially invalid, valid after next() called for the first time.*/
  final long proxPointer() {
    return termInfo.proxPointer;
  }
  /** Closes the enumeration to further activity, freeing resources. */
  @Override
  public final void close() throws IOException {
    input.close();
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/SegmentTermEnum.java | Java | art | 6,965 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.io.PrintStream;
import java.text.NumberFormat;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import java.util.HashSet;
import java.util.List;
import java.util.Map.Entry;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Similarity;
import org.apache.lucene.search.Weight;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMFile;
import org.apache.lucene.util.ArrayUtil;
import org.apache.lucene.util.Constants;
import org.apache.lucene.util.ThreadInterruptedException;
/**
* This class accepts multiple added documents and directly
* writes a single segment file. It does this more
* efficiently than creating a single segment per document
* (with DocumentWriter) and doing standard merges on those
* segments.
*
* Each added document is passed to the {@link DocConsumer},
* which in turn processes the document and interacts with
* other consumers in the indexing chain. Certain
* consumers, like {@link StoredFieldsWriter} and {@link
* TermVectorsTermsWriter}, digest a document and
* immediately write bytes to the "doc store" files (ie,
* they do not consume RAM per document, except while they
* are processing the document).
*
* Other consumers, eg {@link FreqProxTermsWriter} and
* {@link NormsWriter}, buffer bytes in RAM and flush only
* when a new segment is produced.
* Once we have used our allowed RAM buffer, or the number
* of added docs is large enough (in the case we are
* flushing by doc count instead of RAM usage), we create a
* real segment and flush it to the Directory.
*
* Threads:
*
* Multiple threads are allowed into addDocument at once.
* There is an initial synchronized call to getThreadState
* which allocates a ThreadState for this thread. The same
* thread will get the same ThreadState over time (thread
* affinity) so that if there are consistent patterns (for
* example each thread is indexing a different content
* source) then we make better use of RAM. Then
* processDocument is called on that ThreadState without
* synchronization (most of the "heavy lifting" is in this
* call). Finally the synchronized "finishDocument" is
* called to flush changes to the directory.
*
* When flush is called by IndexWriter we forcefully idle
* all threads and flush only once they are all idle. This
* means you can call flush with a given thread even while
* other threads are actively adding/deleting documents.
*
*
* Exceptions:
*
* Because this class directly updates in-memory posting
* lists, and flushes stored fields and term vectors
* directly to files in the directory, there are certain
* limited times when an exception can corrupt this state.
* For example, a disk full while flushing stored fields
* leaves this file in a corrupt state. Or, an OOM
* exception while appending to the in-memory posting lists
* can corrupt that posting list. We call such exceptions
* "aborting exceptions". In these cases we must call
* abort() to discard all docs added since the last flush.
*
* All other exceptions ("non-aborting exceptions") can
* still partially update the index structures. These
* updates are consistent, but, they represent only a part
* of the document seen up until the exception was hit.
* When this happens, we immediately mark the document as
* deleted so that the document is always atomically ("all
* or none") added to the index.
*/
final class DocumentsWriter {
  IndexWriter writer;
  Directory directory;
  String segment;                         // Current segment we are working on
  private String docStoreSegment;         // Current doc-store segment we are writing
  private int docStoreOffset;                     // Current starting doc-store offset of current segment
  private int nextDocID;                          // Next docID to be added
  private int numDocsInRAM;                       // # docs buffered in RAM
  int numDocsInStore;                     // # docs written to doc stores
  // Max # ThreadState instances; if there are more threads
  // than this they share ThreadStates
  private final static int MAX_THREAD_STATE = 5;
  private DocumentsWriterThreadState[] threadStates = new DocumentsWriterThreadState[0];
  // Thread -> ThreadState affinity map, so each indexing thread keeps
  // reusing the same per-thread buffers (better RAM locality).
  private final HashMap<Thread,DocumentsWriterThreadState> threadBindings = new HashMap<Thread,DocumentsWriterThreadState>();
  private int pauseThreads;               // Non-zero when we need all threads to
                                          // pause (eg to flush)
  boolean flushPending;                   // True when a thread has decided to flush
  boolean bufferIsFull;                   // True when it's time to write segment
  private boolean aborting;               // True if an abort is pending
  private DocFieldProcessor docFieldProcessor;
  PrintStream infoStream;                 // Debug/diagnostics output; may be null
  int maxFieldLength = IndexWriter.DEFAULT_MAX_FIELD_LENGTH;
  Similarity similarity;
  // Files created by the most recent flush; consumed by IndexWriter.
  List<String> newFiles;
  /**
   * Per-thread, per-document state handed through the indexing chain while
   * one document is being processed.
   */
  static class DocState {
    DocumentsWriter docWriter;
    Analyzer analyzer;
    int maxFieldLength;
    PrintStream infoStream;
    Similarity similarity;
    int docID;          // docID assigned to the document being processed
    Document doc;       // the document being processed; cleared after use
    String maxTermPrefix;
    // Only called by asserts
    public boolean testPoint(String name) {
      return docWriter.writer.testPoint(name);
    }
    public void clear() {
      // don't hold onto doc nor analyzer, in case it is
      // largish:
      doc = null;
      analyzer = null;
    }
  }
  /** Consumer returns this on each doc.  This holds any
   *  state that must be flushed synchronized "in docID
   *  order".  We gather these and flush them in order. */
  abstract static class DocWriter {
    DocWriter next;   // intrusive linked list, flushed in docID order
    int docID;
    /** Flushes this doc's buffered state; called in docID order. */
    abstract void finish() throws IOException;
    /** Discards this doc's buffered state after an aborting exception. */
    abstract void abort();
    /** RAM currently consumed by this writer, for flush accounting. */
    abstract long sizeInBytes();
    void setNext(DocWriter next) {
      this.next = next;
    }
  }
/**
 * Create and return a new DocWriterBuffer.
 */
PerDocBuffer newPerDocBuffer() {
  return new PerDocBuffer();
}

/**
 * RAMFile buffer for DocWriters.  Its byte blocks are drawn
 * from, and recycled back to, the shared perDocAllocator pool.
 */
class PerDocBuffer extends RAMFile {
  /**
   * Allocate bytes used from shared pool.
   */
  protected byte[] newBuffer(int size) {
    assert size == PER_DOC_BLOCK_SIZE;
    return perDocAllocator.getByteBlock(false);
  }

  /**
   * Recycle the bytes used.
   */
  synchronized void recycle() {
    if (buffers.size() > 0) {
      setLength(0);

      // Recycle the blocks
      perDocAllocator.recycleByteBlocks(buffers);
      buffers.clear();
      sizeInBytes = 0;

      assert numBuffers() == 0;
    }
  }
}

/**
 * The IndexingChain must define the {@link #getChain(DocumentsWriter)} method
 * which returns the DocConsumer that the DocumentsWriter calls to process the
 * documents.
 */
abstract static class IndexingChain {
  abstract DocConsumer getChain(DocumentsWriter documentsWriter);
}
// Default chain: builds the full inverted-index pipeline
// (term vectors + freq/prox postings + norms + stored fields).
static final IndexingChain DefaultIndexingChain = new IndexingChain() {

  @Override
  DocConsumer getChain(DocumentsWriter documentsWriter) {
    /*
    This is the current indexing chain:

    DocConsumer / DocConsumerPerThread
      --> code: DocFieldProcessor / DocFieldProcessorPerThread
        --> DocFieldConsumer / DocFieldConsumerPerThread / DocFieldConsumerPerField
          --> code: DocFieldConsumers / DocFieldConsumersPerThread / DocFieldConsumersPerField
            --> code: DocInverter / DocInverterPerThread / DocInverterPerField
              --> InvertedDocConsumer / InvertedDocConsumerPerThread / InvertedDocConsumerPerField
                --> code: TermsHash / TermsHashPerThread / TermsHashPerField
                  --> TermsHashConsumer / TermsHashConsumerPerThread / TermsHashConsumerPerField
                    --> code: FreqProxTermsWriter / FreqProxTermsWriterPerThread / FreqProxTermsWriterPerField
                    --> code: TermVectorsTermsWriter / TermVectorsTermsWriterPerThread / TermVectorsTermsWriterPerField
              --> InvertedDocEndConsumer / InvertedDocConsumerPerThread / InvertedDocConsumerPerField
                --> code: NormsWriter / NormsWriterPerThread / NormsWriterPerField
      --> code: StoredFieldsWriter / StoredFieldsWriterPerThread / StoredFieldsWriterPerField
    */

    // Build up indexing chain:

    final TermsHashConsumer termVectorsWriter = new TermVectorsTermsWriter(documentsWriter);
    final TermsHashConsumer freqProxWriter = new FreqProxTermsWriter();

    // The primary TermsHash writes freq/prox postings; its nested
    // secondary TermsHash writes term vectors.
    final InvertedDocConsumer termsHash = new TermsHash(documentsWriter, true, freqProxWriter,
                                                        new TermsHash(documentsWriter, false, termVectorsWriter, null));
    final NormsWriter normsWriter = new NormsWriter();
    final DocInverter docInverter = new DocInverter(termsHash, normsWriter);
    return new DocFieldProcessor(documentsWriter, docInverter);
  }
};
final DocConsumer consumer;  // head of the indexing chain

// Deletes done after the last flush; these are discarded
// on abort
private BufferedDeletes deletesInRAM = new BufferedDeletes(false);

// Deletes done before the last flush; these are still
// kept on abort
private BufferedDeletes deletesFlushed = new BufferedDeletes(true);

// The max number of delete terms that can be buffered before
// they must be flushed to disk.
private int maxBufferedDeleteTerms = IndexWriter.DEFAULT_MAX_BUFFERED_DELETE_TERMS;

// How much RAM we can use before flushing.  This is 0 if
// we are flushing by doc count instead.
private long ramBufferSize = (long) (IndexWriter.DEFAULT_RAM_BUFFER_SIZE_MB*1024*1024);
private long waitQueuePauseBytes = (long) (ramBufferSize*0.1);
private long waitQueueResumeBytes = (long) (ramBufferSize*0.05);

// If we've allocated 5% over our RAM budget, we then
// free down to 95%
private long freeTrigger = (long) (IndexWriter.DEFAULT_RAM_BUFFER_SIZE_MB*1024*1024*1.05);
private long freeLevel = (long) (IndexWriter.DEFAULT_RAM_BUFFER_SIZE_MB*1024*1024*0.95);

// Flush @ this number of docs.  If ramBufferSize is
// non-zero we will flush by RAM usage instead.
private int maxBufferedDocs = IndexWriter.DEFAULT_MAX_BUFFERED_DOCS;

private int flushedDocCount;  // How many docs already flushed to index

synchronized void updateFlushedDocCount(int n) {
  flushedDocCount += n;
}
synchronized int getFlushedDocCount() {
  return flushedDocCount;
}
synchronized void setFlushedDocCount(int n) {
  flushedDocCount = n;
}

private boolean closed;  // set once by close(); guarded by this

// Sole constructor: wires up the indexing chain for this writer.
DocumentsWriter(Directory directory, IndexWriter writer, IndexingChain indexingChain) throws IOException {
  this.directory = directory;
  this.writer = writer;
  this.similarity = writer.getSimilarity();
  // Start counting from docs already committed to the index:
  flushedDocCount = writer.maxDoc();

  consumer = indexingChain.getChain(this);
  if (consumer instanceof DocFieldProcessor) {
    docFieldProcessor = (DocFieldProcessor) consumer;
  }
}
/** Returns true if any of the fields in the current
 *  buffered docs have omitTermFreqAndPositions==false.
 *  Conservatively answers true when no DocFieldProcessor
 *  is available to consult. */
boolean hasProx() {
  if (docFieldProcessor == null) {
    return true;
  }
  return docFieldProcessor.fieldInfos.hasProx();
}
/** If non-null, various details of indexing are printed
 *  here.  The stream is also pushed down into every live
 *  per-thread DocState. */
synchronized void setInfoStream(PrintStream infoStream) {
  this.infoStream = infoStream;
  for (DocumentsWriterThreadState threadState : threadStates) {
    threadState.docState.infoStream = infoStream;
  }
}

/** Sets the max field length and propagates it to every
 *  live per-thread DocState. */
synchronized void setMaxFieldLength(int maxFieldLength) {
  this.maxFieldLength = maxFieldLength;
  for (DocumentsWriterThreadState threadState : threadStates) {
    threadState.docState.maxFieldLength = maxFieldLength;
  }
}

/** Sets the Similarity and propagates it to every live
 *  per-thread DocState. */
synchronized void setSimilarity(Similarity similarity) {
  this.similarity = similarity;
  for (DocumentsWriterThreadState threadState : threadStates) {
    threadState.docState.similarity = similarity;
  }
}
/** Set how much RAM we can use before flushing.  Passing
 *  IndexWriter.DISABLE_AUTO_FLUSH disables RAM-based
 *  flushing and pins the wait-queue thresholds to fixed
 *  values; otherwise all derived thresholds are recomputed
 *  from the new size. */
synchronized void setRAMBufferSizeMB(double mb) {
  if (mb == IndexWriter.DISABLE_AUTO_FLUSH) {
    ramBufferSize = IndexWriter.DISABLE_AUTO_FLUSH;
    waitQueuePauseBytes = 4*1024*1024;
    waitQueueResumeBytes = 2*1024*1024;
    return;
  }
  final long newSize = (long) (mb*1024*1024);
  ramBufferSize = newSize;
  waitQueuePauseBytes = (long) (newSize*0.1);    // pause producers at 10% of budget queued
  waitQueueResumeBytes = (long) (newSize*0.05);  // resume once drained to 5%
  freeTrigger = (long) (1.05*newSize);           // start freeing at 105% of budget
  freeLevel = (long) (0.95*newSize);             // free down to 95%
}

/** Returns the RAM flush trigger in MB, or the sentinel
 *  IndexWriter.DISABLE_AUTO_FLUSH value unconverted. */
synchronized double getRAMBufferSizeMB() {
  if (ramBufferSize == IndexWriter.DISABLE_AUTO_FLUSH) {
    return ramBufferSize;
  }
  return ramBufferSize/1024./1024.;
}
/** Set max buffered docs, which means we will flush by
 *  doc count instead of by RAM usage. */
void setMaxBufferedDocs(int count) {
  this.maxBufferedDocs = count;
}

/** Current doc-count flush trigger. */
int getMaxBufferedDocs() {
  return this.maxBufferedDocs;
}

/** Get current segment name we are writing. */
String getSegment() {
  return this.segment;
}

/** Returns how many docs are currently buffered in RAM. */
int getNumDocsInRAM() {
  return this.numDocsInRAM;
}

/** Returns the current doc store segment we are writing
 *  to. */
synchronized String getDocStoreSegment() {
  return this.docStoreSegment;
}

/** Returns the doc offset into the shared doc store for
 *  the current buffered docs. */
int getDocStoreOffset() {
  return this.docStoreOffset;
}
/** Closes the current open doc stores and returns the doc
 *  store segment name.  This returns null if there are no
 *  buffered documents. */
synchronized String closeDocStore() throws IOException {
  assert allThreadsIdle();

  if (infoStream != null)
    message("closeDocStore: " + openFiles.size() + " files to flush to segment " + docStoreSegment + " numDocs=" + numDocsInStore);

  boolean success = false;

  try {
    initFlushState(true);
    closedFiles.clear();

    consumer.closeDocStore(flushState);
    assert 0 == openFiles.size();

    String s = docStoreSegment;
    // Reset doc-store state so the next doc starts a fresh store:
    docStoreSegment = null;
    docStoreOffset = 0;
    numDocsInStore = 0;
    success = true;
    return s;
  } finally {
    // Any failure leaves the doc store in an unknown state;
    // abort to discard all buffered docs.
    if (!success) {
      abort();
    }
  }
}
private Collection<String> abortedFiles;  // List of files that were written before last abort()

private SegmentWriteState flushState;     // state shared with the consumer during flush/closeDocStore

Collection<String> abortedFiles() {
  return abortedFiles;
}

// Prefixes the text with "DW: " and routes it through the
// owning writer's messaging (only when an infoStream is set).
void message(String message) {
  if (infoStream != null)
    writer.message("DW: " + message);
}

final List<String> openFiles = new ArrayList<String>();    // files open in current doc store; guarded by this
final List<String> closedFiles = new ArrayList<String>();  // files finished since last closeDocStore; guarded by this
/* Returns Collection of files in use by this instance,
 * including any flushed segments.  The returned list is a
 * shallow copy the caller may freely use. */
synchronized List<String> openFiles() {
  // Copy constructor instead of ArrayList.clone(): produces the
  // same shallow copy but without the unchecked cast (and the
  // @SuppressWarnings it forced on the method).
  return new ArrayList<String>(openFiles);
}

/* Snapshot of the files closed since the last closeDocStore(). */
synchronized List<String> closedFiles() {
  return new ArrayList<String>(closedFiles);
}
// Records that this file is now open in the current doc store.
synchronized void addOpenFile(String name) {
  assert !openFiles.contains(name);
  openFiles.add(name);
}

// Moves a file from the open set to the closed list.
synchronized void removeOpenFile(String name) {
  assert openFiles.contains(name);
  openFiles.remove(name);
  closedFiles.add(name);
}

// Flags that an abort is pending; checked by finishDocument()
// and the waitReady() loop.
synchronized void setAborting() {
  aborting = true;
}
/** Called if we hit an exception at a bad time (when
 *  updating the index files) and must discard all
 *  currently buffered docs.  This resets our state,
 *  discarding any docs added since last flush. */
synchronized void abort() throws IOException {
  try {
    if (infoStream != null) {
      message("docWriter: now abort");
    }

    // Forcefully remove waiting ThreadStates from line
    waitQueue.abort();

    // Wait for all other threads to finish with
    // DocumentsWriter:
    pauseAllThreads();

    try {

      assert 0 == waitQueue.numWaiting;

      waitQueue.waitingBytes = 0;

      // Snapshot the files written so far; best-effort — on any
      // failure we simply report no aborted files.
      try {
        abortedFiles = openFiles();
      } catch (Throwable t) {
        abortedFiles = null;
      }

      deletesInRAM.clear();

      openFiles.clear();

      // Abort each per-thread consumer; failures here are
      // swallowed so every thread state still gets aborted.
      for(int i=0;i<threadStates.length;i++)
        try {
          threadStates[i].consumer.abort();
        } catch (Throwable t) {
        }

      try {
        consumer.abort();
      } catch (Throwable t) {
      }

      docStoreSegment = null;
      numDocsInStore = 0;
      docStoreOffset = 0;

      // Reset all postings data
      doAfterFlush();

    } finally {
      resumeAllThreads();
    }
  } finally {
    // Always clear the aborting flag and wake blocked threads,
    // even if the abort itself threw.
    aborting = false;
    notifyAll();
    if (infoStream != null) {
      message("docWriter: done abort");
    }
  }
}
/** Reset after a flush: clears thread bindings, the wait
 *  queue, segment name and all per-thread/RAM counters so
 *  the next document starts a fresh segment. */
private void doAfterFlush() throws IOException {
  // All ThreadStates should be idle when we are called
  assert allThreadsIdle();
  threadBindings.clear();
  waitQueue.reset();
  segment = null;
  numDocsInRAM = 0;
  nextDocID = 0;
  bufferIsFull = false;
  flushPending = false;
  for(int i=0;i<threadStates.length;i++)
    threadStates[i].doAfterFlush();
  numBytesUsed = 0;
}
// Returns true if an abort is in progress.  Increments the
// pause counter, then blocks until every thread state is idle.
synchronized boolean pauseAllThreads() {
  pauseThreads++;
  while(!allThreadsIdle()) {
    try {
      wait();
    } catch (InterruptedException ie) {
      // Convert to Lucene's unchecked wrapper, preserving cause
      throw new ThreadInterruptedException(ie);
    }
  }

  return aborting;
}

// Decrements the pause counter; wakes waiters when the last
// pauser releases.
synchronized void resumeAllThreads() {
  pauseThreads--;
  assert pauseThreads >= 0;
  if (0 == pauseThreads)
    notifyAll();
}

// True only if no per-thread state is mid-document.
private synchronized boolean allThreadsIdle() {
  for(int i=0;i<threadStates.length;i++)
    if (!threadStates[i].isIdle)
      return false;
  return true;
}
/** True if there is anything to flush: buffered docs or
 *  any buffered delete (term, docID or query). */
synchronized boolean anyChanges() {
  if (numDocsInRAM != 0) {
    return true;
  }
  if (deletesInRAM.numTerms != 0) {
    return true;
  }
  if (deletesInRAM.docIDs.size() != 0) {
    return true;
  }
  return deletesInRAM.queries.size() != 0;
}
// Assigns segment names (if needed) and builds the
// SegmentWriteState handed to the consumer during
// flush()/closeDocStore().
synchronized private void initFlushState(boolean onlyDocStore) {
  initSegmentName(onlyDocStore);
  flushState = new SegmentWriteState(this, directory, segment, docStoreSegment, numDocsInRAM, numDocsInStore, writer.getTermIndexInterval());
}
/** Flush all pending docs to a new segment.  Returns the
 *  number of docs written.  Caller must have paused all
 *  threads (asserted).  Any failure triggers abort(). */
synchronized int flush(boolean closeDocStore) throws IOException {

  assert allThreadsIdle();

  assert numDocsInRAM > 0;

  assert nextDocID == numDocsInRAM;
  assert waitQueue.numWaiting == 0;
  assert waitQueue.waitingBytes == 0;

  initFlushState(false);

  docStoreOffset = numDocsInStore;

  if (infoStream != null)
    message("flush postings as segment " + flushState.segmentName + " numDocs=" + numDocsInRAM);

  boolean success = false;

  try {

    if (closeDocStore) {
      // We only close the doc store here when it is private to
      // this segment (same name) — asserted below.
      assert flushState.docStoreSegmentName != null;
      assert flushState.docStoreSegmentName.equals(flushState.segmentName);
      closeDocStore();
      flushState.numDocsInStore = 0;
    }

    // Gather every per-thread consumer so the chain can merge
    // their buffered postings into one segment:
    Collection<DocConsumerPerThread> threads = new HashSet<DocConsumerPerThread>();
    for(int i=0;i<threadStates.length;i++)
      threads.add(threadStates[i].consumer);
    consumer.flush(threads, flushState);

    if (infoStream != null) {
      // Report RAM-vs-flushed size ratio for diagnostics
      SegmentInfo si = new SegmentInfo(flushState.segmentName, flushState.numDocs, directory);
      final long newSegmentSize = si.sizeInBytes();
      String message = "  oldRAMSize=" + numBytesUsed +
        " newFlushedSize=" + newSegmentSize +
        " docs/MB=" + nf.format(numDocsInRAM/(newSegmentSize/1024./1024.)) +
        " new/old=" + nf.format(100.0*newSegmentSize/numBytesUsed) + "%";
      message(message);
    }

    flushedDocCount += flushState.numDocs;

    doAfterFlush();

    success = true;

  } finally {
    if (!success) {
      abort();
    }
  }

  assert waitQueue.waitingBytes == 0;

  return flushState.numDocs;
}
/** Build compound file for the segment we just flushed.
 *  Adds every file recorded in flushState.flushedFiles and
 *  performs the merge on close(). */
void createCompoundFile(String segment) throws IOException {
  final String compoundFileName = segment + "." + IndexFileNames.COMPOUND_FILE_EXTENSION;
  final CompoundFileWriter cfsWriter = new CompoundFileWriter(directory, compoundFileName);
  for (final String flushedFile : flushState.flushedFiles) {
    cfsWriter.addFile(flushedFile);
  }

  // Perform the merge
  cfsWriter.close();
}
/** Set flushPending if it is not already set and returns
 *  whether it was set.  This is used by IndexWriter to
 *  trigger a single flush even when multiple threads are
 *  trying to do so. */
synchronized boolean setFlushPending() {
  if (flushPending) {
    // Someone else already claimed the flush
    return false;
  }
  flushPending = true;
  return true;
}

/** Releases the flush claim taken by setFlushPending(). */
synchronized void clearFlushPending() {
  flushPending = false;
}
// Moves the in-RAM deletes into the flushed generation.
synchronized void pushDeletes() {
  deletesFlushed.update(deletesInRAM);
}

// Marks this writer closed and wakes any threads blocked in
// waitReady()/waitForWaitQueue() so they observe the close.
synchronized void close() {
  closed = true;
  notifyAll();
}

// Lazily assigns the segment name (and the doc-store segment
// name, which piggybacks on it) for the first doc since the
// last flush.
synchronized void initSegmentName(boolean onlyDocStore) {
  if (segment == null && (!onlyDocStore || docStoreSegment == null)) {
    segment = writer.newSegmentName();
    assert numDocsInRAM == 0;
  }
  if (docStoreSegment == null) {
    docStoreSegment = segment;
    assert numDocsInStore == 0;
  }
}
/** Returns a free (idle) ThreadState that may be used for
 * indexing this one document.  This call also pauses if a
 * flush is pending.  If delTerm is non-null then we
 * buffer this deleted term after the thread state has
 * been acquired. */
synchronized DocumentsWriterThreadState getThreadState(Document doc, Term delTerm) throws IOException {

  // First, find a thread state.  If this thread already
  // has affinity to a specific ThreadState, use that one
  // again.
  DocumentsWriterThreadState state = threadBindings.get(Thread.currentThread());
  if (state == null) {

    // First time this thread has called us since last
    // flush.  Find the least loaded thread state:
    DocumentsWriterThreadState minThreadState = null;
    for(int i=0;i<threadStates.length;i++) {
      DocumentsWriterThreadState ts = threadStates[i];
      if (minThreadState == null || ts.numThreads < minThreadState.numThreads)
        minThreadState = ts;
    }
    // Reuse an existing state if one is unused, or if we've hit
    // the MAX_THREAD_STATE cap; otherwise grow the array by one.
    if (minThreadState != null && (minThreadState.numThreads == 0 || threadStates.length >= MAX_THREAD_STATE)) {
      state = minThreadState;
      state.numThreads++;
    } else {
      // Just create a new "private" thread state
      DocumentsWriterThreadState[] newArray = new DocumentsWriterThreadState[1+threadStates.length];
      if (threadStates.length > 0)
        System.arraycopy(threadStates, 0, newArray, 0, threadStates.length);
      state = newArray[threadStates.length] = new DocumentsWriterThreadState(this);
      threadStates = newArray;
    }
    threadBindings.put(Thread.currentThread(), state);
  }

  // Next, wait until my thread state is idle (in case
  // it's shared with other threads) and for threads to
  // not be paused nor a flush pending:
  waitReady(state);

  // Allocate segment name if this is the first doc since
  // last flush:
  initSegmentName(false);

  state.isIdle = false;

  boolean success = false;
  try {
    state.docState.docID = nextDocID;

    assert writer.testPoint("DocumentsWriter.ThreadState.init start");

    if (delTerm != null) {
      addDeleteTerm(delTerm, state.docState.docID);
      state.doFlushAfter = timeToFlushDeletes();
    }

    assert writer.testPoint("DocumentsWriter.ThreadState.init after delTerm");

    nextDocID++;
    numDocsInRAM++;

    // We must at this point commit to flushing to ensure we
    // always get N docs when we flush by doc count, even if
    // > 1 thread is adding documents:
    if (!flushPending &&
        maxBufferedDocs != IndexWriter.DISABLE_AUTO_FLUSH
        && numDocsInRAM >= maxBufferedDocs) {
      flushPending = true;
      state.doFlushAfter = true;
    }

    success = true;
  } finally {
    if (!success) {
      // Forcefully idle this ThreadState:
      state.isIdle = true;
      notifyAll();
      // Also release a flush claim we may have just taken, so
      // another thread can flush:
      if (state.doFlushAfter) {
        state.doFlushAfter = false;
        flushPending = false;
      }
    }
  }

  return state;
}
/** Returns true if the caller (IndexWriter) should now
 * flush.  Thin wrapper: add with no delete term. */
boolean addDocument(Document doc, Analyzer analyzer)
  throws CorruptIndexException, IOException {
  return updateDocument(doc, analyzer, null);
}

// Thin wrapper: delete-then-add under one delete term.
boolean updateDocument(Term t, Document doc, Analyzer analyzer)
  throws CorruptIndexException, IOException {
  return updateDocument(doc, analyzer, t);
}
// Adds one document (optionally buffering delTerm first).
// Returns true if the caller should now trigger a flush.
// On failure the partially-added doc is marked deleted so
// indexing stays atomic ("all or none") per document.
boolean updateDocument(Document doc, Analyzer analyzer, Term delTerm)
  throws CorruptIndexException, IOException {

  // This call is synchronized but fast
  final DocumentsWriterThreadState state = getThreadState(doc, delTerm);

  final DocState docState = state.docState;
  docState.doc = doc;
  docState.analyzer = analyzer;

  boolean success = false;
  try {
    // This call is not synchronized and does all the
    // work
    final DocWriter perDoc;
    try {
      perDoc = state.consumer.processDocument();
    } finally {
      // Always release doc/analyzer refs, even on failure
      docState.clear();
    }

    // This call is synchronized but fast
    finishDocument(state, perDoc);
    success = true;
  } finally {
    if (!success) {
      synchronized(this) {

        if (aborting) {
          // An abort is already underway; just idle this state
          // and join it.
          state.isIdle = true;
          notifyAll();
          abort();
        } else {
          // Fill this docID's slot in the wait queue with a no-op
          // writer so later docs can still flush in docID order:
          skipDocWriter.docID = docState.docID;
          boolean success2 = false;
          try {
            waitQueue.add(skipDocWriter);
            success2 = true;
          } finally {
            if (!success2) {
              state.isIdle = true;
              notifyAll();
              abort();
              return false;
            }
          }

          state.isIdle = true;
          notifyAll();

          // If this thread state had decided to flush, we
          // must clear it so another thread can flush
          if (state.doFlushAfter) {
            state.doFlushAfter = false;
            flushPending = false;
            notifyAll();
          }

          // Immediately mark this document as deleted
          // since likely it was partially added.  This
          // keeps indexing as "all or none" (atomic) when
          // adding a document:
          addDeleteDocID(state.docState.docID);
        }
      }
    }
  }

  return state.doFlushAfter || timeToFlushDeletes();
}
// for testing: count of buffered delete terms (in-RAM generation)
synchronized int getNumBufferedDeleteTerms() {
  return deletesInRAM.numTerms;
}

// for testing: direct view of the in-RAM delete-term map
synchronized Map<Term,BufferedDeletes.Num> getBufferedDeleteTerms() {
  return deletesInRAM.terms;
}
/** Called whenever a merge has completed and the merged segments had deletions.
 *  Remaps the docIDs recorded in both delete generations and
 *  shifts flushedDocCount by the docs the merge removed. */
synchronized void remapDeletes(SegmentInfos infos, int[][] docMaps, int[] delCounts, MergePolicy.OneMerge merge, int mergeDocCount) {
  if (docMaps == null)
    // The merged segments had no deletes so docIDs did not change and we have nothing to do
    return;
  MergeDocIDRemapper mapper = new MergeDocIDRemapper(infos, docMaps, delCounts, merge, mergeDocCount);
  deletesInRAM.remap(mapper, infos, docMaps, delCounts, merge, mergeDocCount);
  deletesFlushed.remap(mapper, infos, docMaps, delCounts, merge, mergeDocCount);
  flushedDocCount -= mapper.docShift;
}
// Blocks until the given state (if non-null) is idle and no
// pause, pending flush or abort is in progress.  Throws
// AlreadyClosedException once the writer has been closed.
synchronized private void waitReady(DocumentsWriterThreadState state) {

  while (!closed && ((state != null && !state.isIdle) || pauseThreads != 0 || flushPending || aborting)) {
    try {
      wait();
    } catch (InterruptedException ie) {
      // Convert to Lucene's unchecked wrapper, preserving cause
      throw new ThreadInterruptedException(ie);
    }
  }

  if (closed)
    throw new AlreadyClosedException("this IndexWriter is closed");
}
/** Buffers several delete terms against the docs currently
 *  in RAM.  Returns true if the caller should now flush. */
synchronized boolean bufferDeleteTerms(Term[] terms) throws IOException {
  waitReady(null);
  for (Term term : terms) {
    addDeleteTerm(term, numDocsInRAM);
  }
  return timeToFlushDeletes();
}

/** Buffers one delete term; returns true if it is time to flush. */
synchronized boolean bufferDeleteTerm(Term term) throws IOException {
  waitReady(null);
  addDeleteTerm(term, numDocsInRAM);
  return timeToFlushDeletes();
}

/** Buffers several delete queries; returns true if it is time to flush. */
synchronized boolean bufferDeleteQueries(Query[] queries) throws IOException {
  waitReady(null);
  for (Query query : queries) {
    addDeleteQuery(query, numDocsInRAM);
  }
  return timeToFlushDeletes();
}

/** Buffers one delete query; returns true if it is time to flush. */
synchronized boolean bufferDeleteQuery(Query query) throws IOException {
  waitReady(null);
  addDeleteQuery(query, numDocsInRAM);
  return timeToFlushDeletes();
}
// True when buffered deletes (counting postings RAM too) have
// hit either the RAM budget or the delete-term count budget.
synchronized boolean deletesFull() {
  return (ramBufferSize != IndexWriter.DISABLE_AUTO_FLUSH &&
          (deletesInRAM.bytesUsed + deletesFlushed.bytesUsed + numBytesUsed) >= ramBufferSize) ||
    (maxBufferedDeleteTerms != IndexWriter.DISABLE_AUTO_FLUSH &&
     ((deletesInRAM.size() + deletesFlushed.size()) >= maxBufferedDeleteTerms));
}

synchronized boolean doApplyDeletes() {
  // Very similar to deletesFull(), except we don't count
  // numBytesAlloc, because we are checking whether
  // deletes (alone) are consuming too many resources now
  // and thus should be applied.  We apply deletes if RAM
  // usage is > 1/2 of our allowed RAM buffer, to prevent
  // too-frequent flushing of a long tail of tiny segments
  // when merges (which always apply deletes) are
  // infrequent.
  return (ramBufferSize != IndexWriter.DISABLE_AUTO_FLUSH &&
          (deletesInRAM.bytesUsed + deletesFlushed.bytesUsed) >= ramBufferSize/2) ||
    (maxBufferedDeleteTerms != IndexWriter.DISABLE_AUTO_FLUSH &&
     ((deletesInRAM.size() + deletesFlushed.size()) >= maxBufferedDeleteTerms));
}

// True when a flush should start now AND this caller won the
// race to claim it (setFlushPending's return).
synchronized private boolean timeToFlushDeletes() {
  return (bufferIsFull || deletesFull()) && setFlushPending();
}

void setMaxBufferedDeleteTerms(int maxBufferedDeleteTerms) {
  this.maxBufferedDeleteTerms = maxBufferedDeleteTerms;
}

int getMaxBufferedDeleteTerms() {
  return maxBufferedDeleteTerms;
}

// Any deletes in the flushed generation waiting to be applied?
synchronized boolean hasDeletes() {
  return deletesFlushed.any();
}
/** Applies all flushed buffered deletes (terms, docIDs and
 *  queries) to every segment in {@code infos}, then clears the
 *  flushed generation.  Returns true if any doc was deleted.
 *  Asserts all segments live in this writer's directory. */
synchronized boolean applyDeletes(SegmentInfos infos) throws IOException {

  if (!hasDeletes())
    return false;

  if (infoStream != null)
    // NOTE(review): removed a stray unary "+" that preceded
    // infos.size() here; it was a no-op on an int, but read
    // like a typo.  Message text is unchanged.
    message("apply " + deletesFlushed.numTerms + " buffered deleted terms and " +
            deletesFlushed.docIDs.size() + " deleted docIDs and " +
            deletesFlushed.queries.size() + " deleted queries on " +
            infos.size() + " segments.");

  final int infosEnd = infos.size();

  int docStart = 0;          // running docID base across segments
  boolean any = false;
  for (int i = 0; i < infosEnd; i++) {

    // Make sure we never attempt to apply deletes to
    // segment in external dir
    assert infos.info(i).dir == directory;

    SegmentReader reader = writer.readerPool.get(infos.info(i), false);
    try {
      any |= applyDeletes(reader, docStart);
      docStart += reader.maxDoc();
    } finally {
      // Always return the reader to the pool
      writer.readerPool.release(reader);
    }
  }

  deletesFlushed.clear();

  return any;
}
// used only by assert
private Term lastDeleteTerm;

// used only by assert: verifies delete terms are visited in
// strictly increasing order (LUCENE-2086 expects a sorted map).
// Pass null to reset the tracker.  Always returns true so it
// can live inside an assert.
private boolean checkDeleteTerm(Term term) {
  if (term != null) {
    assert lastDeleteTerm == null || term.compareTo(lastDeleteTerm) > 0: "lastTerm=" + lastDeleteTerm + " vs term=" + term;
  }
  lastDeleteTerm = term;
  return true;
}
// Apply buffered delete terms, queries and docIDs to the
// provided reader.  docIDStart is this segment's base in the
// global docID space; each buffered delete carries a "limit"
// docID and only applies to docs added before it.
private final synchronized boolean applyDeletes(IndexReader reader, int docIDStart)
  throws CorruptIndexException, IOException {

  final int docEnd = docIDStart + reader.maxDoc();
  boolean any = false;

  assert checkDeleteTerm(null);

  // Delete by term
  TermDocs docs = reader.termDocs();
  try {
    for (Entry<Term, BufferedDeletes.Num> entry: deletesFlushed.terms.entrySet()) {
      Term term = entry.getKey();
      // LUCENE-2086: we should be iterating a TreeMap,
      // here, so terms better be in order:
      assert checkDeleteTerm(term);
      docs.seek(term);
      int limit = entry.getValue().getNum();
      while (docs.next()) {
        int docID = docs.doc();
        // Only delete docs that existed when the delete was buffered:
        if (docIDStart+docID >= limit)
          break;
        reader.deleteDocument(docID);
        any = true;
      }
    }
  } finally {
    docs.close();
  }

  // Delete by docID
  for (Integer docIdInt : deletesFlushed.docIDs) {
    int docID = docIdInt.intValue();
    if (docID >= docIDStart && docID < docEnd) {
      reader.deleteDocument(docID-docIDStart);
      any = true;
    }
  }

  // Delete by query
  IndexSearcher searcher = new IndexSearcher(reader);
  for (Entry<Query, Integer> entry : deletesFlushed.queries.entrySet()) {
    Query query = entry.getKey();
    int limit = entry.getValue().intValue();
    Weight weight = query.weight(searcher);
    Scorer scorer = weight.scorer(reader, true, false);
    if (scorer != null) {
      while(true) {
        int doc = scorer.nextDoc();
        // long cast: when the scorer is exhausted, doc can be a
        // sentinel large int; avoid int overflow in the sum
        if (((long) docIDStart) + doc >= limit)
          break;
        reader.deleteDocument(doc);
        any = true;
      }
    }
  }
  searcher.close();
  return any;
}
// Buffer a term in bufferedDeleteTerms, which records the
// current number of documents buffered in ram so that the
// delete term will be applied to those documents as well
// as the disk segments.
synchronized private void addDeleteTerm(Term term, int docCount) {
  BufferedDeletes.Num num = deletesInRAM.terms.get(term);
  // Limit is a global docID: docs flushed so far plus docCount
  final int docIDUpto = flushedDocCount + docCount;
  if (num == null)
    deletesInRAM.terms.put(term, new BufferedDeletes.Num(docIDUpto));
  else
    num.setNum(docIDUpto);
  deletesInRAM.numTerms++;

  deletesInRAM.addBytesUsed(BYTES_PER_DEL_TERM + term.text.length()*CHAR_NUM_BYTE);
}

// Buffer a specific docID for deletion.  Currently only
// used when we hit a exception when adding a document
synchronized private void addDeleteDocID(int docID) {
  deletesInRAM.docIDs.add(Integer.valueOf(flushedDocCount+docID));
  deletesInRAM.addBytesUsed(BYTES_PER_DEL_DOCID);
}

// Buffer a delete-by-query; the recorded int is the global
// docID limit the query applies up to.
synchronized private void addDeleteQuery(Query query, int docID) {
  deletesInRAM.queries.put(query, Integer.valueOf(flushedDocCount + docID));
  deletesInRAM.addBytesUsed(BYTES_PER_DEL_QUERY);
}
/** True when balanceRAM() should run: RAM-based flushing is
 *  enabled, the buffer is not already marked full, and either
 *  used bytes (including both delete generations) reached the
 *  budget or allocations reached the free trigger. */
synchronized boolean doBalanceRAM() {
  if (ramBufferSize == IndexWriter.DISABLE_AUTO_FLUSH || bufferIsFull) {
    return false;
  }
  final long deletesBytes = deletesInRAM.bytesUsed + deletesFlushed.bytesUsed;
  return numBytesUsed + deletesBytes >= ramBufferSize || numBytesAlloc >= freeTrigger;
}
/** Does the synchronized work to finish/flush the
 *  inverted document.  Enqueues the per-doc writer in the
 *  docID-ordered wait queue, pausing this thread if the
 *  queue holds too many bytes. */
private void finishDocument(DocumentsWriterThreadState perThread, DocWriter docWriter) throws IOException {

  if (doBalanceRAM())
    // Must call this w/o holding synchronized(this) else
    // we'll hit deadlock:
    balanceRAM();

  synchronized(this) {

    assert docWriter == null || docWriter.docID == perThread.docState.docID;

    if (aborting) {

      // We are currently aborting, and another thread is
      // waiting for me to become idle.  We just forcefully
      // idle this threadState; it will be fully reset by
      // abort()
      if (docWriter != null)
        try {
          docWriter.abort();
        } catch (Throwable t) {
        }

      perThread.isIdle = true;
      notifyAll();
      return;
    }

    final boolean doPause;

    if (docWriter != null)
      doPause = waitQueue.add(docWriter);
    else {
      // No output for this doc: fill its slot with the shared
      // no-op writer so docID ordering is preserved.
      skipDocWriter.docID = perThread.docState.docID;
      doPause = waitQueue.add(skipDocWriter);
    }

    if (doPause)
      waitForWaitQueue();

    // Claim the flush if the buffer filled while we worked:
    if (bufferIsFull && !flushPending) {
      flushPending = true;
      perThread.doFlushAfter = true;
    }

    perThread.isIdle = true;
    notifyAll();
  }
}
// Blocks until the wait queue drains below its resume
// threshold.  do/while: we always wait at least once —
// finishDocument() only calls this after add() asked for a
// pause.
synchronized void waitForWaitQueue() {
  do {
    try {
      wait();
    } catch (InterruptedException ie) {
      throw new ThreadInterruptedException(ie);
    }
  } while (!waitQueue.doResume());
}
/** No-op DocWriter used to occupy a docID slot in the wait
 *  queue when a document produced no output or failed. */
private static class SkipDocWriter extends DocWriter {
  @Override
  void finish() {
    // nothing to write
  }
  @Override
  void abort() {
    // nothing to discard
  }
  @Override
  long sizeInBytes() {
    return 0;
  }
}
// Shared instance; its docID field is set before each use
// (callers hold this DocumentsWriter's lock when they do).
final SkipDocWriter skipDocWriter = new SkipDocWriter();
// Total RAM in use: postings buffers plus both delete generations.
long getRAMUsed() {
  return numBytesUsed + deletesInRAM.bytesUsed + deletesFlushed.bytesUsed;
}

long numBytesAlloc;  // bytes allocated from the block pools (>= numBytesUsed)
long numBytesUsed;   // bytes actively in use (asserted <= numBytesAlloc)

NumberFormat nf = NumberFormat.getInstance();

// Coarse estimates used to measure RAM usage of buffered deletes
final static int OBJECT_HEADER_BYTES = 8;
final static int POINTER_NUM_BYTE = Constants.JRE_IS_64BIT ? 8 : 4;
final static int INT_NUM_BYTE = 4;
final static int CHAR_NUM_BYTE = 2;
/* Rough logic: HashMap has an array[Entry] w/ varying
   load factor (say 2 * POINTER).  Entry is object w/ Term
   key, BufferedDeletes.Num val, int hash, Entry next
   (OBJ_HEADER + 3*POINTER + INT).  Term is object w/
   String field and String text (OBJ_HEADER + 2*POINTER).
   We don't count Term's field since it's interned.
   Term's text is String (OBJ_HEADER + 4*INT + POINTER +
   OBJ_HEADER + string.length*CHAR).  BufferedDeletes.num is
   OBJ_HEADER + INT. */
final static int BYTES_PER_DEL_TERM = 8*POINTER_NUM_BYTE + 5*OBJECT_HEADER_BYTES + 6*INT_NUM_BYTE;

/* Rough logic: del docIDs are List<Integer>.  Say list
   allocates ~2X size (2*POINTER).  Integer is OBJ_HEADER
   + int */
final static int BYTES_PER_DEL_DOCID = 2*POINTER_NUM_BYTE + OBJECT_HEADER_BYTES + INT_NUM_BYTE;

/* Rough logic: HashMap has an array[Entry] w/ varying
   load factor (say 2 * POINTER).  Entry is object w/
   Query key, Integer val, int hash, Entry next
   (OBJ_HEADER + 3*POINTER + INT).  Query we often
   undercount (say 24 bytes).  Integer is OBJ_HEADER + INT. */
final static int BYTES_PER_DEL_QUERY = 5*POINTER_NUM_BYTE + 2*OBJECT_HEADER_BYTES + 2*INT_NUM_BYTE + 24;

/* Initial chunks size of the shared byte[] blocks used to
   store postings data */
final static int BYTE_BLOCK_SHIFT = 15;
final static int BYTE_BLOCK_SIZE = 1 << BYTE_BLOCK_SHIFT;
final static int BYTE_BLOCK_MASK = BYTE_BLOCK_SIZE - 1;
final static int BYTE_BLOCK_NOT_MASK = ~BYTE_BLOCK_MASK;
/** Pool of fixed-size byte[] blocks.  All pool operations lock
 *  the enclosing DocumentsWriter (not this allocator) so the
 *  shared RAM accounting fields stay consistent. */
private class ByteBlockAllocator extends ByteBlockPool.Allocator {
  final int blockSize;  // every block handed out is exactly this size

  ByteBlockAllocator(int blockSize) {
    this.blockSize = blockSize;
  }

  ArrayList<byte[]> freeByteBlocks = new ArrayList<byte[]>();

  /* Allocate another byte[] from the shared pool */
  @Override
  byte[] getByteBlock(boolean trackAllocations) {
    synchronized(DocumentsWriter.this) {
      final int size = freeByteBlocks.size();
      final byte[] b;
      if (0 == size) {
        // Always record a block allocated, even if
        // trackAllocations is false.  This is necessary
        // because this block will be shared between
        // things that don't track allocations (term
        // vectors) and things that do (freq/prox
        // postings).
        numBytesAlloc += blockSize;
        b = new byte[blockSize];
      } else
        b = freeByteBlocks.remove(size-1);
      if (trackAllocations)
        numBytesUsed += blockSize;
      assert numBytesUsed <= numBytesAlloc;
      return b;
    }
  }

  /* Return byte[]'s to the pool */
  @Override
  void recycleByteBlocks(byte[][] blocks, int start, int end) {
    synchronized(DocumentsWriter.this) {
      for(int i=start;i<end;i++) {
        freeByteBlocks.add(blocks[i]);
        // null the slot so the caller's array drops its reference
        blocks[i] = null;
      }
    }
  }

  @Override
  void recycleByteBlocks(List<byte[]> blocks) {
    synchronized(DocumentsWriter.this) {
      final int size = blocks.size();
      for(int i=0;i<size;i++)
        freeByteBlocks.add(blocks.get(i));
    }
  }
}
/* Initial chunks size of the shared int[] blocks used to
   store postings data */
final static int INT_BLOCK_SHIFT = 13;
final static int INT_BLOCK_SIZE = 1 << INT_BLOCK_SHIFT;
final static int INT_BLOCK_MASK = INT_BLOCK_SIZE - 1;

private ArrayList<int[]> freeIntBlocks = new ArrayList<int[]>();

/* Allocate another int[] from the shared pool */
synchronized int[] getIntBlock(boolean trackAllocations) {
  final int size = freeIntBlocks.size();
  final int[] b;
  if (0 == size) {
    // Always record a block allocated, even if
    // trackAllocations is false.  This is necessary
    // because this block will be shared between
    // things that don't track allocations (term
    // vectors) and things that do (freq/prox
    // postings).
    numBytesAlloc += INT_BLOCK_SIZE*INT_NUM_BYTE;
    b = new int[INT_BLOCK_SIZE];
  } else
    b = freeIntBlocks.remove(size-1);
  if (trackAllocations)
    numBytesUsed += INT_BLOCK_SIZE*INT_NUM_BYTE;
  assert numBytesUsed <= numBytesAlloc;
  return b;
}

// Externally reported allocations fold into the shared counters:
synchronized void bytesAllocated(long numBytes) {
  numBytesAlloc += numBytes;
}

synchronized void bytesUsed(long numBytes) {
  numBytesUsed += numBytes;
  assert numBytesUsed <= numBytesAlloc;
}

/* Return int[]s to the pool */
synchronized void recycleIntBlocks(int[][] blocks, int start, int end) {
  for(int i=start;i<end;i++) {
    freeIntBlocks.add(blocks[i]);
    // null the slot so the caller's array drops its reference
    blocks[i] = null;
  }
}
ByteBlockAllocator byteBlockAllocator = new ByteBlockAllocator(BYTE_BLOCK_SIZE);
final static int PER_DOC_BLOCK_SIZE = 1024;
final ByteBlockAllocator perDocAllocator = new ByteBlockAllocator(PER_DOC_BLOCK_SIZE);
/* Initial chunk size of the shared char[] blocks used to
store term text */
final static int CHAR_BLOCK_SHIFT = 14;
final static int CHAR_BLOCK_SIZE = 1 << CHAR_BLOCK_SHIFT;
final static int CHAR_BLOCK_MASK = CHAR_BLOCK_SIZE - 1;
final static int MAX_TERM_LENGTH = CHAR_BLOCK_SIZE-1;
private ArrayList<char[]> freeCharBlocks = new ArrayList<char[]>();
/* Allocate another char[] from the shared pool */
synchronized char[] getCharBlock() {
  final int numFree = freeCharBlocks.size();
  final char[] block;
  if (numFree > 0) {
    // Reuse the most recently recycled block.
    block = freeCharBlocks.remove(numFree-1);
  } else {
    numBytesAlloc += CHAR_BLOCK_SIZE * CHAR_NUM_BYTE;
    block = new char[CHAR_BLOCK_SIZE];
  }
  // We always track allocations of char blocks, for now,
  // because nothing that skips allocation tracking
  // (currently only term vectors) uses its own char
  // blocks.
  numBytesUsed += CHAR_BLOCK_SIZE * CHAR_NUM_BYTE;
  assert numBytesUsed <= numBytesAlloc;
  return block;
}
/* Return char[]s to the pool */
synchronized void recycleCharBlocks(char[][] blocks, int numBlocks) {
  int i = 0;
  while (i < numBlocks) {
    freeCharBlocks.add(blocks[i]);
    // Drop the caller's reference so the block is owned only by the pool.
    blocks[i] = null;
    i++;
  }
}
/** Formats a byte count as megabytes, using the shared number formatter. */
String toMB(long v) {
  final double megabytes = v/1024./1024.;
  return nf.format(megabytes);
}
/* We have four pools of RAM: Postings, byte blocks
* (holds freq/prox posting data), char blocks (holds
* characters in the term) and per-doc buffers (stored fields/term vectors).
* Different docs require varying amount of storage from
* these four classes.
*
* For example, docs with many unique single-occurrence
* short terms will use up the Postings RAM and hardly any
* of the other two. Whereas docs with very large terms
* will use alot of char blocks RAM and relatively less of
* the other two. This method just frees allocations from
* the pools once we are over-budget, which balances the
* pools to match the current docs. */
void balanceRAM() {
// We flush when we've used our target usage
final long flushTrigger = ramBufferSize;
final long deletesRAMUsed = deletesInRAM.bytesUsed+deletesFlushed.bytesUsed;
// Over the free trigger: release recycled blocks back to the GC until we
// drop below freeLevel, or run out of things to free (then we must flush).
if (numBytesAlloc+deletesRAMUsed > freeTrigger) {
if (infoStream != null)
message(" RAM: now balance allocations: usedMB=" + toMB(numBytesUsed) +
" vs trigger=" + toMB(flushTrigger) +
" allocMB=" + toMB(numBytesAlloc) +
" deletesMB=" + toMB(deletesRAMUsed) +
" vs trigger=" + toMB(freeTrigger) +
" byteBlockFree=" + toMB(byteBlockAllocator.freeByteBlocks.size()*BYTE_BLOCK_SIZE) +
" perDocFree=" + toMB(perDocAllocator.freeByteBlocks.size()*PER_DOC_BLOCK_SIZE) +
" charBlockFree=" + toMB(freeCharBlocks.size()*CHAR_BLOCK_SIZE*CHAR_NUM_BYTE));
final long startBytesAlloc = numBytesAlloc + deletesRAMUsed;
int iter = 0;
// We free equally from each pool in 32 KB
// chunks until we are below our threshold
// (freeLevel)
boolean any = true;
while(numBytesAlloc+deletesRAMUsed > freeLevel) {
synchronized(this) {
// All pools empty and the consumer freed nothing last round:
// nothing more can be released without flushing.
if (0 == perDocAllocator.freeByteBlocks.size()
&& 0 == byteBlockAllocator.freeByteBlocks.size()
&& 0 == freeCharBlocks.size()
&& 0 == freeIntBlocks.size()
&& !any) {
// Nothing else to free -- must flush now.
bufferIsFull = numBytesUsed+deletesRAMUsed > flushTrigger;
if (infoStream != null) {
if (bufferIsFull)
message(" nothing to free; now set bufferIsFull");
else
message(" nothing to free");
}
assert numBytesUsed <= numBytesAlloc;
break;
}
// Round-robin over the pools: iter % 5 selects which pool is
// trimmed this iteration, so each is freed at roughly the same rate.
if ((0 == iter % 5) && byteBlockAllocator.freeByteBlocks.size() > 0) {
byteBlockAllocator.freeByteBlocks.remove(byteBlockAllocator.freeByteBlocks.size()-1);
numBytesAlloc -= BYTE_BLOCK_SIZE;
}
if ((1 == iter % 5) && freeCharBlocks.size() > 0) {
freeCharBlocks.remove(freeCharBlocks.size()-1);
numBytesAlloc -= CHAR_BLOCK_SIZE * CHAR_NUM_BYTE;
}
if ((2 == iter % 5) && freeIntBlocks.size() > 0) {
freeIntBlocks.remove(freeIntBlocks.size()-1);
numBytesAlloc -= INT_BLOCK_SIZE * INT_NUM_BYTE;
}
if ((3 == iter % 5) && perDocAllocator.freeByteBlocks.size() > 0) {
// Remove upwards of 32 blocks (each block is 1K)
for (int i = 0; i < 32; ++i) {
perDocAllocator.freeByteBlocks.remove(perDocAllocator.freeByteBlocks.size() - 1);
numBytesAlloc -= PER_DOC_BLOCK_SIZE;
if (perDocAllocator.freeByteBlocks.size() == 0) {
break;
}
}
}
}
// NOTE: done outside the synchronized block above.
if ((4 == iter % 5) && any)
// Ask consumer to free any recycled state
any = consumer.freeRAM();
iter++;
}
if (infoStream != null)
message(" after free: freedMB=" + nf.format((startBytesAlloc-numBytesAlloc-deletesRAMUsed)/1024./1024.) + " usedMB=" + nf.format((numBytesUsed+deletesRAMUsed)/1024./1024.) + " allocMB=" + nf.format(numBytesAlloc/1024./1024.));
} else {
// If we have not crossed the 100% mark, but have
// crossed the 95% mark of RAM we are actually
// using, go ahead and flush. This prevents
// over-allocating and then freeing, with every
// flush.
synchronized(this) {
if (numBytesUsed+deletesRAMUsed > flushTrigger) {
if (infoStream != null)
message(" RAM: now flush @ usedMB=" + nf.format(numBytesUsed/1024./1024.) +
" allocMB=" + nf.format(numBytesAlloc/1024./1024.) +
" deletesMB=" + nf.format(deletesRAMUsed/1024./1024.) +
" triggerMB=" + nf.format(flushTrigger/1024./1024.));
bufferIsFull = true;
}
}
}
}
// Reorders documents that finish indexing out of order back into docID
// order before they are written.
final WaitQueue waitQueue = new WaitQueue();
private class WaitQueue {
// Circular buffer of docs that finished out of order; slot is relative
// to nextWriteLoc.
DocWriter[] waiting;
// docID of the next document that must be written.
int nextWriteDocID;
// Slot in "waiting" corresponding to nextWriteDocID.
int nextWriteLoc;
// Number of non-null entries currently parked in "waiting".
int numWaiting;
// Total bytes held by parked docs; drives doPause()/doResume().
long waitingBytes;
public WaitQueue() {
waiting = new DocWriter[10];
}
synchronized void reset() {
// NOTE: nextWriteLoc doesn't need to be reset
assert numWaiting == 0;
assert waitingBytes == 0;
nextWriteDocID = 0;
}
// True once enough queued bytes have drained that paused threads may resume.
synchronized boolean doResume() {
return waitingBytes <= waitQueueResumeBytes;
}
// True when the queue holds too many bytes and producers should pause.
synchronized boolean doPause() {
return waitingBytes > waitQueuePauseBytes;
}
// Aborts and discards every parked doc, leaving the queue empty.
synchronized void abort() {
int count = 0;
for(int i=0;i<waiting.length;i++) {
final DocWriter doc = waiting[i];
if (doc != null) {
doc.abort();
waiting[i] = null;
count++;
}
}
waitingBytes = 0;
assert count == numWaiting;
numWaiting = 0;
}
// Writes one doc (must be next in docID order) and advances the write
// cursor, wrapping around the ring; marks us aborting if finish() throws.
private void writeDocument(DocWriter doc) throws IOException {
assert doc == skipDocWriter || nextWriteDocID == doc.docID;
boolean success = false;
try {
doc.finish();
nextWriteDocID++;
numDocsInStore++;
nextWriteLoc++;
assert nextWriteLoc <= waiting.length;
if (nextWriteLoc == waiting.length)
nextWriteLoc = 0;
success = true;
} finally {
if (!success)
setAborting();
}
}
// Adds a finished doc. If it is the next one in docID order it is written
// immediately along with any queued successors; otherwise it is parked in
// the ring buffer (grown if the docID gap does not fit). Returns
// doPause() so the caller knows whether to throttle.
synchronized public boolean add(DocWriter doc) throws IOException {
assert doc.docID >= nextWriteDocID;
if (doc.docID == nextWriteDocID) {
writeDocument(doc);
// Drain any successors that were parked waiting on this doc:
while(true) {
doc = waiting[nextWriteLoc];
if (doc != null) {
numWaiting--;
waiting[nextWriteLoc] = null;
waitingBytes -= doc.sizeInBytes();
writeDocument(doc);
} else
break;
}
} else {
// I finished before documents that were added
// before me. This can easily happen when I am a
// small doc and the docs before me were large, or,
// just due to luck in the thread scheduling. Just
// add myself to the queue and when that large doc
// finishes, it will flush me:
int gap = doc.docID - nextWriteDocID;
if (gap >= waiting.length) {
// Grow queue
DocWriter[] newArray = new DocWriter[ArrayUtil.getNextSize(gap)];
assert nextWriteLoc >= 0;
// Unwrap the circular buffer so nextWriteDocID maps to slot 0 of
// the new array.
System.arraycopy(waiting, nextWriteLoc, newArray, 0, waiting.length-nextWriteLoc);
System.arraycopy(waiting, 0, newArray, waiting.length-nextWriteLoc, nextWriteLoc);
nextWriteLoc = 0;
waiting = newArray;
gap = doc.docID - nextWriteDocID;
}
int loc = nextWriteLoc + gap;
if (loc >= waiting.length)
loc -= waiting.length;
// We should only wrap one time
assert loc < waiting.length;
// Nobody should be in my spot!
assert waiting[loc] == null;
waiting[loc] = doc;
numWaiting++;
waitingBytes += doc.sizeInBytes();
}
return doPause();
}
}
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/DocumentsWriter.java | Java | art | 52,592 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.Set;
/** <p>This class implements a {@link MergePolicy} that tries
* to merge segments into levels of exponentially
* increasing size, where each level has fewer segments than
* the value of the merge factor. Whenever extra segments
* (beyond the merge factor upper bound) are encountered,
* all segments within the level are merged. You can get or
* set the merge factor using {@link #getMergeFactor()} and
* {@link #setMergeFactor(int)} respectively.</p>
*
* <p>This class is abstract and requires a subclass to
* define the {@link #size} method which specifies how a
* segment's size is determined. {@link LogDocMergePolicy}
* is one subclass that measures size by document count in
* the segment. {@link LogByteSizeMergePolicy} is another
* subclass that measures size as the total byte size of the
* file(s) for the segment.</p>
*/
public abstract class LogMergePolicy extends MergePolicy {
/** Defines the allowed range of log(size) for each
* level. A level is computed by taking the max segment
* log size, minus LEVEL_LOG_SPAN, and finding all
* segments falling within that range. */
public static final double LEVEL_LOG_SPAN = 0.75;
/** Default merge factor, which is how many segments are
* merged at a time */
public static final int DEFAULT_MERGE_FACTOR = 10;
/** Default maximum segment size. A segment of this size
* or larger will never be merged. @see setMaxMergeDocs */
public static final int DEFAULT_MAX_MERGE_DOCS = Integer.MAX_VALUE;
private int mergeFactor = DEFAULT_MERGE_FACTOR;
// Size bounds used when selecting merges, expressed in the units of the
// subclass's size() measure (doc count or bytes); set by subclasses.
long minMergeSize;
long maxMergeSize;
int maxMergeDocs = DEFAULT_MAX_MERGE_DOCS;
/* TODO 3.0: change this default to true */
protected boolean calibrateSizeByDeletes = false;
private boolean useCompoundFile = true;
private boolean useCompoundDocStore = true;
public LogMergePolicy(IndexWriter writer) {
super(writer);
}
// True when the owning writer is set and has verbose logging enabled.
protected boolean verbose() {
return writer != null && writer.verbose();
}
// Emits a diagnostic line via the writer, prefixed "LMP:", when verbose.
private void message(String message) {
if (verbose())
writer.message("LMP: " + message);
}
/** <p>Returns the number of segments that are merged at
* once and also controls the total number of segments
* allowed to accumulate in the index.</p> */
public int getMergeFactor() {
return mergeFactor;
}
/** Determines how often segment indices are merged by
* addDocument(). With smaller values, less RAM is used
* while indexing, and searches on unoptimized indices are
* faster, but indexing speed is slower. With larger
* values, more RAM is used during indexing, and while
* searches on unoptimized indices are slower, indexing is
* faster. Thus larger values (> 10) are best for batch
* index creation, and smaller values (< 10) for indices
* that are interactively maintained.
* @throws IllegalArgumentException if mergeFactor is less than 2 */
public void setMergeFactor(int mergeFactor) {
if (mergeFactor < 2)
throw new IllegalArgumentException("mergeFactor cannot be less than 2");
this.mergeFactor = mergeFactor;
}
// Javadoc inherited
@Override
public boolean useCompoundFile(SegmentInfos infos, SegmentInfo info) {
return useCompoundFile;
}
/** Sets whether compound file format should be used for
* newly flushed and newly merged segments. */
public void setUseCompoundFile(boolean useCompoundFile) {
this.useCompoundFile = useCompoundFile;
}
/** Returns true if newly flushed and newly merge segments
* are written in compound file format. @see
* #setUseCompoundFile */
public boolean getUseCompoundFile() {
return useCompoundFile;
}
// Javadoc inherited
@Override
public boolean useCompoundDocStore(SegmentInfos infos) {
return useCompoundDocStore;
}
/** Sets whether compound file format should be used for
* newly flushed and newly merged doc store
* segment files (term vectors and stored fields). */
public void setUseCompoundDocStore(boolean useCompoundDocStore) {
this.useCompoundDocStore = useCompoundDocStore;
}
/** Returns true if newly flushed and newly merge doc
* store segment files (term vectors and stored fields)
* are written in compound file format. @see
* #setUseCompoundDocStore */
public boolean getUseCompoundDocStore() {
return useCompoundDocStore;
}
/** Sets whether the segment size should be calibrated by
* the number of deletes when choosing segments for merge. */
public void setCalibrateSizeByDeletes(boolean calibrateSizeByDeletes) {
this.calibrateSizeByDeletes = calibrateSizeByDeletes;
}
/** Returns true if the segment size should be calibrated
* by the number of deletes when choosing segments for merge. */
public boolean getCalibrateSizeByDeletes() {
return calibrateSizeByDeletes;
}
@Override
public void close() {}
/** Subclass hook: returns the size measure for one segment (e.g. doc
* count or byte size) used to assign segments to merge levels. */
abstract protected long size(SegmentInfo info) throws IOException;
/** Segment size measured in documents, optionally discounting deleted
* docs when calibrateSizeByDeletes is on. */
protected long sizeDocs(SegmentInfo info) throws IOException {
if (calibrateSizeByDeletes) {
int delCount = writer.numDeletedDocs(info);
return (info.docCount - (long)delCount);
} else {
return info.docCount;
}
}
/** Segment size measured in bytes, optionally scaled down by the ratio of
* deleted docs when calibrateSizeByDeletes is on. */
protected long sizeBytes(SegmentInfo info) throws IOException {
long byteSize = info.sizeInBytes();
if (calibrateSizeByDeletes) {
int delCount = writer.numDeletedDocs(info);
float delRatio = (info.docCount <= 0 ? 0.0f : ((float)delCount / (float)info.docCount));
return (info.docCount <= 0 ? byteSize : (long)(byteSize * (1.0f - delRatio)));
} else {
return byteSize;
}
}
// True when at most maxNumSegments of the segments in segmentsToOptimize
// remain and, when a single one remains, it is itself already optimized.
private boolean isOptimized(SegmentInfos infos, int maxNumSegments, Set<SegmentInfo> segmentsToOptimize) throws IOException {
final int numSegments = infos.size();
int numToOptimize = 0;
SegmentInfo optimizeInfo = null;
for(int i=0;i<numSegments && numToOptimize <= maxNumSegments;i++) {
final SegmentInfo info = infos.info(i);
if (segmentsToOptimize.contains(info)) {
numToOptimize++;
optimizeInfo = info;
}
}
return numToOptimize <= maxNumSegments &&
(numToOptimize != 1 || isOptimized(optimizeInfo));
}
/** Returns true if this single info is optimized (has no
* pending norms or deletes, is in the same dir as the
* writer, and matches the current compound file setting */
private boolean isOptimized(SegmentInfo info)
throws IOException {
boolean hasDeletions = writer.numDeletedDocs(info) > 0;
return !hasDeletions &&
!info.hasSeparateNorms() &&
info.dir == writer.getDirectory() &&
info.getUseCompoundFile() == useCompoundFile;
}
/** Returns the merges necessary to optimize the index.
* This merge policy defines "optimized" to mean only one
* segment in the index, where that segment has no
* deletions pending nor separate norms, and it is in
* compound file format if the current useCompoundFile
* setting is true. This method returns multiple merges
* (mergeFactor at a time) so the {@link MergeScheduler}
* in use may make use of concurrency. */
@Override
public MergeSpecification findMergesForOptimize(SegmentInfos infos,
int maxNumSegments, Set<SegmentInfo> segmentsToOptimize) throws IOException {
MergeSpecification spec;
assert maxNumSegments > 0;
if (!isOptimized(infos, maxNumSegments, segmentsToOptimize)) {
// Find the newest (rightmost) segment that needs to
// be optimized (other segments may have been flushed
// since optimize started):
int last = infos.size();
while(last > 0) {
final SegmentInfo info = infos.info(--last);
if (segmentsToOptimize.contains(info)) {
last++;
break;
}
}
if (last > 0) {
spec = new MergeSpecification();
// First, enroll all "full" merges (size
// mergeFactor) to potentially be run concurrently:
while (last - maxNumSegments + 1 >= mergeFactor) {
spec.add(new OneMerge(infos.range(last-mergeFactor, last), useCompoundFile));
last -= mergeFactor;
}
// Only if there are no full merges pending do we
// add a final partial (< mergeFactor segments) merge:
if (0 == spec.merges.size()) {
if (maxNumSegments == 1) {
// Since we must optimize down to 1 segment, the
// choice is simple:
if (last > 1 || !isOptimized(infos.info(0)))
spec.add(new OneMerge(infos.range(0, last), useCompoundFile));
} else if (last > maxNumSegments) {
// Take care to pick a partial merge that is
// least cost, but does not make the index too
// lopsided. If we always just picked the
// partial tail then we could produce a highly
// lopsided index over time:
// We must merge this many segments to leave
// maxNumSegments in the index (from when
// optimize was first kicked off):
final int finalMergeSize = last - maxNumSegments + 1;
// Consider all possible starting points:
long bestSize = 0;
int bestStart = 0;
for(int i=0;i<last-finalMergeSize+1;i++) {
long sumSize = 0;
for(int j=0;j<finalMergeSize;j++)
sumSize += size(infos.info(j+i));
// Prefer the cheapest window whose total is also less than
// twice the segment to its left (the lopsidedness guard):
if (i == 0 || (sumSize < 2*size(infos.info(i-1)) && sumSize < bestSize)) {
bestStart = i;
bestSize = sumSize;
}
}
spec.add(new OneMerge(infos.range(bestStart, bestStart+finalMergeSize), useCompoundFile));
}
}
} else
spec = null;
} else
spec = null;
return spec;
}
/**
* Finds merges necessary to expunge all deletes from the
* index. We simply merge adjacent segments that have
* deletes, up to mergeFactor at a time.
*/
@Override
public MergeSpecification findMergesToExpungeDeletes(SegmentInfos segmentInfos)
throws CorruptIndexException, IOException {
final int numSegments = segmentInfos.size();
if (verbose())
message("findMergesToExpungeDeletes: " + numSegments + " segments");
MergeSpecification spec = new MergeSpecification();
int firstSegmentWithDeletions = -1;
for(int i=0;i<numSegments;i++) {
final SegmentInfo info = segmentInfos.info(i);
int delCount = writer.numDeletedDocs(info);
if (delCount > 0) {
if (verbose())
message(" segment " + info.name + " has deletions");
if (firstSegmentWithDeletions == -1)
firstSegmentWithDeletions = i;
else if (i - firstSegmentWithDeletions == mergeFactor) {
// We've seen mergeFactor segments in a row with
// deletions, so force a merge now:
if (verbose())
message(" add merge " + firstSegmentWithDeletions + " to " + (i-1) + " inclusive");
spec.add(new OneMerge(segmentInfos.range(firstSegmentWithDeletions, i), useCompoundFile));
firstSegmentWithDeletions = i;
}
} else if (firstSegmentWithDeletions != -1) {
// End of a sequence of segments with deletions, so,
// merge those past segments even if it's fewer than
// mergeFactor segments
if (verbose())
message(" add merge " + firstSegmentWithDeletions + " to " + (i-1) + " inclusive");
spec.add(new OneMerge(segmentInfos.range(firstSegmentWithDeletions, i), useCompoundFile));
firstSegmentWithDeletions = -1;
}
}
// Close out a trailing run of segments with deletions, if any:
if (firstSegmentWithDeletions != -1) {
if (verbose())
message(" add merge " + firstSegmentWithDeletions + " to " + (numSegments-1) + " inclusive");
spec.add(new OneMerge(segmentInfos.range(firstSegmentWithDeletions, numSegments), useCompoundFile));
}
return spec;
}
/** Checks if any merges are now necessary and returns a
* {@link MergePolicy.MergeSpecification} if so. A merge
* is necessary when there are more than {@link
* #setMergeFactor} segments at a given level. When
* multiple levels have too many segments, this method
* will return multiple merges, allowing the {@link
* MergeScheduler} to use concurrency. */
@Override
public MergeSpecification findMerges(SegmentInfos infos) throws IOException {
final int numSegments = infos.size();
if (verbose())
message("findMerges: " + numSegments + " segments");
// Compute levels, which is just log (base mergeFactor)
// of the size of each segment
float[] levels = new float[numSegments];
final float norm = (float) Math.log(mergeFactor);
for(int i=0;i<numSegments;i++) {
final SegmentInfo info = infos.info(i);
long size = size(info);
// Floor tiny segments
if (size < 1)
size = 1;
levels[i] = (float) Math.log(size)/norm;
}
// Segments at or below levelFloor are all treated as one "min" level:
final float levelFloor;
if (minMergeSize <= 0)
levelFloor = (float) 0.0;
else
levelFloor = (float) (Math.log(minMergeSize)/norm);
// Now, we quantize the log values into levels. The
// first level is any segment whose log size is within
// LEVEL_LOG_SPAN of the max size, or, who has such as
// segment "to the right". Then, we find the max of all
// other segments and use that to define the next level
// segment, etc.
MergeSpecification spec = null;
int start = 0;
while(start < numSegments) {
// Find max level of all segments not already
// quantized.
float maxLevel = levels[start];
for(int i=1+start;i<numSegments;i++) {
final float level = levels[i];
if (level > maxLevel)
maxLevel = level;
}
// Now search backwards for the rightmost segment that
// falls into this level:
float levelBottom;
if (maxLevel < levelFloor)
// All remaining segments fall into the min level
levelBottom = -1.0F;
else {
levelBottom = (float) (maxLevel - LEVEL_LOG_SPAN);
// Force a boundary at the level floor
if (levelBottom < levelFloor && maxLevel >= levelFloor)
levelBottom = levelFloor;
}
int upto = numSegments-1;
while(upto >= start) {
if (levels[upto] >= levelBottom) {
break;
}
upto--;
}
if (verbose())
message(" level " + levelBottom + " to " + maxLevel + ": " + (1+upto-start) + " segments");
// Finally, record all merges that are viable at this level:
int end = start + mergeFactor;
while(end <= 1+upto) {
boolean anyTooLarge = false;
for(int i=start;i<end;i++) {
final SegmentInfo info = infos.info(i);
anyTooLarge |= (size(info) >= maxMergeSize || sizeDocs(info) >= maxMergeDocs);
}
if (!anyTooLarge) {
if (spec == null)
spec = new MergeSpecification();
if (verbose())
message(" " + start + " to " + end + ": add this merge");
spec.add(new OneMerge(infos.range(start, end), useCompoundFile));
} else if (verbose())
message(" " + start + " to " + end + ": contains segment over maxMergeSize or maxMergeDocs; skipping");
start = end;
end = start + mergeFactor;
}
start = 1+upto;
}
return spec;
}
/** <p>Determines the largest segment (measured by
* document count) that may be merged with other segments.
* Small values (e.g., less than 10,000) are best for
* interactive indexing, as this limits the length of
* pauses while indexing to a few seconds. Larger values
* are best for batched indexing and speedier
* searches.</p>
*
* <p>The default value is {@link Integer#MAX_VALUE}.</p>
*
* <p>The default merge policy ({@link
* LogByteSizeMergePolicy}) also allows you to set this
* limit by net size (in MB) of the segment, using {@link
* LogByteSizeMergePolicy#setMaxMergeMB}.</p>
*/
public void setMaxMergeDocs(int maxMergeDocs) {
this.maxMergeDocs = maxMergeDocs;
}
/** Returns the largest segment (measured by document
* count) that may be merged with other segments.
* @see #setMaxMergeDocs */
public int getMaxMergeDocs() {
return maxMergeDocs;
}
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/LogMergePolicy.java | Java | art | 17,209 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.lucene.index;
import java.io.IOException;
/**
* This exception is thrown when an {@link IndexReader}
* tries to make changes to the index (via {@link
* IndexReader#deleteDocument}, {@link
* IndexReader#undeleteAll} or {@link IndexReader#setNorm})
* but changes have already been committed to the index
* since this reader was instantiated. When this happens
* you must open a new reader on the current index to make
* the changes.
*/
public class StaleReaderException extends IOException {
  // Exceptions are Serializable; declare an explicit serialVersionUID so the
  // serialized form stays stable across compiler/JVM changes instead of
  // relying on the fragile computed default.
  private static final long serialVersionUID = 1L;

  /** Creates the exception with a detail message describing which reader
   * became stale and why. */
  public StaleReaderException(String message) {
    super(message);
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/StaleReaderException.java | Java | art | 1,399 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import org.apache.lucene.document.Fieldable;
/**
 * Per-field consumer in the inversion chain: notified of all {@link
 * Fieldable} occurrences of one field in a document, of each inverted
 * token produced for it, and of completion or abort.
 */
abstract class InvertedDocConsumerPerField {
/** Called once per field, and is given all Fieldable
 * occurrences for this field in the document. Return
 * true if you wish to see inverted tokens for these
 * fields. */
abstract boolean start(Fieldable[] fields, int count) throws IOException;
/** Called before a field instance is being processed. */
abstract void start(Fieldable field);
/** Called once per inverted token. */
abstract void add() throws IOException;
/** Called once per field per document, after all Fieldable
 * occurrences are inverted. */
abstract void finish() throws IOException;
/** Called on hitting an aborting exception. */
abstract void abort();
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/InvertedDocConsumerPerField.java | Java | art | 1,602 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.BufferedIndexInput;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.Lock;
import java.util.HashMap;
import java.io.IOException;
/**
* Class for accessing a compound stream.
* This class implements a directory, but is limited to only read operations.
* Directory methods that would normally modify data throw an exception.
*/
class CompoundFileReader extends Directory {
// Buffer size used for sub-file inputs unless overridden per openInput call.
private int readBufferSize;
// Offset and length of one sub-file within the compound stream.
private static final class FileEntry {
long offset;
long length;
}
// Base info
private Directory directory;
private String fileName;
// Stream over the whole compound file; each opened sub-file clones it.
private IndexInput stream;
// Maps sub-file id -> its slice of the compound stream.
private HashMap<String,FileEntry> entries = new HashMap<String,FileEntry>();
public CompoundFileReader(Directory dir, String name) throws IOException {
this(dir, name, BufferedIndexInput.BUFFER_SIZE);
}
/**
 * Opens the compound file and reads its table of contents: a VInt count
 * followed by (offset, id) pairs. Each entry's length is derived from the
 * next entry's offset (the last from the total stream length).
 * @throws IOException if the file cannot be opened or read
 */
public CompoundFileReader(Directory dir, String name, int readBufferSize)
throws IOException
{
directory = dir;
fileName = name;
this.readBufferSize = readBufferSize;
boolean success = false;
try {
stream = dir.openInput(name, readBufferSize);
// read the directory and init files
int count = stream.readVInt();
FileEntry entry = null;
for (int i=0; i<count; i++) {
long offset = stream.readLong();
String id = stream.readString();
if (entry != null) {
// set length of the previous entry
entry.length = offset - entry.offset;
}
entry = new FileEntry();
entry.offset = offset;
entries.put(id, entry);
}
// set the length of the final entry
if (entry != null) {
entry.length = stream.length() - entry.offset;
}
success = true;
} finally {
// Best-effort close on failure; swallow the secondary IOException so
// the original exception from the try block propagates.
if (! success && (stream != null)) {
try {
stream.close();
} catch (IOException e) { }
}
}
}
public Directory getDirectory() {
return directory;
}
public String getName() {
return fileName;
}
@Override
public synchronized void close() throws IOException {
if (stream == null)
throw new IOException("Already closed");
entries.clear();
stream.close();
stream = null;
}
@Override
public synchronized IndexInput openInput(String id)
throws IOException
{
// Default to readBufferSize passed in when we were opened
return openInput(id, readBufferSize);
}
@Override
public synchronized IndexInput openInput(String id, int readBufferSize)
throws IOException
{
if (stream == null)
throw new IOException("Stream closed");
FileEntry entry = entries.get(id);
if (entry == null)
throw new IOException("No sub-file with id " + id + " found");
return new CSIndexInput(stream, entry.offset, entry.length, readBufferSize);
}
/** Returns an array of strings, one for each file in the directory. */
@Override
public String[] listAll() {
String res[] = new String[entries.size()];
return entries.keySet().toArray(res);
}
/** Returns true iff a file with the given name exists. */
@Override
public boolean fileExists(String name) {
return entries.containsKey(name);
}
/** Returns the time the compound file was last modified. */
@Override
public long fileModified(String name) throws IOException {
// NOTE: ignores "name" -- always reports on the compound file itself.
return directory.fileModified(fileName);
}
/** Set the modified time of the compound file to now. */
@Override
public void touchFile(String name) throws IOException {
// NOTE: ignores "name" -- touches the compound file itself.
directory.touchFile(fileName);
}
/** Not implemented
* @throws UnsupportedOperationException */
@Override
public void deleteFile(String name)
{
throw new UnsupportedOperationException();
}
/** Not implemented
* @throws UnsupportedOperationException */
public void renameFile(String from, String to)
{
throw new UnsupportedOperationException();
}
/** Returns the length of a file in the directory.
* @throws IOException if the file does not exist */
@Override
public long fileLength(String name)
throws IOException
{
FileEntry e = entries.get(name);
if (e == null)
throw new IOException("File " + name + " does not exist");
return e.length;
}
/** Not implemented
* @throws UnsupportedOperationException */
@Override
public IndexOutput createOutput(String name)
{
throw new UnsupportedOperationException();
}
/** Not implemented
* @throws UnsupportedOperationException */
@Override
public Lock makeLock(String name)
{
throw new UnsupportedOperationException();
}
/** Implementation of an IndexInput that reads from a portion of the
* compound file. The visibility is left as "package" *only* because
* this helps with testing since JUnit test cases in a different class
* can then access package fields of this class.
*/
static final class CSIndexInput extends BufferedIndexInput {
IndexInput base;
long fileOffset;
long length;
CSIndexInput(final IndexInput base, final long fileOffset, final long length)
{
this(base, fileOffset, length, BufferedIndexInput.BUFFER_SIZE);
}
CSIndexInput(final IndexInput base, final long fileOffset, final long length, int readBufferSize)
{
super(readBufferSize);
// Clone so this slice owns its own file pointer, independent of the
// shared stream and of other open slices.
this.base = (IndexInput)base.clone();
this.fileOffset = fileOffset;
this.length = length;
}
@Override
public Object clone() {
CSIndexInput clone = (CSIndexInput)super.clone();
clone.base = (IndexInput)base.clone();
clone.fileOffset = fileOffset;
clone.length = length;
return clone;
}
/** Expert: implements buffer refill. Reads bytes from the current
* position in the input.
* @param b the array to read bytes into
* @param offset the offset in the array to start storing bytes
* @param len the number of bytes to read
* @throws IOException if the read would extend past this slice's length
*/
@Override
protected void readInternal(byte[] b, int offset, int len)
throws IOException
{
long start = getFilePointer();
// Bounds-check against the slice length, not the underlying file:
if(start + len > length)
throw new IOException("read past EOF");
base.seek(fileOffset + start);
base.readBytes(b, offset, len, false);
}
/** Expert: implements seek. Sets current position in this file, where
* the next {@link #readInternal(byte[],int,int)} will occur.
* @see #readInternal(byte[],int,int)
*/
@Override
protected void seekInternal(long pos) {}
/** Closes the stream to further operations. */
@Override
public void close() throws IOException {
base.close();
}
@Override
public long length() {
return length;
}
}
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/CompoundFileReader.java | Java | art | 8,367 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** Remaps docIDs after a merge has completed, where the
 * merged segments had at least one deletion. This is used
 * to renumber the buffered deletes in IndexWriter when a
 * merge of segments with deletions commits. */
final class MergeDocIDRemapper {
int[] starts; // used for binary search of mapped docID
int[] newStarts; // starts, minus the deletes
int[][] docMaps; // maps docIDs in the merged set; docMaps[i] may be null (segment i had no deletions)
int minDocID; // minimum docID that needs renumbering
int maxDocID; // 1+ the max docID that needs renumbering
int docShift; // total # deleted docs that were compacted by this merge
// Builds per-segment start offsets (old and post-merge) so remap()
// can binary-search a docID to its segment and renumber it.
public MergeDocIDRemapper(SegmentInfos infos, int[][] docMaps, int[] delCounts, MergePolicy.OneMerge merge, int mergedDocCount) {
this.docMaps = docMaps;
SegmentInfo firstSegment = merge.segments.info(0);
int i = 0;
// Sum doc counts of all segments before the merge's first segment;
// docIDs below that sum are untouched by the merge.
while(true) {
SegmentInfo info = infos.info(i);
if (info.equals(firstSegment))
break;
minDocID += info.docCount;
i++;
}
int numDocs = 0;
// Sum doc counts of the merged segments themselves; also sanity-check
// that they appear contiguously in infos.
for(int j=0;j<docMaps.length;i++,j++) {
numDocs += infos.info(i).docCount;
assert infos.info(i).equals(merge.segments.info(j));
}
maxDocID = minDocID + numDocs;
starts = new int[docMaps.length];
newStarts = new int[docMaps.length];
starts[0] = minDocID;
newStarts[0] = minDocID;
// starts[i]    = old first docID of merged segment i;
// newStarts[i] = same, after each prior segment's deletions compact away.
for(i=1;i<docMaps.length;i++) {
final int lastDocCount = merge.segments.info(i-1).docCount;
starts[i] = starts[i-1] + lastDocCount;
newStarts[i] = newStarts[i-1] + lastDocCount - delCounts[i-1];
}
docShift = numDocs - mergedDocCount;
// There are rare cases when docShift is 0. It happens
// if you try to delete a docID that's out of bounds,
// because the SegmentReader still allocates deletedDocs
// and pretends it has deletions ... so we can't make
// this assert here
// assert docShift > 0;
// Make sure it all adds up:
assert docShift == maxDocID - (newStarts[docMaps.length-1] + merge.segments.info(docMaps.length-1).docCount - delCounts[docMaps.length-1]);
}
/** Maps a pre-merge docID to its post-merge docID. */
public int remap(int oldDocID) {
if (oldDocID < minDocID)
// Unaffected by merge
return oldDocID;
else if (oldDocID >= maxDocID)
// This doc was "after" the merge, so simple shift
return oldDocID - docShift;
else {
// Binary search to locate this document & find its new docID
int lo = 0; // search starts array
int hi = docMaps.length - 1; // for first element less
while (hi >= lo) {
int mid = (lo + hi) >>> 1;
int midValue = starts[mid];
if (oldDocID < midValue)
hi = mid - 1;
else if (oldDocID > midValue)
lo = mid + 1;
else { // found a match
// Several segments can share the same start when intervening
// segments are empty; skip to the last one.
while (mid+1 < docMaps.length && starts[mid+1] == midValue) {
mid++; // scan to last match
}
// Null docMap means the segment had no deletions: plain offset shift.
if (docMaps[mid] != null)
return newStarts[mid] + docMaps[mid][oldDocID-starts[mid]];
else
return newStarts[mid] + oldDocID-starts[mid];
}
}
// Not an exact start match: hi is the segment containing oldDocID.
if (docMaps[hi] != null)
return newStarts[hi] + docMaps[hi][oldDocID-starts[hi]];
else
return newStarts[hi] + oldDocID-starts[hi];
}
}
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/MergeDocIDRemapper.java | Java | art | 4,392 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.document.Fieldable;
final class StoredFieldsWriterPerThread {
  final FieldsWriter localFieldsWriter;
  final StoredFieldsWriter storedFieldsWriter;
  final DocumentsWriter.DocState docState;

  // Buffered stored fields of the document currently being processed;
  // null until the first stored field of the doc is added.
  StoredFieldsWriter.PerDoc doc;

  public StoredFieldsWriterPerThread(DocumentsWriter.DocState docState, StoredFieldsWriter storedFieldsWriter) throws IOException {
    this.docState = docState;
    this.storedFieldsWriter = storedFieldsWriter;
    localFieldsWriter = new FieldsWriter((IndexOutput) null, (IndexOutput) null, storedFieldsWriter.fieldInfos);
  }

  /** Prepares this thread state for a new document. */
  public void startDocument() {
    if (doc == null) {
      return;
    }
    // A non-null doc here means the previous document hit a non-aborting
    // exception while its stored fields were being written into
    // localFieldsWriter; clear that leftover state and re-target the
    // buffer at the new document.
    doc.reset();
    doc.docID = docState.docID;
  }

  /** Buffers one stored field of the current document. */
  public void addField(Fieldable field, FieldInfo fieldInfo) throws IOException {
    if (doc == null) {
      // First stored field for this doc: grab a pooled PerDoc and point
      // the local FieldsWriter at its in-memory fdt buffer.
      doc = storedFieldsWriter.getPerDoc();
      doc.docID = docState.docID;
      localFieldsWriter.setFieldsStream(doc.fdt);
      assert doc.numStoredFields == 0: "doc.numStoredFields=" + doc.numStoredFields;
      assert 0 == doc.fdt.length();
      assert 0 == doc.fdt.getFilePointer();
    }
    localFieldsWriter.writeField(fieldInfo, field);
    assert docState.testPoint("StoredFieldsWriterPerThread.processFields.writeField");
    doc.numStoredFields++;
  }

  /** Detaches and returns the buffered fields for the current document;
   * null if the document had no stored fields. */
  public DocumentsWriter.DocWriter finishDocument() {
    final StoredFieldsWriter.PerDoc finished = doc;
    doc = null;
    return finished;
  }

  /** Discards any buffered state (PerDoc.abort also returns it to the pool). */
  public void abort() {
    if (doc != null) {
      doc.abort();
      doc = null;
    }
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/StoredFieldsWriterPerThread.java | Java | art | 2,649 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.document.Fieldable;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.util.UnicodeUtil;
/** Writes one field's term vectors for the current document into the
 * per-document tvf buffer (perThread.doc.perDocTvf); that buffer is
 * later flushed to the real term vector files. */
final class TermVectorsTermsWriterPerField extends TermsHashConsumerPerField {

  final TermVectorsTermsWriterPerThread perThread;
  final TermsHashPerField termsHashPerField;
  final TermVectorsTermsWriter termsWriter;
  final FieldInfo fieldInfo;
  final DocumentsWriter.DocState docState;
  final FieldInvertState fieldState;

  // Recomputed for every document in start(Fieldable[], int):
  boolean doVectors;          // store any term vectors for this field?
  boolean doVectorPositions;  // store positions (byte stream 0)?
  boolean doVectorOffsets;    // store offsets (byte stream 1)?

  int maxNumPostings;         // high-water mark of unique terms; consumed by shrinkHash()

  OffsetAttribute offsetAttribute = null;  // set in start(Fieldable) when offsets are stored

  public TermVectorsTermsWriterPerField(TermsHashPerField termsHashPerField, TermVectorsTermsWriterPerThread perThread, FieldInfo fieldInfo) {
    this.termsHashPerField = termsHashPerField;
    this.perThread = perThread;
    this.termsWriter = perThread.termsWriter;
    this.fieldInfo = fieldInfo;
    docState = termsHashPerField.docState;
    fieldState = termsHashPerField.fieldState;
  }

  /** Two byte streams per posting: stream 0 = positions, stream 1 = offsets. */
  @Override
  int getStreamCount() {
    return 2;
  }

  /** Recomputes the doVector* flags from the Fieldable flags and, if
   * vectors are enabled, sets up (or validates) the per-document state.
   * @return true if term vectors are enabled for this field in this doc */
  @Override
  boolean start(Fieldable[] fields, int count) {
    doVectors = false;
    doVectorPositions = false;
    doVectorOffsets = false;
    for(int i=0;i<count;i++) {
      Fieldable field = fields[i];
      if (field.isIndexed() && field.isTermVectorStored()) {
        doVectors = true;
        doVectorPositions |= field.isStorePositionWithTermVector();
        doVectorOffsets |= field.isStoreOffsetWithTermVector();
      }
    }
    if (doVectors) {
      if (perThread.doc == null) {
        perThread.doc = termsWriter.getPerDoc();
        perThread.doc.docID = docState.docID;
        assert perThread.doc.numVectorFields == 0;
        assert 0 == perThread.doc.perDocTvf.length();
        assert 0 == perThread.doc.perDocTvf.getFilePointer();
      } else {
        assert perThread.doc.docID == docState.docID;
        if (termsHashPerField.numPostings != 0)
          // Only necessary if previous doc hit a
          // non-aborting exception while writing vectors in
          // this field:
          termsHashPerField.reset();
      }
    }
    // TODO: only if needed for performance
    //perThread.postingsCount = 0;
    return doVectors;
  }

  public void abort() {}

  /** Called once per field per document if term vectors
   * are enabled, to write the vectors to
   * RAMOutputStream, which is then quickly flushed to
   * the real term vectors files in the Directory. */
  @Override
  void finish() throws IOException {
    assert docState.testPoint("TermVectorsTermsWriterPerField.finish start");
    final int numPostings = termsHashPerField.numPostings;
    assert numPostings >= 0;
    if (!doVectors || numPostings == 0)
      return;
    if (numPostings > maxNumPostings)
      maxNumPostings = numPostings;
    final IndexOutput tvf = perThread.doc.perDocTvf;
    // This is called once, after inverting all occurrences
    // of a given field in the doc. At this point we flush
    // our hash into the DocWriter.
    assert fieldInfo.storeTermVector;
    assert perThread.vectorFieldsInOrder(fieldInfo);
    perThread.doc.addField(termsHashPerField.fieldInfo.number);
    final RawPostingList[] postings = termsHashPerField.sortPostings();
    tvf.writeVInt(numPostings);
    byte bits = 0x0;
    if (doVectorPositions)
      bits |= TermVectorsReader.STORE_POSITIONS_WITH_TERMVECTOR;
    if (doVectorOffsets)
      bits |= TermVectorsReader.STORE_OFFSET_WITH_TERMVECTOR;
    tvf.writeByte(bits);
    int encoderUpto = 0;
    int lastTermBytesCount = 0;
    final ByteSliceReader reader = perThread.vectorSliceReader;
    final char[][] charBuffers = perThread.termsHashPerThread.charPool.buffers;
    for(int j=0;j<numPostings;j++) {
      final TermVectorsTermsWriter.PostingList posting = (TermVectorsTermsWriter.PostingList) postings[j];
      final int freq = posting.freq;
      final char[] text2 = charBuffers[posting.textStart >> DocumentsWriter.CHAR_BLOCK_SHIFT];
      final int start2 = posting.textStart & DocumentsWriter.CHAR_BLOCK_MASK;
      // We swap between two encoders to save copying
      // last Term's byte array
      final UnicodeUtil.UTF8Result utf8Result = perThread.utf8Results[encoderUpto];
      // TODO: we could do this incrementally
      UnicodeUtil.UTF16toUTF8(text2, start2, utf8Result);
      final int termBytesCount = utf8Result.length;
      // TODO: UTF16toUTF8 could tell us this prefix
      // Compute common prefix between last term and
      // this term (terms are written prefix-compressed:
      // shared-prefix length, suffix length, suffix bytes)
      int prefix = 0;
      if (j > 0) {
        final byte[] lastTermBytes = perThread.utf8Results[1-encoderUpto].result;
        final byte[] termBytes = perThread.utf8Results[encoderUpto].result;
        while(prefix < lastTermBytesCount && prefix < termBytesCount) {
          if (lastTermBytes[prefix] != termBytes[prefix])
            break;
          prefix++;
        }
      }
      encoderUpto = 1-encoderUpto;
      lastTermBytesCount = termBytesCount;
      final int suffix = termBytesCount - prefix;
      tvf.writeVInt(prefix);
      tvf.writeVInt(suffix);
      tvf.writeBytes(utf8Result.result, prefix, suffix);
      tvf.writeVInt(freq);
      if (doVectorPositions) {
        termsHashPerField.initReader(reader, posting, 0);
        reader.writeTo(tvf);
      }
      if (doVectorOffsets) {
        termsHashPerField.initReader(reader, posting, 1);
        reader.writeTo(tvf);
      }
    }
    termsHashPerField.reset();
    perThread.termsHashPerThread.reset(false);
  }

  /** Shrinks the postings hash back toward the largest size actually used. */
  void shrinkHash() {
    termsHashPerField.shrinkHash(maxNumPostings);
    maxNumPostings = 0;
  }

  @Override
  void start(Fieldable f) {
    if (doVectorOffsets) {
      offsetAttribute = fieldState.attributeSource.addAttribute(OffsetAttribute.class);
    } else {
      offsetAttribute = null;
    }
  }

  /** First occurrence of a term in this field: record freq=1 and, if
   * enabled, the absolute start offset plus offset/position deltas. */
  @Override
  void newTerm(RawPostingList p0) {
    assert docState.testPoint("TermVectorsTermsWriterPerField.newTerm start");
    TermVectorsTermsWriter.PostingList p = (TermVectorsTermsWriter.PostingList) p0;
    p.freq = 1;
    if (doVectorOffsets) {
      int startOffset = fieldState.offset + offsetAttribute.startOffset();
      int endOffset = fieldState.offset + offsetAttribute.endOffset();
      termsHashPerField.writeVInt(1, startOffset);
      termsHashPerField.writeVInt(1, endOffset - startOffset);
      p.lastOffset = endOffset;
    }
    if (doVectorPositions) {
      termsHashPerField.writeVInt(0, fieldState.position);
      p.lastPosition = fieldState.position;
    }
  }

  /** Subsequent occurrence of a term: bump freq and append
   * delta-encoded offsets/positions relative to the last occurrence. */
  @Override
  void addTerm(RawPostingList p0) {
    assert docState.testPoint("TermVectorsTermsWriterPerField.addTerm start");
    TermVectorsTermsWriter.PostingList p = (TermVectorsTermsWriter.PostingList) p0;
    p.freq++;
    if (doVectorOffsets) {
      int startOffset = fieldState.offset + offsetAttribute.startOffset();
      int endOffset = fieldState.offset + offsetAttribute.endOffset();
      termsHashPerField.writeVInt(1, startOffset - p.lastOffset);
      termsHashPerField.writeVInt(1, endOffset - startOffset);
      p.lastOffset = endOffset;
    }
    if (doVectorPositions) {
      termsHashPerField.writeVInt(0, fieldState.position - p.lastPosition);
      p.lastPosition = fieldState.position;
    }
  }

  @Override
  void skippingLongTerm() {}
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/TermVectorsTermsWriterPerField.java | Java | art | 8,283 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import org.apache.lucene.store.RAMOutputStream;
import org.apache.lucene.util.ArrayUtil;
/** Writes stored fields: buffers each document's fields in a PerDoc,
 * then appends them to the shared FieldsWriter in docID order. */
final class StoredFieldsWriter {
FieldsWriter fieldsWriter; // lazily created; writes the real .fdt/.fdx files
final DocumentsWriter docWriter;
final FieldInfos fieldInfos;
int lastDocID; // 1 + last docID appended to fieldsWriter
PerDoc[] docFreeList = new PerDoc[1]; // recycled PerDoc instances
int freeCount; // number of live entries in docFreeList
public StoredFieldsWriter(DocumentsWriter docWriter, FieldInfos fieldInfos) {
this.docWriter = docWriter;
this.fieldInfos = fieldInfos;
}
/** Creates the per-thread state that buffers one document at a time. */
public StoredFieldsWriterPerThread addThread(DocumentsWriter.DocState docState) throws IOException {
return new StoredFieldsWriterPerThread(docState, this);
}
/** Pads the index up to numDocsInStore and flushes the underlying writer. */
synchronized public void flush(SegmentWriteState state) throws IOException {
if (state.numDocsInStore > 0) {
// It's possible that all documents seen in this segment
// hit non-aborting exceptions, in which case we will
// not have yet init'd the FieldsWriter:
initFieldsWriter();
// Fill fdx file to include any final docs that we
// skipped because they hit non-aborting exceptions
fill(state.numDocsInStore - docWriter.getDocStoreOffset());
}
if (fieldsWriter != null)
fieldsWriter.flush();
}
// Creates fieldsWriter on first use (no-op if already created or if
// there is no doc store segment yet).
private void initFieldsWriter() throws IOException {
if (fieldsWriter == null) {
final String docStoreSegment = docWriter.getDocStoreSegment();
if (docStoreSegment != null) {
assert docStoreSegment != null;
fieldsWriter = new FieldsWriter(docWriter.directory,
docStoreSegment,
fieldInfos);
docWriter.addOpenFile(docStoreSegment + "." + IndexFileNames.FIELDS_EXTENSION);
docWriter.addOpenFile(docStoreSegment + "." + IndexFileNames.FIELDS_INDEX_EXTENSION);
lastDocID = 0;
}
}
}
/** Pads and closes the doc store files, records them as flushed, and
 * verifies the .fdx file has the expected length (4 header bytes plus
 * 8 bytes per doc). */
synchronized public void closeDocStore(SegmentWriteState state) throws IOException {
final int inc = state.numDocsInStore - lastDocID;
if (inc > 0) {
initFieldsWriter();
fill(state.numDocsInStore - docWriter.getDocStoreOffset());
}
if (fieldsWriter != null) {
fieldsWriter.close();
fieldsWriter = null;
lastDocID = 0;
assert state.docStoreSegmentName != null;
state.flushedFiles.add(state.docStoreSegmentName + "." + IndexFileNames.FIELDS_EXTENSION);
state.flushedFiles.add(state.docStoreSegmentName + "." + IndexFileNames.FIELDS_INDEX_EXTENSION);
state.docWriter.removeOpenFile(state.docStoreSegmentName + "." + IndexFileNames.FIELDS_EXTENSION);
state.docWriter.removeOpenFile(state.docStoreSegmentName + "." + IndexFileNames.FIELDS_INDEX_EXTENSION);
final String fileName = state.docStoreSegmentName + "." + IndexFileNames.FIELDS_INDEX_EXTENSION;
if (4+((long) state.numDocsInStore)*8 != state.directory.fileLength(fileName))
throw new RuntimeException("after flush: fdx size mismatch: " + state.numDocsInStore + " docs vs " + state.directory.fileLength(fileName) + " length in bytes of " + fileName + " file exists?=" + state.directory.fileExists(fileName));
}
}
int allocCount; // total PerDoc instances ever created
/** Returns a recycled PerDoc, or allocates a new one (growing the free
 * list up front so every outstanding instance can later be recycled). */
synchronized PerDoc getPerDoc() {
if (freeCount == 0) {
allocCount++;
if (allocCount > docFreeList.length) {
// Grow our free list up front to make sure we have
// enough space to recycle all outstanding PerDoc
// instances
assert allocCount == 1+docFreeList.length;
docFreeList = new PerDoc[ArrayUtil.getNextSize(allocCount)];
}
return new PerDoc();
} else
return docFreeList[--freeCount];
}
/** Closes and discards the underlying writer, swallowing close errors. */
synchronized void abort() {
if (fieldsWriter != null) {
try {
fieldsWriter.close();
} catch (Throwable t) {
}
fieldsWriter = null;
lastDocID = 0;
}
}
/** Fills in any hole in the docIDs */
void fill(int docID) throws IOException {
final int docStoreOffset = docWriter.getDocStoreOffset();
// We must "catch up" for all docs before us
// that had no stored fields:
final int end = docID+docStoreOffset;
while(lastDocID < end) {
fieldsWriter.skipDocument();
lastDocID++;
}
}
/** Appends one buffered document to the real FieldsWriter (padding any
 * skipped docIDs first), then recycles the PerDoc. */
synchronized void finishDocument(PerDoc perDoc) throws IOException {
assert docWriter.writer.testPoint("StoredFieldsWriter.finishDocument start");
initFieldsWriter();
fill(perDoc.docID);
// Append stored fields to the real FieldsWriter:
fieldsWriter.flushDocument(perDoc.numStoredFields, perDoc.fdt);
lastDocID++;
perDoc.reset();
free(perDoc);
assert docWriter.writer.testPoint("StoredFieldsWriter.finishDocument end");
}
public boolean freeRAM() {
return false;
}
// Returns a reset PerDoc to the free list.
synchronized void free(PerDoc perDoc) {
assert freeCount < docFreeList.length;
assert 0 == perDoc.numStoredFields;
assert 0 == perDoc.fdt.length();
assert 0 == perDoc.fdt.getFilePointer();
docFreeList[freeCount++] = perDoc;
}
/** In-memory buffer holding one document's stored fields until
 * finishDocument appends them to the real files. */
class PerDoc extends DocumentsWriter.DocWriter {
final DocumentsWriter.PerDocBuffer buffer = docWriter.newPerDocBuffer();
RAMOutputStream fdt = new RAMOutputStream(buffer);
int numStoredFields; // number of fields written into fdt
void reset() {
fdt.reset();
buffer.recycle();
numStoredFields = 0;
}
@Override
void abort() {
reset();
free(this);
}
@Override
public long sizeInBytes() {
return buffer.getSizeInBytes();
}
@Override
public void finish() throws IOException {
finishDocument(this);
}
}
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/StoredFieldsWriter.java | Java | art | 6,410 |
package org.apache.lucene.index;
/**
* Copyright 2007 The Apache Software Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.util.Comparator;
/**
 * Compares {@link org.apache.lucene.index.TermVectorEntry}s first by frequency
 * (highest first) and then by the term (case-sensitive); any remaining tie is
 * broken by field name.
 *
 **/
public class TermVectorEntryFreqSortedComparator implements Comparator<TermVectorEntry> {
  /**
   * Orders entries by descending frequency, then by term, then by field.
   *
   * @param entry the first entry
   * @param entry1 the second entry
   * @return a negative integer, zero, or a positive integer per the
   *         {@link Comparator#compare} contract
   */
  public int compare(TermVectorEntry entry, TermVectorEntry entry1) {
    final int freq = entry.getFrequency();
    final int freq1 = entry1.getFrequency();
    // Compare explicitly instead of "entry1.getFrequency() -
    // entry.getFrequency()": the subtraction can overflow for extreme
    // int values and then report the wrong sign.
    int result = freq1 < freq ? -1 : (freq1 > freq ? 1 : 0);
    if (result == 0)
    {
      result = entry.getTerm().compareTo(entry1.getTerm());
      if (result == 0)
      {
        result = entry.getField().compareTo(entry1.getField());
      }
    }
    return result;
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/TermVectorEntryFreqSortedComparator.java | Java | art | 1,273 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
/** Abstract API that consumes terms, doc, freq, prox and
 * payloads postings. Concrete implementations of this
 * actually do "something" with the postings (write it into
 * the index in a specific format).
 *
 * NOTE: this API is experimental and will likely change
 */
abstract class FormatPostingsFieldsConsumer {
/** Add a new field.
 * @param field the field whose postings will be written next
 * @return the consumer that receives this field's terms */
abstract FormatPostingsTermsConsumer addField(FieldInfo field) throws IOException;
/** Called when we are done adding everything. */
abstract void finish() throws IOException;
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/FormatPostingsFieldsConsumer.java | Java | art | 1,401 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** Pool of fixed-size int blocks obtained from (and recycled back to)
 * a DocumentsWriter. */
final class IntBlockPool {

  // Every block handed out so far; grown 1.5x on demand in nextBuffer().
  public int[][] buffers = new int[10][];

  int bufferUpto = -1; // index of the head buffer; -1 until the first nextBuffer()
  public int intUpto = DocumentsWriter.INT_BLOCK_SIZE; // next free slot in the head buffer (starts "full")
  public int[] buffer; // the head buffer
  public int intOffset = -DocumentsWriter.INT_BLOCK_SIZE; // absolute offset of the head buffer's first slot

  private final DocumentsWriter docWriter;
  final boolean trackAllocations;

  public IntBlockPool(DocumentsWriter docWriter, boolean trackAllocations) {
    this.docWriter = docWriter;
    this.trackAllocations = trackAllocations;
  }

  /** Returns all but the first block to the DocumentsWriter and rewinds
   * the pool to write into buffers[0] again; no-op if never used. */
  public void reset() {
    if (bufferUpto == -1) {
      return;
    }
    if (bufferUpto > 0) {
      // Recycle all but the first buffer
      docWriter.recycleIntBlocks(buffers, 1, 1 + bufferUpto);
    }
    // Reuse the first buffer
    bufferUpto = 0;
    intUpto = 0;
    intOffset = 0;
    buffer = buffers[0];
  }

  /** Installs a fresh int block as the head buffer, growing the buffers
   * array by 1.5x when it is full. */
  public void nextBuffer() {
    final int next = 1 + bufferUpto;
    if (next == buffers.length) {
      final int[][] grown = new int[(int) (buffers.length * 1.5)][];
      System.arraycopy(buffers, 0, grown, 0, buffers.length);
      buffers = grown;
    }
    buffer = buffers[next] = docWriter.getIntBlock(trackAllocations);
    bufferUpto = next;
    intUpto = 0;
    intOffset += DocumentsWriter.INT_BLOCK_SIZE;
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/IntBlockPool.java | Java | art | 2,207 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
/**
 * Consumes the documents (and, via the returned positions consumer, the
 * positions/payloads) of one term.
 *
 * NOTE: this API is experimental and will likely change
 */
abstract class FormatPostingsDocsConsumer {
/** Adds a new doc in this term. If this returns null
 * then we just skip consuming positions/payloads. */
abstract FormatPostingsPositionsConsumer addDoc(int docID, int termDocFreq) throws IOException;
/** Called when we are done adding docs to this term */
abstract void finish() throws IOException;
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/FormatPostingsDocsConsumer.java | Java | art | 1,296 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
/** Per-segment cursor state used while merging terms across segments. */
final class SegmentMergeInfo {
  Term term;                      // current term; null once the enum is exhausted
  int base;                       // doc-number base of this segment within the merge
  int ord;                        // the position of the segment in a MultiReader
  TermEnum termEnum;
  IndexReader reader;
  int delCount;                   // number of deleted docs; computed by getDocMap()
  private TermPositions postings; // lazily created; use getPositions()
  private int[] docMap;           // lazily built; use getDocMap()

  SegmentMergeInfo(int b, TermEnum te, IndexReader r)
    throws IOException {
    base = b;
    reader = r;
    termEnum = te;
    term = te.term();
  }

  /** Lazily builds (and caches) an array mapping doc numbers around
   * deletions: docMap[old] is the renumbered doc, or -1 if old is
   * deleted. Stays null when the reader has no deletions. */
  int[] getDocMap() {
    if (docMap != null) {
      return docMap;
    }
    delCount = 0;
    if (reader.hasDeletions()) {
      final int maxDoc = reader.maxDoc();
      docMap = new int[maxDoc];
      int newDoc = 0;
      for (int oldDoc = 0; oldDoc < maxDoc; oldDoc++) {
        if (reader.isDeleted(oldDoc)) {
          delCount++;
          docMap[oldDoc] = -1;
        } else {
          docMap[oldDoc] = newDoc++;
        }
      }
    }
    return docMap;
  }

  /** Lazily creates (and caches) a TermPositions over this segment. */
  TermPositions getPositions() throws IOException {
    if (postings == null) {
      postings = reader.termPositions();
    }
    return postings;
  }

  /** Advances to the next term; clears term and returns false at the end. */
  final boolean next() throws IOException {
    final boolean more = termEnum.next();
    term = more ? termEnum.term() : null;
    return more;
  }

  /** Closes the term enum and, if it was ever created, the positions. */
  final void close() throws IOException {
    termEnum.close();
    if (postings != null) {
      postings.close();
    }
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/SegmentMergeInfo.java | Java | art | 2,275 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.document.Fieldable;
/**
 * Holds all per thread, per field state.
 */
final class DocFieldProcessorPerField {
final DocFieldConsumerPerField consumer; // downstream consumer for this field
final FieldInfo fieldInfo;
DocFieldProcessorPerField next; // link to another per-field entry; maintained by the owner (presumably a chain — confirm against DocFieldProcessorPerThread)
int lastGen = -1; // generation this field was last seen in; -1 = never seen
int fieldCount; // number of entries in fields for the current document
Fieldable[] fields = new Fieldable[1]; // the current document's Fieldable instances; grown by the owner
public DocFieldProcessorPerField(final DocFieldProcessorPerThread perThread, final FieldInfo fieldInfo) {
this.consumer = perThread.consumer.addField(fieldInfo);
this.fieldInfo = fieldInfo;
}
/** Forwards the abort to the downstream consumer. */
public void abort() {
consumer.abort();
}
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/DocFieldProcessorPerField.java | Java | art | 1,421 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.util.HashMap;
import java.util.Collection;
import java.util.Iterator;
import java.util.Map;
import java.util.HashSet;
import java.io.IOException;
import org.apache.lucene.util.ArrayUtil;
/** This is just a "splitter" class: it lets you wrap two
 * DocFieldConsumer instances as a single consumer. */
final class DocFieldConsumers extends DocFieldConsumer {
  final DocFieldConsumer one;
  final DocFieldConsumer two;

  public DocFieldConsumers(DocFieldConsumer one, DocFieldConsumer two) {
    this.one = one;
    this.two = two;
  }

  @Override
  void setFieldInfos(FieldInfos fieldInfos) {
    super.setFieldInfos(fieldInfos);
    one.setFieldInfos(fieldInfos);
    two.setFieldInfos(fieldInfos);
  }

  /** Splits the (thread, fields) map into one map per wrapped consumer
   * and flushes both. */
  @Override
  public void flush(Map<DocFieldConsumerPerThread,Collection<DocFieldConsumerPerField>> threadsAndFields, SegmentWriteState state) throws IOException {

    final Map<DocFieldConsumerPerThread,Collection<DocFieldConsumerPerField>> oneThreadsAndFields = new HashMap<DocFieldConsumerPerThread,Collection<DocFieldConsumerPerField>>();
    final Map<DocFieldConsumerPerThread,Collection<DocFieldConsumerPerField>> twoThreadsAndFields = new HashMap<DocFieldConsumerPerThread,Collection<DocFieldConsumerPerField>>();

    for (Map.Entry<DocFieldConsumerPerThread,Collection<DocFieldConsumerPerField>> entry : threadsAndFields.entrySet()) {
      final DocFieldConsumersPerThread perThread = (DocFieldConsumersPerThread) entry.getKey();

      final Collection<DocFieldConsumerPerField> oneFields = new HashSet<DocFieldConsumerPerField>();
      final Collection<DocFieldConsumerPerField> twoFields = new HashSet<DocFieldConsumerPerField>();
      for (DocFieldConsumerPerField field : entry.getValue()) {
        final DocFieldConsumersPerField perField = (DocFieldConsumersPerField) field;
        oneFields.add(perField.one);
        twoFields.add(perField.two);
      }

      oneThreadsAndFields.put(perThread.one, oneFields);
      twoThreadsAndFields.put(perThread.two, twoFields);
    }

    one.flush(oneThreadsAndFields, state);
    two.flush(twoThreadsAndFields, state);
  }

  /** Closes both doc stores; the second is closed even if the first throws. */
  @Override
  public void closeDocStore(SegmentWriteState state) throws IOException {
    try {
      one.closeDocStore(state);
    } finally {
      two.closeDocStore(state);
    }
  }

  /** Aborts both consumers; the second is aborted even if the first throws. */
  @Override
  public void abort() {
    try {
      one.abort();
    } finally {
      two.abort();
    }
  }

  /** Asks both consumers to free RAM; true if either freed anything. */
  @Override
  public boolean freeRAM() {
    final boolean freedOne = one.freeRAM();
    final boolean freedTwo = two.freeRAM();
    return freedOne | freedTwo;
  }

  @Override
  public DocFieldConsumerPerThread addThread(DocFieldProcessorPerThread docFieldProcessorPerThread) throws IOException {
    return new DocFieldConsumersPerThread(docFieldProcessorPerThread, this, one.addThread(docFieldProcessorPerThread), two.addThread(docFieldProcessorPerThread));
  }

  // ----- free list of recycled PerDoc instances -----

  PerDoc[] docFreeList = new PerDoc[1];
  int freeCount;
  int allocCount;

  /** Returns a recycled PerDoc if one is available, else allocates a new
   * one (growing the free list up front so every outstanding instance
   * can later be recycled). */
  synchronized PerDoc getPerDoc() {
    if (freeCount > 0) {
      return docFreeList[--freeCount];
    }
    allocCount++;
    if (allocCount > docFreeList.length) {
      // Grow our free list up front to make sure we have
      // enough space to recycle all outstanding PerDoc
      // instances
      assert allocCount == 1 + docFreeList.length;
      docFreeList = new PerDoc[ArrayUtil.getNextSize(allocCount)];
    }
    return new PerDoc();
  }

  synchronized void freePerDoc(PerDoc perDoc) {
    assert freeCount < docFreeList.length;
    docFreeList[freeCount++] = perDoc;
  }

  /** Pairs the two wrapped consumers' per-document writers. */
  class PerDoc extends DocumentsWriter.DocWriter {

    DocumentsWriter.DocWriter one;
    DocumentsWriter.DocWriter two;

    @Override
    public long sizeInBytes() {
      return one.sizeInBytes() + two.sizeInBytes();
    }

    /** Finishes both children no matter what, then recycles this PerDoc. */
    @Override
    public void finish() throws IOException {
      try {
        try {
          one.finish();
        } finally {
          two.finish();
        }
      } finally {
        freePerDoc(this);
      }
    }

    /** Aborts both children no matter what, then recycles this PerDoc. */
    @Override
    public void abort() {
      try {
        try {
          one.abort();
        } finally {
          two.abort();
        }
      } finally {
        freePerDoc(this);
      }
    }
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/DocFieldConsumers.java | Java | art | 5,126 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;
import java.io.IOException;
/* IndexInput that knows how to read the byte slices written
 * by Posting and PostingVector. We read the bytes in
 * each slice until we hit the end of that slice at which
 * point we read the forwarding address of the next slice
 * and then jump to it.*/
final class ByteSliceReader extends IndexInput {
  ByteBlockPool pool;       // pool holding the byte blocks we read from
  int bufferUpto;           // index of the current block within the pool
  byte[] buffer;            // current block (== pool.buffers[bufferUpto])
  public int upto;          // read position within the current block
  int limit;                // read limit within the current block
  int level;                // current slice level; slice sizes come from ByteBlockPool.levelSizeArray
  public int bufferOffset;  // absolute offset of the current block's first byte
  public int endIndex;      // absolute offset one past the last byte to read

  /** Positions this reader to read the bytes in [startIndex, endIndex)
   *  out of the pool, following slice forwarding addresses as needed. */
  public void init(ByteBlockPool pool, int startIndex, int endIndex) {

    assert endIndex-startIndex >= 0;
    assert startIndex >= 0;
    assert endIndex >= 0;

    this.pool = pool;
    this.endIndex = endIndex;

    level = 0;
    bufferUpto = startIndex / DocumentsWriter.BYTE_BLOCK_SIZE;
    bufferOffset = bufferUpto * DocumentsWriter.BYTE_BLOCK_SIZE;
    buffer = pool.buffers[bufferUpto];
    upto = startIndex & DocumentsWriter.BYTE_BLOCK_MASK;

    final int firstSize = ByteBlockPool.levelSizeArray[0];

    if (startIndex+firstSize >= endIndex) {
      // There is only this one slice to read
      limit = endIndex & DocumentsWriter.BYTE_BLOCK_MASK;
    } else
      // Subtract 4 for the forwarding address at the end of this slice.
      limit = upto+firstSize-4;
  }

  /** True once every byte up to endIndex has been consumed. */
  public boolean eof() {
    assert upto + bufferOffset <= endIndex;
    return upto + bufferOffset == endIndex;
  }

  @Override
  public byte readByte() {
    assert !eof();
    assert upto <= limit;
    // At the slice boundary, follow the forwarding address first.
    if (upto == limit)
      nextSlice();
    return buffer[upto++];
  }

  /** Copies all remaining bytes (through endIndex) to out, following
   *  forwarding addresses, and returns how many bytes were written. */
  public long writeTo(IndexOutput out) throws IOException {
    long size = 0;
    while(true) {
      if (limit + bufferOffset == endIndex) {
        // Final slice: write the remainder and stop.
        assert endIndex - bufferOffset >= upto;
        out.writeBytes(buffer, upto, limit-upto);
        size += limit-upto;
        break;
      } else {
        out.writeBytes(buffer, upto, limit-upto);
        size += limit-upto;
        nextSlice();
      }
    }

    return size;
  }

  /** Reads the 4-byte (big-endian) forwarding address stored at the end
   *  of the current slice and repositions this reader at the next slice. */
  public void nextSlice() {

    // Skip to our next slice
    final int nextIndex = ((buffer[limit]&0xff)<<24) + ((buffer[1+limit]&0xff)<<16) + ((buffer[2+limit]&0xff)<<8) + (buffer[3+limit]&0xff);

    level = ByteBlockPool.nextLevelArray[level];
    final int newSize = ByteBlockPool.levelSizeArray[level];

    bufferUpto = nextIndex / DocumentsWriter.BYTE_BLOCK_SIZE;
    bufferOffset = bufferUpto * DocumentsWriter.BYTE_BLOCK_SIZE;

    buffer = pool.buffers[bufferUpto];
    upto = nextIndex & DocumentsWriter.BYTE_BLOCK_MASK;

    if (nextIndex + newSize >= endIndex) {
      // We are advancing to the final slice
      assert endIndex - nextIndex > 0;
      limit = endIndex - bufferOffset;
    } else {
      // This is not the final slice (subtract 4 for the
      // forwarding address at the end of this new slice)
      limit = upto+newSize-4;
    }
  }

  @Override
  public void readBytes(byte[] b, int offset, int len) {
    while(len > 0) {
      final int numLeft = limit-upto;
      if (numLeft < len) {
        // Read entire slice
        System.arraycopy(buffer, upto, b, offset, numLeft);
        offset += numLeft;
        len -= numLeft;
        nextSlice();
      } else {
        // The remaining bytes fit inside the current slice.
        System.arraycopy(buffer, upto, b, offset, len);
        upto += len;
        break;
      }
    }
  }

  // Random access and close are not supported by this forward-only reader:
  @Override
  public long getFilePointer() {throw new RuntimeException("not implemented");}
  @Override
  public long length() {throw new RuntimeException("not implemented");}
  @Override
  public void seek(long pos) {throw new RuntimeException("not implemented");}
  @Override
  public void close() {throw new RuntimeException("not implemented");}
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/ByteSliceReader.java | Java | art | 4,541 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.Arrays;
import org.apache.lucene.store.IndexOutput;
/**
 * Implements the skip list writer for the default posting list format
 * that stores positions and payloads.
 *
 */
class DefaultSkipListWriter extends MultiLevelSkipListWriter {
  // Per-level snapshot of the last skip point, used for delta-encoding
  // the next skip point at that level.
  private int[] lastSkipDoc;
  private int[] lastSkipPayloadLength;
  private long[] lastSkipFreqPointer;
  private long[] lastSkipProxPointer;

  private IndexOutput freqOutput;
  private IndexOutput proxOutput;  // null when the field stores no positions

  // Values of the skip point currently being buffered (set via setSkipData).
  private int curDoc;
  private boolean curStorePayloads;
  private int curPayloadLength;
  private long curFreqPointer;
  private long curProxPointer;

  DefaultSkipListWriter(int skipInterval, int numberOfSkipLevels, int docCount, IndexOutput freqOutput, IndexOutput proxOutput) {
    super(skipInterval, numberOfSkipLevels, docCount);
    this.freqOutput = freqOutput;
    this.proxOutput = proxOutput;

    lastSkipDoc = new int[numberOfSkipLevels];
    lastSkipPayloadLength = new int[numberOfSkipLevels];
    lastSkipFreqPointer = new long[numberOfSkipLevels];
    lastSkipProxPointer = new long[numberOfSkipLevels];
  }

  void setFreqOutput(IndexOutput freqOutput) {
    this.freqOutput = freqOutput;
  }

  void setProxOutput(IndexOutput proxOutput) {
    this.proxOutput = proxOutput;
  }

  /**
   * Sets the values for the current skip data.
   */
  void setSkipData(int doc, boolean storePayloads, int payloadLength) {
    this.curDoc = doc;
    this.curStorePayloads = storePayloads;
    this.curPayloadLength = payloadLength;
    this.curFreqPointer = freqOutput.getFilePointer();
    if (proxOutput != null)
      this.curProxPointer = proxOutput.getFilePointer();
  }

  @Override
  protected void resetSkip() {
    super.resetSkip();
    Arrays.fill(lastSkipDoc, 0);
    Arrays.fill(lastSkipPayloadLength, -1);  // we don't have to write the first length in the skip list
    // Deltas are relative to the current file positions for a fresh term.
    Arrays.fill(lastSkipFreqPointer, freqOutput.getFilePointer());
    if (proxOutput != null)
      Arrays.fill(lastSkipProxPointer, proxOutput.getFilePointer());
  }

  @Override
  protected void writeSkipData(int level, IndexOutput skipBuffer) throws IOException {
    // To efficiently store payloads in the posting lists we do not store the length of
    // every payload. Instead we omit the length for a payload if the previous payload had
    // the same length.
    // However, in order to support skipping the payload length at every skip point must be known.
    // So we use the same length encoding that we use for the posting lists for the skip data as well:
    // Case 1: current field does not store payloads
    //           SkipDatum                 --> DocSkip, FreqSkip, ProxSkip
    //           DocSkip,FreqSkip,ProxSkip --> VInt
    //           DocSkip records the document number before every SkipInterval th  document in TermFreqs.
    //           Document numbers are represented as differences from the previous value in the sequence.
    // Case 2: current field stores payloads
    //           SkipDatum                 --> DocSkip, PayloadLength?, FreqSkip,ProxSkip
    //           DocSkip,FreqSkip,ProxSkip --> VInt
    //           PayloadLength             --> VInt
    //         In this case DocSkip/2 is the difference between
    //         the current and the previous value. If DocSkip
    //         is odd, then a PayloadLength encoded as VInt follows,
    //         if DocSkip is even, then it is assumed that the
    //         current payload length equals the length at the previous
    //         skip point
    if (curStorePayloads) {
      int delta = curDoc - lastSkipDoc[level];
      if (curPayloadLength == lastSkipPayloadLength[level]) {
        // the current payload length equals the length at the previous skip point,
        // so we don't store the length again
        skipBuffer.writeVInt(delta * 2);
      } else {
        // the payload length is different from the previous one. We shift the DocSkip,
        // set the lowest bit and store the current payload length as VInt.
        skipBuffer.writeVInt(delta * 2 + 1);
        skipBuffer.writeVInt(curPayloadLength);
        lastSkipPayloadLength[level] = curPayloadLength;
      }
    } else {
      // current field does not store payloads
      skipBuffer.writeVInt(curDoc - lastSkipDoc[level]);
    }
    // File-pointer deltas since the previous skip point at this level.
    skipBuffer.writeVInt((int) (curFreqPointer - lastSkipFreqPointer[level]));
    skipBuffer.writeVInt((int) (curProxPointer - lastSkipProxPointer[level]));

    lastSkipDoc[level] = curDoc;
    //System.out.println("write doc at level " + level + ": " + curDoc);

    lastSkipFreqPointer[level] = curFreqPointer;
    lastSkipProxPointer[level] = curProxPointer;
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/DefaultSkipListWriter.java | Java | art | 5,548 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
/** Used by DocumentsWriter to maintain per-thread state.
 *  We keep a separate Posting hash and other state for each
 *  thread and then merge postings hashes from all threads
 *  when writing the segment. */
final class DocumentsWriterThreadState {

  boolean isIdle = true;   // false while a thread is actively using this state
  int numThreads = 1;      // how many threads currently share this instance
  boolean doFlushAfter;    // set when the current doc should trigger a flush

  final DocConsumerPerThread consumer;
  final DocumentsWriter.DocState docState;
  final DocumentsWriter docWriter;

  public DocumentsWriterThreadState(DocumentsWriter docWriter) throws IOException {
    this.docWriter = docWriter;

    // Seed our private DocState from the writer's current settings.
    docState = new DocumentsWriter.DocState();
    docState.infoStream = docWriter.infoStream;
    docState.maxFieldLength = docWriter.maxFieldLength;
    docState.similarity = docWriter.similarity;
    docState.docWriter = docWriter;

    // Created last so the consumer chain sees a fully initialized state.
    consumer = docWriter.consumer.addThread(this);
  }

  /** Invoked after a flush completes; clears per-flush bookkeeping. */
  void doAfterFlush() {
    doFlushAfter = false;
    numThreads = 0;
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/DocumentsWriterThreadState.java | Java | art | 2,023 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
/** Writes one field's terms; hands each term's docs off to a child
 *  FormatPostingsDocsWriter. */
final class FormatPostingsTermsWriter extends FormatPostingsTermsConsumer {

  final FormatPostingsFieldsWriter parent;
  final FormatPostingsDocsWriter docsWriter;
  final TermInfosWriter termsOut;
  FieldInfo fieldInfo;

  FormatPostingsTermsWriter(SegmentWriteState state, FormatPostingsFieldsWriter parent) throws IOException {
    super();
    this.parent = parent;
    this.termsOut = parent.termsOut;
    this.docsWriter = new FormatPostingsDocsWriter(state, this);
  }

  /** Switches this writer (and its child docs writer) to a new field. */
  void setField(FieldInfo fieldInfo) {
    this.fieldInfo = fieldInfo;
    docsWriter.setField(fieldInfo);
  }

  char[] currentTerm;    // shared buffer holding the term being written
  int currentTermStart;  // offset of the term's first char within currentTerm
  long freqStart;        // freq-file position where this term's postings begin
  long proxStart;        // prox-file position where this term's postings begin

  /** Adds a new term in this field */
  @Override
  FormatPostingsDocsConsumer addTerm(char[] text, int start) {
    currentTermStart = start;
    currentTerm = text;

    // TODO: this is abstraction violation -- ideally this
    // terms writer is not so "invasive", looking for file
    // pointers in its child consumers.
    freqStart = docsWriter.out.getFilePointer();
    if (docsWriter.posWriter.out != null) {
      proxStart = docsWriter.posWriter.out.getFilePointer();
    }

    parent.skipListWriter.resetSkip();

    return docsWriter;
  }

  /** Called when we are done adding terms to this field */
  @Override
  void finish() {
  }

  void close() throws IOException {
    docsWriter.close();
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/FormatPostingsTermsWriter.java | Java | art | 2,240 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.util.UnicodeUtil;
import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
import java.util.Map;
import java.util.ArrayList;
import java.util.List;
final class FreqProxTermsWriter extends TermsHashConsumer {

  @Override
  public TermsHashConsumerPerThread addThread(TermsHashPerThread perThread) {
    return new FreqProxTermsWriterPerThread(perThread);
  }

  @Override
  void createPostings(RawPostingList[] postings, int start, int count) {
    final int end = start + count;
    for(int i=start;i<end;i++)
      postings[i] = new PostingList();
  }

  /** Compares two 0xffff-terminated char sequences.  0xffff acts as the
   *  end-of-term sentinel, so a shorter term sorts before its extensions. */
  private static int compareText(final char[] text1, int pos1, final char[] text2, int pos2) {
    while(true) {
      final char c1 = text1[pos1++];
      final char c2 = text2[pos2++];
      if (c1 != c2) {
        if (0xffff == c2)
          return 1;
        else if (0xffff == c1)
          return -1;
        else
          return c1-c2;
      } else if (0xffff == c1)
        return 0;
    }
  }

  @Override
  void closeDocStore(SegmentWriteState state) {}
  @Override
  void abort() {}

  // TODO: would be nice to factor out more of this, eg the
  // FreqProxFieldMergeState, and code to visit all Fields
  // under the same FieldInfo together, up into TermsHash*.
  // Other writers would presumably share alot of this...

  /** Writes all buffered postings, across all indexing threads, into the
   *  new segment.  Per-thread instances of the same field are grouped by
   *  name and merged through {@link #appendPostings}. */
  @Override
  public void flush(Map<TermsHashConsumerPerThread,Collection<TermsHashConsumerPerField>> threadsAndFields, final SegmentWriteState state) throws IOException {

    // Gather all FieldData's that have postings, across all
    // ThreadStates
    List<FreqProxTermsWriterPerField> allFields = new ArrayList<FreqProxTermsWriterPerField>();

    for (Map.Entry<TermsHashConsumerPerThread,Collection<TermsHashConsumerPerField>> entry : threadsAndFields.entrySet()) {

      Collection<TermsHashConsumerPerField> fields = entry.getValue();

      for (final TermsHashConsumerPerField i : fields) {
        final FreqProxTermsWriterPerField perField = (FreqProxTermsWriterPerField) i;
        if (perField.termsHashPerField.numPostings > 0)
          allFields.add(perField);
      }
    }

    // Sort by field name
    Collections.sort(allFields);
    final int numAllFields = allFields.size();

    // TODO: allow Lucene user to customize this consumer:
    final FormatPostingsFieldsConsumer consumer = new FormatPostingsFieldsWriter(state, fieldInfos);

    /*
    Current writer chain:
      FormatPostingsFieldsConsumer
        -> IMPL: FormatPostingsFieldsWriter
          -> FormatPostingsTermsConsumer
            -> IMPL: FormatPostingsTermsWriter
              -> FormatPostingsDocConsumer
                -> IMPL: FormatPostingsDocWriter
                  -> FormatPostingsPositionsConsumer
                    -> IMPL: FormatPostingsPositionsWriter
    */

    int start = 0;
    while(start < numAllFields) {
      // [start, end) covers every per-thread instance of the same field name
      // (allFields is sorted by name, so equal names are adjacent).
      final FieldInfo fieldInfo = allFields.get(start).fieldInfo;
      final String fieldName = fieldInfo.name;

      int end = start+1;
      while(end < numAllFields && allFields.get(end).fieldInfo.name.equals(fieldName))
        end++;

      FreqProxTermsWriterPerField[] fields = new FreqProxTermsWriterPerField[end-start];
      for(int i=start;i<end;i++) {
        fields[i-start] = allFields.get(i);

        // Aggregate the storePayload as seen by the same
        // field across multiple threads
        fieldInfo.storePayloads |= fields[i-start].hasPayloads;
      }

      // If this field has postings then add them to the
      // segment
      appendPostings(fields, consumer);

      // Release the postings memory now that this field is written.
      for(int i=0;i<fields.length;i++) {
        TermsHashPerField perField = fields[i].termsHashPerField;
        int numPostings = perField.numPostings;
        perField.reset();
        perField.shrinkHash(numPostings);
        fields[i].reset();
      }

      start = end;
    }

    for (Map.Entry<TermsHashConsumerPerThread,Collection<TermsHashConsumerPerField>> entry : threadsAndFields.entrySet()) {
      FreqProxTermsWriterPerThread perThread = (FreqProxTermsWriterPerThread) entry.getKey();
      perThread.termsHashPerThread.reset(true);
    }

    consumer.finish();
  }

  // Reusable scratch buffer for copying payload bytes; grown on demand.
  private byte[] payloadBuffer;

  /* Walk through all unique text tokens (Posting
   * instances) found in this field and serialize them
   * into a single RAM segment. */
  void appendPostings(FreqProxTermsWriterPerField[] fields,
                      FormatPostingsFieldsConsumer consumer)
    throws CorruptIndexException, IOException {

    int numFields = fields.length;

    final FreqProxFieldMergeState[] mergeStates = new FreqProxFieldMergeState[numFields];

    for(int i=0;i<numFields;i++) {
      FreqProxFieldMergeState fms = mergeStates[i] = new FreqProxFieldMergeState(fields[i]);

      assert fms.field.fieldInfo == fields[0].fieldInfo;

      // Should always be true
      boolean result = fms.nextTerm();
      assert result;
    }

    final FormatPostingsTermsConsumer termsConsumer = consumer.addField(fields[0].fieldInfo);

    FreqProxFieldMergeState[] termStates = new FreqProxFieldMergeState[numFields];

    final boolean currentFieldOmitTermFreqAndPositions = fields[0].fieldInfo.omitTermFreqAndPositions;

    // k-way merge: each iteration writes one term, interleaving the docID
    // streams of every merge state positioned on that term.
    while(numFields > 0) {

      // Get the next term to merge
      termStates[0] = mergeStates[0];
      int numToMerge = 1;

      for(int i=1;i<numFields;i++) {
        final char[] text = mergeStates[i].text;
        final int textOffset = mergeStates[i].textOffset;
        final int cmp = compareText(text, textOffset, termStates[0].text, termStates[0].textOffset);

        if (cmp < 0) {
          termStates[0] = mergeStates[i];
          numToMerge = 1;
        } else if (cmp == 0)
          termStates[numToMerge++] = mergeStates[i];
      }

      final FormatPostingsDocsConsumer docConsumer = termsConsumer.addTerm(termStates[0].text, termStates[0].textOffset);

      // Now termStates has numToMerge FieldMergeStates
      // which all share the same term.  Now we must
      // interleave the docID streams.
      while(numToMerge > 0) {

        // Pick the state whose next docID is smallest.
        FreqProxFieldMergeState minState = termStates[0];
        for(int i=1;i<numToMerge;i++)
          if (termStates[i].docID < minState.docID)
            minState = termStates[i];

        final int termDocFreq = minState.termFreq;

        final FormatPostingsPositionsConsumer posConsumer = docConsumer.addDoc(minState.docID, termDocFreq);

        final ByteSliceReader prox = minState.prox;

        // Carefully copy over the prox + payload info,
        // changing the format to match Lucene's segment
        // format.
        if (!currentFieldOmitTermFreqAndPositions) {
          // omitTermFreqAndPositions == false so we do write positions &
          // payload
          int position = 0;
          for(int j=0;j<termDocFreq;j++) {
            // Low bit of the code flags whether a payload length follows;
            // the remaining bits carry the position delta.
            final int code = prox.readVInt();
            position += code >> 1;

            final int payloadLength;
            if ((code & 1) != 0) {
              // This position has a payload
              payloadLength = prox.readVInt();

              if (payloadBuffer == null || payloadBuffer.length < payloadLength)
                payloadBuffer = new byte[payloadLength];

              prox.readBytes(payloadBuffer, 0, payloadLength);

            } else
              payloadLength = 0;

            posConsumer.addPosition(position, payloadBuffer, 0, payloadLength);
          } //End for

          posConsumer.finish();
        }

        if (!minState.nextDoc()) {

          // Remove from termStates
          int upto = 0;
          for(int i=0;i<numToMerge;i++)
            if (termStates[i] != minState)
              termStates[upto++] = termStates[i];
          numToMerge--;
          assert upto == numToMerge;

          // Advance this state to the next term

          if (!minState.nextTerm()) {
            // OK, no more terms, so remove from mergeStates
            // as well
            upto = 0;
            for(int i=0;i<numFields;i++)
              if (mergeStates[i] != minState)
                mergeStates[upto++] = mergeStates[i];
            numFields--;
            assert upto == numFields;
          }
        }
      }

      docConsumer.finish();
    }

    termsConsumer.finish();
  }

  // NOTE(review): appears unused within this class — presumably scratch
  // state for UTF8 conversion; confirm against callers before removing.
  final UnicodeUtil.UTF8Result termsUTF8 = new UnicodeUtil.UTF8Result();

  /** Per-term posting entry tracking the in-progress doc and last-seen state. */
  static final class PostingList extends RawPostingList {
    int docFreq;                                    // # times this term occurs in the current doc
    int lastDocID;                                  // Last docID where this term occurred
    int lastDocCode;                                // Code for prior doc
    int lastPosition;                               // Last position where this term occurred
  }

  @Override
  int bytesPerPosting() {
    // Base posting size plus the four int fields declared in PostingList.
    return RawPostingList.BYTES_SIZE + 4 * DocumentsWriter.INT_NUM_BYTE;
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/FreqProxTermsWriter.java | Java | art | 9,722 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.util.ArrayUtil;
import org.apache.lucene.search.Similarity;
/** Taps into DocInverter, as an InvertedDocEndConsumer,
 *  which is called at the end of inverting each field.  We
 *  just look at the length for the field (docState.length)
 *  and record the norm. */
final class NormsWriterPerField extends InvertedDocEndConsumerPerField implements Comparable<NormsWriterPerField> {

  final NormsWriterPerThread perThread;
  final FieldInfo fieldInfo;
  final DocumentsWriter.DocState docState;
  final FieldInvertState fieldState;

  // Parallel arrays of every docID/norm pair recorded so far; upto is the
  // number of valid entries.
  int[] docIDs = new int[1];
  byte[] norms = new byte[1];
  int upto;

  public NormsWriterPerField(final DocInverterPerField docInverterPerField, final NormsWriterPerThread perThread, final FieldInfo fieldInfo) {
    this.perThread = perThread;
    this.fieldInfo = fieldInfo;
    docState = perThread.docState;
    fieldState = docInverterPerField.fieldState;
  }

  /** Forgets all recorded pairs and releases over-allocated space. */
  public void reset() {
    docIDs = ArrayUtil.shrink(docIDs, upto);
    norms = ArrayUtil.shrink(norms, upto);
    upto = 0;
  }

  @Override
  void abort() {
    // Discard everything recorded so far.
    upto = 0;
  }

  /** Orders per-field writers alphabetically by field name. */
  public int compareTo(NormsWriterPerField other) {
    return fieldInfo.name.compareTo(other.fieldInfo.name);
  }

  @Override
  void finish() {
    assert docIDs.length == norms.length;

    if (!fieldInfo.isIndexed || fieldInfo.omitNorms) {
      return;  // norms are disabled for this field
    }

    if (docIDs.length <= upto) {
      // Arrays are full; grow both in lockstep.
      assert docIDs.length == upto;
      docIDs = ArrayUtil.grow(docIDs, 1+upto);
      norms = ArrayUtil.grow(norms, 1+upto);
    }

    // Record this document's norm, encoded down to a single byte.
    docIDs[upto] = docState.docID;
    norms[upto] = Similarity.encodeNorm(docState.similarity.computeNorm(fieldInfo.name, fieldState));
    upto++;
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/NormsWriterPerField.java | Java | art | 2,656 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.util.UnicodeUtil;
/** Mutable scratch buffer holding one (field, text) term while scanning a
 *  term dictionary; avoids materializing a Term object per entry. */
final class TermBuffer implements Cloneable {

  private String field;
  private Term term;                            // cached
  private boolean preUTF8Strings;               // true if strings are stored in modified UTF8 encoding (LUCENE-510)
  private boolean dirty;                        // true if text was set externally (ie not read via UTF8 bytes)

  private UnicodeUtil.UTF16Result text = new UnicodeUtil.UTF16Result();
  private UnicodeUtil.UTF8Result bytes = new UnicodeUtil.UTF8Result();

  public final int compareTo(TermBuffer other) {
    if (field == other.field) // fields are interned
      return compareChars(text.result, text.length, other.text.result, other.text.length);
    else
      return field.compareTo(other.field);
  }

  /** Lexicographic char-by-char comparison; ties broken by length. */
  private static final int compareChars(char[] chars1, int len1,
                                        char[] chars2, int len2) {
    final int end = len1 < len2 ? len1:len2;
    for (int k = 0; k < end; k++) {
      char c1 = chars1[k];
      char c2 = chars2[k];
      if (c1 != c2) {
        return c1 - c2;
      }
    }
    return len1 - len2;
  }

  /** Call this if the IndexInput passed to {@link #read}
   *  stores terms in the "modified UTF8" (pre LUCENE-510)
   *  format. */
  void setPreUTF8Strings() {
    preUTF8Strings = true;
  }

  /** Reads the next (prefix-compressed) term from the input: "start" units
   *  are kept from the previous term, then "length" new suffix units are
   *  read and appended. */
  public final void read(IndexInput input, FieldInfos fieldInfos)
    throws IOException {
    this.term = null;                           // invalidate cache
    int start = input.readVInt();
    int length = input.readVInt();
    int totalLength = start + length;
    if (preUTF8Strings) {
      text.setLength(totalLength);
      input.readChars(text.result, start, length);
    } else {

      if (dirty) {
        // Fully convert all bytes since bytes is dirty
        UnicodeUtil.UTF16toUTF8(text.result, 0, text.length, bytes);
        bytes.setLength(totalLength);
        input.readBytes(bytes.result, start, length);
        UnicodeUtil.UTF8toUTF16(bytes.result, 0, totalLength, text);
        dirty = false;
      } else {
        // Incrementally convert only the UTF8 bytes that are new:
        bytes.setLength(totalLength);
        input.readBytes(bytes.result, start, length);
        UnicodeUtil.UTF8toUTF16(bytes.result, start, length, text);
      }
    }
    this.field = fieldInfos.fieldName(input.readVInt());
  }

  /** Copies the given term into this buffer; null resets the buffer. */
  public final void set(Term term) {
    if (term == null) {
      reset();
      return;
    }
    final String termText = term.text();
    final int termLen = termText.length();
    text.setLength(termLen);
    termText.getChars(0, termLen, text.result, 0);
    dirty = true;                               // bytes no longer mirrors text
    field = term.field();
    this.term = term;
  }

  public final void set(TermBuffer other) {
    text.copyText(other.text);
    dirty = true;                               // we did not copy other's bytes
    field = other.field;
    term = other.term;
  }

  public void reset() {
    field = null;
    text.setLength(0);
    term = null;
    dirty = true;
  }

  /** Converts the buffer's contents to a (cached) Term, or null if unset. */
  public Term toTerm() {
    if (field == null) // unset
      return null;

    if (term == null)
      term = new Term(field, new String(text.result, 0, text.length), false);

    return term;
  }

  @Override
  protected Object clone() {
    TermBuffer clone = null;
    try {
      clone = (TermBuffer)super.clone();
    } catch (CloneNotSupportedException e) {}   // cannot happen: this class implements Cloneable

    // Give the clone its own buffers; mark dirty since bytes is not copied.
    clone.dirty = true;
    clone.bytes = new UnicodeUtil.UTF8Result();
    clone.text = new UnicodeUtil.UTF16Result();
    clone.text.copyText(text);
    return clone;
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/TermBuffer.java | Java | art | 4,453 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.Collection;
import java.util.Iterator;
import java.util.HashMap;
import java.util.Map;
import java.util.List;
import java.util.ArrayList;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.search.Similarity;
// TODO FI: norms could actually be stored as doc store
/** Writes norms. Each thread X field accumulates the norms
* for the doc/fields it saw, then the flush method below
* merges all of these together into a single _X.nrm file.
*/
final class NormsWriter extends InvertedDocEndConsumer {
private static final byte defaultNorm = Similarity.encodeNorm(1.0f);
private FieldInfos fieldInfos;
@Override
public InvertedDocEndConsumerPerThread addThread(DocInverterPerThread docInverterPerThread) {
  // One NormsWriterPerThread per indexing thread, all feeding this writer.
  return new NormsWriterPerThread(docInverterPerThread, this);
}
@Override
public void abort() {}  // no-op
// We only write the _X.nrm file at flush
void files(Collection<String> files) {}  // so there are no intermediate files to report here
@Override
void setFieldInfos(FieldInfos fieldInfos) {
  // Remember the schema; flush() walks every known field from it.
  this.fieldInfos = fieldInfos;
}
/** Produce _X.nrm if any document had a field with norms
* not disabled */
@Override
public void flush(Map<InvertedDocEndConsumerPerThread,Collection<InvertedDocEndConsumerPerField>> threadsAndFields, SegmentWriteState state) throws IOException {
final Map<FieldInfo,List<NormsWriterPerField>> byField = new HashMap<FieldInfo,List<NormsWriterPerField>>();
// Typically, each thread will have encountered the same
// field. So first we collate by field, ie, all
// per-thread field instances that correspond to the
// same FieldInfo
for (final Map.Entry<InvertedDocEndConsumerPerThread,Collection<InvertedDocEndConsumerPerField>> entry : threadsAndFields.entrySet()) {
final Collection<InvertedDocEndConsumerPerField> fields = entry.getValue();
final Iterator<InvertedDocEndConsumerPerField> fieldsIt = fields.iterator();
while (fieldsIt.hasNext()) {
final NormsWriterPerField perField = (NormsWriterPerField) fieldsIt.next();
if (perField.upto > 0) {
// It has some norms
List<NormsWriterPerField> l = byField.get(perField.fieldInfo);
if (l == null) {
l = new ArrayList<NormsWriterPerField>();
byField.put(perField.fieldInfo, l);
}
l.add(perField);
} else
// Remove this field since we haven't seen it
// since the previous flush
fieldsIt.remove();
}
}
final String normsFileName = state.segmentName + "." + IndexFileNames.NORMS_EXTENSION;
state.flushedFiles.add(normsFileName);
IndexOutput normsOut = state.directory.createOutput(normsFileName);
try {
normsOut.writeBytes(SegmentMerger.NORMS_HEADER, 0, SegmentMerger.NORMS_HEADER.length);
final int numField = fieldInfos.size();
int normCount = 0;
for(int fieldNumber=0;fieldNumber<numField;fieldNumber++) {
final FieldInfo fieldInfo = fieldInfos.fieldInfo(fieldNumber);
List<NormsWriterPerField> toMerge = byField.get(fieldInfo);
int upto = 0;
if (toMerge != null) {
final int numFields = toMerge.size();
normCount++;
final NormsWriterPerField[] fields = new NormsWriterPerField[numFields];
int[] uptos = new int[numFields];
for(int j=0;j<numFields;j++)
fields[j] = toMerge.get(j);
int numLeft = numFields;
while(numLeft > 0) {
assert uptos[0] < fields[0].docIDs.length : " uptos[0]=" + uptos[0] + " len=" + (fields[0].docIDs.length);
int minLoc = 0;
int minDocID = fields[0].docIDs[uptos[0]];
for(int j=1;j<numLeft;j++) {
final int docID = fields[j].docIDs[uptos[j]];
if (docID < minDocID) {
minDocID = docID;
minLoc = j;
}
}
assert minDocID < state.numDocs;
// Fill hole
for(;upto<minDocID;upto++)
normsOut.writeByte(defaultNorm);
normsOut.writeByte(fields[minLoc].norms[uptos[minLoc]]);
(uptos[minLoc])++;
upto++;
if (uptos[minLoc] == fields[minLoc].upto) {
fields[minLoc].reset();
if (minLoc != numLeft-1) {
fields[minLoc] = fields[numLeft-1];
uptos[minLoc] = uptos[numLeft-1];
}
numLeft--;
}
}
// Fill final hole with defaultNorm
for(;upto<state.numDocs;upto++)
normsOut.writeByte(defaultNorm);
} else if (fieldInfo.isIndexed && !fieldInfo.omitNorms) {
normCount++;
// Fill entire field with default norm:
for(;upto<state.numDocs;upto++)
normsOut.writeByte(defaultNorm);
}
assert 4+normCount*state.numDocs == normsOut.getFilePointer() : ".nrm file size mismatch: expected=" + (4+normCount*state.numDocs) + " actual=" + normsOut.getFilePointer();
}
} finally {
normsOut.close();
}
}
@Override
void closeDocStore(SegmentWriteState state) {}
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/NormsWriter.java | Java | art | 6,077 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Class to write byte streams into slices of shared
* byte[]. This is used by DocumentsWriter to hold the
* posting list for many terms in RAM.
*/
final class ByteSliceWriter {

  // Byte block currently being written into (one buffer of the pool).
  private byte[] buf;
  // Next write position within buf.
  private int pos;
  private final ByteBlockPool pool;

  int offset0;

  public ByteSliceWriter(ByteBlockPool pool) {
    this.pool = pool;
  }

  /**
   * Set up the writer to write at address.
   */
  public void init(int address) {
    buf = pool.buffers[address >> DocumentsWriter.BYTE_BLOCK_SHIFT];
    assert buf != null;
    pos = address & DocumentsWriter.BYTE_BLOCK_MASK;
    offset0 = address;
    assert pos < buf.length;
  }

  /** Write byte into byte slice stream */
  public void writeByte(byte b) {
    assert buf != null;
    if (buf[pos] != 0) {
      // A non-zero byte marks the end of the current slice: ask the
      // pool for the next (larger) slice and continue writing there.
      pos = pool.allocSlice(buf, pos);
      buf = pool.buffer;
      offset0 = pool.byteOffset;
      assert buf != null;
    }
    buf[pos++] = b;
    assert pos != buf.length;
  }

  public void writeBytes(final byte[] b, int offset, final int len) {
    final int end = offset + len;
    for (int i = offset; i < end; i++) {
      if (buf[pos] != 0) {
        // End marker
        pos = pool.allocSlice(buf, pos);
        buf = pool.buffer;
        offset0 = pool.byteOffset;
      }
      buf[pos++] = b[i];
      assert pos != buf.length;
    }
  }

  public int getAddress() {
    return pos + (offset0 & DocumentsWriter.BYTE_BLOCK_NOT_MASK);
  }

  /** Write a variable-length int, 7 bits at a time, low-order first. */
  public void writeVInt(int i) {
    for (int v = i; ; v >>>= 7) {
      if ((v & ~0x7F) == 0) {
        writeByte((byte) v);
        return;
      }
      writeByte((byte) ((v & 0x7f) | 0x80));
    }
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/ByteSliceWriter.java | Java | art | 2,483 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
class ReadOnlySegmentReader extends SegmentReader {

  /** Always throws: a read-only reader rejects every modifying operation. */
  static void noWrite() {
    throw new UnsupportedOperationException("This IndexReader cannot make any changes to the index (it was opened with readOnly = true)");
  }

  @Override
  protected void acquireWriteLock() {
    noWrite();
  }

  // Not synchronized
  @Override
  public boolean isDeleted(int n) {
    if (deletedDocs == null) {
      return false;
    }
    return deletedDocs.get(n);
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/ReadOnlySegmentReader.java | Java | art | 1,263 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
/**
 * TermPositions provides an interface for enumerating the &lt;document,
 * frequency, &lt;position&gt;* &gt; tuples for a term.  <p> The document and
 * frequency are the same as for a TermDocs.  The positions portion lists the ordinal
 * positions of each occurrence of a term in a document.
 *
 * @see IndexReader#termPositions()
 */
public interface TermPositions
    extends TermDocs
{
  /** Returns next position in the current document.  It is an error to call
      this more than {@link #freq()} times
      without calling {@link #next()}<p> This is
      invalid until {@link #next()} is called for
      the first time.
  */
  int nextPosition() throws IOException;

  /**
   * Returns the length of the payload at the current term position.
   * This is invalid until {@link #nextPosition()} is called for
   * the first time.<br>
   * @return length of the current payload in number of bytes
   */
  int getPayloadLength();

  /**
   * Returns the payload data at the current term position.
   * This is invalid until {@link #nextPosition()} is called for
   * the first time.
   * This method must not be called more than once after each call
   * of {@link #nextPosition()}. However, payloads are loaded lazily,
   * so if the payload data for the current position is not needed,
   * this method may not be called at all for performance reasons.<br>
   *
   * @param data the array into which the data of this payload is to be
   *             stored, if it is big enough; otherwise, a new byte[] array
   *             is allocated for this purpose.
   * @param offset the offset in the array into which the data of this payload
   *               is to be stored.
   * @return a byte[] array containing the data of this payload
   * @throws IOException
   */
  byte[] getPayload(byte[] data, int offset) throws IOException;

  /**
   * Checks if a payload can be loaded at this position.
   * <p>
   * Payloads can only be loaded once per call to
   * {@link #nextPosition()}.
   *
   * @return true if there is a payload available at this position that can be loaded
   */
  public boolean isPayloadAvailable();
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/TermPositions.java | Java | art | 3,071 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
class SegmentTermPositionVector extends SegmentTermVector implements TermPositionVector {
protected int[][] positions;
protected TermVectorOffsetInfo[][] offsets;
public static final int[] EMPTY_TERM_POS = new int[0];
public SegmentTermPositionVector(String field, String terms[], int termFreqs[], int[][] positions, TermVectorOffsetInfo[][] offsets) {
super(field, terms, termFreqs);
this.offsets = offsets;
this.positions = positions;
}
/**
* Returns an array of TermVectorOffsetInfo in which the term is found.
*
* @param index The position in the array to get the offsets from
* @return An array of TermVectorOffsetInfo objects or the empty list
* @see org.apache.lucene.analysis.Token
*/
public TermVectorOffsetInfo[] getOffsets(int index) {
TermVectorOffsetInfo[] result = TermVectorOffsetInfo.EMPTY_OFFSET_INFO;
if(offsets == null)
return null;
if (index >=0 && index < offsets.length)
{
result = offsets[index];
}
return result;
}
/**
* Returns an array of positions in which the term is found.
* Terms are identified by the index at which its number appears in the
* term String array obtained from the <code>indexOf</code> method.
*/
public int[] getTermPositions(int index) {
int[] result = EMPTY_TERM_POS;
if(positions == null)
return null;
if (index >=0 && index < positions.length)
{
result = positions[index];
}
return result;
}
} | zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/SegmentTermPositionVector.java | Java | art | 2,331 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import org.apache.lucene.document.Fieldable;
final class DocFieldConsumersPerField extends DocFieldConsumerPerField {

  // The two downstream consumers every call is fanned out to.
  final DocFieldConsumerPerField one;
  final DocFieldConsumerPerField two;
  final DocFieldConsumersPerThread perThread;

  public DocFieldConsumersPerField(DocFieldConsumersPerThread perThread, DocFieldConsumerPerField one, DocFieldConsumerPerField two) {
    this.one = one;
    this.two = two;
    this.perThread = perThread;
  }

  /** Forwards the fields to both consumers, first {@code one} then {@code two}. */
  @Override
  public void processFields(Fieldable[] fields, int count) throws IOException {
    one.processFields(fields, count);
    two.processFields(fields, count);
  }

  /** Aborts both consumers; {@code two} is aborted even if {@code one} throws. */
  @Override
  public void abort() {
    try {
      one.abort();
    } finally {
      two.abort();
    }
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/DocFieldConsumersPerField.java | Java | art | 1,606 |
package org.apache.lucene.index;
import java.util.*;
/**
* Copyright 2007 The Apache Software Foundation
* <p/>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p/>
* http://www.apache.org/licenses/LICENSE-2.0
* <p/>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* For each Field, store a sorted collection of {@link TermVectorEntry}s
* <p/>
* This is not thread-safe.
*/
public class FieldSortedTermVectorMapper extends TermVectorMapper {

  // Maps each field name to the sorted set of entries collected for it.
  private final Map<String,SortedSet<TermVectorEntry>> termsByField = new HashMap<String,SortedSet<TermVectorEntry>>();

  // Set and field currently being filled; refreshed by setExpectations.
  private SortedSet<TermVectorEntry> activeSet;
  private String activeField;

  private final Comparator<TermVectorEntry> comparator;

  /**
   *
   * @param comparator A Comparator for sorting {@link TermVectorEntry}s
   */
  public FieldSortedTermVectorMapper(Comparator<TermVectorEntry> comparator) {
    this(false, false, comparator);
  }

  public FieldSortedTermVectorMapper(boolean ignoringPositions, boolean ignoringOffsets, Comparator<TermVectorEntry> comparator) {
    super(ignoringPositions, ignoringOffsets);
    this.comparator = comparator;
  }

  @Override
  public void map(String term, int frequency, TermVectorOffsetInfo[] offsets, int[] positions) {
    // Insert into the set of the field announced by the last setExpectations.
    activeSet.add(new TermVectorEntry(activeField, term, frequency, offsets, positions));
  }

  @Override
  public void setExpectations(String field, int numTerms, boolean storeOffsets, boolean storePositions) {
    activeField = field;
    activeSet = new TreeSet<TermVectorEntry>(comparator);
    termsByField.put(field, activeSet);
  }

  /**
   * Get the mapping between fields and terms, sorted by the comparator
   *
   * @return A map between field names and {@link java.util.SortedSet}s per field. SortedSet entries are {@link TermVectorEntry}
   */
  public Map<String,SortedSet<TermVectorEntry>> getFieldToTerms() {
    return termsByField;
  }

  public Comparator<TermVectorEntry> getComparator() {
    return comparator;
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/FieldSortedTermVectorMapper.java | Java | art | 2,431 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import org.apache.lucene.util.ArrayUtil;
/**
* NOTE: this API is experimental and will likely change
*/
abstract class FormatPostingsTermsConsumer {

  /** Adds a new term in this field; term ends with U+FFFF
   *  char */
  abstract FormatPostingsDocsConsumer addTerm(char[] text, int start) throws IOException;

  // Scratch buffer reused across calls to the String overload below.
  char[] termBuffer;

  /** Convenience overload: copies {@code text} into the shared scratch
   *  buffer, appends the U+FFFF terminator, and delegates to
   *  {@link #addTerm(char[], int)}. */
  FormatPostingsDocsConsumer addTerm(String text) throws IOException {
    final int length = text.length();
    final int required = 1 + length;                // +1 for the terminator
    if (termBuffer == null || termBuffer.length < required) {
      termBuffer = new char[ArrayUtil.getNextSize(required)];
    }
    text.getChars(0, length, termBuffer, 0);
    termBuffer[length] = 0xffff;
    return addTerm(termBuffer, 0);
  }

  /** Called when we are done adding terms to this field */
  abstract void finish() throws IOException;
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/FormatPostingsTermsConsumer.java | Java | art | 1,647 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** Provides access to stored term vector of
* a document field. The vector consists of the name of the field, an array of the terms that occur in the field of the
* {@link org.apache.lucene.document.Document} and a parallel array of frequencies. Thus, getTermFrequencies()[5] corresponds with the
* frequency of getTerms()[5], assuming there are at least 5 terms in the Document.
*/
public interface TermFreqVector {
  /**
   * The {@link org.apache.lucene.document.Fieldable} name.
   * @return The name of the field this vector is associated with.
   *
   */
  public String getField();

  /**
   * @return The number of terms in the term vector.
   */
  public int size();

  /**
   * @return An Array of term texts in ascending order.
   */
  public String[] getTerms();

  /** Array of term frequencies. Locations of the array correspond one to one
   *  to the terms in the array obtained from <code>getTerms</code>
   *  method. Each location in the array contains the number of times this
   *  term occurs in the document or the document field.
   */
  public int[] getTermFrequencies();

  /** Return an index in the term numbers array returned from
   *  <code>getTerms</code> at which the term with the specified
   *  <code>term</code> appears. If this term does not appear in the array,
   *  return -1.
   */
  public int indexOf(String term);

  /** Just like <code>indexOf(String)</code> but searches for a number of terms
   *  at the same time. Returns an array that has the same size as the number
   *  of terms searched for, each slot containing the result of searching for
   *  that term number.
   *
   *  @param terms array containing terms to look for
   *  @param start index in the array where the list of terms starts
   *  @param len the number of terms in the list
   */
  public int[] indexesOf(String[] terms, int start, int len);
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/TermFreqVector.java | Java | art | 2,724 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
/** This is a {@link LogMergePolicy} that measures size of a
* segment as the total byte size of the segment's files. */
public class LogByteSizeMergePolicy extends LogMergePolicy {

  /** Default minimum segment size.  @see setMinMergeMB */
  public static final double DEFAULT_MIN_MERGE_MB = 1.6;

  /** Default maximum segment size.  A segment of this size
   *  or larger will never be merged.  @see setMaxMergeMB */
  public static final double DEFAULT_MAX_MERGE_MB = Long.MAX_VALUE;

  public LogByteSizeMergePolicy(IndexWriter writer) {
    super(writer);
    minMergeSize = mbToBytes(DEFAULT_MIN_MERGE_MB);
    maxMergeSize = mbToBytes(DEFAULT_MAX_MERGE_MB);
  }

  // Megabytes -> bytes, truncating toward zero (same arithmetic the
  // setters have always used).
  private static long mbToBytes(double mb) {
    return (long) (mb*1024*1024);
  }

  // Bytes -> megabytes, as a double.
  private static double bytesToMb(long bytes) {
    return ((double) bytes)/1024/1024;
  }

  @Override
  protected long size(SegmentInfo info) throws IOException {
    return sizeBytes(info);
  }

  /** <p>Determines the largest segment (measured by total
   *  byte size of the segment's files, in MB) that may be
   *  merged with other segments.  Small values (e.g., less
   *  than 50 MB) are best for interactive indexing, as this
   *  limits the length of pauses while indexing to a few
   *  seconds.  Larger values are best for batched indexing
   *  and speedier searches.</p>
   *
   *  <p>Note that {@link #setMaxMergeDocs} is also
   *  used to check whether a segment is too large for
   *  merging (it's either or).</p>*/
  public void setMaxMergeMB(double mb) {
    maxMergeSize = mbToBytes(mb);
  }

  /** Returns the largest segment (measured by total byte
   *  size of the segment's files, in MB) that may be merged
   *  with other segments.
   *  @see #setMaxMergeMB */
  public double getMaxMergeMB() {
    return bytesToMb(maxMergeSize);
  }

  /** Sets the minimum size for the lowest level segments.
   *  Any segments below this size are considered to be on
   *  the same level (even if they vary drastically in size)
   *  and will be merged whenever there are mergeFactor of
   *  them.  This effectively truncates the "long tail" of
   *  small segments that would otherwise be created into a
   *  single level.  If you set this too large, it could
   *  greatly increase the merging cost during indexing (if
   *  you flush many small segments). */
  public void setMinMergeMB(double mb) {
    minMergeSize = mbToBytes(mb);
  }

  /** Get the minimum size for a segment to remain
   *  un-merged.
   *  @see #setMinMergeMB **/
  public double getMinMergeMB() {
    return bytesToMb(minMergeSize);
  }
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/LogByteSizeMergePolicy.java | Java | art | 3,329 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.Reader;
/** Used by DocumentsWriter to implemented a StringReader
* that can be reset to a new string; we use this when
* tokenizing the string value from a Field. */
final class ReusableStringReader extends Reader {
  int upto;
  int left;
  String s;

  /** Points this reader at a new string; resets the read position. */
  void init(String s) {
    this.s = s;
    this.upto = 0;
    left = s.length();
  }

  @Override
  public int read(char[] c) {
    return read(c, 0, c.length);
  }

  @Override
  public int read(char[] c, int off, int len) {
    if (left > len) {
      // More remains than was asked for: serve exactly len chars.
      s.getChars(upto, upto + len, c, off);
      upto += len;
      left -= len;
      return len;
    }
    if (left == 0) {
      // Exhausted: drop the reference (s could have been very large)
      // so it can be garbage collected, and signal EOF.
      s = null;
      return -1;
    }
    // Drain everything that's left (fewer than len chars).
    final int drained = left;
    s.getChars(upto, upto + drained, c, off);
    upto += drained;
    left = 0;
    return drained;
  }

  @Override
  public void close() {}
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/ReusableStringReader.java | Java | art | 1,764 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.Collection;
abstract class DocConsumer {
  /** Creates the per-thread instance of this consumer, bound to the
   *  given thread state. */
  abstract DocConsumerPerThread addThread(DocumentsWriterThreadState perThread) throws IOException;
  /** Writes the state buffered in the given per-thread consumers into
   *  the segment described by {@code state}. */
  abstract void flush(final Collection<DocConsumerPerThread> threads, final SegmentWriteState state) throws IOException;
  /** Closes the shared doc-store files for the current segment. */
  abstract void closeDocStore(final SegmentWriteState state) throws IOException;
  /** Discards any buffered state. */
  abstract void abort();
  /** Asks the consumer to release buffered RAM.
   *  NOTE(review): presumably returns true when memory was actually
   *  freed — confirm against the DocumentsWriter caller. */
  abstract boolean freeRAM();
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/DocConsumer.java | Java | art | 1,284 |
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** Implement this class to plug into the TermsHash
* processor, which inverts & stores Tokens into a hash
* table and provides an API for writing bytes into
* multiple streams for each unique Token. */
import java.io.IOException;
import org.apache.lucene.document.Fieldable;
abstract class TermsHashConsumerPerField {
  /** Called before a document's fields are inverted.
   *  NOTE(review): the boolean presumably enables/disables this consumer
   *  for the document — confirm against TermsHashPerField. */
  abstract boolean start(Fieldable[] fields, int count) throws IOException;
  /** Called after all tokens of the document have been processed. */
  abstract void finish() throws IOException;
  /** Notification that a too-long term was skipped. */
  abstract void skippingLongTerm() throws IOException;
  /** Called once per field instance before its tokens are consumed. */
  abstract void start(Fieldable field);
  /** Called the first time a term is seen in the hash. */
  abstract void newTerm(RawPostingList p) throws IOException;
  /** Called for each subsequent occurrence of an already-seen term. */
  abstract void addTerm(RawPostingList p) throws IOException;
  /** Number of byte streams this consumer writes per unique term. */
  abstract int getStreamCount();
}
| zzh-simple-hr | Zlucene/src/java/org/apache/lucene/index/TermsHashConsumerPerField.java | Java | art | 1,541 |