Unnamed: 0
int64 0
6.7k
| func
stringlengths 12
89.6k
| target
bool 2
classes | project
stringlengths 45
151
|
|---|---|---|---|
5,700
|
{
    // Anonymous InitialStateFactory that ignores the branch and always hands
    // back the captured initialState (see Traversal.initialState(STATE)).
    // NOTE(review): this snippet is a truncated fragment of that factory —
    // the enclosing method declaration is not part of this row.
    @Override
    public STATE initialState( Path branch )
    {
        return initialState;
    }
};
| false
|
community_kernel_src_main_java_org_neo4j_kernel_Traversal.java
|
5,701
|
/**
 * Static catalog of convenience factory methods for the traversal framework:
 * {@link TraversalDescription}s, {@link Expander}/{@link PathExpander} creation,
 * branch ordering policies, side selector / collision policies and string
 * representations of {@link Path}s.
 *
 * @deprecated See {@link org.neo4j.graphdb.GraphDatabaseService#traversalDescription}
 * and {@link org.neo4j.graphdb.PathExpanders} for replacements.
 */
@Deprecated
public class Traversal
{
    /**
     * Creates a new {@link TraversalDescription} with default value for
     * everything so that it's OK to call
     * {@link TraversalDescription#traverse(org.neo4j.graphdb.Node)} without
     * modification. But it isn't a very useful traversal, instead you should
     * add rules and behaviors to it before traversing.
     *
     * @return a new {@link TraversalDescription} with default values.
     * @deprecated See {@link org.neo4j.graphdb.GraphDatabaseService#traversalDescription}
     */
    @Deprecated
    public static TraversalDescription description()
    {
        return new MonoDirectionalTraversalDescription();
    }

    /**
     * More convenient name than {@link #description()} when using static imports.
     * Does the same thing.
     *
     * @deprecated See {@link org.neo4j.graphdb.GraphDatabaseService#traversalDescription}
     */
    @Deprecated
    public static TraversalDescription traversal()
    {
        return new MonoDirectionalTraversalDescription();
    }

    /**
     * @deprecated See {@link org.neo4j.graphdb.GraphDatabaseService#traversalDescription}
     */
    @Deprecated
    public static TraversalDescription traversal( UniquenessFactory uniqueness )
    {
        return new MonoDirectionalTraversalDescription().uniqueness( uniqueness );
    }

    /**
     * @deprecated See {@link org.neo4j.graphdb.GraphDatabaseService#traversalDescription}
     */
    @Deprecated
    public static TraversalDescription traversal( UniquenessFactory uniqueness, Object optionalUniquenessParameter )
    {
        return new MonoDirectionalTraversalDescription().uniqueness( uniqueness, optionalUniquenessParameter );
    }

    /**
     * @deprecated See {@link org.neo4j.graphdb.GraphDatabaseService#bidirectionalTraversalDescription}
     */
    @Deprecated
    public static BidirectionalTraversalDescription bidirectionalTraversal()
    {
        return new BidirectionalTraversalDescriptionImpl();
    }

    /**
     * {@link InitialStateFactory} which always returns the supplied {@code initialState}.
     * @param initialState the initial state for a traversal branch.
     * @return an {@link InitialStateFactory} which always will return the supplied
     * {@code initialState}.
     *
     * @deprecated because InitialStateFactory is deprecated.
     */
    @Deprecated
    public static <STATE> InitialStateFactory<STATE> initialState( final STATE initialState )
    {
        return new InitialStateFactory<STATE>()
        {
            @Override
            public STATE initialState( Path branch )
            {
                // The branch is irrelevant; every branch starts with the same state.
                return initialState;
            }
        };
    }

    /**
     * Creates a new {@link RelationshipExpander} which is set to expand
     * relationships with {@code type} and {@code direction}.
     *
     * @param type the {@link RelationshipType} to expand.
     * @param dir the {@link Direction} to expand.
     * @return a new {@link RelationshipExpander}.
     *
     * @deprecated See {@link org.neo4j.graphdb.PathExpanders#forTypeAndDirection}
     */
    @Deprecated
    public static Expander expanderForTypes( RelationshipType type,
            Direction dir )
    {
        return StandardExpander.create( type, dir );
    }

    /**
     * Creates a new {@link PathExpander} which is set to expand
     * relationships with {@code type} and {@code direction}.
     *
     * @param type the {@link RelationshipType} to expand.
     * @param dir the {@link Direction} to expand.
     * @return a new {@link PathExpander}.
     *
     * @deprecated See {@link org.neo4j.graphdb.PathExpanders#forTypeAndDirection}
     */
    @Deprecated
    @SuppressWarnings( "unchecked" )
    public static <STATE> PathExpander<STATE> pathExpanderForTypes( RelationshipType type, Direction dir )
    {
        return StandardExpander.create( type, dir );
    }

    /**
     * Creates a new {@link RelationshipExpander} which is set to expand
     * relationships with {@code type} in any direction.
     *
     * @param type the {@link RelationshipType} to expand.
     * @return a new {@link RelationshipExpander}.
     *
     * @deprecated See {@link org.neo4j.graphdb.PathExpanders#forType}
     */
    @Deprecated
    public static Expander expanderForTypes( RelationshipType type )
    {
        return StandardExpander.create( type, Direction.BOTH );
    }

    /**
     * Creates a new {@link PathExpander} which is set to expand
     * relationships with {@code type} in any direction.
     *
     * @param type the {@link RelationshipType} to expand.
     * @return a new {@link PathExpander}.
     *
     * @deprecated See {@link org.neo4j.graphdb.PathExpanders#forType}
     */
    @Deprecated
    @SuppressWarnings( "unchecked" )
    public static <STATE> PathExpander<STATE> pathExpanderForTypes( RelationshipType type )
    {
        return StandardExpander.create( type, Direction.BOTH );
    }

    /**
     * Returns an empty {@link Expander} which, if not modified, will expand
     * all relationships when asked to expand a {@link Node}. Criteria
     * can be added to narrow the {@link Expansion}.
     * @return an empty {@link Expander} which, if not modified, will expand
     * all relationship for {@link Node}s.
     *
     * @deprecated See {@link org.neo4j.graphdb.PathExpanders#allTypesAndDirections}
     */
    @Deprecated
    public static Expander emptyExpander()
    {
        return StandardExpander.DEFAULT; // TODO: should this be a PROPER empty?
    }

    /**
     * Returns an empty {@link PathExpander} which, if not modified, will expand
     * all relationships when asked to expand a {@link Node}. Criteria
     * can be added to narrow the {@link Expansion}.
     * @return an empty {@link PathExpander} which, if not modified, will expand
     * all relationship for {@link Path}s.
     *
     * @deprecated See {@link org.neo4j.graphdb.PathExpanders#allTypesAndDirections}
     */
    @Deprecated
    @SuppressWarnings( "unchecked" )
    public static <STATE> PathExpander<STATE> emptyPathExpander()
    {
        return StandardExpander.DEFAULT; // TODO: should this be a PROPER empty?
    }

    /**
     * Creates a new {@link RelationshipExpander} which is set to expand
     * relationships with two different types and directions.
     *
     * @param type1 a {@link RelationshipType} to expand.
     * @param dir1 a {@link Direction} to expand.
     * @param type2 another {@link RelationshipType} to expand.
     * @param dir2 another {@link Direction} to expand.
     * @return a new {@link RelationshipExpander}.
     *
     * @deprecated See {@link org.neo4j.graphdb.PathExpanders#forTypesAndDirections}
     */
    @Deprecated
    public static Expander expanderForTypes( RelationshipType type1,
            Direction dir1, RelationshipType type2, Direction dir2 )
    {
        return StandardExpander.create( type1, dir1, type2, dir2 );
    }

    /**
     * Creates a new {@link PathExpander} which is set to expand
     * relationships with two different types and directions.
     *
     * @param type1 a {@link RelationshipType} to expand.
     * @param dir1 a {@link Direction} to expand.
     * @param type2 another {@link RelationshipType} to expand.
     * @param dir2 another {@link Direction} to expand.
     * @return a new {@link PathExpander}.
     *
     * @deprecated See {@link org.neo4j.graphdb.PathExpanders#forTypesAndDirections}
     */
    @Deprecated
    @SuppressWarnings( "unchecked" )
    public static <STATE> PathExpander<STATE> pathExpanderForTypes( RelationshipType type1,
            Direction dir1, RelationshipType type2, Direction dir2 )
    {
        return StandardExpander.create( type1, dir1, type2, dir2 );
    }

    /**
     * Creates a new {@link RelationshipExpander} which is set to expand
     * relationships with multiple types and directions.
     *
     * @param type1 a {@link RelationshipType} to expand.
     * @param dir1 a {@link Direction} to expand.
     * @param type2 another {@link RelationshipType} to expand.
     * @param dir2 another {@link Direction} to expand.
     * @param more additional pairs of type/direction to expand.
     * @return a new {@link RelationshipExpander}.
     *
     * @deprecated See {@link org.neo4j.graphdb.PathExpanders#forTypesAndDirections}
     */
    @Deprecated
    public static Expander expanderForTypes( RelationshipType type1,
            Direction dir1, RelationshipType type2, Direction dir2,
            Object... more )
    {
        return StandardExpander.create( type1, dir1, type2, dir2, more );
    }

    /**
     * Creates a new {@link PathExpander} which is set to expand
     * relationships with multiple types and directions.
     *
     * @param type1 a {@link RelationshipType} to expand.
     * @param dir1 a {@link Direction} to expand.
     * @param type2 another {@link RelationshipType} to expand.
     * @param dir2 another {@link Direction} to expand.
     * @param more additional pairs of type/direction to expand.
     * @return a new {@link PathExpander}.
     *
     * @deprecated See {@link org.neo4j.graphdb.PathExpanders#forTypesAndDirections}
     */
    @Deprecated
    @SuppressWarnings( "unchecked" )
    public static <STATE> PathExpander<STATE> pathExpanderForTypes( RelationshipType type1,
            Direction dir1, RelationshipType type2, Direction dir2,
            Object... more )
    {
        return StandardExpander.create( type1, dir1, type2, dir2, more );
    }

    /**
     * Returns a {@link RelationshipExpander} which expands relationships
     * of all types and directions.
     * @return a relationship expander which expands all relationships.
     *
     * @deprecated See {@link org.neo4j.graphdb.PathExpanders#allTypesAndDirections}
     */
    @Deprecated
    public static Expander expanderForAllTypes()
    {
        return expanderForAllTypes( Direction.BOTH );
    }

    /**
     * Returns a {@link PathExpander} which expands relationships
     * of all types and directions.
     * @return a path expander which expands all relationships.
     *
     * @deprecated See {@link org.neo4j.graphdb.PathExpanders#allTypesAndDirections}
     */
    @Deprecated
    public static <STATE> PathExpander<STATE> pathExpanderForAllTypes()
    {
        return pathExpanderForAllTypes( Direction.BOTH );
    }

    /**
     * Returns a {@link RelationshipExpander} which expands relationships
     * of all types in the given {@code direction}.
     * @return a relationship expander which expands all relationships in
     * the given {@code direction}.
     *
     * @deprecated See {@link org.neo4j.graphdb.PathExpanders#forDirection}
     */
    @Deprecated
    public static Expander expanderForAllTypes( Direction direction )
    {
        return StandardExpander.create( direction );
    }

    /**
     * Returns a {@link PathExpander} which expands relationships
     * of all types in the given {@code direction}.
     * @return a path expander which expands all relationships in
     * the given {@code direction}.
     *
     * @deprecated See {@link org.neo4j.graphdb.PathExpanders#forDirection}
     */
    @Deprecated
    @SuppressWarnings( "unchecked" )
    public static <STATE> PathExpander<STATE> pathExpanderForAllTypes( Direction direction )
    {
        return StandardExpander.create( direction );
    }

    /**
     * Returns a {@link PathExpander} wrapped as an {@link Expander}, or the
     * given instance itself if it already is an {@link Expander}.
     * @param expander {@link PathExpander} to wrap.
     * @return the given {@code expander} as an {@link Expander}.
     *
     * @deprecated because {@link Expander} is deprecated.
     */
    @Deprecated
    public static Expander expander( PathExpander expander )
    {
        if ( expander instanceof Expander )
        {
            return (Expander) expander;
        }
        return StandardExpander.wrap( expander );
    }

    /**
     * Returns a {@link RelationshipExpander} wrapped as an {@link Expander}.
     * @param expander {@link RelationshipExpander} to wrap.
     * @return a {@link RelationshipExpander} wrapped as an {@link Expander}.
     *
     * @deprecated because {@link Expander} is deprecated.
     */
    @Deprecated
    public static Expander expander( RelationshipExpander expander )
    {
        if ( expander instanceof Expander )
        {
            return (Expander) expander;
        }
        return StandardExpander.wrap( expander );
    }

    /**
     * Returns a "preorder depth first" ordering policy. A depth first selector
     * always tries to select positions (from the current position) which are
     * deeper than the current position.
     *
     * @return a {@link BranchOrderingPolicy} for a preorder depth first
     * selector.
     *
     * @deprecated See {@link org.neo4j.graphdb.traversal.BranchOrderingPolicies#PREORDER_DEPTH_FIRST}
     */
    @Deprecated
    public static BranchOrderingPolicy preorderDepthFirst()
    {
        return CommonBranchOrdering.PREORDER_DEPTH_FIRST;
    }

    /**
     * Returns a "postorder depth first" ordering policy. A depth first selector
     * always tries to select positions (from the current position) which are
     * deeper than the current position. A postorder depth first selector
     * selects deeper position before the shallower ones.
     *
     * @return a {@link BranchOrderingPolicy} for a postorder depth first
     * selector.
     *
     * @deprecated See {@link org.neo4j.graphdb.traversal.BranchOrderingPolicies#POSTORDER_DEPTH_FIRST}
     */
    @Deprecated
    public static BranchOrderingPolicy postorderDepthFirst()
    {
        return CommonBranchOrdering.POSTORDER_DEPTH_FIRST;
    }

    /**
     * Returns a "preorder breadth first" ordering policy. A breadth first
     * selector always selects all positions on the current depth before
     * advancing to the next depth.
     *
     * @return a {@link BranchOrderingPolicy} for a preorder breadth first
     * selector.
     *
     * @deprecated See {@link org.neo4j.graphdb.traversal.BranchOrderingPolicies#PREORDER_BREADTH_FIRST}
     */
    @Deprecated
    public static BranchOrderingPolicy preorderBreadthFirst()
    {
        return CommonBranchOrdering.PREORDER_BREADTH_FIRST;
    }

    /**
     * Returns a "postorder breadth first" ordering policy. A breadth first
     * selector always selects all positions on the current depth before
     * advancing to the next depth. A postorder breadth first selector selects
     * the levels in the reversed order, starting with the deepest.
     *
     * @return a {@link BranchOrderingPolicy} for a postorder breadth first
     * selector.
     *
     * @deprecated See {@link org.neo4j.graphdb.traversal.BranchOrderingPolicies#POSTORDER_BREADTH_FIRST}
     */
    @Deprecated
    public static BranchOrderingPolicy postorderBreadthFirst()
    {
        return CommonBranchOrdering.POSTORDER_BREADTH_FIRST;
    }

    /**
     * @deprecated See {@link org.neo4j.graphdb.traversal.SideSelectorPolicies#ALTERNATING}
     */
    @Deprecated
    public static SideSelectorPolicy alternatingSelectorOrdering()
    {
        return SideSelectorPolicies.ALTERNATING;
    }

    /**
     * @deprecated See {@link org.neo4j.graphdb.traversal.SideSelectorPolicies#LEVEL}
     */
    @Deprecated
    public static SideSelectorPolicy levelSelectorOrdering()
    {
        return SideSelectorPolicies.LEVEL;
    }

    /**
     * @deprecated See {@link org.neo4j.graphdb.traversal.BranchCollisionPolicies#SHORTEST_PATH}
     */
    @Deprecated
    public static BranchCollisionDetector shortestPathsCollisionDetector( int maxDepth )
    {
        return new ShortestPathsBranchCollisionDetector( Evaluators.toDepth( maxDepth ) );
    }

    /**
     * Provides hooks to help build a string representation of a {@link Path}.
     * @param <T> the type of {@link Path}.
     */
    public interface PathDescriptor<T extends Path>
    {
        /**
         * Returns a string representation of a {@link Node}.
         * @param path the {@link Path} we're building a string representation
         * from.
         * @param node the {@link Node} to return a string representation of.
         * @return a string representation of a {@link Node}.
         */
        String nodeRepresentation( T path, Node node );

        /**
         * Returns a string representation of a {@link Relationship}.
         * @param path the {@link Path} we're building a string representation
         * from.
         * @param from the previous {@link Node} in the path.
         * @param relationship the {@link Relationship} to return a string
         * representation of.
         * @return a string representation of a {@link Relationship}.
         */
        String relationshipRepresentation( T path, Node from,
                Relationship relationship );
    }

    /**
     * The default {@link PathDescriptor} used in common toString()
     * representations in classes implementing {@link Path}.
     * @param <T> the type of {@link Path}.
     */
    public static class DefaultPathDescriptor<T extends Path> implements PathDescriptor<T>
    {
        @Override
        public String nodeRepresentation( Path path, Node node )
        {
            return "(" + node.getId() + ")";
        }

        @Override
        public String relationshipRepresentation( Path path,
                Node from, Relationship relationship )
        {
            // Point the arrow away from 'from': "<--" when traversed against
            // the relationship's direction, "-->" when traversed along it.
            String prefix = "--", suffix = "--";
            if ( from.equals( relationship.getEndNode() ) )
            {
                prefix = "<--";
            }
            else
            {
                suffix = "-->";
            }
            return prefix + "[" + relationship.getType().name() + "," +
                    relationship.getId() + "]" + suffix;
        }
    }

    /**
     * Method for building a string representation of a {@link Path}, using
     * the given {@code builder}.
     * @param <T> the type of {@link Path}.
     * @param path the {@link Path} to build a string representation of.
     * @param builder the {@link PathDescriptor} to get
     * {@link Node} and {@link Relationship} representations from.
     * @return a string representation of a {@link Path}.
     */
    public static <T extends Path> String pathToString( T path, PathDescriptor<T> builder )
    {
        // Walk the relationships from the start node, alternating node and
        // relationship representations, and finish with the last node.
        Node current = path.startNode();
        StringBuilder result = new StringBuilder();
        for ( Relationship rel : path.relationships() )
        {
            result.append( builder.nodeRepresentation( path, current ) );
            result.append( builder.relationshipRepresentation( path, current, rel ) );
            current = rel.getOtherNode( current );
        }
        result.append( builder.nodeRepresentation( path, current ) );
        return result.toString();
    }

    /**
     * TODO: This method re-binds nodes and relationships. It should not.
     *
     * Returns the default string representation of a {@link Path}. It uses
     * the {@link DefaultPathDescriptor} to get representations.
     * @param path the {@link Path} to build a string representation of.
     * @return the default string representation of a {@link Path}.
     */
    public static String defaultPathToString( Path path )
    {
        return pathToString( path, new DefaultPathDescriptor<Path>() );
    }

    /**
     * Returns a quite simple string representation of a {@link Path}. It
     * doesn't print relationship types or ids, just directions.
     * @param path the {@link Path} to build a string representation of.
     * @return a quite simple representation of a {@link Path}.
     */
    public static String simplePathToString( Path path )
    {
        return pathToString( path, new DefaultPathDescriptor<Path>()
        {
            @Override
            public String relationshipRepresentation( Path path, Node from,
                    Relationship relationship )
            {
                return relationship.getStartNode().equals( from ) ? "-->" : "<--";
            }
        } );
    }

    /**
     * Returns a quite simple string representation of a {@link Path}. It
     * doesn't print relationship types or ids, just directions. it uses the
     * {@code nodePropertyKey} to try to display that property value as in the
     * node representation instead of the node id. If that property doesn't
     * exist, the id is used.
     * @param path the {@link Path} to build a string representation of.
     * @return a quite simple representation of a {@link Path}.
     */
    public static String simplePathToString( Path path, final String nodePropertyKey )
    {
        return pathToString( path, new DefaultPathDescriptor<Path>()
        {
            @Override
            public String nodeRepresentation( Path path, Node node )
            {
                // Fall back to the node id when the property is absent.
                return "(" + node.getProperty( nodePropertyKey, node.getId() ) + ")";
            }

            @Override
            public String relationshipRepresentation( Path path, Node from,
                    Relationship relationship )
            {
                return relationship.getStartNode().equals( from ) ? "-->" : "<--";
            }
        } );
    }

    /**
     * Creates a new, empty {@link PathDescription}.
     * @return a new {@link PathDescription}.
     *
     * @deprecated because this class is deprecated.
     */
    @Deprecated
    public static PathDescription path()
    {
        return new PathDescription();
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_Traversal.java
|
5,702
|
public class TransactionLifecycleTest
{
    @Rule
    public DatabaseRule database = new ImpermanentDatabaseRule();

    /**
     * Once {@link Transaction#failure()} has been called, a later call to
     * {@link Transaction#success()} must not resurrect the transaction —
     * it should simply be ignored and the transaction rolled back on finish.
     */
    @Test
    public void givenACallToFailATransactionSubsequentSuccessCallsShouldBeSwallowedSilently()
    {
        GraphDatabaseService db = database.getGraphDatabaseService();
        Transaction transaction = db.beginTx();
        try
        {
            // Do some work, mark the transaction as failed, then try to
            // mark it successful again — the failure flag must win.
            db.createNode();
            transaction.failure();
            transaction.success();
        }
        finally
        {
            transaction.finish();
        }
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_TransactionLifecycleTest.java
|
5,703
|
@Deprecated
public class TransactionInterceptorProviders
{
    private final Iterable<TransactionInterceptorProvider> providers;
    private final DependencyResolver resolver;
    private final Config config;

    /**
     * @param providers the available {@link TransactionInterceptorProvider}s to build chains from.
     * @param resolver used to create interceptors and to look up the {@link Config}.
     */
    public TransactionInterceptorProviders( Iterable<TransactionInterceptorProvider> providers, DependencyResolver
            resolver )
    {
        this.providers = providers;
        this.resolver = resolver;
        config = resolver.resolveDependency( Config.class );
    }

    /**
     * A utility method that given some TransactionInterceptorProviders and
     * their configuration objects returns a fully resolved chain of
     * TransactionInterceptors - the return object is the first interceptor
     * in the chain.
     *
     * @param ds The datasource to instantiate the TransactionInterceptors with
     * @return The first interceptor in the chain, possibly null
     */
    public TransactionInterceptor resolveChain( XaDataSource ds )
    {
        TransactionInterceptor first = null;
        for ( TransactionInterceptorProvider provider : providers )
        {
            String prov = getConfigForInterceptor( provider );
            if ( first == null )
            {
                first = provider.create( ds, prov, resolver );
            }
            else
            {
                // Chain subsequent interceptors in front of the current head;
                // a provider may decline (return null), in which case it is skipped.
                TransactionInterceptor temp = provider.create( first, ds,
                        prov, resolver );
                if ( temp != null )
                {
                    first = temp;
                }
            }
        }
        return first;
    }

    /**
     * @return whether committing transactions should be intercepted, i.e. the
     * setting is enabled and at least one provider is available.
     */
    public boolean shouldInterceptCommitting()
    {
        return config.get( GraphDatabaseSettings.intercept_committing_transactions ) && providers.iterator().hasNext();
    }

    /**
     * @return whether deserialized transactions should be intercepted, i.e. the
     * setting is enabled and at least one provider is available.
     */
    public boolean shouldInterceptDeserialized()
    {
        return config.get( GraphDatabaseSettings.intercept_deserialized_transactions ) && providers.iterator().hasNext();
    }

    /**
     * @return {@code true} if at least one provider has configuration present.
     */
    public boolean hasAnyInterceptorConfigured()
    {
        for ( TransactionInterceptorProvider provider : providers )
        {
            if ( getConfigForInterceptor( provider ) != null )
            {
                return true;
            }
        }
        return false;
    }

    /**
     * Looks up the configuration value for a provider; keys are of the form
     * {@code TransactionInterceptorProvider.<providerName>}.
     */
    private String getConfigForInterceptor( TransactionInterceptorProvider provider )
    {
        return config.getParams().get(
                TransactionInterceptorProvider.class.getSimpleName() + "." + provider.name() );
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_TransactionInterceptorProviders.java
|
5,704
|
/**
 * Immutable pairing of a registered {@link TransactionEventHandler} with the
 * state object associated with it for the current transaction.
 */
public static class HandlerAndState
{
    // NOTE(review): the field uses the raw TransactionEventHandler type while
    // the constructor takes TransactionEventHandler<?> — consider
    // parameterizing the field as well (verify against the enclosing class's
    // uses of this field first).
    private final TransactionEventHandler handler;
    private final Object state;

    /**
     * @param handler the transaction event handler.
     * @param state the state value to associate with {@code handler}.
     */
    public HandlerAndState( TransactionEventHandler<?> handler, Object state )
    {
        this.handler = handler;
        this.state = state;
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_TransactionEventHandlers.java
|
5,705
|
/**
 * {@link TransactionBuilder} bound to a database and a {@link ForceMode};
 * {@link #begin()} starts a transaction with that mode, and {@link #unforced()}
 * derives a builder using {@link ForceMode#unforced}.
 */
@Deprecated
class TransactionBuilderImpl implements TransactionBuilder
{
    private final InternalAbstractGraphDatabase database;
    private final ForceMode mode;

    TransactionBuilderImpl( InternalAbstractGraphDatabase database, ForceMode mode )
    {
        this.database = database;
        this.mode = mode;
    }

    @Override
    public Transaction begin()
    {
        return database.beginTx( mode );
    }

    @Override
    public TransactionBuilder unforced()
    {
        // Same database, but transactions will not force writes to disk.
        return new TransactionBuilderImpl( database, ForceMode.unforced );
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_TransactionBuilderImpl.java
|
5,706
|
// Tests for the execution Guard: it must be absent unless explicitly enabled,
// count primitive graph operations, and abort work when an operation budget
// or timeout is exceeded.
@SuppressWarnings("deprecation"/*getGuard() is deprecated (GraphDatabaseAPI), and used all throughout this test*/)
public class TestGuard
{
    // With execution_guard_enabled not set, no Guard is registered, so
    // resolving it from the dependency resolver must fail.
    @Test( expected = IllegalArgumentException.class )
    public void testGuardNotInsertedByDefault()
    {
        GraphDatabaseAPI db = (GraphDatabaseAPI) new TestGraphDatabaseFactory().newImpermanentDatabase();
        try
        {
            getGuard( db );
        }
        finally
        {
            db.shutdown();
        }
    }

    // With execution_guard_enabled set, the Guard must be resolvable.
    @Test
    public void testGuardInsertedByDefault()
    {
        GraphDatabaseAPI db = (GraphDatabaseAPI) new TestGraphDatabaseFactory().
                newImpermanentDatabaseBuilder().
                setConfig( GraphDatabaseSettings.execution_guard_enabled, Settings.TRUE ).
                newGraphDatabase();
        assertNotNull( getGuard( db ) );
        db.shutdown();
    }

    // Verifies the per-operation counts reported by the guard for node
    // creation, node lookup, relationship creation and traversal.
    @Test
    public void testGuardOnDifferentGraphOps()
    {
        GraphDatabaseAPI db = (GraphDatabaseAPI) new TestGraphDatabaseFactory().
                newImpermanentDatabaseBuilder().
                setConfig( GraphDatabaseSettings.execution_guard_enabled, Settings.TRUE ).
                newGraphDatabase();
        try ( Transaction ignored = db.beginTx() )
        {
            // Three node creations count as three operations.
            getGuard( db ).startOperationsCount( MAX_VALUE );
            db.createNode();
            db.createNode();
            db.createNode();
            Guard.OperationsCount ops1 = getGuard( db ).stop();
            assertEquals( 3, ops1.getOpsCount() );

            // Three node lookups by id also count as three operations.
            getGuard( db ).startOperationsCount( MAX_VALUE );
            Node n0 = db.getNodeById( 0 );
            Node n1 = db.getNodeById( 1 );
            db.getNodeById( 2 );
            Guard.OperationsCount ops2 = getGuard( db ).stop();
            assertEquals( 3, ops2.getOpsCount() );

            // One relationship creation counts as two operations
            // (apparently one per endpoint — asserted, not derivable here).
            getGuard( db ).startOperationsCount( MAX_VALUE );
            n0.createRelationshipTo( n1, withName( "REL" ));
            Guard.OperationsCount ops3 = getGuard( db ).stop();
            assertEquals( 2, ops3.getOpsCount() );

            // A breadth-first traversal over the two-node graph counts
            // three operations.
            getGuard( db ).startOperationsCount( MAX_VALUE );
            for ( Path position : Traversal.description().breadthFirst().relationships( withName( "REL" ) ).traverse( n0 ) )
            {
                ignore( position );
            }
            Guard.OperationsCount ops4 = getGuard( db ).stop();
            assertEquals( 3, ops4.getOpsCount() );
        }
        db.shutdown();
    }

    // Exceeding the configured operation budget must abort with
    // GuardOperationsCountException on the operation past the limit.
    @Test
    public void testOpsCountGuardFail()
    {
        GraphDatabaseAPI db = (GraphDatabaseAPI) new TestGraphDatabaseFactory().
                newImpermanentDatabaseBuilder().
                setConfig( GraphDatabaseSettings.execution_guard_enabled, Settings.TRUE ).
                newGraphDatabase();
        Guard guard = getGuard( db );
        guard.startOperationsCount( 2 );
        try ( Transaction ignored = db.beginTx() )
        {
            db.createNode();
            db.createNode();
            try
            {
                // Third operation exceeds the budget of 2.
                db.createNode();
                fail();
            } catch ( GuardOperationsCountException e )
            {
                // expected
            }
        }
        db.shutdown();
    }

    // Work attempted after the guard's timeout has elapsed must abort
    // with GuardTimeoutException.
    @Test
    public void testTimeoutGuardFail() throws InterruptedException
    {
        GraphDatabaseAPI db = (GraphDatabaseAPI) new TestGraphDatabaseFactory().
                newImpermanentDatabaseBuilder().
                setConfig( GraphDatabaseSettings.execution_guard_enabled, Settings.TRUE ).
                newGraphDatabase();
        // 50 ms timeout, then sleep past it before attempting work.
        db.getDependencyResolver().resolveDependency( Guard.class ).startTimeout( 50 );
        try ( Transaction ignore = db.beginTx() )
        {
            sleep( 100 );
            try
            {
                db.createNode();
                fail( "Expected guard to stop this" );
            }
            catch ( GuardTimeoutException e )
            {
                // expected
            }
        }
        db.shutdown();
    }

    // Work completed well within the timeout must not be interrupted.
    @Test
    public void testTimeoutGuardPass()
    {
        GraphDatabaseAPI db = (GraphDatabaseAPI) new TestGraphDatabaseFactory().
                newImpermanentDatabaseBuilder().
                setConfig( GraphDatabaseSettings.execution_guard_enabled, Settings.TRUE ).
                newGraphDatabase();
        int timeout = 1000;
        getGuard( db ).startTimeout( timeout );
        try ( Transaction ignored = db.beginTx() )
        {
            db.createNode(); // This should not throw
        }
        db.shutdown();
    }

    // Resolves the Guard from the database's dependency resolver; throws
    // IllegalArgumentException when no guard is registered.
    private Guard getGuard( GraphDatabaseAPI db )
    {
        return db.getDependencyResolver().resolveDependency( Guard.class );
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_TestGuard.java
|
5,707
|
/**
 * Records whether success() and/or failure() were called on a transaction.
 * failure() is sticky: once called, {@link #canCommit()} is false regardless
 * of any success() calls.
 */
static class TransactionOutcome
{
    private boolean success = false;
    private boolean failure = false;

    /** Marks the transaction as failed; cannot be undone. */
    public void failed()
    {
        failure = true;
    }

    /** Marks the transaction as successful (overridden by any failure). */
    public void success()
    {
        success = true;
    }

    /** @return true only if success() was called and failure() never was. */
    public boolean canCommit()
    {
        return success && !failure;
    }

    /** @return whether success() has been called. */
    public boolean successCalled()
    {
        return success;
    }

    /** @return whether failure() has been called. */
    public boolean failureCalled()
    {
        return failure;
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_TopLevelTransaction.java
|
5,708
|
/**
 * A top-level {@link Transaction} backed by a JTA transaction obtained from the
 * {@link AbstractTransactionManager}. The success/failure outcome is tracked in
 * a {@link TransactionOutcome}; on {@link #close()} the underlying JTA
 * transaction is committed only when success was called and failure never was,
 * otherwise it is rolled back.
 */
@Deprecated
public class TopLevelTransaction implements Transaction
{
    /**
     * Records whether success() and/or failure() were called; failure is
     * sticky and prevents commit.
     */
    static class TransactionOutcome
    {
        private boolean success = false;
        private boolean failure = false;

        public void failed()
        {
            failure = true;
        }

        public void success()
        {
            success = true;
        }

        public boolean canCommit()
        {
            return success && !failure;
        }

        public boolean successCalled()
        {
            return success;
        }

        public boolean failureCalled()
        {
            return failure;
        }
    }

    private final PersistenceManager persistenceManager;
    private final AbstractTransactionManager transactionManager;
    protected final TransactionOutcome transactionOutcome = new TransactionOutcome();
    private final TransactionState state;

    public TopLevelTransaction( PersistenceManager persistenceManager, AbstractTransactionManager transactionManager,
                                TransactionState state )
    {
        this.persistenceManager = persistenceManager;
        this.transactionManager = transactionManager;
        this.state = state;
    }

    @Override
    public void failure()
    {
        // Record the failure and immediately mark the JTA transaction
        // rollback-only so nothing else can commit it.
        transactionOutcome.failed();
        markAsRollbackOnly();
    }

    protected void markAsRollbackOnly()
    {
        try
        {
            transactionManager.getTransaction().setRollbackOnly();
        }
        catch ( Exception e )
        {
            throw new TransactionFailureException(
                    "Failed to mark transaction as rollback only.", e );
        }
    }

    @Override
    public void success()
    {
        transactionOutcome.success();
    }

    @Override
    public final void finish()
    {
        // Deprecated entry point; delegates to close().
        close();
    }

    @Override
    public void close()
    {
        try
        {
            javax.transaction.Transaction transaction = transactionManager.getTransaction();
            // No current JTA transaction (e.g. already completed) means
            // there is nothing to commit or roll back.
            if ( transaction != null )
            {
                if ( transactionOutcome.canCommit() )
                {
                    transaction.commit();
                }
                else
                {
                    transaction.rollback();
                }
            }
        }
        catch ( RollbackException e )
        {
            throw new TransactionFailureException( "Unable to commit transaction", e );
        }
        catch ( Exception e )
        {
            // Word the error according to what the caller asked for:
            // a commit failure if success() was called, otherwise a
            // rollback failure.
            if ( transactionOutcome.successCalled() )
            {
                throw new TransactionFailureException( "Unable to commit transaction", e );
            }
            else
            {
                throw new TransactionFailureException( "Unable to rollback transaction", e );
            }
        }
    }

    @Override
    public Lock acquireWriteLock( PropertyContainer entity )
    {
        // Make sure the kernel is enlisted in this transaction before
        // taking locks through the transaction state.
        persistenceManager.ensureKernelIsEnlisted();
        return state.acquireWriteLock( entity );
    }

    @Override
    public Lock acquireReadLock( PropertyContainer entity )
    {
        persistenceManager.ensureKernelIsEnlisted();
        return state.acquireReadLock( entity );
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_TopLevelTransaction.java
|
5,709
|
public class TestVersion
{
    /**
     * The build must stamp a non-empty kernel revision into {@link Version}.
     */
    @Test
    public void canGetKernelRevision() throws Exception
    {
        String revision = Version.getKernelRevision();
        assertFalse( "Kernel revision not specified", "".equals( revision ) );
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_TestVersion.java
|
5,710
|
public class TestTraversal
{
    private static RelationshipType T1 = withName( "T1" );
    private static RelationshipType T2 = withName( "T2" );
    private static RelationshipType T3 = withName( "T3" );

    /**
     * The varargs overload of {@link Traversal#expanderForTypes} must accept
     * more than two type/direction pairs.
     */
    @Test
    public void canCreateExpanderWithMultipleTypesAndDirections()
    {
        assertNotNull( Traversal.expanderForTypes( T1, INCOMING, T2,
                OUTGOING, T3, BOTH ) );
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_TestTraversal.java
|
5,711
|
{
    // Anonymous TransactionEventHandler<Void> that bumps the "counter"
    // property on the captured root node once per commit that created at
    // least one relationship.
    // NOTE(review): this snippet is a truncated fragment — the enclosing
    // registerTransactionEventHandler(...) call is not part of this row.
    @SuppressWarnings( "boxing" )
    @Override
    public Void beforeCommit( TransactionData data ) throws Exception
    {
        // TODO Hmm, makes me think... should we really call transaction event handlers
        // for these relationship type / property index transactions?
        if ( count( data.createdRelationships() ) == 0 )
            return null;
        // Remove-then-set increments the counter within the same commit.
        root.setProperty( "counter", ( (Long) root.removeProperty( "counter" ) ) + 1 );
        return null;
    }

    @Override
    public void afterCommit( TransactionData data, Void state )
    {
        // nothing
    }

    @Override
    public void afterRollback( TransactionData data, Void state )
    {
        // nothing
    }
} );
| false
|
community_kernel_src_test_java_org_neo4j_kernel_TestTransactionEventDeadlocks.java
|
5,712
|
// Regression test: a transaction event handler that writes to the graph in
// beforeCommit() must not deadlock with the implicit transaction that creates
// a new relationship type.
public class TestTransactionEventDeadlocks
{
    @Rule
    public DatabaseRule database = new ImpermanentDatabaseRule();

    @Test
    public void canAvoidDeadlockThatWouldHappenIfTheRelationshipTypeCreationTransactionModifiedData() throws Exception
    {
        GraphDatabaseService graphdb = database.getGraphDatabaseService();

        // Set up a root node with a counter property in its own transaction.
        Transaction tx = graphdb.beginTx();
        final Node root = graphdb.createNode();
        try
        {
            root.setProperty( "counter", Long.valueOf( 0L ) );
            tx.success();
        }
        finally
        {
            tx.finish();
        }

        // Handler increments root's counter whenever a commit created
        // relationships; it must not fire (or deadlock) for the internal
        // relationship-type creation transaction.
        graphdb.registerTransactionEventHandler( new TransactionEventHandler<Void>()
        {
            @SuppressWarnings( "boxing" )
            @Override
            public Void beforeCommit( TransactionData data ) throws Exception
            {
                // TODO Hmm, makes me think... should we really call transaction event handlers
                // for these relationship type / property index transactions?
                if ( count( data.createdRelationships() ) == 0 )
                    return null;
                root.setProperty( "counter", ( (Long) root.removeProperty( "counter" ) ) + 1 );
                return null;
            }

            @Override
            public void afterCommit( TransactionData data, Void state )
            {
                // nothing
            }

            @Override
            public void afterRollback( TransactionData data, Void state )
            {
                // nothing
            }
        } );

        // Create a relationship with a brand-new type ("TEST"), which forces
        // the internal relationship-type creation transaction to run.
        tx = graphdb.beginTx();
        try
        {
            root.setProperty( "state", "not broken yet" );
            root.createRelationshipTo( graphdb.createNode(), DynamicRelationshipType.withName( "TEST" ) );
            root.removeProperty( "state" );
            tx.success();
        }
        finally
        {
            tx.finish();
        }

        // Exactly one relationship-creating commit happened, so the handler
        // must have incremented the counter exactly once.
        assertThat( root, inTx( graphdb, hasProperty( "counter" ).withValue( 1L ) ) );
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_TestTransactionEventDeadlocks.java
|
5,713
|
// Tests for PlaceboTransaction: a nested "transaction" that participates in an
// outer (top-level) transaction. Its failure() must mark the real transaction
// rollback-only, success() must leave the real transaction alone, and lock
// acquisition must go through the shared TransactionState.
public class TestPlaceboTransaction
{
    private AbstractTransactionManager mockTxManager;
    private Transaction mockTopLevelTx;
    private PlaceboTransaction placeboTx;
    private TransactionState state;
    private PropertyContainer resource;
    private PersistenceManager persistenceManager;

    @Before
    public void before() throws Exception
    {
        // Mock the transaction manager so the placebo sees a controllable
        // top-level JTA transaction.
        mockTxManager = mock( AbstractTransactionManager.class );
        mockTopLevelTx = mock( Transaction.class );
        when( mockTxManager.getTransaction() ).thenReturn( mockTopLevelTx );
        state = mock( TransactionState.class );
        persistenceManager = mock( PersistenceManager.class );
        placeboTx = new PlaceboTransaction( persistenceManager,mockTxManager, state );
        resource = mock( PropertyContainer.class );
    }

    // Finishing without an explicit success() must roll back the parent.
    @Test
    public void shouldRollbackParentByDefault() throws SystemException
    {
        // When
        placeboTx.finish();

        // Then
        verify( mockTopLevelTx ).setRollbackOnly();
    }

    @Test
    public void shouldRollbackParentIfFailureCalled() throws SystemException
    {
        // When
        placeboTx.failure();
        placeboTx.finish();

        // Then
        verify( mockTopLevelTx ).setRollbackOnly();
    }

    // success() means the placebo must not touch the parent at all —
    // committing is the top-level transaction's job.
    @Test
    public void shouldNotRollbackParentIfSuccessCalled() throws SystemException
    {
        // When
        placeboTx.success();
        placeboTx.finish();

        // Then
        verifyNoMoreInteractions( mockTopLevelTx );
    }

    // failure() is sticky: a later success() cannot undo it.
    @Test
    public void successCannotOverrideFailure() throws Exception
    {
        // When
        placeboTx.failure();
        placeboTx.success();
        placeboTx.finish();

        // Then
        verify( mockTopLevelTx ).setRollbackOnly();
    }

    @Test
    public void canAcquireReadLock() throws Exception
    {
        // when
        placeboTx.acquireReadLock( resource );

        // then
        verify( persistenceManager ).ensureKernelIsEnlisted();
        verify( state ).acquireReadLock( resource );
    }

    @Test
    public void canAcquireWriteLock() throws Exception
    {
        // when
        placeboTx.acquireWriteLock( resource );

        // then
        verify( persistenceManager ).ensureKernelIsEnlisted();
        verify( state ).acquireWriteLock( resource );
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_TestPlaceboTransaction.java
|
5,714
|
public final class TestKernelExtension extends KernelExtensionFactoryContractTest
{
    public TestKernelExtension()
    {
        super( DummyExtensionFactory.EXTENSION_ID, DummyExtensionFactory.class );
    }

    /**
     * Check that lifecycle status of extension is STARTED
     */
    @Test
    public void shouldBeStarted() throws Exception
    {
        GraphDatabaseAPI graphdb = graphdb( "graphdb", 0 );
        try
        {
            assertEquals( LifecycleStatus.STARTED, dummyExtension( graphdb ).getStatus() );
        }
        finally
        {
            graphdb.shutdown();
        }
    }

    /**
     * Check that dependencies can be accessed
     */
    @Test
    public void dependenciesCanBeRetrieved() throws Exception
    {
        GraphDatabaseAPI graphdb = graphdb( "graphdb", 0 );
        try
        {
            Config config = graphdb.getDependencyResolver().resolveDependency( Config.class );
            assertEquals( config, dummyExtension( graphdb ).getDependencies().getConfig() );
        }
        finally
        {
            graphdb.shutdown();
        }
    }

    /**
     * Check that lifecycle status of extension is SHUTDOWN
     */
    @Test
    public void shouldBeShutdown() throws Exception
    {
        GraphDatabaseAPI graphdb = graphdb( "graphdb", 0 );
        graphdb.shutdown();
        assertEquals( LifecycleStatus.SHUTDOWN, dummyExtension( graphdb ).getStatus() );
    }

    // Resolves the DummyExtension instance registered with the given database.
    private static DummyExtension dummyExtension( GraphDatabaseAPI graphdb )
    {
        return graphdb.getDependencyResolver()
                .resolveDependency( KernelExtensions.class )
                .resolveDependency( DummyExtension.class );
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_TestKernelExtension.java
|
5,715
|
{
@Override
public void run()
{
count.incrementAndGet();
}
};
| false
|
community_kernel_src_test_java_org_neo4j_kernel_CannedFileSystemAbstraction.java
|
5,716
|
/**
 * Test double for {@link FileSystemAbstraction} with canned answers: file
 * existence, store-dir creation failure, lock-file open failure and lock
 * success are all fixed at construction time. Channels opened through it are
 * empty and most mutating operations are unsupported.
 */
public class CannedFileSystemAbstraction implements FileSystemAbstraction
{
    /** No-op close callback. Made final so the shared constant cannot be reassigned. */
    public static final Runnable NOTHING = new Runnable()
    {
        @Override
        public void run()
        {
        }
    };

    /**
     * @param count incremented once per invocation
     * @return a close callback that counts how many times it has been run
     */
    public static Runnable callCounter( final AtomicInteger count )
    {
        return new Runnable()
        {
            @Override
            public void run()
            {
                count.incrementAndGet();
            }
        };
    }

    private final boolean fileExists;              // canned answer for fileExists(File)
    private final IOException cannotCreateStoreDir; // thrown from mkdirs(File) when non-null
    private final IOException cannotOpenLockFile;   // thrown from open(File,String) when non-null
    private final boolean lockSuccess;              // whether tryLock(...) succeeds
    private final Runnable onClose;                 // invoked when the canned channel is closed

    public CannedFileSystemAbstraction( boolean fileExists,
                                        IOException cannotCreateStoreDir,
                                        IOException cannotOpenLockFile,
                                        boolean lockSuccess,
                                        Runnable onClose )
    {
        this.fileExists = fileExists;
        this.cannotCreateStoreDir = cannotCreateStoreDir;
        this.cannotOpenLockFile = cannotOpenLockFile;
        this.lockSuccess = lockSuccess;
        this.onClose = onClose;
    }

    /**
     * Returns the shared empty channel, or throws the canned exception.
     * The {@code mode} argument is ignored.
     */
    @Override
    public StoreChannel open( File fileName, String mode ) throws IOException
    {
        if ( cannotOpenLockFile != null )
        {
            throw cannotOpenLockFile;
        }
        return emptyFileChannel;
    }

    // A zero-length channel: reads return 0, writes/positions beyond 0 are
    // unsupported, close() triggers the configured onClose callback.
    private final StoreChannel emptyFileChannel = new AbstractStoreChannel()
    {
        @Override
        public int read( ByteBuffer dst ) throws IOException
        {
            return 0;
        }

        @Override
        public long read( ByteBuffer[] dsts, int offset, int length ) throws IOException
        {
            return 0;
        }

        @Override
        public long position() throws IOException
        {
            return 0;
        }

        @Override
        public StoreChannel position( long newPosition ) throws IOException
        {
            if ( newPosition != 0 )
            {
                throw unsupported();
            }
            return this;
        }

        @Override
        public long size() throws IOException
        {
            return 0;
        }

        @Override
        public StoreChannel truncate( long size ) throws IOException
        {
            if ( size != 0 )
            {
                throw unsupported();
            }
            return this;
        }

        @Override
        public void force( boolean metaData ) throws IOException
        {
        }

        @Override
        public int read( ByteBuffer dst, long position ) throws IOException
        {
            return 0;
        }

        @Override
        public int write( ByteBuffer src, long position ) throws IOException
        {
            if ( position != 0 )
            {
                throw unsupported();
            }
            return 0;
        }

        @Override
        public void close() throws IOException
        {
            onClose.run();
        }

        private IOException unsupported()
        {
            return new IOException( "Unsupported" );
        }
    };

    @Override
    public OutputStream openAsOutputStream( File fileName, boolean append ) throws IOException
    {
        throw new UnsupportedOperationException( "TODO" );
    }

    @Override
    public InputStream openAsInputStream( File fileName ) throws IOException
    {
        throw new UnsupportedOperationException( "TODO" );
    }

    @Override
    public Reader openAsReader( File fileName, String encoding ) throws IOException
    {
        throw new UnsupportedOperationException( "TODO" );
    }

    @Override
    public Writer openAsWriter( File fileName, String encoding, boolean append ) throws IOException
    {
        throw new UnsupportedOperationException( "TODO" );
    }

    /**
     * Succeeds with a symbolic (no-op) lock, or fails with an IOException,
     * depending on the canned {@code lockSuccess} flag.
     */
    @Override
    public FileLock tryLock( File fileName, StoreChannel channel ) throws IOException
    {
        if ( !lockSuccess )
        {
            throw new IOException( "Unable to create lock file " + fileName );
        }
        return SYMBOLIC_FILE_LOCK;
    }

    @Override
    public StoreChannel create( File fileName ) throws IOException
    {
        throw new UnsupportedOperationException( "TODO" );
    }

    @Override
    public boolean fileExists( File fileName )
    {
        // Same canned answer for every path.
        return fileExists;
    }

    @Override
    public boolean mkdir( File fileName )
    {
        return false;
    }

    @Override
    public void mkdirs( File fileName ) throws IOException
    {
        if ( cannotCreateStoreDir != null )
        {
            throw cannotCreateStoreDir;
        }
    }

    @Override
    public long getFileSize( File fileName )
    {
        throw new UnsupportedOperationException( "TODO" );
    }

    @Override
    public boolean deleteFile( File fileName )
    {
        throw new UnsupportedOperationException( "TODO" );
    }

    @Override
    public void deleteRecursively( File directory ) throws IOException
    {
    }

    @Override
    public boolean renameFile( File from, File to ) throws IOException
    {
        throw new UnsupportedOperationException( "TODO" );
    }

    @Override
    public boolean isDirectory( File file )
    {
        return false;
    }

    @Override
    public File[] listFiles( File directory )
    {
        return new File[0];
    }

    @Override
    public void moveToDirectory( File file, File toDirectory ) throws IOException
    {
        throw new UnsupportedOperationException( "TODO" );
    }

    @Override
    public void copyFile( File file, File toDirectory ) throws IOException
    {
        throw new UnsupportedOperationException( "TODO" );
    }

    @Override
    public void copyRecursively( File fromDirectory, File toDirectory ) throws IOException
    {
        throw new UnsupportedOperationException( "TODO" );
    }

    @Override
    public <K extends ThirdPartyFileSystem> K getOrCreateThirdPartyFileSystem( Class<K> clazz, Function<Class<K>, K>
            creator )
    {
        throw new UnsupportedOperationException( "not implemented" );
    }

    // A lock object whose release() does nothing.
    private static final FileLock SYMBOLIC_FILE_LOCK = new FileLock()
    {
        @Override
        public void release() throws IOException
        {
        }
    };
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_CannedFileSystemAbstraction.java
|
5,717
|
{
@Override
protected Indicator newIndicator( String process )
{
return Indicator.NONE;
}
@Override
protected Indicator.OpenEnded newOpenEndedIndicator( String process, int resolution )
{
return Indicator.NONE;
}
};
| false
|
community_kernel_src_main_java_org_neo4j_helpers_progress_ProgressMonitorFactory.java
|
5,718
|
/**
 * Log command recording the addition of a key/value pair for one entity to a
 * Lucene index. Fields ({@code indexId}, {@code entityId}, {@code key},
 * {@code value}) are inherited from {@code LuceneCommand}.
 */
static class AddCommand extends LuceneCommand
{
    AddCommand( IndexIdentifier indexId, byte entityType, Object entityId, String key, Object value )
    {
        super( indexId, entityType, entityId, key, value, ADD_COMMAND );
    }

    /**
     * Applies this command: writes the key/value into the entity's Lucene
     * document (creating the document if needed) and invalidates any cached
     * results for this key/value in the data source.
     */
    @Override
    void perform( CommitContext context )
    {
        context.ensureWriterInstantiated();
        context.indexType.addToDocument( context.getDocument( entityId, true ).document, key, value );
        context.dataSource.invalidateCache( context.identifier, key, value );
    }

    // Counts as a normal write for the purposes of the commit machinery.
    @Override
    public boolean isConsideredNormalWriteCommand()
    {
        return true;
    }

    @Override
    public String toString()
    {
        return "Add[" + indexId + "," + entityId + "," + key + "," + value + "]";
    }
}
| false
|
community_lucene-index_src_main_java_org_neo4j_index_impl_lucene_LuceneCommand.java
|
5,719
|
{
@Override
public Document newDocument( Object entityId )
{
RelationshipId relId = null;
if ( entityId instanceof Long )
{
BatchRelationship relationship = inserter
.getRelationshipById( (Long) entityId );
relId = new RelationshipId( relationship.getId(), relationship.getStartNode(),
relationship.getEndNode() );
}
else if ( entityId instanceof RelationshipId )
{
relId = (RelationshipId) entityId;
}
else
{
throw new IllegalArgumentException( "Ids of type " + entityId.getClass()
+ " are not supported." );
}
Document doc = IndexType.newBaseDocument( relId.id );
doc.add( new Field( LuceneIndex.KEY_START_NODE_ID, "" + relId.startNode,
Store.YES, org.apache.lucene.document.Field.Index.NOT_ANALYZED ) );
doc.add( new Field( LuceneIndex.KEY_END_NODE_ID, "" + relId.endNode,
Store.YES, org.apache.lucene.document.Field.Index.NOT_ANALYZED ) );
return doc;
}
@Override
public Class<? extends PropertyContainer> getType()
{
return Relationship.class;
}
};
| false
|
community_lucene-index_src_main_java_org_neo4j_index_impl_lucene_LuceneBatchInserterIndexProviderNewImpl.java
|
5,720
|
{
@Override
public Document newDocument( Object entityId )
{
return IndexType.newBaseDocument( (Long) entityId );
}
@Override
public Class<? extends PropertyContainer> getType()
{
return Node.class;
}
};
| false
|
community_lucene-index_src_main_java_org_neo4j_index_impl_lucene_LuceneBatchInserterIndexProviderNewImpl.java
|
5,721
|
/**
 * Lucene-backed {@link BatchInserterIndexProvider} for the batch inserter.
 * Lazily creates and caches one {@link LuceneBatchInserterIndex} per index
 * identifier, and supplies document factories for nodes and relationships.
 * Not thread-safe — intended for single-threaded batch insertion.
 */
public class LuceneBatchInserterIndexProviderNewImpl implements BatchInserterIndexProvider
{
    private final BatchInserter inserter;
    // One index instance per identifier, created on first use in index(...).
    private final Map<IndexIdentifier, LuceneBatchInserterIndex> indexes =
            new HashMap<IndexIdentifier, LuceneBatchInserterIndex>();
    final IndexStore indexStore;
    final EntityType nodeEntityType;
    final EntityType relationshipEntityType;

    public LuceneBatchInserterIndexProviderNewImpl( final BatchInserter inserter )
    {
        this.inserter = inserter;
        this.indexStore = ((BatchInserterImpl) inserter).getIndexStore();
        // Node documents carry only the entity id.
        this.nodeEntityType = new EntityType()
        {
            @Override
            public Document newDocument( Object entityId )
            {
                return IndexType.newBaseDocument( (Long) entityId );
            }

            @Override
            public Class<? extends PropertyContainer> getType()
            {
                return Node.class;
            }
        };
        // Relationship documents additionally carry start and end node ids,
        // looked up from the inserter when only a Long id is given.
        this.relationshipEntityType = new EntityType()
        {
            @Override
            public Document newDocument( Object entityId )
            {
                RelationshipId relId = null;
                if ( entityId instanceof Long )
                {
                    BatchRelationship relationship = inserter
                            .getRelationshipById( (Long) entityId );
                    relId = new RelationshipId( relationship.getId(), relationship.getStartNode(),
                            relationship.getEndNode() );
                }
                else if ( entityId instanceof RelationshipId )
                {
                    relId = (RelationshipId) entityId;
                }
                else
                {
                    throw new IllegalArgumentException( "Ids of type " + entityId.getClass()
                                                        + " are not supported." );
                }
                Document doc = IndexType.newBaseDocument( relId.id );
                doc.add( new Field( LuceneIndex.KEY_START_NODE_ID, "" + relId.startNode,
                        Store.YES, org.apache.lucene.document.Field.Index.NOT_ANALYZED ) );
                doc.add( new Field( LuceneIndex.KEY_END_NODE_ID, "" + relId.endNode,
                        Store.YES, org.apache.lucene.document.Field.Index.NOT_ANALYZED ) );
                return doc;
            }

            @Override
            public Class<? extends PropertyContainer> getType()
            {
                return Relationship.class;
            }
        };
    }

    /** Returns (creating if necessary) the node index with the given name. */
    @Override
    public BatchInserterIndex nodeIndex( String indexName, Map<String, String> config )
    {
        config( Node.class, indexName, config );
        return index( new IndexIdentifier( LuceneCommand.NODE, nodeEntityType, indexName ), config );
    }

    /**
     * Persists the given config (forcing the Lucene provider) for the index,
     * or loads the stored config when none is given.
     */
    private Map<String, String> config( Class<? extends PropertyContainer> cls,
                                        String indexName, Map<String, String> config )
    {
        // TODO Doesn't look right
        if ( config != null )
        {
            config = MapUtil.stringMap( new HashMap<String, String>( config ),
                    IndexManager.PROVIDER, LuceneIndexImplementation.SERVICE_NAME );
            indexStore.setIfNecessary( cls, indexName, config );
            return config;
        }
        else
        {
            return indexStore.get( cls, indexName );
        }
    }

    /** Returns (creating if necessary) the relationship index with the given name. */
    @Override
    public BatchInserterIndex relationshipIndex( String indexName, Map<String, String> config )
    {
        config( Relationship.class, indexName, config );
        return index( new IndexIdentifier( LuceneCommand.RELATIONSHIP, relationshipEntityType, indexName ), config );
    }

    private BatchInserterIndex index( IndexIdentifier identifier, Map<String, String> config )
    {
        // We don't care about threads here... c'mon... it's a
        // single-threaded batch inserter
        LuceneBatchInserterIndex index = indexes.get( identifier );
        if ( index == null )
        {
            index = new LuceneBatchInserterIndex( new File(inserter.getStoreDir()),
                    identifier,
                    config );
            indexes.put( identifier, index );
        }
        return index;
    }

    /** Shuts down every index created through this provider. */
    @Override
    public void shutdown()
    {
        for ( LuceneBatchInserterIndex index : indexes.values() )
        {
            index.shutdown();
        }
    }
}
| false
|
community_lucene-index_src_main_java_org_neo4j_index_impl_lucene_LuceneBatchInserterIndexProviderNewImpl.java
|
5,722
|
{
private final Collection<Long> ids = new ArrayList<Long>();
@Override
protected Long fetchNextOrNull()
{
Long result = super.fetchNextOrNull();
if ( result != null )
{
ids.add( result );
}
return result;
}
@Override
protected void endReached()
{
super.endReached();
addToCache( ids, key, value );
}
};
| false
|
community_lucene-index_src_main_java_org_neo4j_index_impl_lucene_LuceneBatchInserterIndex.java
|
5,723
|
/**
 * {@link BatchInserterIndex} backed by a single Lucene index directory.
 * Maintains an {@link IndexWriter} for additions, a lazily-refreshed
 * {@link IndexSearcher} for queries, and an optional per-key LRU cache of
 * value → entity-id lookups. Single-threaded use only.
 */
class LuceneBatchInserterIndex implements BatchInserterIndex
{
    private final IndexIdentifier identifier;
    private final IndexType type;
    private IndexWriter writer;
    // Set by flush(); makes searcher() reopen its reader on next use.
    private boolean writerModified;
    private IndexSearcher searcher;
    // True if the index directory didn't exist before this session; enables
    // populating the lookup cache directly on add().
    private final boolean createdNow;
    // key -> (value-string -> ids). Only keys registered via
    // setCacheCapacity() are cached; null until first registration.
    private Map<String, LruCache<String, Collection<Long>>> cache;
    private int updateCount;
    private int commitBatchSize = 500000;

    LuceneBatchInserterIndex( File dbStoreDir,
                              IndexIdentifier identifier, Map<String, String> config )
    {
        File storeDir = getStoreDir( dbStoreDir );
        this.createdNow = !LuceneDataSource.getFileDirectory( storeDir, identifier ).exists();
        this.identifier = identifier;
        this.type = IndexType.getIndexType( identifier, config );
        this.writer = instantiateWriter( storeDir );
    }

    /**
     * Sets the number of modifications that will be the threshold for a commit
     * to happen. This will free up memory.
     *
     * @param size the threshold for triggering a commit.
     */
    public void setCommitBatchSize( int size )
    {
        this.commitBatchSize = size;
    }

    /**
     * Indexes all given properties for the entity. Commits the writer every
     * {@code commitBatchSize} additions to cap memory use.
     */
    @Override
    public void add( long entityId, Map<String, Object> properties )
    {
        try
        {
            Document document = identifier.entityType.newDocument( entityId );
            for ( Map.Entry<String, Object> entry : properties.entrySet() )
            {
                String key = entry.getKey();
                Object value = entry.getValue();
                addSingleProperty(entityId, document, key, value);
            }
            writer.addDocument( document );
            if ( ++updateCount == commitBatchSize )
            {
                writer.commit();
                updateCount = 0;
            }
        }
        catch ( IOException e )
        {
            throw new RuntimeException( e );
        }
    }

    // Adds one property (possibly an array of values) to the document,
    // unwrapping ValueContext where present.
    private void addSingleProperty( long entityId, Document document, String key, Object value ) {
        for ( Object oneValue : IoPrimitiveUtils.asArray(value) )
        {
            boolean isValueContext = oneValue instanceof ValueContext;
            oneValue = isValueContext ? ((ValueContext) oneValue).getCorrectValue() : oneValue.toString();
            type.addToDocument( document, key, oneValue );
            if ( createdNow )
            {
                // If we know that the index was created this session
                // then we can go ahead and add stuff to the cache directly
                // when adding to the index.
                addToCache( entityId, key, oneValue );
            }
        }
    }

    // Records entityId under key/value in the lookup cache, if that key is cached.
    private void addToCache( long entityId, String key, Object value )
    {
        if ( this.cache == null )
        {
            return;
        }

        String valueAsString = value.toString();
        LruCache<String, Collection<Long>> cache = this.cache.get( key );
        if ( cache != null )
        {
            Collection<Long> ids = cache.get( valueAsString );
            if ( ids == null )
            {
                ids = new HashSet<Long>();
                cache.put( valueAsString, ids );
            }
            ids.add( entityId );
        }
    }

    // Replaces the cached id collection for key/value wholesale (used after a
    // full query result has been consumed).
    private void addToCache( Collection<Long> ids, String key, Object value )
    {
        if ( this.cache == null )
        {
            return;
        }

        String valueAsString = value.toString();
        LruCache<String, Collection<Long>> cache = this.cache.get( key );
        if ( cache != null )
        {
            cache.put( valueAsString, ids );
        }
    }

    // Returns cached hits for key/value, or null on cache miss / uncached key.
    // Score is Float.NaN since cached results carry no relevance information.
    private IndexHits<Long> getFromCache( String key, Object value )
    {
        if ( this.cache == null )
        {
            return null;
        }

        String valueAsString = value.toString();
        LruCache<String, Collection<Long>> cache = this.cache.get( key );
        if ( cache != null )
        {
            Collection<Long> ids = cache.get( valueAsString );
            if ( ids != null )
            {
                return new ConstantScoreIterator<Long>( ids, Float.NaN );
            }
        }
        return null;
    }

    /**
     * Deletes any existing document for the entity (and evicts its cached
     * entries), then re-adds it with the given properties.
     */
    @Override
    public void updateOrAdd( long entityId, Map<String, Object> properties )
    {
        try
        {
            removeFromCache( entityId );
            writer.deleteDocuments( type.idTermQuery( entityId ) );
            add( entityId, properties );
        }
        catch ( IOException e )
        {
            throw new RuntimeException( e );
        }
    }

    // Looks up the entity's current document and evicts every field value it
    // holds from the lookup cache.
    private void removeFromCache( long entityId ) throws IOException, CorruptIndexException
    {
        IndexSearcher searcher = searcher();
        Query query = type.idTermQuery( entityId );
        TopDocs docs = searcher.search( query, 1 );
        if ( docs.totalHits > 0 )
        {
            Document document = searcher.doc( docs.scoreDocs[0].doc );
            for ( Fieldable field : document.getFields() )
            {
                String key = field.name();
                Object value = field.stringValue();
                removeFromCache( entityId, key, value );
            }
        }
    }

    // Removes a single entityId from the cached ids for key/value, if cached.
    private void removeFromCache( long entityId, String key, Object value )
    {
        if ( this.cache == null )
        {
            return;
        }

        String valueAsString = value.toString();
        LruCache<String, Collection<Long>> cache = this.cache.get( key );
        if ( cache != null )
        {
            Collection<Long> ids = cache.get( valueAsString );
            if ( ids != null )
            {
                ids.remove( entityId );
            }
        }
    }

    // Opens the IndexWriter for this index's directory using the type's
    // analyzer and a heap-derived RAM buffer size.
    private IndexWriter instantiateWriter( File directory )
    {
        try
        {
            IndexWriterConfig writerConfig = new IndexWriterConfig( LUCENE_VERSION, type.analyzer );
            writerConfig.setRAMBufferSizeMB( determineGoodBufferSize( writerConfig.getRAMBufferSizeMB() ) );
            IndexWriter writer = new IndexWriter( getDirectory( directory, identifier ), writerConfig );
            return writer;
        }
        catch ( IOException e )
        {
            throw new RuntimeException( e );
        }
    }

    // Heuristic: roughly 1/14th of max heap (in MB), at least `atLeast`,
    // capped at 700 MB.
    private double determineGoodBufferSize( double atLeast )
    {
        double heapHint = Runtime.getRuntime().maxMemory()/(1024*1024*14);
        double result = Math.max( atLeast, heapHint );
        return Math.min( result, 700 );
    }

    private void closeSearcher()
    {
        try
        {
            LuceneUtil.close( this.searcher );
        }
        finally
        {
            this.searcher = null;
        }
    }

    // Returns the searcher, (re)opening a near-real-time reader over the
    // writer when none exists yet or flush() marked the writer as modified.
    private IndexSearcher searcher()
    {
        IndexSearcher result = this.searcher;
        try
        {
            if ( result == null || writerModified )
            {
                if ( result != null )
                {
                    result.getIndexReader().close();
                    result.close();
                }
                IndexReader newReader = IndexReader.open( writer, true );
                result = new IndexSearcher( newReader );
                writerModified = false;
            }
            return result;
        }
        catch ( IOException e )
        {
            throw new RuntimeException( e );
        }
        finally
        {
            this.searcher = result;
        }
    }

    // Optimizes and closes the writer; writer is nulled even on failure.
    private void closeWriter()
    {
        try
        {
            if ( this.writer != null )
            {
                this.writer.optimize( true );
            }
            LuceneUtil.close( this.writer );
        }
        catch ( IOException e )
        {
            throw new RuntimeException( e );
        }
        finally
        {
            this.writer = null;
        }
    }

    // Runs the query; when the key participates in the lookup cache, the
    // returned iterator records all ids and stores them in the cache once
    // fully consumed.
    private IndexHits<Long> query( Query query, final String key, final Object value )
    {
        try
        {
            Hits hits = new Hits( searcher(), query, null );
            HitsIterator result = new HitsIterator( hits );
            if ( key == null || this.cache == null || !this.cache.containsKey( key ) )
            {
                return new DocToIdIterator( result, Collections.<Long>emptyList(), null );
            }
            else
            {
                return new DocToIdIterator( result, Collections.<Long>emptyList(), null )
                {
                    private final Collection<Long> ids = new ArrayList<Long>();

                    @Override
                    protected Long fetchNextOrNull()
                    {
                        Long result = super.fetchNextOrNull();
                        if ( result != null )
                        {
                            ids.add( result );
                        }
                        return result;
                    }

                    @Override
                    protected void endReached()
                    {
                        super.endReached();
                        addToCache( ids, key, value );
                    }
                };
            }
        }
        catch ( IOException e )
        {
            throw new RuntimeException( e );
        }
    }

    /** Exact lookup for key/value; served from the cache when possible. */
    @Override
    public IndexHits<Long> get( String key, Object value )
    {
        IndexHits<Long> cached = getFromCache( key, value );
        return cached != null ? cached : query( type.get( key, value ), key, value );
    }

    @Override
    public IndexHits<Long> query( String key, Object queryOrQueryObject )
    {
        return query( type.query( key, queryOrQueryObject, null ), null, null );
    }

    @Override
    public IndexHits<Long> query( Object queryOrQueryObject )
    {
        return query( type.query( null, queryOrQueryObject, null ), null, null );
    }

    /** Closes the searcher and (after optimizing) the writer. */
    public void shutdown()
    {
        closeSearcher();
        closeWriter();
    }

    // Resolves (and creates if missing) the "index" subdirectory of the store.
    private File getStoreDir( File dbStoreDir )
    {
        File dir = new File( dbStoreDir, "index" );
        if ( !dir.exists() && !dir.mkdirs() )
        {
            throw new RuntimeException( "Unable to create directory path["
                                        + dir.getAbsolutePath() + "] for Neo4j store." );
        }
        return dir;
    }

    /**
     * Marks the writer modified so that subsequent queries see recent
     * additions (the reader is actually reopened lazily in searcher()).
     */
    @Override
    public void flush()
    {
        writerModified = true;
    }

    /** Enables (or resizes) the lookup cache for the given key. */
    @Override
    public void setCacheCapacity( String key, int size )
    {
        if ( this.cache == null )
        {
            this.cache = new HashMap<String, LruCache<String,Collection<Long>>>();
        }
        LruCache<String, Collection<Long>> cache = this.cache.get( key );
        if ( cache != null )
        {
            cache.resize( size );
        }
        else
        {
            cache = new LruCache<String, Collection<Long>>( "Batch inserter cache for " + key, size );
            this.cache.put( key, cache );
        }
    }
}
| false
|
community_lucene-index_src_main_java_org_neo4j_index_impl_lucene_LuceneBatchInserterIndex.java
|
5,724
|
/**
 * Analyzer that treats the whole input as a single token (keyword) and
 * lower-cases it.
 */
public final class LowerCaseKeywordAnalyzer extends Analyzer
{
    @Override
    public TokenStream tokenStream( String fieldName, Reader reader )
    {
        // One token for the entire value, then lower-cased.
        KeywordTokenizer wholeValue = new KeywordTokenizer( reader );
        return new LowerCaseFilter( LuceneDataSource.LUCENE_VERSION, wholeValue );
    }

    @Override
    public String toString()
    {
        return getClass().getSimpleName();
    }
}
| false
|
community_lucene-index_src_main_java_org_neo4j_index_impl_lucene_LowerCaseKeywordAnalyzer.java
|
5,725
|
/**
 * Hamcrest matcher that matches an {@link Iterable} with no elements.
 * The last iterable passed to {@link #matchesSafely(Iterable)} is remembered
 * so its contents can appear in the description of a mismatch.
 */
public class IsEmpty extends TypeSafeMatcher<Iterable<?>>
{
    private Iterable<?> iterable;

    @Override
    public boolean matchesSafely( Iterable<?> iterable )
    {
        this.iterable = iterable;
        // Empty iff the iterator yields nothing.
        return !iterable.iterator().hasNext();
    }

    @Override // was missing: describeTo is declared by SelfDescribing
    public void describeTo( Description description )
    {
        description.appendValueList( "[", ",", "]", iterable );
    }

    /** @return a matcher accepting only empty iterables. */
    public static Matcher<Iterable<?>> isEmpty()
    {
        return new IsEmpty();
    }
}
| false
|
community_lucene-index_src_test_java_org_neo4j_index_impl_lucene_IsEmpty.java
|
5,726
|
{
@Override
public void run()
{
while ( true )
{
Transaction tx = db.beginTx();
try
{
for ( int i = 0; i < 100; i++ )
{
String key = keys[i%keys.length];
String value = values[i%values.length]+i;
Node node = db.createNode();
node.setProperty( key, value );
index.add( node, key, value );
}
tx.success();
}
finally
{
tx.finish();
}
}
}
}.start();
| false
|
community_lucene-index_src_test_java_org_neo4j_index_impl_lucene_Inserter.java
|
5,727
|
/**
 * Standalone load generator: starts five threads that insert and index nodes
 * forever, then drops a "started" marker file in the store directory.
 */
public class Inserter
{
    public static void main( String[] args ) throws IOException
    {
        String storePath = args[0];
        final GraphDatabaseService db = new GraphDatabaseFactory().newEmbeddedDatabase( storePath );
        final Index<Node> index = getIndex( db );
        final String[] keys = new String[] { "apoc", "zion", "morpheus" };
        final String[] values = new String[] { "hej", "yo", "something", "just a value", "anything" };
        for ( int thread = 0; thread < 5; thread++ )
        {
            new Thread()
            {
                @Override
                public void run()
                {
                    // Never terminates: keeps inserting batches of 100 nodes.
                    while ( true )
                    {
                        Transaction tx = db.beginTx();
                        try
                        {
                            for ( int i = 0; i < 100; i++ )
                            {
                                String key = keys[i % keys.length];
                                String value = values[i % values.length] + i;
                                Node node = db.createNode();
                                node.setProperty( key, value );
                                index.add( node, key, value );
                            }
                            tx.success();
                        }
                        finally
                        {
                            tx.finish();
                        }
                    }
                }
            }.start();
        }
        // Signal to observers that insertion is under way.
        new File( storePath, "started" ).createNewFile();
    }

    // Looks up the node index inside a transaction.
    private static Index<Node> getIndex( GraphDatabaseService db )
    {
        Transaction tx = db.beginTx();
        try
        {
            Index<Node> nodeIndex = db.index().forNodes( "myIndex" );
            tx.success();
            return nodeIndex;
        }
        finally
        {
            tx.finish();
        }
    }
}
| false
|
community_lucene-index_src_test_java_org_neo4j_index_impl_lucene_Inserter.java
|
5,728
|
/**
 * {@link LruCache} of {@link IndexWriter}s keyed by {@link IndexIdentifier}.
 * A writer evicted from the cache is closed, waiting for merges to finish.
 */
public class IndexWriterLruCache extends LruCache<IndexIdentifier, IndexWriter>
{
    /**
     * Creates a LRU cache. If {@code maxSize < 1} an
     * IllegalArgumentException is thrown.
     *
     * @param maxSize maximum size of this cache
     */
    public IndexWriterLruCache( int maxSize )
    {
        super( "IndexWriterCache", maxSize );
    }

    /**
     * Closes an evicted writer ({@code close( true )} waits for merges).
     * Any {@link IOException} is rethrown as an unchecked exception.
     */
    @Override
    public void elementCleaned( IndexWriter writer )
    {
        try
        {
            writer.close( true );
        }
        catch ( IOException e )
        {
            throw new RuntimeException( e );
        }
    }
}
| false
|
community_lucene-index_src_main_java_org_neo4j_index_impl_lucene_IndexWriterLruCache.java
|
5,729
|
/**
 * Caches the {@link IndexType} resolved for each index, keyed by identifier
 * and paired with the hash of the configuration it was built from, so a
 * changed configuration invalidates the cached type.
 */
class IndexTypeCache
{
    private final Map<IndexIdentifier, Pair<Integer, IndexType>> cache = Collections.synchronizedMap(
            new HashMap<IndexIdentifier, Pair<Integer, IndexType>>() );
    private final IndexStore indexStore;

    IndexTypeCache( IndexStore indexStore )
    {
        this.indexStore = indexStore;
    }

    /**
     * Returns the {@link IndexType} for the given index, reusing the cached
     * instance while the stored configuration is unchanged.
     *
     * @param recovery if {@code true}, an unknown index yields {@code null}
     *                 instead of an exception.
     * @throws IllegalArgumentException if the index has no stored
     *                                  configuration and not in recovery.
     */
    IndexType getIndexType( IndexIdentifier identifier, boolean recovery )
    {
        Pair<Integer, IndexType> type = cache.get( identifier );
        Map<String, String> config = indexStore.get( identifier.entityType.getType(), identifier.indexName );
        // Check for a missing config before touching it: previously
        // config.hashCode() was called first, throwing NPE when a type was
        // cached but its configuration had been removed.
        if ( config == null )
        {
            if ( recovery )
            {
                return null;
            }
            throw new IllegalArgumentException( "Unknown index " + identifier );
        }
        if ( type != null && config.hashCode() == type.first() )
        {
            return type.other();
        }
        type = Pair.of( config.hashCode(), IndexType.getIndexType( identifier, config ) );
        cache.put( identifier, type );
        return type.other();
    }

    /** Drops the cached type for the given index. */
    void invalidate( IndexIdentifier identifier )
    {
        cache.remove( identifier );
    }
}
| false
|
community_lucene-index_src_main_java_org_neo4j_index_impl_lucene_IndexTypeCache.java
|
5,730
|
/**
 * {@code IndexType} driven by a configurable analyzer/similarity (used for
 * fulltext and custom-analyzer indexes). Each key is stored twice: analyzed
 * under the key itself and unanalyzed under {@code key + "_e"} so exact
 * lookups and deletions remain possible.
 */
private static class CustomType extends IndexType
{
    private final Similarity similarity;

    CustomType( Analyzer analyzer, boolean toLowerCase, Similarity similarity )
    {
        super( analyzer, toLowerCase );
        this.similarity = similarity;
    }

    @Override
    Similarity getSimilarity()
    {
        return this.similarity;
    }

    // Matches the document of the given entity that has this exact key/value.
    @Override
    public Query deletionQuery( long entityId, String key, Object value )
    {
        BooleanQuery q = new BooleanQuery();
        q.add( idTermQuery( entityId ), Occur.MUST );
        q.add( new TermQuery( new Term( exactKey( key ), value.toString() ) ), Occur.MUST );
        return q;
    }

    @Override
    public Query get( String key, Object value )
    {
        // TODO we do value.toString() here since initially #addToDocument didn't
        // honor ValueContext, and changing it would mean changing store format.
        return new TermQuery( new Term( exactKey( key ), value.toString() ) );
    }

    // Field name used for the stored, unanalyzed copy of a key's value.
    private String exactKey( String key )
    {
        return key + "_e";
    }

    @Override
    public void addToDocument( Document document, String key, Object value )
    {
        // TODO We should honor ValueContext instead of doing value.toString() here.
        // if changing it, also change #get to honor ValueContext.
        document.add( new Field( exactKey( key ), value.toString(), Store.YES, Index.NOT_ANALYZED ) );
        document.add( instantiateField( key, value, Index.ANALYZED ) );
    }

    /**
     * Removes the given value for the key (or all values when {@code value}
     * is null) by clearing both fields and re-adding the remaining values.
     * No-op if the value wasn't present.
     */
    @Override
    void removeFieldsFromDocument( Document document, String key, Object value )
    {
        String exactKey = exactKey( key );
        Set<String> values = null;
        if ( value != null )
        {
            String stringValue = value.toString();
            values = new HashSet<>( Arrays.asList( document.getValues( exactKey ) ) );
            if ( !values.remove( stringValue ) )
            {
                return;
            }
        }
        document.removeFields( exactKey );
        document.removeFields( key );
        if ( value != null )
        {
            for ( String existingValue : values )
            {
                addToDocument( document, key, existingValue );
            }
        }
    }

    @Override
    public String toString()
    {
        return "FULLTEXT";
    }
};
| false
|
community_lucene-index_src_main_java_org_neo4j_index_impl_lucene_IndexType.java
|
5,731
|
{
@Override
public Query deletionQuery( long entityId, String key, Object value )
{
BooleanQuery q = new BooleanQuery();
q.add( idTermQuery( entityId ), Occur.MUST );
q.add( new TermQuery( new Term( key, value.toString() ) ), Occur.MUST );
return q;
}
@Override
public Query get( String key, Object value )
{
return queryForGet( key, value );
}
@Override
public void addToDocument( Document document, String key, Object value )
{
document.add( instantiateField( key, value, Index.NOT_ANALYZED ) );
}
@Override
void removeFieldsFromDocument( Document document, String key, Object value )
{
Set<String> values = null;
if ( value != null )
{
String stringValue = value.toString();
values = new HashSet<>( Arrays.asList(
document.getValues( key ) ) );
if ( !values.remove( stringValue ) )
{
return;
}
}
document.removeFields( key );
if ( value != null )
{
for ( String existingValue : values )
{
addToDocument( document, key, existingValue );
}
}
}
@Override
public String toString()
{
return "EXACT";
}
};
| false
|
community_lucene-index_src_main_java_org_neo4j_index_impl_lucene_IndexType.java
|
5,732
|
public abstract class IndexType
{
/**
 * Built-in "exact" index type: values are indexed unanalyzed with the
 * keyword analyzer, so lookups match whole values only.
 */
public static final IndexType EXACT = new IndexType( LuceneDataSource.KEYWORD_ANALYZER, false )
{
    // Matches the document of the given entity that has this exact key/value.
    @Override
    public Query deletionQuery( long entityId, String key, Object value )
    {
        BooleanQuery q = new BooleanQuery();
        q.add( idTermQuery( entityId ), Occur.MUST );
        q.add( new TermQuery( new Term( key, value.toString() ) ), Occur.MUST );
        return q;
    }

    @Override
    public Query get( String key, Object value )
    {
        return queryForGet( key, value );
    }

    @Override
    public void addToDocument( Document document, String key, Object value )
    {
        document.add( instantiateField( key, value, Index.NOT_ANALYZED ) );
    }

    /**
     * Removes the given value for the key (or all values when {@code value}
     * is null) by clearing the field and re-adding the remaining values.
     * No-op if the value wasn't present.
     */
    @Override
    void removeFieldsFromDocument( Document document, String key, Object value )
    {
        Set<String> values = null;
        if ( value != null )
        {
            String stringValue = value.toString();
            values = new HashSet<>( Arrays.asList(
                    document.getValues( key ) ) );
            if ( !values.remove( stringValue ) )
            {
                return;
            }
        }
        document.removeFields( key );
        if ( value != null )
        {
            for ( String existingValue : values )
            {
                addToDocument( document, key, existingValue );
            }
        }
    }

    @Override
    public String toString()
    {
        return "EXACT";
    }
};
/**
 * {@code IndexType} driven by a configurable analyzer/similarity (used for
 * fulltext and custom-analyzer indexes). Each key is stored twice: analyzed
 * under the key itself and unanalyzed under {@code key + "_e"} so exact
 * lookups and deletions remain possible.
 */
private static class CustomType extends IndexType
{
    private final Similarity similarity;

    CustomType( Analyzer analyzer, boolean toLowerCase, Similarity similarity )
    {
        super( analyzer, toLowerCase );
        this.similarity = similarity;
    }

    @Override
    Similarity getSimilarity()
    {
        return this.similarity;
    }

    // Matches the document of the given entity that has this exact key/value.
    @Override
    public Query deletionQuery( long entityId, String key, Object value )
    {
        BooleanQuery q = new BooleanQuery();
        q.add( idTermQuery( entityId ), Occur.MUST );
        q.add( new TermQuery( new Term( exactKey( key ), value.toString() ) ), Occur.MUST );
        return q;
    }

    @Override
    public Query get( String key, Object value )
    {
        // TODO we do value.toString() here since initially #addToDocument didn't
        // honor ValueContext, and changing it would mean changing store format.
        return new TermQuery( new Term( exactKey( key ), value.toString() ) );
    }

    // Field name used for the stored, unanalyzed copy of a key's value.
    private String exactKey( String key )
    {
        return key + "_e";
    }

    @Override
    public void addToDocument( Document document, String key, Object value )
    {
        // TODO We should honor ValueContext instead of doing value.toString() here.
        // if changing it, also change #get to honor ValueContext.
        document.add( new Field( exactKey( key ), value.toString(), Store.YES, Index.NOT_ANALYZED ) );
        document.add( instantiateField( key, value, Index.ANALYZED ) );
    }

    /**
     * Removes the given value for the key (or all values when {@code value}
     * is null) by clearing both fields and re-adding the remaining values.
     * No-op if the value wasn't present.
     */
    @Override
    void removeFieldsFromDocument( Document document, String key, Object value )
    {
        String exactKey = exactKey( key );
        Set<String> values = null;
        if ( value != null )
        {
            String stringValue = value.toString();
            values = new HashSet<>( Arrays.asList( document.getValues( exactKey ) ) );
            if ( !values.remove( stringValue ) )
            {
                return;
            }
        }
        document.removeFields( exactKey );
        document.removeFields( key );
        if ( value != null )
        {
            for ( String existingValue : values )
            {
                addToDocument( document, key, existingValue );
            }
        }
    }

    @Override
    public String toString()
    {
        return "FULLTEXT";
    }
};
// Analyzer used by this index type for indexing and querying.
final Analyzer analyzer;
// Whether values are lower-cased (relevant to the analyzer chosen).
private final boolean toLowerCase;

private IndexType( Analyzer analyzer, boolean toLowerCase )
{
    this.analyzer = analyzer;
    this.toLowerCase = toLowerCase;
}
/**
 * Resolves the {@link IndexType} for an index from its configuration map.
 * With {@code type=exact}, lower-casing defaults to false and the shared
 * EXACT instance is used unless lower-casing is requested. With
 * {@code type=fulltext}, lower-casing defaults to true and a configured
 * custom analyzer takes precedence. Without a {@code type}, a custom
 * analyzer class is required.
 *
 * @throws IllegalArgumentException if neither a type nor an analyzer is
 *                                  configured.
 */
static IndexType getIndexType( IndexIdentifier identifier, Map<String, String> config )
{
    String type = config.get( LuceneIndexImplementation.KEY_TYPE );
    IndexType result = null;
    Similarity similarity = getCustomSimilarity( config );
    // Tri-state: null when not configured, so each branch can apply its own default.
    Boolean toLowerCaseUnbiased = config.get( KEY_TO_LOWER_CASE ) != null ?
            parseBoolean( config.get( KEY_TO_LOWER_CASE ), true ) : null;
    Analyzer customAnalyzer = getCustomAnalyzer( config );
    if ( type != null )
    {
        // Use the built in alternatives... "exact" or "fulltext"
        if ( type.equals( "exact" ) )
        {
            // In the exact case we default to false
            boolean toLowerCase = TRUE.equals( toLowerCaseUnbiased ) ? true : false;
            result = toLowerCase ? new CustomType( new LowerCaseKeywordAnalyzer(), toLowerCase, similarity ) : EXACT;
        }
        else if ( type.equals( "fulltext" ) )
        {
            // In the fulltext case we default to true
            boolean toLowerCase = FALSE.equals( toLowerCaseUnbiased ) ? false : true;
            Analyzer analyzer = customAnalyzer;
            if ( analyzer == null )
            {
                analyzer = TRUE.equals( toLowerCase ) ? LuceneDataSource.LOWER_CASE_WHITESPACE_ANALYZER :
                           LuceneDataSource.WHITESPACE_ANALYZER;
            }
            result = new CustomType( analyzer, toLowerCase, similarity );
        }
    }
    else
    {
        // In the custom case we default to true
        boolean toLowerCase = FALSE.equals( toLowerCaseUnbiased ) ? false : true;
        // Use custom analyzer
        if ( customAnalyzer == null )
        {
            throw new IllegalArgumentException( "No 'type' was given (which can point out " +
                                                "built-in analyzers, such as 'exact' and 'fulltext')" +
                                                " and no 'analyzer' was given either (which can point out a custom " +
                                                Analyzer.class.getName() + " to use)" );
        }
        result = new CustomType( customAnalyzer, toLowerCase, similarity );
    }
    return result;
}
// Parses a boolean config value, falling back to the given default when unset.
private static boolean parseBoolean( String string, boolean valueIfNull )
{
    if ( string == null )
    {
        return valueIfNull;
    }
    return Boolean.parseBoolean( string );
}
// Instantiates an optional custom Similarity from config, or null if not configured.
private static Similarity getCustomSimilarity( Map<String, String> config )
{
    return getByClassName( config, LuceneIndexImplementation.KEY_SIMILARITY, Similarity.class );
}
// Instantiates an optional custom Analyzer from config, or null if not configured.
private static Analyzer getCustomAnalyzer( Map<String, String> config )
{
    return getByClassName( config, LuceneIndexImplementation.KEY_ANALYZER, Analyzer.class );
}
/**
 * Instantiates, via its public no-arg constructor, the class named under
 * {@code configKey} in {@code config}.
 *
 * @param config the index configuration map.
 * @param configKey the config key holding a fully qualified class name.
 * @param cls the expected supertype of the configured class.
 * @return a new instance, or {@code null} if the key isn't configured.
 * @throws RuntimeException if the class can't be loaded, isn't a subclass of
 *         {@code cls}, or can't be instantiated.
 */
private static <T> T getByClassName( Map<String, String> config, String configKey, Class<T> cls )
{
    String className = config.get( configKey );
    if ( className == null )
    {
        return null;
    }
    try
    {
        // getDeclaredConstructor().newInstance() rather than the problematic
        // Class.newInstance(), which propagates checked exceptions unchecked.
        return Class.forName( className ).asSubclass( cls ).getDeclaredConstructor().newInstance();
    }
    catch ( Exception e )
    {
        throw new RuntimeException( "Unable to instantiate '" + className + "' as " + cls.getName(), e );
    }
}
// Query used to delete the given (key,value) entry for an entity.
abstract Query deletionQuery( long entityId, String key, Object value );
// Query backing exact get(key, value) lookups.
abstract Query get( String key, Object value );
// Creates transaction-local index state; exact matching is the default.
TxData newTxData( LuceneIndex index )
{
    return new ExactTxData( index );
}
/**
 * Turns a user-supplied query — either a Lucene {@link Query} object or
 * anything whose toString() is Lucene query syntax — into a parsed query.
 */
Query query( String keyOrNull, Object value, QueryContext contextOrNull )
{
    // Already a Lucene Query object? Use it as-is.
    if ( value instanceof Query )
    {
        return (Query) value;
    }
    QueryParser queryParser = new QueryParser( Version.LUCENE_30, keyOrNull, analyzer );
    queryParser.setAllowLeadingWildcard( true );
    queryParser.setLowercaseExpandedTerms( toLowerCase );
    boolean hasDefaultOperator =
            contextOrNull != null && contextOrNull.getDefaultOperator() != null;
    if ( hasDefaultOperator )
    {
        queryParser.setDefaultOperator( contextOrNull.getDefaultOperator() );
    }
    try
    {
        return queryParser.parse( value.toString() );
    }
    catch ( ParseException e )
    {
        throw new RuntimeException( e );
    }
}
abstract void addToDocument( Document document, String key, Object value );
/**
 * Creates a Lucene field for a key/value pair: numbers become a NumericField
 * (so that range queries work), everything else a plain string field.
 */
public static Fieldable instantiateField( String key, Object value, Index analyzed )
{
    Fieldable result;
    if ( !(value instanceof Number) )
    {
        result = new Field( key, value.toString(), Store.YES, analyzed );
    }
    else
    {
        Number number = (Number) value;
        NumericField numeric = new NumericField( key, Store.YES, true );
        if ( value instanceof Long )
        {
            numeric.setLongValue( number.longValue() );
        }
        else if ( value instanceof Float )
        {
            numeric.setFloatValue( number.floatValue() );
        }
        else if ( value instanceof Double )
        {
            numeric.setDoubleValue( number.doubleValue() );
        }
        else
        {
            // Any other Number (Integer, Short, Byte, ...) is stored as an int.
            numeric.setIntValue( number.intValue() );
        }
        result = numeric;
    }
    result.setOmitNorms( true );
    return result;
}
/**
 * Removes values from the document; a null key AND null value means
 * "clear every user field".
 */
final void removeFromDocument( Document document, String key, Object value )
{
    boolean removeEverything = key == null && value == null;
    if ( removeEverything )
    {
        clearDocument( document );
        return;
    }
    removeFieldsFromDocument( document, key, value );
}
abstract void removeFieldsFromDocument( Document document, String key, Object value );
/**
 * Removes every field from the document except the internal entity-id field,
 * so the document still identifies its entity afterwards.
 */
private void clearDocument( Document document )
{
    // Diamond operator, consistent with the rest of the file.
    Set<String> names = new HashSet<>();
    for ( Fieldable field : document.getFields() )
    {
        names.add( field.name() );
    }
    // Never remove the id field.
    names.remove( LuceneIndex.KEY_DOC_ID );
    for ( String name : names )
    {
        document.removeFields( name );
    }
}
/**
 * Creates a document containing only the internal id field for the entity.
 */
public static Document newBaseDocument( long entityId )
{
    Document doc = new Document();
    Field idField = new Field( LuceneIndex.KEY_DOC_ID, String.valueOf( entityId ),
            Store.YES, Index.NOT_ANALYZED );
    doc.add( idField );
    return doc;
}
// Term that uniquely identifies the document for the given entity id.
public Term idTerm( long entityId )
{
    return new Term( LuceneIndex.KEY_DOC_ID, String.valueOf( entityId ) );
}
// Query matching exactly the document for the given entity id.
Query idTermQuery( long entityId )
{
    return new TermQuery( idTerm( entityId ) );
}
// Custom Similarity for this type; null means Lucene's default is used.
Similarity getSimilarity()
{
    return null;
}
/**
 * Builds the query backing get(key, value). A numeric value wrapped in a
 * ValueContext becomes a single-point range query, so it matches values
 * indexed as NumericField.
 */
Query queryForGet( String key, Object value )
{
    if ( value instanceof ValueContext )
    {
        Object realValue = ((ValueContext) value).getValue();
        if ( realValue instanceof Number )
        {
            Number number = (Number) realValue;
            return LuceneUtil.rangeQuery( key, number, number, true, true );
        }
    }
    Term term = new Term( key, value.toString() );
    return new TermQuery( term );
}
}
| false
|
community_lucene-index_src_main_java_org_neo4j_index_impl_lucene_IndexType.java
|
5,733
|
/**
 * LRU cache of index searchers keyed by index identifier. Entries evicted from
 * the cache have their underlying searcher and writer disposed.
 */
public class IndexSearcherLruCache extends LruCache<IndexIdentifier, Pair<IndexReference, AtomicBoolean>>
{
    /**
     * Creates a LRU cache. If {@code maxSize < 1} an
     * IllegalArgumentException is thrown.
     *
     * @param maxSize maximum size of this cache
     */
    public IndexSearcherLruCache( int maxSize )
    {
        super( "IndexSearcherCache", maxSize );
    }

    /**
     * Eviction callback: disposes the evicted reference, writer included.
     */
    @Override
    public void elementCleaned( Pair<IndexReference, AtomicBoolean> searcher )
    {
        try
        {
            searcher.first().dispose( true );
        }
        catch ( IOException e )
        {
            throw new RuntimeException( e );
        }
    }
}
| false
|
community_lucene-index_src_main_java_org_neo4j_index_impl_lucene_IndexSearcherLruCache.java
|
5,734
|
/**
 * Holds a Lucene searcher/writer pair for one index, with reference counting
 * so that clients iterating over results can keep the searcher alive after the
 * data source has replaced or dropped it.
 */
class IndexReference
{
    private final IndexIdentifier identifier;
    private final IndexWriter writer;
    private final IndexSearcher searcher;
    // Number of outstanding client references to the searcher.
    private final AtomicInteger refCount = new AtomicInteger( 0 );
    private boolean searcherIsClosed;
    private boolean writerIsClosed;

    /**
     * We need this because we only want to close the reader/searcher if
     * it has been detached... i.e. the {@link LuceneDataSource} no longer
     * has any reference to it, only an iterator out in the client has a ref.
     * And when that client calls close() it should be closed.
     */
    private volatile boolean detached;

    // Set when the underlying index has changed and the searcher needs refreshing.
    private final AtomicBoolean stale = new AtomicBoolean();

    public IndexReference( IndexIdentifier identifier, IndexSearcher searcher, IndexWriter writer )
    {
        this.identifier = identifier;
        this.searcher = searcher;
        this.writer = writer;
    }

    public IndexSearcher getSearcher()
    {
        return this.searcher;
    }

    public IndexWriter getWriter()
    {
        return writer;
    }

    public IndexIdentifier getIdentifier()
    {
        return identifier;
    }

    // Called when a client starts using the searcher.
    void incRef()
    {
        this.refCount.incrementAndGet();
    }

    /**
     * Closes the searcher (and its reader), and optionally the writer too.
     * Idempotent: already-closed resources are skipped.
     */
    public synchronized void dispose( boolean writerAlso ) throws IOException
    {
        if ( !searcherIsClosed )
        {
            searcher.close();
            searcher.getIndexReader().close();
            searcherIsClosed = true;
        }
        if ( writerAlso && !writerIsClosed )
        {
            writer.close();
            writerIsClosed = true;
        }
    }

    /**
     * Detaches this reference from the data source: closes immediately if no
     * client holds a reference, otherwise defers closing to the last close().
     */
    public /*synchronized externally*/ void detachOrClose() throws IOException
    {
        if ( this.refCount.get() == 0 )
        {
            dispose( false );
        }
        else
        {
            this.detached = true;
        }
    }

    /**
     * Releases one client reference; actually disposes the searcher only when
     * this was the last reference AND the data source has detached it.
     *
     * @return true if the searcher was really closed by this call (or already was).
     */
    synchronized boolean close()
    {
        try
        {
            if ( this.searcherIsClosed || this.refCount.get() == 0 )
            {
                return true;
            }
            boolean reallyClosed = false;
            if ( this.refCount.decrementAndGet() <= 0 && this.detached )
            {
                dispose( false );
                reallyClosed = true;
            }
            return reallyClosed;
        }
        catch ( IOException e )
        {
            throw new RuntimeException( e );
        }
    }

    /*synchronized externally*/ boolean isClosed()
    {
        return searcherIsClosed;
    }

    // Atomically reads and clears the stale flag.
    /*synchronized externally*/ boolean checkAndClearStale()
    {
        return stale.compareAndSet( true, false );
    }

    public synchronized void setStale()
    {
        stale.set( true );
    }
}
| false
|
community_lucene-index_src_main_java_org_neo4j_index_impl_lucene_IndexReference.java
|
5,735
|
/**
 * Immutable identity of an index: its name plus the entity type (node or
 * relationship) it indexes. Equality and hashing use only the name and the
 * entity-type byte.
 */
class IndexIdentifier
{
    final String indexName;
    final EntityType entityType;
    final byte entityTypeByte;
    // Cached — the identifier is immutable, so the hash never changes.
    private final int hashCode;

    public IndexIdentifier( byte entityTypeByte, EntityType entityType, String indexName )
    {
        this.entityTypeByte = entityTypeByte;
        this.entityType = entityType;
        this.indexName = indexName;
        this.hashCode = calculateHashCode();
    }

    @Override
    public boolean equals( Object o )
    {
        if ( o == null )
        {
            return false;
        }
        if ( !getClass().equals( o.getClass() ) )
        {
            return false;
        }
        IndexIdentifier other = (IndexIdentifier) o;
        boolean sameEntityType = entityTypeByte == other.entityTypeByte;
        return sameEntityType && indexName.equals( other.indexName );
    }

    private int calculateHashCode()
    {
        int code = 17;
        code += 7 * entityTypeByte;
        code += 7 * indexName.hashCode();
        return code;
    }

    @Override
    public int hashCode()
    {
        return this.hashCode;
    }

    @Override
    public String toString()
    {
        String kind = entityTypeByte == LuceneCommand.NODE ? "Node" : "Relationship";
        return "Index[" + indexName + "," + kind + "]";
    }
}
| false
|
community_lucene-index_src_main_java_org_neo4j_index_impl_lucene_IndexIdentifier.java
|
5,736
|
/**
 * Clock-eviction cache of index references keyed by index identifier.
 * Evicted references are disposed, writer included.
 */
public class IndexClockCache extends ClockCache<IndexIdentifier, IndexReference>
{
    public IndexClockCache( int maxSize )
    {
        super( "IndexSearcherCache", maxSize );
    }

    @Override
    public void elementCleaned( IndexReference evicted )
    {
        try
        {
            evicted.dispose( true );
        }
        catch ( IOException cause )
        {
            throw new RuntimeException( cause );
        }
    }
}
| false
|
community_lucene-index_src_main_java_org_neo4j_index_impl_lucene_IndexClockCache.java
|
5,737
|
/**
 * Wraps an iterator of entity ids, resolving each id to its entity while
 * skipping duplicates and ids whose entities no longer exist.
 */
public abstract class IdToEntityIterator<T extends PropertyContainer>
        extends CatchingIteratorWrapper<T, Long> implements IndexHits<T>
{
    private final IndexHits<Long> ids;
    // Ids already handed out, so duplicates from the underlying hits are skipped.
    private final Set<Long> alreadyReturned = new HashSet<>();

    public IdToEntityIterator( IndexHits<Long> ids )
    {
        super( ids );
        this.ids = ids;
    }

    @Override
    protected boolean exceptionOk( Throwable t )
    {
        // Entities deleted after the search was executed are silently skipped.
        return t instanceof NotFoundException;
    }

    @Override
    protected Long fetchNextOrNullFromSource( Iterator<Long> source )
    {
        while ( source.hasNext() )
        {
            Long id = source.next();
            if ( alreadyReturned.add( id ) )
            {
                return id;
            }
        }
        return null;
    }

    @Override
    public float currentScore()
    {
        return this.ids.currentScore();
    }

    @Override
    public int size()
    {
        return this.ids.size();
    }

    @Override
    public IndexHits<T> iterator()
    {
        return this;
    }

    @Override
    public void close()
    {
        ids.close();
    }

    @Override
    public T getSingle()
    {
        try
        {
            return IteratorUtil.singleOrNull( (Iterator<T>) this );
        }
        finally
        {
            // Always release the underlying hits, even on multiple-result failure.
            close();
        }
    }
}
| false
|
community_lucene-index_src_main_java_org_neo4j_index_impl_lucene_IdToEntityIterator.java
|
5,738
|
/**
 * XA command representing one index operation (add, remove, delete index,
 * create index). Commands know how to serialize themselves to the logical log
 * ({@link #writeToFile}), deserialize ({@link #readCommand}) and apply
 * themselves to an index ({@link #perform}).
 */
abstract class LuceneCommand extends XaCommand
{
    // Command-type discriminators, first byte of each serialized command.
    private static final byte ADD_COMMAND = (byte) 1;
    private static final byte REMOVE_COMMAND = (byte) 2;
    private static final byte DELETE_COMMAND = (byte) 3;
    private static final byte CREATE_INDEX_COMMAND = (byte) 4;

    // Entity-type discriminators, second byte of each serialized command.
    public static final byte NODE = (byte) 1;
    public static final byte RELATIONSHIP = (byte) 2;

    // Value-type tags used in the serialized format.
    private static final byte VALUE_TYPE_NULL = (byte) 0;
    private static final byte VALUE_TYPE_INT = (byte) 1;
    private static final byte VALUE_TYPE_LONG = (byte) 2;
    private static final byte VALUE_TYPE_FLOAT = (byte) 3;
    private static final byte VALUE_TYPE_DOUBLE = (byte) 4;
    private static final byte VALUE_TYPE_STRING = (byte) 5;

    final IndexIdentifier indexId;
    // Either a Long (node id) or a RelationshipId.
    final Object entityId;
    final String key;
    final Object value;
    // Command-type byte (ADD_COMMAND etc.).
    final byte type;
    // Entity-type byte (NODE or RELATIONSHIP).
    final byte entityType;

    LuceneCommand( IndexIdentifier indexId, byte entityType, Object entityId, String key, Object value, byte type )
    {
        assert entityType == NODE || entityType == RELATIONSHIP;
        this.indexId = indexId;
        this.entityType = entityType;
        this.entityId = entityId;
        this.key = key;
        this.value = value;
        this.type = type;
    }

    @Override
    public void execute()
    {
        // TODO Auto-generated method stub
        // NOTE(review): intentionally empty — application happens via perform()
        // during commit instead; confirm against LuceneTransaction usage.
    }

    // Applies this command to the index within the given commit context.
    abstract void perform( CommitContext context );

    public Class<? extends PropertyContainer> getEntityType()
    {
        if ( this.entityType == NODE )
        {
            return Node.class;
        }
        else if ( this.entityType == RELATIONSHIP )
        {
            return Relationship.class;
        }
        throw new IllegalArgumentException( "Unknown entity type " + entityType );
    }

    /**
     * Serializes this command. Format: type byte, entity-type byte, index-name
     * length, entity id, key length (-1 for null), value-type byte, index-name
     * chars, key chars, then the value in type-specific form.
     */
    @Override
    public void writeToFile( LogBuffer buffer ) throws IOException
    {
        buffer.put( type );
        buffer.put( entityType );
        char[] indexName = indexId.indexName.toCharArray();
        buffer.putInt( indexName.length );
        long id = entityId instanceof Long ? (Long) entityId : ((RelationshipId)entityId).id;
        buffer.putLong( id );
        char[] key = this.key == null ? null : this.key.toCharArray();
        buffer.putInt( key == null ? -1 : key.length );
        byte valueType = 0;
        if ( value == null )
        {
            valueType = VALUE_TYPE_NULL;
        }
        else if ( value instanceof Number )
        {
            if ( value instanceof Float )
            {
                valueType = VALUE_TYPE_FLOAT;
            }
            else if ( value instanceof Double )
            {
                valueType = VALUE_TYPE_DOUBLE;
            }
            else if ( value instanceof Long )
            {
                valueType = VALUE_TYPE_LONG;
            }
            else
            {
                // Any other Number is written as an int.
                valueType = VALUE_TYPE_INT;
            }
        }
        else
        {
            valueType = VALUE_TYPE_STRING;
        }
        buffer.put( valueType );
        buffer.put( indexName );
        if ( key != null )
        {
            buffer.put( key );
        }
        if ( valueType == VALUE_TYPE_STRING )
        {
            char[] charValue = value.toString().toCharArray();
            buffer.putInt( charValue.length );
            buffer.put( charValue );
        }
        else if ( valueType != VALUE_TYPE_NULL )
        {
            Number number = (Number) value;
            switch ( valueType )
            {
            case VALUE_TYPE_FLOAT:
                buffer.putInt( Float.floatToRawIntBits( number.floatValue() ) );
                break;
            case VALUE_TYPE_DOUBLE:
                buffer.putLong( Double.doubleToRawLongBits( number.doubleValue() ) );
                break;
            case VALUE_TYPE_LONG:
                buffer.putLong( number.longValue() );
                break;
            case VALUE_TYPE_INT:
                buffer.putInt( number.intValue() );
                break;
            default:
                throw new Error( "Should not reach here." );
            }
        }
    }

    // True for commands that write entity data (add/remove), false for
    // index-level commands (delete/create).
    public abstract boolean isConsideredNormalWriteCommand();

    private static void writeLengthAndString( LogBuffer buffer, String string ) throws IOException
    {
        char[] chars = string.toCharArray();
        buffer.putInt( chars.length );
        buffer.put( chars );
    }

    // Adds one (key,value) for a node to the index.
    static class AddCommand extends LuceneCommand
    {
        AddCommand( IndexIdentifier indexId, byte entityType, Object entityId, String key, Object value )
        {
            super( indexId, entityType, entityId, key, value, ADD_COMMAND );
        }

        @Override
        void perform( CommitContext context )
        {
            context.ensureWriterInstantiated();
            context.indexType.addToDocument( context.getDocument( entityId, true ).document, key, value );
            context.dataSource.invalidateCache( context.identifier, key, value );
        }

        @Override
        public boolean isConsideredNormalWriteCommand()
        {
            return true;
        }

        @Override
        public String toString()
        {
            return "Add[" + indexId + "," + entityId + "," + key + "," + value + "]";
        }
    }

    // Like AddCommand, but also serializes the relationship's start/end nodes.
    static class AddRelationshipCommand extends LuceneCommand
    {
        AddRelationshipCommand( IndexIdentifier indexId, byte entityType, RelationshipId entityId, String key,
                Object value )
        {
            super( indexId, entityType, entityId, key, value, ADD_COMMAND );
        }

        @Override
        public void writeToFile( LogBuffer buffer ) throws IOException
        {
            super.writeToFile( buffer );
            // Relationship adds append start/end node ids after the common part.
            buffer.putLong( ((RelationshipId) entityId).startNode );
            buffer.putLong( ((RelationshipId) entityId).endNode );
        }

        @Override
        void perform( CommitContext context )
        {
            context.ensureWriterInstantiated();
            context.indexType.addToDocument( context.getDocument( entityId, true ).document, key, value );
            context.dataSource.invalidateCache( context.identifier, key, value );
        }

        @Override
        public boolean isConsideredNormalWriteCommand()
        {
            return true;
        }

        @Override
        public String toString()
        {
            RelationshipId relId = (RelationshipId)entityId;
            return "AddRel[" + indexId + "," + relId.id + "," + key + "," + value + "," + relId.startNode + "," + relId.endNode + "]";
        }
    }

    // Removes a (key,value) for an entity; a no-op if the document is absent.
    static class RemoveCommand extends LuceneCommand
    {
        RemoveCommand( IndexIdentifier indexId, byte entityType, Object entityId, String key, Object value )
        {
            super( indexId, entityType, entityId, key, value, REMOVE_COMMAND );
        }

        @Override
        void perform( CommitContext context )
        {
            context.ensureWriterInstantiated();
            DocumentContext document = context.getDocument( entityId, false );
            if ( document != null )
            {
                context.indexType.removeFromDocument( document.document, key, value );
                context.dataSource.invalidateCache( context.identifier, key, value );
            }
        }

        @Override
        public boolean isConsideredNormalWriteCommand()
        {
            return true;
        }

        @Override
        public String toString()
        {
            return "Remove[" + indexId + "," + entityId + "," + key + "," + value + "]";
        }
    }

    // Deletes an entire index.
    static class DeleteCommand extends LuceneCommand
    {
        DeleteCommand( IndexIdentifier indexId )
        {
            super( indexId, indexId.entityTypeByte, -1L, "", "", DELETE_COMMAND );
        }

        @Override
        void perform( CommitContext context )
        {
            // Pending document changes are moot once the whole index goes away.
            context.documents.clear();
            context.dataSource.deleteIndex( context.identifier, context.recovery );
        }

        @Override
        public boolean isConsideredNormalWriteCommand()
        {
            return false;
        }

        @Override
        public String toString()
        {
            return "Delete[" + indexId + "]";
        }
    }

    // Records creation of an index together with its configuration.
    static class CreateIndexCommand extends LuceneCommand
    {
        // Placeholder identifier; a create command names the index itself.
        static final IndexIdentifier FAKE_IDENTIFIER = new IndexIdentifier(
                (byte) 9, null, "create index" );
        private final String name;
        private final Map<String, String> config;

        CreateIndexCommand( byte entityType, String name, Map<String, String> config )
        {
            super( FAKE_IDENTIFIER, entityType, -1L, null, null, CREATE_INDEX_COMMAND );
            this.name = name;
            this.config = config;
        }

        public String getName()
        {
            return name;
        }

        public Map<String, String> getConfig()
        {
            return config;
        }

        @Override
        public void writeToFile( LogBuffer buffer ) throws IOException
        {
            // Custom format: type, entity type, name, then the config entries.
            buffer.put( type );
            buffer.put( entityType );
            writeLengthAndString( buffer, name );
            buffer.putInt( config.size() );
            for ( Map.Entry<String, String> entry : config.entrySet() )
            {
                writeLengthAndString( buffer, entry.getKey() );
                writeLengthAndString( buffer, entry.getValue() );
            }
        }

        @Override
        void perform( CommitContext context )
        {
            context.dataSource.indexStore.setIfNecessary( getEntityType(), name, config );
        }

        @Override
        public boolean isConsideredNormalWriteCommand()
        {
            return false;
        }

        @Override
        public String toString()
        {
            return "Create[" + new IndexIdentifier( entityType, null, name ) + "," + config + "]";
        }
    }

    /**
     * Deserializes one command from the channel — the inverse of writeToFile.
     * Returns null on a truncated/unreadable record (expected at log end).
     */
    static XaCommand readCommand( ReadableByteChannel channel,
            ByteBuffer buffer, LuceneDataSource dataSource ) throws IOException
    {
        // Read what type of command it is
        buffer.clear(); buffer.limit( 2 );
        if ( channel.read( buffer ) != buffer.limit() )
        {
            return null;
        }
        buffer.flip();
        byte commandType = buffer.get();
        byte entityTypeByte = buffer.get();
        if ( commandType == CREATE_INDEX_COMMAND )
        {
            buffer.clear();
            String name = IoPrimitiveUtils.readLengthAndString( channel, buffer );
            if ( name == null )
            {
                return null;
            }
            int size = IoPrimitiveUtils.readInt( channel, buffer );
            Map<String, String> config = new HashMap<String, String>();
            for ( int i = 0; i < size; i++ )
            {
                String key = IoPrimitiveUtils.readLengthAndString( channel, buffer );
                String value = IoPrimitiveUtils.readLengthAndString( channel, buffer );
                if ( key == null || value == null )
                {
                    return null;
                }
                config.put( key, value );
            }
            return new CreateIndexCommand( entityTypeByte, name, config );
        }
        else
        {
            // Read the command data
            // 17 bytes = index-name length (4) + entity id (8) + key length (4) + value type (1).
            buffer.clear(); buffer.limit( 17 );
            if ( channel.read( buffer ) != buffer.limit() )
            {
                return null;
            }
            buffer.flip();
            EntityType entityType = null;
            if ( entityTypeByte == NODE )
            {
                entityType = dataSource != null ? dataSource.nodeEntityType : null;
            }
            else if ( entityTypeByte == RELATIONSHIP )
            {
                entityType = dataSource != null ? dataSource.relationshipEntityType : null;
            }
            else
            {
                return null;
            }
            int indexNameLength = buffer.getInt();
            long entityId = buffer.getLong();
            int keyCharLength = buffer.getInt();
            byte valueType = buffer.get();
            String indexName = IoPrimitiveUtils.readString( channel, buffer, indexNameLength );
            if ( indexName == null )
            {
                return null;
            }
            String key = null;
            // -1 length marks a null key (see writeToFile).
            if ( keyCharLength != -1 )
            {
                key = IoPrimitiveUtils.readString( channel, buffer, keyCharLength );
                if ( key == null )
                {
                    return null;
                }
            }
            Object value = null;
            if ( valueType >= VALUE_TYPE_INT && valueType <= VALUE_TYPE_DOUBLE )
            {
                switch ( valueType )
                {
                case VALUE_TYPE_INT: value = IoPrimitiveUtils.readInt( channel, buffer ); break;
                case VALUE_TYPE_LONG: value = IoPrimitiveUtils.readLong( channel, buffer ); break;
                case VALUE_TYPE_FLOAT: value = IoPrimitiveUtils.readFloat( channel, buffer ); break;
                case VALUE_TYPE_DOUBLE: value = IoPrimitiveUtils.readDouble( channel, buffer ); break;
                }
            }
            else if ( valueType == VALUE_TYPE_STRING )
            {
                value = IoPrimitiveUtils.readLengthAndString( channel, buffer );
            }
            if ( valueType != VALUE_TYPE_NULL && value == null )
            {
                return null;
            }
            Long startNodeId = null;
            Long endNodeId = null;
            // Relationship adds carry trailing start/end node ids.
            if ( commandType == ADD_COMMAND && entityTypeByte == RELATIONSHIP )
            {
                startNodeId = IoPrimitiveUtils.readLong( channel, buffer );
                endNodeId = IoPrimitiveUtils.readLong( channel, buffer );
                if ( startNodeId == null || endNodeId == null )
                {
                    return null;
                }
            }
            IndexIdentifier identifier = new IndexIdentifier( entityTypeByte, entityType, indexName );
            switch ( commandType )
            {
            case ADD_COMMAND: return entityTypeByte == NODE ?
                    new AddCommand( identifier, entityTypeByte, entityId, key, value ) :
                    new AddRelationshipCommand( identifier, entityTypeByte,
                            new RelationshipId( entityId, startNodeId, endNodeId ), key, value );
            case REMOVE_COMMAND: return new RemoveCommand( identifier, entityTypeByte, entityId, key, value );
            case DELETE_COMMAND: return new DeleteCommand( identifier );
            default:
                throw new IOException( "Unknown command type[" +
                        commandType + "]" );
            }
        }
    }
}
| false
|
community_lucene-index_src_main_java_org_neo4j_index_impl_lucene_LuceneCommand.java
|
5,739
|
/**
 * Add command for relationships; serializes the relationship's start and end
 * node ids in addition to the common command fields.
 */
static class AddRelationshipCommand extends LuceneCommand
{
    AddRelationshipCommand( IndexIdentifier indexId, byte entityType, RelationshipId entityId, String key,
            Object value )
    {
        super( indexId, entityType, entityId, key, value, ADD_COMMAND );
    }

    @Override
    public void writeToFile( LogBuffer buffer ) throws IOException
    {
        super.writeToFile( buffer );
        // Start/end node ids are appended after the common serialized part.
        buffer.putLong( ((RelationshipId) entityId).startNode );
        buffer.putLong( ((RelationshipId) entityId).endNode );
    }

    @Override
    void perform( CommitContext context )
    {
        context.ensureWriterInstantiated();
        context.indexType.addToDocument( context.getDocument( entityId, true ).document, key, value );
        context.dataSource.invalidateCache( context.identifier, key, value );
    }

    @Override
    public boolean isConsideredNormalWriteCommand()
    {
        return true;
    }

    @Override
    public String toString()
    {
        RelationshipId relId = (RelationshipId)entityId;
        return "AddRel[" + indexId + "," + relId.id + "," + key + "," + value + "," + relId.startNode + "," + relId.endNode + "]";
    }
}
| false
|
community_lucene-index_src_main_java_org_neo4j_index_impl_lucene_LuceneCommand.java
|
5,740
|
/**
 * Lazily fetched list of search hits with an LRU cache of Documents, tracking
 * deletions that happen while results are being consumed. Adapted from
 * Lucene's (since removed) Hits class.
 */
public final class Hits {
  private static int MAX_CACHED_DOCS = 200;          // max to cache
  private Weight weight;
  private IndexSearcher searcher;
  private Filter filter = null;
  private Sort sort = null;
  private int length;                                 // the total number of hits
  private Vector<HitDoc> hitDocs = new Vector<HitDoc>();      // cache of hits retrieved
  private HitDoc first;         // head of LRU cache
  private HitDoc last;          // tail of LRU cache
  private int numDocs = 0;      // number cached
  private int nDeletions;       // # deleted docs in the index.
  private int lengthAtStart;    // this is the number apps usually count on (although deletions can bring it down).
  private int nDeletedHits = 0; // # of already collected hits that were meanwhile deleted.
  boolean debugCheckedForDeletions = false; // for test purposes.
  // Whether to use a scoring collector when sorting (see getMoreDocs).
  private boolean score;

  public Hits(IndexSearcher s, Query q, Filter f) throws IOException
  {
    score = false;
    weight = q.weight(s);
    searcher = s;
    filter = f;
    nDeletions = countDeletions(s);
    getMoreDocs(50); // retrieve 100 initially
    lengthAtStart = length;
  }

  public Hits(IndexSearcher s, Query q, Filter f, Sort o, boolean score) throws IOException {
    this.score = score;
    weight = q.weight(s);
    searcher = s;
    filter = f;
    sort = o;
    nDeletions = countDeletions(s);
    getMoreDocs(50); // retrieve 100 initially
    lengthAtStart = length;
  }

  // count # deletions, return -1 if unknown.
  private int countDeletions(IndexSearcher s) throws IOException {
    int cnt = -1;
    // NOTE(review): the instanceof check is always true given the parameter
    // type — likely a leftover from a wider Searcher-typed original.
    if (s instanceof IndexSearcher) {
      cnt = s.maxDoc() - ((IndexSearcher) s).getIndexReader().numDocs();
    }
    return cnt;
  }

  /**
   * Tries to add new documents to hitDocs.
   * Ensures that the hit numbered <code>min</code> has been retrieved.
   */
  private void getMoreDocs(int min) throws IOException {
    if (hitDocs.size() > min) {
      min = hitDocs.size();
    }
    int n = min * 2;    // double # retrieved
//    TopDocs topDocs = (sort == null) ? searcher.search(weight, filter, n) : searcher.search(weight, filter, n, sort);
    TopDocs topDocs = null;
    if ( sort == null )
    {
        topDocs = searcher.search( weight, filter, n );
    }
    else
    {
        if ( this.score )
        {
            // NOTE(review): the scoring path passes null instead of the
            // filter — confirm this is intended for scored+sorted queries.
            TopFieldCollector collector = LuceneDataSource.scoringCollector( sort, n );
            searcher.search( weight, null, collector );
            topDocs = collector.topDocs();
        }
        else
        {
            topDocs = searcher.search( weight, filter, n, sort );
        }
    }

    length = topDocs.totalHits;
    ScoreDoc[] scoreDocs = topDocs.scoreDocs;
    float scoreNorm = 1.0f;

    // Normalize scores so the maximum is at most 1.0.
    if (length > 0 && topDocs.getMaxScore() > 1.0f) {
      scoreNorm = 1.0f / topDocs.getMaxScore();
    }

    int start = hitDocs.size() - nDeletedHits;

    // any new deletions?
    int nDels2 = countDeletions(searcher);
    debugCheckedForDeletions = false;
    if (nDeletions < 0 || nDels2 > nDeletions) {
      // either we cannot count deletions, or some "previously valid hits" might have been deleted, so find exact start point
      nDeletedHits = 0;
      debugCheckedForDeletions = true;
      int i2 = 0;
      for (int i1=0; i1<hitDocs.size() && i2<scoreDocs.length; i1++) {
        int id1 = ((HitDoc)hitDocs.get(i1)).id;
        int id2 = scoreDocs[i2].doc;
        if (id1 == id2) {
          i2++;
        } else {
          nDeletedHits ++;
        }
      }
      start = i2;
    }

    int end = scoreDocs.length < length ? scoreDocs.length : length;
    length += nDeletedHits;
    for (int i = start; i < end; i++) {
      hitDocs.addElement(new HitDoc(scoreDocs[i].score * scoreNorm,
                                    scoreDocs[i].doc));
    }

    nDeletions = nDels2;
  }

  /** Returns the total number of hits available in this set. */
  public int length() {
    return length;
  }

  /** Returns the stored fields of the n<sup>th</sup> document in this set.
   * <p>Documents are cached, so that repeated requests for the same element may
   * return the same Document object.
   * @throws org.apache.lucene.index.CorruptIndexException if the index is corrupt
   * @throws java.io.IOException if there is a low-level IO error
   */
  public Document doc(int n) throws CorruptIndexException, IOException {
    HitDoc hitDoc = hitDoc(n);

    // Update LRU cache of documents
    remove(hitDoc);               // remove from list, if there
    addToFront(hitDoc);           // add to front of list
    if (numDocs > MAX_CACHED_DOCS ) {      // if cache is full
      HitDoc oldLast = last;
      remove(last);             // flush last
      oldLast.doc = null;       // let doc get gc'd
    }

    if (hitDoc.doc == null) {
      hitDoc.doc = searcher.doc(hitDoc.id);  // cache miss: read document
    }

    return hitDoc.doc;
  }

  /** Returns the score for the n<sup>th</sup> document in this set. */
  public float score(int n) throws IOException {
    return hitDoc(n).score;
  }

  /** Returns the id for the n<sup>th</sup> document in this set.
   * Note that ids may change when the index changes, so you cannot
   * rely on the id to be stable.
   */
  public int id(int n) throws IOException {
    return hitDoc(n).id;
  }

  private HitDoc hitDoc(int n) throws IOException {
    if (n >= lengthAtStart) {
      throw new IndexOutOfBoundsException("Not a valid hit number: " + n);
    }

    if (n >= hitDocs.size()) {
      getMoreDocs(n);
    }

    // Concurrent deletions can shrink the valid range below lengthAtStart.
    if (n >= length) {
      throw new ConcurrentModificationException("Not a valid hit number: " + n);
    }

    return hitDocs.elementAt(n);
  }

  private void addToFront(HitDoc hitDoc) {  // insert at front of cache
    if (first == null) {
      last = hitDoc;
    } else {
      first.prev = hitDoc;
    }

    hitDoc.next = first;
    first = hitDoc;
    hitDoc.prev = null;

    numDocs++;
  }

  private void remove(HitDoc hitDoc) {      // remove from cache
    if (hitDoc.doc == null) {             // it's not in the list
      return;                             // abort
    }

    if (hitDoc.next == null) {
      last = hitDoc.prev;
    } else {
      hitDoc.next.prev = hitDoc.prev;
    }

    if (hitDoc.prev == null) {
      first = hitDoc.next;
    } else {
      hitDoc.prev.next = hitDoc.next;
    }

    numDocs--;
  }
}
| false
|
community_lucene-index_src_main_java_org_neo4j_index_impl_lucene_Hits.java
|
5,741
|
/**
 * Command recording creation of an index together with its configuration map.
 * Serialized with its own format (name plus key/value config pairs).
 */
static class CreateIndexCommand extends LuceneCommand
{
    // Placeholder identifier; a create command carries the index name itself.
    static final IndexIdentifier FAKE_IDENTIFIER = new IndexIdentifier(
            (byte) 9, null, "create index" );
    private final String name;
    private final Map<String, String> config;

    CreateIndexCommand( byte entityType, String name, Map<String, String> config )
    {
        super( FAKE_IDENTIFIER, entityType, -1L, null, null, CREATE_INDEX_COMMAND );
        this.name = name;
        this.config = config;
    }

    public String getName()
    {
        return name;
    }

    public Map<String, String> getConfig()
    {
        return config;
    }

    @Override
    public void writeToFile( LogBuffer buffer ) throws IOException
    {
        buffer.put( type );
        buffer.put( entityType );
        writeLengthAndString( buffer, name );
        buffer.putInt( config.size() );
        for ( Map.Entry<String, String> entry : config.entrySet() )
        {
            writeLengthAndString( buffer, entry.getKey() );
            writeLengthAndString( buffer, entry.getValue() );
        }
    }

    @Override
    void perform( CommitContext context )
    {
        // Persist the configuration only if it isn't already stored.
        context.dataSource.indexStore.setIfNecessary( getEntityType(), name, config );
    }

    @Override
    public boolean isConsideredNormalWriteCommand()
    {
        return false;
    }

    @Override
    public String toString()
    {
        return "Create[" + new IndexIdentifier( entityType, null, name ) + "," + config + "]";
    }
}
| false
|
community_lucene-index_src_main_java_org_neo4j_index_impl_lucene_LuceneCommand.java
|
5,742
|
/**
 * Relationship-flavored index: resolves ids to relationships and supports
 * constraining lookups by start and/or end node.
 */
static class RelationshipIndex extends LuceneIndex<Relationship>
        implements org.neo4j.graphdb.index.RelationshipIndex
{
    private final GraphDatabaseService gdb;

    RelationshipIndex( LuceneIndexImplementation service,
            GraphDatabaseService gdb,
            IndexIdentifier identifier, TransactionManager txManager )
    {
        super( service, identifier, txManager );
        this.gdb = gdb;
    }

    @Override
    protected Relationship getById( long id )
    {
        return gdb.getRelationshipById(id);
    }

    @Override
    protected long getEntityId( Relationship entity )
    {
        return entity.getId();
    }

    @Override
    public IndexHits<Relationship> get( String key, Object valueOrNull, Node startNodeOrNull,
            Node endNodeOrNull )
    {
        super.assertInTransaction();
        BooleanQuery query = new BooleanQuery();
        if ( key != null && valueOrNull != null )
        {
            query.add( type.get( key, valueOrNull ), Occur.MUST );
        }
        addIfNotNull( query, startNodeOrNull, KEY_START_NODE_ID );
        addIfNotNull( query, endNodeOrNull, KEY_END_NODE_ID );
        return query( query, null, null, null );
    }

    @Override
    public IndexHits<Relationship> query( String key, Object queryOrQueryObjectOrNull,
            Node startNodeOrNull, Node endNodeOrNull )
    {
        super.assertInTransaction();
        // instanceof already yields false for null, so no separate null check needed.
        QueryContext context = queryOrQueryObjectOrNull instanceof QueryContext ?
                (QueryContext) queryOrQueryObjectOrNull : null;
        BooleanQuery query = new BooleanQuery();
        if ( (context != null && context.getQueryOrQueryObject() != null) ||
                (context == null && queryOrQueryObjectOrNull != null ) )
        {
            query.add( type.query( key, context != null ?
                    context.getQueryOrQueryObject() : queryOrQueryObjectOrNull, context ), Occur.MUST );
        }
        addIfNotNull( query, startNodeOrNull, KEY_START_NODE_ID );
        addIfNotNull( query, endNodeOrNull, KEY_END_NODE_ID );
        return query( query, null, null, context );
    }

    // Constrains the query to relationships attached to the given node, if any.
    private static void addIfNotNull( BooleanQuery query, Node nodeOrNull, String field )
    {
        if ( nodeOrNull != null )
        {
            query.add( new TermQuery( new Term( field, "" + nodeOrNull.getId() ) ),
                    Occur.MUST );
        }
    }

    @Override
    public IndexHits<Relationship> query( Object queryOrQueryObjectOrNull,
            Node startNodeOrNull, Node endNodeOrNull )
    {
        super.assertInTransaction();
        return query( null, queryOrQueryObjectOrNull, startNodeOrNull, endNodeOrNull );
    }

    @Override
    protected LuceneCommand newAddCommand( PropertyContainer entity, String key, Object value )
    {
        Relationship rel = (Relationship) entity;
        return new LuceneCommand.AddRelationshipCommand( getIdentifier(), LuceneCommand.RELATIONSHIP,
                RelationshipId.of( rel ), key, value );
    }

    @Override
    protected LuceneCommand newRemoveCommand( PropertyContainer entity, String key, Object value )
    {
        Relationship rel = (Relationship) entity;
        return new LuceneCommand.RemoveCommand( getIdentifier(), LuceneCommand.RELATIONSHIP,
                RelationshipId.of( rel ), key, value );
    }

    @Override
    public Class<Relationship> getEntityType()
    {
        return Relationship.class;
    }
}
| false
|
community_lucene-index_src_main_java_org_neo4j_index_impl_lucene_LuceneIndex.java
|
5,743
|
/**
 * {@link LuceneIndex} specialization for {@link Node} entities. Resolves ids
 * through the supplied {@link GraphDatabaseService} and produces node-flavored
 * logical-log commands.
 */
static class NodeIndex extends LuceneIndex<Node>
{
    // Used to look up nodes by id when materializing query hits.
    private final GraphDatabaseService gdb;

    NodeIndex( LuceneIndexImplementation service,
            GraphDatabaseService gdb,
            IndexIdentifier identifier, TransactionManager txManager )
    {
        super( service, identifier, txManager );
        this.gdb = gdb;
    }

    @Override
    protected Node getById( long id )
    {
        return gdb.getNodeById(id);
    }

    @Override
    protected long getEntityId( Node entity )
    {
        return entity.getId();
    }

    @Override
    protected LuceneCommand newAddCommand( PropertyContainer entity, String key, Object value )
    {
        return new LuceneCommand.AddCommand( getIdentifier(), LuceneCommand.NODE,
                ((Node) entity).getId(), key, value );
    }

    @Override
    protected LuceneCommand newRemoveCommand( PropertyContainer entity, String key, Object value )
    {
        return new LuceneCommand.RemoveCommand( getIdentifier(), LuceneCommand.NODE,
                ((Node) entity).getId(), key, value );
    }

    // This index stores nodes.
    @Override
    public Class<Node> getEntityType()
    {
        return Node.class;
    }
}
| false
|
community_lucene-index_src_main_java_org_neo4j_index_impl_lucene_LuceneIndex.java
|
5,744
|
{
@Override
protected T underlyingObjectToObject( Long id )
{
return getById( id );
}
@Override
protected void itemDodged( Long item )
{
abandonedIds.add( item );
}
@Override
public boolean hasNext()
{
assertInTransaction();
return super.hasNext();
}
@Override
public T next()
{
assertInTransaction();
return super.next();
}
@Override
public T getSingle()
{
assertInTransaction();
return super.getSingle();
}
};
| false
|
community_lucene-index_src_main_java_org_neo4j_index_impl_lucene_LuceneIndex.java
|
5,745
|
public abstract class LuceneIndex<T extends PropertyContainer> implements Index<T>
{
static final String KEY_DOC_ID = "_id_";
static final String KEY_START_NODE_ID = "_start_node_id_";
static final String KEY_END_NODE_ID = "_end_node_id_";
private static Set<String> FORBIDDEN_KEYS = new HashSet<String>( Arrays.asList( null, KEY_DOC_ID, KEY_START_NODE_ID, KEY_END_NODE_ID ) );
final LuceneIndexImplementation service;
private final IndexIdentifier identifier;
private final TransactionManager txManager;
final IndexType type;
private volatile boolean deleted;
// Will contain ids which were found to be missing from the graph when doing queries
// Write transactions can fetch from this list and add to their transactions to
// allow for self-healing properties.
final Collection<Long> abandonedIds = new CopyOnWriteArraySet<Long>();
LuceneIndex( LuceneIndexImplementation service, IndexIdentifier identifier, TransactionManager txManager )
{
this.service = service;
this.identifier = identifier;
this.txManager = txManager;
this.type = service.dataSource().getType( identifier, false );
}
LuceneXaConnection getConnection()
{
assertNotDeleted();
if ( service.broker() == null )
{
throw new ReadOnlyDbException();
}
return service.broker().acquireResourceConnection();
}
private void assertNotDeleted()
{
if ( deleted )
{
throw new IllegalStateException( "This index (" + identifier + ") has been deleted" );
}
}
@Override
public GraphDatabaseService getGraphDatabase()
{
return service.graphDb();
}
LuceneXaConnection getReadOnlyConnection()
{
assertNotDeleted();
return service.broker() == null ? null :
service.broker().acquireReadOnlyResourceConnection();
}
void markAsDeleted()
{
this.deleted = true;
this.abandonedIds.clear();
}
@Override
public String getName()
{
return this.identifier.indexName;
}
/**
* See {@link Index#add(PropertyContainer, String, Object)} for more generic
* documentation.
*
* Adds key/value to the {@code entity} in this index. Added values are
* searchable within the transaction, but composite {@code AND}
* queries aren't guaranteed to return added values correctly within that
* transaction. When the transaction has been committed all such queries
* are guaranteed to return correct results.
*
* @param entity the entity (i.e {@link Node} or {@link Relationship})
* to associate the key/value pair with.
* @param key the key in the key/value pair to associate with the entity.
* @param value the value in the key/value pair to associate with the
* entity.
*/
@Override
public void add( T entity, String key, Object value )
{
    assertInTransaction();
    LuceneXaConnection xaConnection = getConnection();
    assertValidKey( key );
    // A value may be an array; each element is indexed individually.
    for ( Object singleValue : IoPrimitiveUtils.asArray( value ) )
    {
        xaConnection.add( this, entity, key, singleValue );
    }
}
@Override
public T putIfAbsent( T entity, String key, Object value )
{
assertInTransaction();
// TODO This should not be in NodeManager. Make a separate service that does this, which can be passed into index implementations
return ((GraphDatabaseAPI)service.graphDb()).getDependencyResolver().resolveDependency( NodeManager.class )
.indexPutIfAbsent( this, entity, key, value );
}
// Rejects null keys and the reserved internal field names (doc id,
// start/end node id) which would corrupt the documents' bookkeeping fields.
private void assertValidKey( String key )
{
    if ( FORBIDDEN_KEYS.contains( key ) )
    {
        throw new IllegalArgumentException( "Key " + key + " forbidden" );
    }
}
/**
* See {@link Index#remove(PropertyContainer, String, Object)} for more
* generic documentation.
*
* Removes key/value to the {@code entity} in this index. Removed values
* are excluded within the transaction, but composite {@code AND}
* queries aren't guaranteed to exclude removed values correctly within
* that transaction. When the transaction has been committed all such
* queries are guaranteed to return correct results.
*
* @param entity the entity (i.e {@link Node} or {@link Relationship})
* to dissociate the key/value pair from.
* @param key the key in the key/value pair to dissociate from the entity.
* @param value the value in the key/value pair to dissociate from the
* entity.
*/
@Override
public void remove( T entity, String key, Object value )
{
assertInTransaction();
LuceneXaConnection connection = getConnection();
assertValidKey( key );
for ( Object oneValue : IoPrimitiveUtils.asArray( value ) )
{
connection.remove( this, entity, key, oneValue );
}
}
@Override
public void remove( T entity, String key )
{
assertInTransaction();
LuceneXaConnection connection = getConnection();
assertValidKey( key );
connection.remove( this, entity, key );
}
@Override
public void remove( T entity )
{
assertInTransaction();
LuceneXaConnection connection = getConnection();
connection.remove( this, entity );
}
@Override
public void delete()
{
assertInTransaction();
getConnection().deleteIndex( this );
}
@Override
public IndexHits<T> get( String key, Object value )
{
assertInTransaction();
return query( type.get( key, value ), key, value, null );
}
/**
* {@inheritDoc}
*
* {@code queryOrQueryObject} can be a {@link String} containing the query
* in Lucene syntax format, http://lucene.apache.org/java/3_0_2/queryparsersyntax.html.
* Or it can be a {@link Query} object. If can even be a {@link QueryContext}
* object which can contain a query ({@link String} or {@link Query}) and
* additional parameters, such as {@link Sort}.
*
* Because of performance issues, including uncommitted transaction modifications
* in the result is disabled by default, but can be enabled using
* {@link QueryContext#tradeCorrectnessForSpeed()}.
*/
@Override
public IndexHits<T> query( String key, Object queryOrQueryObject )
{
assertInTransaction();
QueryContext context = queryOrQueryObject instanceof QueryContext ?
(QueryContext) queryOrQueryObject : null;
return query( type.query( key, context != null ?
context.getQueryOrQueryObject() : queryOrQueryObject, context ), null, null, context );
}
/**
* {@inheritDoc}
*
* @see #query(String, Object)
*/
@Override
public IndexHits<T> query( Object queryOrQueryObject )
{
return query( null, queryOrQueryObject );
}
/**
 * Core query path shared by get() and the query() variants. Combines three
 * sources of hits: ids added in the current transaction, a searcher over the
 * transaction's uncommitted additions, and the committed on-disk index —
 * while filtering out ids removed in the current transaction.
 *
 * @param query the lucene query to execute against the committed index.
 * @param keyForDirectLookup non-null for exact key/value lookups (enables the
 *        id cache path); null for free-form queries.
 * @param valueForDirectLookup the value for the direct-lookup path, ignored otherwise.
 * @param additionalParametersOrNull optional sorting/top-N/speed-tradeoff settings.
 */
protected IndexHits<T> query( Query query, String keyForDirectLookup,
        Object valueForDirectLookup, QueryContext additionalParametersOrNull )
{
    List<Long> ids = new ArrayList<Long>();
    LuceneXaConnection con = getReadOnlyConnection();
    LuceneTransaction luceneTx = con != null ? con.getLuceneTx() : null;
    Collection<Long> removedIds = Collections.emptySet();
    IndexSearcher additionsSearcher = null;
    if ( luceneTx != null )
    {
        // Pull in uncommitted state from the current transaction.
        if ( keyForDirectLookup != null )
        {
            ids.addAll( luceneTx.getAddedIds( this, keyForDirectLookup, valueForDirectLookup ) );
        }
        else
        {
            additionsSearcher = luceneTx.getAdditionsAsSearcher( this, additionalParametersOrNull );
        }
        removedIds = keyForDirectLookup != null ?
                luceneTx.getRemovedIds( this, keyForDirectLookup, valueForDirectLookup ) :
                luceneTx.getRemovedIds( this, query );
    }
    IndexHits<Long> idIterator = null;
    IndexReference searcher = null;
    // The data source read lock only guards acquiring the searcher reference,
    // not the search itself.
    service.dataSource().getReadLock();
    try
    {
        searcher = service.dataSource().getIndexSearcher( identifier );
    }
    finally
    {
        service.dataSource().releaseReadLock();
    }
    if ( searcher != null )
    {
        boolean foundInCache = false;
        LruCache<String, Collection<Long>> cachedIdsMap = null;
        if ( keyForDirectLookup != null )
        {
            // Exact lookups may be answered from the per-key id cache.
            cachedIdsMap = service.dataSource().getFromCache(
                    identifier, keyForDirectLookup );
            foundInCache = fillFromCache( cachedIdsMap, ids,
                    valueForDirectLookup.toString(), removedIds );
        }
        if ( !foundInCache )
        {
            // Search the index (plus, when present, the tx additions searcher),
            // filtering out ids removed in this transaction.
            DocToIdIterator searchedIds = new DocToIdIterator( search( searcher,
                    query, additionalParametersOrNull, additionsSearcher, removedIds ), removedIds, searcher );
            if ( ids.isEmpty() )
            {
                idIterator = searchedIds;
            }
            else
            {
                // Merge index hits with tx-added ids; the latter have no
                // meaningful score, hence NaN.
                Collection<IndexHits<Long>> iterators = new ArrayList<IndexHits<Long>>();
                iterators.add( searchedIds );
                iterators.add( new ConstantScoreIterator<Long>( ids, Float.NaN ) );
                idIterator = new CombinedIndexHits<Long>( iterators );
            }
        }
    }
    // No searcher or cache-only hits: serve whatever ids we collected.
    idIterator = idIterator == null ? new ConstantScoreIterator<Long>( ids, 0 ) : idIterator;
    return newEntityIterator( idIterator );
}
@Override
public boolean isWriteable()
{
return true;
}
private IndexHits<T> newEntityIterator( IndexHits<Long> idIterator )
{
return new IdToEntityIterator<T>( idIterator )
{
@Override
protected T underlyingObjectToObject( Long id )
{
return getById( id );
}
@Override
protected void itemDodged( Long item )
{
abandonedIds.add( item );
}
@Override
public boolean hasNext()
{
assertInTransaction();
return super.hasNext();
}
@Override
public T next()
{
assertInTransaction();
return super.next();
}
@Override
public T getSingle()
{
assertInTransaction();
return super.getSingle();
}
};
}
/**
 * Copies the cached ids for {@code valueAsString} into {@code ids}, skipping
 * any id contained in {@code deletedNodes}.
 *
 * @return {@code true} if the cache held an entry for the value (even an
 * empty one), {@code false} when nothing was cached.
 */
private boolean fillFromCache(
        LruCache<String, Collection<Long>> cachedNodesMap,
        List<Long> ids, String valueAsString,
        Collection<Long> deletedNodes )
{
    if ( cachedNodesMap == null )
    {
        return false;
    }
    Collection<Long> cachedNodes = cachedNodesMap.get( valueAsString );
    if ( cachedNodes == null )
    {
        return false;
    }
    for ( Long cachedNodeId : cachedNodes )
    {
        if ( !deletedNodes.contains( cachedNodeId ) )
        {
            ids.add( cachedNodeId );
        }
    }
    return true;
}
private IndexHits<Document> search( IndexReference searcherRef, Query query,
QueryContext additionalParametersOrNull, IndexSearcher additionsSearcher, Collection<Long> removed )
{
try
{
if ( additionsSearcher != null && !removed.isEmpty() )
{
letThroughAdditions( additionsSearcher, query, removed );
}
IndexSearcher searcher = additionsSearcher == null ? searcherRef.getSearcher() :
new IndexSearcher( new MultiReader( searcherRef.getSearcher().getIndexReader(),
additionsSearcher.getIndexReader() ) );
IndexHits<Document> result = null;
if ( additionalParametersOrNull != null && additionalParametersOrNull.getTop() > 0 )
{
result = new TopDocsIterator( query, additionalParametersOrNull, searcher );
}
else
{
Sort sorting = additionalParametersOrNull != null ?
additionalParametersOrNull.getSorting() : null;
boolean forceScore = additionalParametersOrNull == null ||
!additionalParametersOrNull.getTradeCorrectnessForSpeed();
Hits hits = new Hits( searcher, query, null, sorting, forceScore );
result = new HitsIterator( hits );
}
return result;
}
catch ( IOException e )
{
throw new RuntimeException( "Unable to query " + this + " with "
+ query, e );
}
}
private void letThroughAdditions( IndexSearcher additionsSearcher, Query query, Collection<Long> removed )
throws IOException
{
Hits hits = new Hits( additionsSearcher, query, null );
HitsIterator iterator = new HitsIterator( hits );
while ( iterator.hasNext() )
{
String idString = iterator.next().getField( KEY_DOC_ID ).stringValue();
removed.remove( Long.valueOf( idString ) );
}
}
public void setCacheCapacity( String key, int capacity )
{
service.dataSource().setCacheCapacity( identifier, key, capacity );
}
public Integer getCacheCapacity( String key )
{
return service.dataSource().getCacheCapacity( identifier, key );
}
protected abstract T getById( long id );
protected abstract long getEntityId( T entity );
protected abstract LuceneCommand newAddCommand( PropertyContainer entity,
String key, Object value );
protected abstract LuceneCommand newRemoveCommand( PropertyContainer entity,
String key, Object value );
IndexIdentifier getIdentifier()
{
return this.identifier;
}
static class NodeIndex extends LuceneIndex<Node>
{
private final GraphDatabaseService gdb;
NodeIndex( LuceneIndexImplementation service,
GraphDatabaseService gdb,
IndexIdentifier identifier, TransactionManager txManager )
{
super( service, identifier, txManager );
this.gdb = gdb;
}
@Override
protected Node getById( long id )
{
return gdb.getNodeById(id);
}
@Override
protected long getEntityId( Node entity )
{
return entity.getId();
}
@Override
protected LuceneCommand newAddCommand( PropertyContainer entity, String key, Object value )
{
return new LuceneCommand.AddCommand( getIdentifier(), LuceneCommand.NODE,
((Node) entity).getId(), key, value );
}
@Override
protected LuceneCommand newRemoveCommand( PropertyContainer entity, String key, Object value )
{
return new LuceneCommand.RemoveCommand( getIdentifier(), LuceneCommand.NODE,
((Node) entity).getId(), key, value );
}
@Override
public Class<Node> getEntityType()
{
return Node.class;
}
}
static class RelationshipIndex extends LuceneIndex<Relationship>
implements org.neo4j.graphdb.index.RelationshipIndex
{
private final GraphDatabaseService gdb;
RelationshipIndex( LuceneIndexImplementation service,
GraphDatabaseService gdb,
IndexIdentifier identifier, TransactionManager txManager )
{
super( service, identifier, txManager );
this.gdb = gdb;
}
@Override
protected Relationship getById( long id )
{
return gdb.getRelationshipById(id);
}
@Override
protected long getEntityId( Relationship entity )
{
return entity.getId();
}
@Override
public IndexHits<Relationship> get( String key, Object valueOrNull, Node startNodeOrNull,
Node endNodeOrNull )
{
super.assertInTransaction();
BooleanQuery query = new BooleanQuery();
if ( key != null && valueOrNull != null )
{
query.add( type.get( key, valueOrNull ), Occur.MUST );
}
addIfNotNull( query, startNodeOrNull, KEY_START_NODE_ID );
addIfNotNull( query, endNodeOrNull, KEY_END_NODE_ID );
return query( query, null, null, null );
}
/**
 * Queries this relationship index with an optional key, optional query object
 * (plain string/Query or a {@link QueryContext}) and optional start/end node
 * constraints. All {@code null} parts are simply omitted from the lucene query.
 */
@Override
public IndexHits<Relationship> query( String key, Object queryOrQueryObjectOrNull,
        Node startNodeOrNull, Node endNodeOrNull )
{
    super.assertInTransaction();
    // instanceof already evaluates to false for null, so no separate null check is needed.
    QueryContext context = queryOrQueryObjectOrNull instanceof QueryContext ?
            (QueryContext) queryOrQueryObjectOrNull : null;
    BooleanQuery query = new BooleanQuery();
    // Only add the key/value sub-query when an actual query was supplied,
    // either directly or wrapped inside a QueryContext.
    if ( (context != null && context.getQueryOrQueryObject() != null) ||
            (context == null && queryOrQueryObjectOrNull != null ) )
    {
        query.add( type.query( key, context != null ?
                context.getQueryOrQueryObject() : queryOrQueryObjectOrNull, context ), Occur.MUST );
    }
    // Constrain by start/end node ids when given.
    addIfNotNull( query, startNodeOrNull, KEY_START_NODE_ID );
    addIfNotNull( query, endNodeOrNull, KEY_END_NODE_ID );
    return query( query, null, null, context );
}
private static void addIfNotNull( BooleanQuery query, Node nodeOrNull, String field )
{
if ( nodeOrNull != null )
{
query.add( new TermQuery( new Term( field, "" + nodeOrNull.getId() ) ),
Occur.MUST );
}
}
@Override
public IndexHits<Relationship> query( Object queryOrQueryObjectOrNull,
Node startNodeOrNull, Node endNodeOrNull )
{
super.assertInTransaction();
return query( null, queryOrQueryObjectOrNull, startNodeOrNull, endNodeOrNull );
}
@Override
protected LuceneCommand newAddCommand( PropertyContainer entity, String key, Object value )
{
Relationship rel = (Relationship) entity;
return new LuceneCommand.AddRelationshipCommand( getIdentifier(), LuceneCommand.RELATIONSHIP,
RelationshipId.of( rel ), key, value );
}
@Override
protected LuceneCommand newRemoveCommand( PropertyContainer entity, String key, Object value )
{
Relationship rel = (Relationship) entity;
return new LuceneCommand.RemoveCommand( getIdentifier(), LuceneCommand.RELATIONSHIP,
RelationshipId.of( rel ), key, value );
}
@Override
public Class<Relationship> getEntityType()
{
return Relationship.class;
}
}
/**
* Copied from AbstractTransactionManager, couldn't find a good reuse
*/
private void assertInTransaction()
{
    try
    {
        // No current transaction on the manager means we're outside one.
        if ( txManager.getTransaction() == null )
        {
            throw new NotInTransactionException();
        }
    }
    catch ( SystemException e )
    {
        throw new IllegalStateException( "Unable to determine transaction state", e );
    }
}
}
| false
|
community_lucene-index_src_main_java_org_neo4j_index_impl_lucene_LuceneIndex.java
|
5,746
|
private class LuceneTransactionFactory extends XaTransactionFactory
{
@Override
public XaTransaction create( long lastCommittedTxWhenTransactionStarted, TransactionState state)
{
return createTransaction( this.getLogicalLog(), state );
}
@Override
public void flushAll()
{
for ( IndexReference index : getAllIndexes() )
{
try
{
index.getWriter().commit();
}
catch ( IOException e )
{
throw new RuntimeException( "unable to commit changes to " + index.getIdentifier(), e );
}
}
providerStore.flush();
}
@Override
public void recoveryComplete()
{
if ( !expectedFutureRecoveryDeletions.isEmpty() )
{
throw new TransactionFailureException( "Recovery discovered transactions which couldn't " +
"be applied due to a future index deletion, however some expected deletions " +
"weren't encountered: " + expectedFutureRecoveryDeletions );
}
}
@Override
public long getCurrentVersion()
{
return providerStore.getVersion();
}
@Override
public long getAndSetNewVersion()
{
return providerStore.incrementVersion();
}
@Override
public void setVersion( long version )
{
providerStore.setVersion( version );
}
@Override
public long getLastCommittedTx()
{
return providerStore.getLastCommittedTx();
}
}
| false
|
community_lucene-index_src_main_java_org_neo4j_index_impl_lucene_LuceneDataSource.java
|
5,747
|
MEMORY
{
@Override
Directory getDirectory( File baseStorePath, IndexIdentifier identifier )
{
return new RAMDirectory();
}
@Override
void cleanWriteLocks( File path )
{
}
@Override
File ensureDirectoryExists( FileSystemAbstraction fileSystem, File path )
{
try
{
fileSystem.mkdirs( path );
}
catch ( IOException e )
{
throw new RuntimeException( e );
}
return path;
}
};
| false
|
community_lucene-index_src_main_java_org_neo4j_index_impl_lucene_LuceneDataSource.java
|
5,748
|
FS
{
@Override
Directory getDirectory( File baseStorePath, IndexIdentifier identifier ) throws IOException
{
return FSDirectory.open( getFileDirectory( baseStorePath, identifier ) );
}
@Override
void cleanWriteLocks( File dir )
{
if ( !dir.isDirectory() )
{
return;
}
for ( File file : dir.listFiles() )
{
if ( file.isDirectory() )
{
cleanWriteLocks( file );
}
else if ( file.getName().equals( "write.lock" ) )
{
boolean success = file.delete();
assert success;
}
}
}
@Override
File ensureDirectoryExists( FileSystemAbstraction fileSystem, File dir )
{
if ( !dir.exists() && !dir.mkdirs() )
{
String message = String.format( "Unable to create directory path[%s] for Neo4j store" +
".", dir.getAbsolutePath() );
throw new RuntimeException( message );
}
return dir;
}
},
| false
|
community_lucene-index_src_main_java_org_neo4j_index_impl_lucene_LuceneDataSource.java
|
5,749
|
private class LuceneCommandFactory extends XaCommandFactory
{
LuceneCommandFactory()
{
super();
}
@Override
public XaCommand readCommand( ReadableByteChannel channel,
ByteBuffer buffer ) throws IOException
{
return LuceneCommand.readCommand( channel, buffer, LuceneDataSource.this );
}
}
| false
|
community_lucene-index_src_main_java_org_neo4j_index_impl_lucene_LuceneDataSource.java
|
5,750
|
public static abstract class Configuration
extends LogBackedXaDataSource.Configuration
{
public static final Setting<Integer> lucene_searcher_cache_size = GraphDatabaseSettings
.lucene_searcher_cache_size;
public static final Setting<Boolean> read_only = GraphDatabaseSettings.read_only;
public static final Setting<Boolean> allow_store_upgrade = GraphDatabaseSettings.allow_store_upgrade;
public static final Setting<Boolean> ephemeral = InternalAbstractGraphDatabase.Configuration.ephemeral;
public static final Setting<File> store_dir = NeoStoreXaDataSource.Configuration.store_dir;
}
| false
|
community_lucene-index_src_main_java_org_neo4j_index_impl_lucene_LuceneDataSource.java
|
5,751
|
return xaContainer.getLogicalLog().createLogWriter( new Function<Config, File>(){
@Override
public File apply( Config config )
{
return logBaseName(baseDirectory(config.get( GraphDatabaseSettings.store_dir )));
}
});
| false
|
community_lucene-index_src_main_java_org_neo4j_index_impl_lucene_LuceneDataSource.java
|
5,752
|
{
private final Iterator<File> filesIterator = files.iterator();
@Override
protected File fetchNextOrNull()
{
return filesIterator.hasNext() ? filesIterator.next() : null;
}
@Override
public void close()
{
for ( SnapshotDeletionPolicy deletionPolicy : snapshots )
{
try
{
deletionPolicy.release( SNAPSHOT_ID );
}
catch ( IOException e )
{
// TODO What to do?
e.printStackTrace();
}
}
}
};
| false
|
community_lucene-index_src_main_java_org_neo4j_index_impl_lucene_LuceneDataSource.java
|
5,753
|
{
@Override
public <T> T resolveDependency( Class<T> type, SelectionStrategy selector )
{
return (T) LuceneDataSource.this.config;
}
};
| false
|
community_lucene-index_src_main_java_org_neo4j_index_impl_lucene_LuceneDataSource.java
|
5,754
|
{
@Override
public Document newDocument( Object entityId )
{
RelationshipId relId = (RelationshipId) entityId;
Document doc = IndexType.newBaseDocument( relId.id );
doc.add( new Field( LuceneIndex.KEY_START_NODE_ID, "" + relId.startNode,
Store.YES, org.apache.lucene.document.Field.Index.NOT_ANALYZED ) );
doc.add( new Field( LuceneIndex.KEY_END_NODE_ID, "" + relId.endNode,
Store.YES, org.apache.lucene.document.Field.Index.NOT_ANALYZED ) );
return doc;
}
@Override
public Class<? extends PropertyContainer> getType()
{
return Relationship.class;
}
};
| false
|
community_lucene-index_src_main_java_org_neo4j_index_impl_lucene_LuceneDataSource.java
|
5,755
|
{
@Override
public Document newDocument( Object entityId )
{
return IndexType.newBaseDocument( (Long) entityId );
}
@Override
public Class<? extends PropertyContainer> getType()
{
return Node.class;
}
};
| false
|
community_lucene-index_src_main_java_org_neo4j_index_impl_lucene_LuceneDataSource.java
|
5,756
|
{
@Override
public TokenStream tokenStream( String fieldName, Reader reader )
{
return new WhitespaceTokenizer( LUCENE_VERSION, reader );
}
@Override
public String toString()
{
return "WHITESPACE_ANALYZER";
}
};
| false
|
community_lucene-index_src_main_java_org_neo4j_index_impl_lucene_LuceneDataSource.java
|
5,757
|
{
@Override
public TokenStream tokenStream( String fieldName, Reader reader )
{
return new LowerCaseFilter( LUCENE_VERSION, new WhitespaceTokenizer( LUCENE_VERSION, reader ) );
}
@Override
public String toString()
{
return "LOWER_CASE_WHITESPACE_ANALYZER";
}
};
| false
|
community_lucene-index_src_main_java_org_neo4j_index_impl_lucene_LuceneDataSource.java
|
5,758
|
public class LuceneDataSource extends LogBackedXaDataSource
{
private final Config config;
private final FileSystemAbstraction fileSystemAbstraction;
public static abstract class Configuration
extends LogBackedXaDataSource.Configuration
{
public static final Setting<Integer> lucene_searcher_cache_size = GraphDatabaseSettings
.lucene_searcher_cache_size;
public static final Setting<Boolean> read_only = GraphDatabaseSettings.read_only;
public static final Setting<Boolean> allow_store_upgrade = GraphDatabaseSettings.allow_store_upgrade;
public static final Setting<Boolean> ephemeral = InternalAbstractGraphDatabase.Configuration.ephemeral;
public static final Setting<File> store_dir = NeoStoreXaDataSource.Configuration.store_dir;
}
public static final Version LUCENE_VERSION = Version.LUCENE_36;
public static final String DEFAULT_NAME = "lucene-index";
public static final byte[] DEFAULT_BRANCH_ID = UTF8.encode( "162374" );
// The reason this is still 3.5 even though the lucene version is 3.6 the format is compatible
// (both forwards and backwards) with lucene 3.5 and changing this would require an explicit
// store upgrade which feels unnecessary.
public static final long INDEX_VERSION = versionStringToLong( "3.5" );
/**
* Default {@link Analyzer} for fulltext parsing.
*/
public static final Analyzer LOWER_CASE_WHITESPACE_ANALYZER = new Analyzer()
{
@Override
public TokenStream tokenStream( String fieldName, Reader reader )
{
return new LowerCaseFilter( LUCENE_VERSION, new WhitespaceTokenizer( LUCENE_VERSION, reader ) );
}
@Override
public String toString()
{
return "LOWER_CASE_WHITESPACE_ANALYZER";
}
};
public static final Analyzer WHITESPACE_ANALYZER = new Analyzer()
{
@Override
public TokenStream tokenStream( String fieldName, Reader reader )
{
return new WhitespaceTokenizer( LUCENE_VERSION, reader );
}
@Override
public String toString()
{
return "WHITESPACE_ANALYZER";
}
};
public static final Analyzer KEYWORD_ANALYZER = new KeywordAnalyzer();
private IndexClockCache indexSearchers;
private XaContainer xaContainer;
private File baseStorePath;
private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
final IndexStore indexStore;
private final XaFactory xaFactory;
private final TransactionManager txManager;
IndexProviderStore providerStore;
private IndexTypeCache typeCache;
private boolean closed;
private Cache caching;
EntityType nodeEntityType;
EntityType relationshipEntityType;
final Map<IndexIdentifier, LuceneIndex<? extends PropertyContainer>> indexes =
new HashMap<IndexIdentifier, LuceneIndex<? extends PropertyContainer>>();
private LuceneFilesystemFacade filesystemFacade;
// Used for assertion after recovery has been completed.
private final Set<IndexIdentifier> expectedFutureRecoveryDeletions = new HashSet<IndexIdentifier>();
/**
* Constructs this data source.
* @throws InstantiationException if the data source couldn't be
* instantiated
*/
public LuceneDataSource( Config config, IndexStore indexStore, FileSystemAbstraction fileSystemAbstraction,
XaFactory xaFactory, TransactionManager txManager )
{
super( DEFAULT_BRANCH_ID, DEFAULT_NAME );
this.config = config;
this.indexStore = indexStore;
this.xaFactory = xaFactory;
this.txManager = txManager;
this.typeCache = new IndexTypeCache( indexStore );
this.fileSystemAbstraction = fileSystemAbstraction;
}
@Override
public void init()
{
}
@Override
public void start()
{
this.filesystemFacade = config.get( Configuration.ephemeral ) ? LuceneFilesystemFacade.MEMORY :
LuceneFilesystemFacade.FS;
indexSearchers = new IndexClockCache( config.get( Configuration.lucene_searcher_cache_size ) );
caching = new Cache();
File storeDir = config.get( Configuration.store_dir );
this.baseStorePath =
this.filesystemFacade.ensureDirectoryExists( fileSystemAbstraction, baseDirectory( storeDir ) );
this.filesystemFacade.cleanWriteLocks( baseStorePath );
boolean allowUpgrade = config.get( Configuration.allow_store_upgrade );
this.providerStore = newIndexStore( baseStorePath, fileSystemAbstraction, allowUpgrade );
this.typeCache = new IndexTypeCache( indexStore );
boolean isReadOnly = config.get( Configuration.read_only );
nodeEntityType = new EntityType()
{
@Override
public Document newDocument( Object entityId )
{
return IndexType.newBaseDocument( (Long) entityId );
}
@Override
public Class<? extends PropertyContainer> getType()
{
return Node.class;
}
};
relationshipEntityType = new EntityType()
{
@Override
public Document newDocument( Object entityId )
{
RelationshipId relId = (RelationshipId) entityId;
Document doc = IndexType.newBaseDocument( relId.id );
doc.add( new Field( LuceneIndex.KEY_START_NODE_ID, "" + relId.startNode,
Store.YES, org.apache.lucene.document.Field.Index.NOT_ANALYZED ) );
doc.add( new Field( LuceneIndex.KEY_END_NODE_ID, "" + relId.endNode,
Store.YES, org.apache.lucene.document.Field.Index.NOT_ANALYZED ) );
return doc;
}
@Override
public Class<? extends PropertyContainer> getType()
{
return Relationship.class;
}
};
XaCommandFactory cf = new LuceneCommandFactory();
XaTransactionFactory tf = new LuceneTransactionFactory();
DependencyResolver dummy = new DependencyResolver.Adapter()
{
@Override
public <T> T resolveDependency( Class<T> type, SelectionStrategy selector )
{
return (T) LuceneDataSource.this.config;
}
};
xaContainer = xaFactory.newXaContainer( this, logBaseName(baseStorePath), cf,
InjectedTransactionValidator.ALLOW_ALL, tf, TransactionStateFactory.noStateFactory( null ),
new TransactionInterceptorProviders( new HashSet<TransactionInterceptorProvider>(), dummy ), false );
closed = false;
if ( !isReadOnly )
{
try
{
xaContainer.openLogicalLog();
}
catch ( IOException e )
{
throw new RuntimeException( "Unable to open lucene log in " + this.baseStorePath, e );
}
setLogicalLogAtCreationTime( xaContainer.getLogicalLog() );
}
}
private File logBaseName(File baseDirectory)
{
return new File( baseDirectory, "lucene.log");
}
private File baseDirectory( File storeDir )
{
return new File( storeDir, "index" );
}
IndexType getType( IndexIdentifier identifier, boolean recovery )
{
return typeCache.getIndexType( identifier, recovery );
}
private IndexProviderStore newIndexStore( File dbStoreDir, FileSystemAbstraction fileSystem,
boolean allowUpgrade )
{
File file = new File( dbStoreDir, "lucene-store.db");
return new IndexProviderStore( file, fileSystem, INDEX_VERSION, allowUpgrade );
}
@Override
public void stop()
{
synchronized ( this )
{
super.stop();
if ( closed )
{
return;
}
closed = true;
for ( IndexReference searcher : indexSearchers.values() )
{
try
{
searcher.dispose( true );
}
catch ( IOException e )
{
e.printStackTrace();
}
}
indexSearchers.clear();
}
if ( xaContainer != null )
{
xaContainer.close();
}
providerStore.close();
}
@Override
public void shutdown()
{
}
/**
 * Returns the node index with the given name, creating and caching it on
 * first access. Access to the shared {@code indexes} map is synchronized so
 * concurrent callers observe a single instance per identifier.
 */
public Index<Node> nodeIndex( String indexName, GraphDatabaseService graphDb,
        LuceneIndexImplementation luceneIndexImplementation )
{
    IndexIdentifier identifier = new IndexIdentifier( LuceneCommand.NODE,
            nodeEntityType, indexName );
    synchronized ( indexes )
    {
        LuceneIndex index = indexes.get( identifier );
        if ( index == null )
        {
            index = new LuceneIndex.NodeIndex( luceneIndexImplementation, graphDb, identifier, txManager );
            indexes.put( identifier, index );
        }
        return index;
    }
}
public RelationshipIndex relationshipIndex( String indexName,
GraphDatabaseService gdb,
LuceneIndexImplementation luceneIndexImplementation
)
{
IndexIdentifier identifier = new IndexIdentifier( LuceneCommand.RELATIONSHIP,
relationshipEntityType, indexName );
synchronized ( indexes )
{
LuceneIndex index = indexes.get( identifier );
if ( index == null )
{
index = new LuceneIndex.RelationshipIndex( luceneIndexImplementation, gdb, identifier, txManager );
indexes.put( identifier, index );
}
return (RelationshipIndex) index;
}
}
@Override
public XaConnection getXaConnection()
{
return new LuceneXaConnection( baseStorePath, xaContainer
.getResourceManager(), getBranchId() );
}
private class LuceneCommandFactory extends XaCommandFactory
{
LuceneCommandFactory()
{
super();
}
@Override
public XaCommand readCommand( ReadableByteChannel channel,
ByteBuffer buffer ) throws IOException
{
return LuceneCommand.readCommand( channel, buffer, LuceneDataSource.this );
}
}
/**
 * {@link XaTransactionFactory} for the lucene data source: creates lucene
 * transactions and bridges version/flush/recovery callbacks to the provider
 * store and the open index writers.
 */
private class LuceneTransactionFactory extends XaTransactionFactory
{
    @Override
    public XaTransaction create( long lastCommittedTxWhenTransactionStarted, TransactionState state)
    {
        return createTransaction( this.getLogicalLog(), state );
    }

    // Forces all pending index changes and the provider store to durable storage.
    @Override
    public void flushAll()
    {
        for ( IndexReference index : getAllIndexes() )
        {
            try
            {
                index.getWriter().commit();
            }
            catch ( IOException e )
            {
                throw new RuntimeException( "unable to commit changes to " + index.getIdentifier(), e );
            }
        }
        providerStore.flush();
    }

    @Override
    public void recoveryComplete()
    {
        // Every deletion postponed during recovery should have been encountered
        // by now; leftovers indicate an inconsistent logical log.
        if ( !expectedFutureRecoveryDeletions.isEmpty() )
        {
            throw new TransactionFailureException( "Recovery discovered transactions which couldn't " +
                    "be applied due to a future index deletion, however some expected deletions " +
                    "weren't encountered: " + expectedFutureRecoveryDeletions );
        }
    }

    @Override
    public long getCurrentVersion()
    {
        return providerStore.getVersion();
    }

    @Override
    public long getAndSetNewVersion()
    {
        return providerStore.incrementVersion();
    }

    @Override
    public void setVersion( long version )
    {
        providerStore.setVersion( version );
    }

    @Override
    public long getLastCommittedTx()
    {
        return providerStore.getLastCommittedTx();
    }
}
/**
 * Returns a snapshot array of all currently instantiated index references;
 * synchronized so the map is not modified while it is copied.
 */
private synchronized IndexReference[] getAllIndexes()
{
Collection<IndexReference> indexReferences = indexSearchers.values();
return indexReferences.toArray( new IndexReference[indexReferences.size()] );
}
// Read/write lock helpers used by callers to guard index-structure changes.
void getReadLock()
{
lock.readLock().lock();
}
void releaseReadLock()
{
lock.readLock().unlock();
}
void getWriteLock()
{
lock.writeLock().lock();
}
void releaseWriteLock()
{
lock.writeLock().unlock();
}
/**
 * If nothing has changed underneath (since the searcher was last created
 * or refreshed) the given {@code searcher} is returned as-is. If something
 * has changed, a refreshed reference is returned and the old one is
 * detached or closed. Uses
 * {@link IndexReader#openIfChanged(IndexReader, IndexWriter, boolean)},
 * which is faster than opening the index from scratch.
 *
 * @param searcher the {@link IndexReference} to refresh.
 * @return a refreshed reference, or the same reference if nothing changed.
 * @throws RuntimeException wrapping any {@link IOException} from the index.
 */
private IndexReference refreshSearcher( IndexReference searcher )
{
try
{
IndexReader reader = searcher.getSearcher().getIndexReader();
IndexWriter writer = searcher.getWriter();
IndexReader reopened = IndexReader.openIfChanged( reader, writer, true );
if ( reopened != null )
{
IndexSearcher newSearcher = newIndexSearcher( searcher.getIdentifier(), reopened );
searcher.detachOrClose();
return new IndexReference( searcher.getIdentifier(), newSearcher, writer );
}
return searcher;
}
catch ( IOException e )
{
throw new RuntimeException( e );
}
}
/**
 * Resolves the on-disk directory that holds all Lucene indexes for a given
 * entity type: {@code <storeDir>/lucene/node} or
 * {@code <storeDir>/lucene/relationship}.
 *
 * @param storeDir the database store directory.
 * @param entityType {@link LuceneCommand#NODE} or {@link LuceneCommand#RELATIONSHIP}.
 * @return the directory for that entity type (not created on disk by this call).
 * @throws IllegalArgumentException if {@code entityType} is not a known type byte.
 */
static File getFileDirectory( File storeDir, byte entityType )
{
File path = new File( storeDir, "lucene" );
String extra;
if ( entityType == LuceneCommand.NODE )
{
extra = "node";
}
else if ( entityType == LuceneCommand.RELATIONSHIP )
{
extra = "relationship";
}
else
{
// Was: throw new RuntimeException( "" + entityType ) — gave no context at all.
// IllegalArgumentException is a RuntimeException, so callers are unaffected.
throw new IllegalArgumentException( "Unknown entity type: " + entityType );
}
return new File( path, extra );
}
/**
 * Resolves the directory for one specific index:
 * entity-type directory + index name.
 */
static File getFileDirectory( File storeDir, IndexIdentifier identifier )
{
return new File( getFileDirectory( storeDir, identifier.entityTypeByte ),
identifier.indexName );
}
/** Opens an {@link FSDirectory} over the index's on-disk directory. */
static Directory getDirectory( File storeDir,
IndexIdentifier identifier ) throws IOException
{
return FSDirectory.open( getFileDirectory( storeDir, identifier ) );
}
/**
 * Collector for the top {@code n} hits sorted by {@code sorting}, configured
 * to track per-hit scores so callers can read them back.
 */
static TopFieldCollector scoringCollector( Sort sorting, int n ) throws IOException
{
return TopFieldCollector.create( sorting, n, false, true, false, true );
}
/**
 * Returns a searcher for the given index with its reference count already
 * incremented; callers must release it. Fast path reads the map without
 * class-level synchronization; if the reference is missing or closed it
 * falls back to the synchronized {@link #syncGetIndexSearcher(IndexIdentifier)}.
 */
IndexReference getIndexSearcher( IndexIdentifier identifier )
{
assertNotClosed();
IndexReference searcher = indexSearchers.get( identifier );
if ( searcher == null )
{
return syncGetIndexSearcher( identifier );
}
synchronized ( searcher )
{
/*
 * We need to get again a reference to the searcher because it might be so that
 * it was refreshed while we waited. Once in here though no one will mess with
 * our searcher
 */
searcher = indexSearchers.get( identifier );
if ( searcher == null || searcher.isClosed() )
{
return syncGetIndexSearcher( identifier );
}
searcher = refreshSearcherIfNeeded( searcher );
searcher.incRef();
return searcher;
}
}
// Guards entry points against use after the provider has been shut down.
private void assertNotClosed()
{
if ( closed )
{
throw new IllegalStateException( "Lucene index provider has been shut down" );
}
}
/**
 * Synchronized slow path of {@link #getIndexSearcher(IndexIdentifier)}:
 * creates writer + reader + searcher on first access, or refreshes an
 * existing stale reference. Increments the reference count before returning.
 */
synchronized IndexReference syncGetIndexSearcher( IndexIdentifier identifier )
{
try
{
IndexReference searcher = indexSearchers.get( identifier );
if ( searcher == null )
{
IndexWriter writer = newIndexWriter( identifier );
IndexReader reader = IndexReader.open( writer, true );
IndexSearcher indexSearcher = newIndexSearcher( identifier, reader );
searcher = new IndexReference( identifier, indexSearcher, writer );
indexSearchers.put( identifier, searcher );
}
else
{
synchronized ( searcher )
{
searcher = refreshSearcherIfNeeded( searcher );
}
}
searcher.incRef();
return searcher;
}
catch ( IOException e )
{
throw new RuntimeException( e );
}
}
// Wraps a reader in a searcher, applying the index's custom similarity if configured.
private IndexSearcher newIndexSearcher( IndexIdentifier identifier, IndexReader reader )
{
IndexSearcher searcher = new IndexSearcher( reader );
IndexType type = getType( identifier, false );
if ( type.getSimilarity() != null )
{
searcher.setSimilarity( type.getSimilarity() );
}
return searcher;
}
/**
 * Refreshes the reference if it was marked stale and publishes the refreshed
 * reference back into the map. ({@code refreshSearcher} never returns null,
 * so the null check here is purely defensive.)
 */
private IndexReference refreshSearcherIfNeeded( IndexReference searcher )
{
if ( searcher.checkAndClearStale() )
{
searcher = refreshSearcher( searcher );
if ( searcher != null )
{
indexSearchers.put( searcher.getIdentifier(), searcher );
}
}
return searcher;
}
XaTransaction createTransaction( XaLogicalLog logicalLog, TransactionState state )
{
return new LuceneTransaction( logicalLog, state, this );
}
/**
 * Marks the searcher for the given index as stale so the next acquisition
 * refreshes it; no-op if the index has no instantiated searcher.
 */
void invalidateIndexSearcher( IndexIdentifier identifier )
{
IndexReference searcher = indexSearchers.get( identifier );
if ( searcher != null )
{
searcher.setStale();
}
}
/**
 * Deletes the given index: closes its searcher/writer, removes its files on
 * disk, invalidates caches and (normally) removes its configuration from the
 * index store. Any live {@link LuceneIndex} instance is marked as deleted.
 *
 * @param identifier which index to delete.
 * @param recovery {@code true} when invoked during recovery; in that case the
 * configuration is only removed if it is still present in the index store.
 */
void deleteIndex( IndexIdentifier identifier, boolean recovery )
{
closeIndex( identifier );
deleteFileOrDirectory( getFileDirectory( baseStorePath, identifier ) );
invalidateCache( identifier );
// Simplified from "!recovery || (recovery && has(...))": the "recovery &&"
// term was redundant because it is only evaluated when recovery is true.
boolean removeFromIndexStore = !recovery ||
indexStore.has( identifier.entityType.getType(), identifier.indexName );
if ( removeFromIndexStore )
{
indexStore.remove( identifier.entityType.getType(), identifier.indexName );
}
typeCache.invalidate( identifier );
synchronized ( indexes )
{
LuceneIndex<? extends PropertyContainer> index = indexes.remove( identifier );
if ( index != null )
{
index.markAsDeleted();
}
}
}
/**
 * Recursively deletes a file or directory tree. Missing files are ignored.
 * Failures to delete individual entries are silently ignored (best effort),
 * matching the original behavior.
 *
 * @param file file or directory to delete; may be non-existent.
 */
private static void deleteFileOrDirectory( File file )
{
if ( !file.exists() )
{
return;
}
if ( file.isDirectory() )
{
File[] children = file.listFiles();
// listFiles() returns null on I/O error or if the directory vanished
// concurrently — the original code would have thrown an NPE here.
if ( children != null )
{
for ( File child : children )
{
deleteFileOrDirectory( child );
}
}
}
file.delete();
}
/**
 * Creates a new {@link IndexWriter} for the given index, configured with the
 * index's analyzer, optional similarity, and a deletion policy that supports
 * backup snapshots. Callers are responsible for synchronization.
 */
private /*synchronized elsewhere*/ IndexWriter newIndexWriter( IndexIdentifier identifier )
{
assertNotClosed();
try
{
Directory dir = filesystemFacade.getDirectory( baseStorePath, identifier ); //getDirectory(
// baseStorePath, identifier );
directoryExists( dir );
IndexType type = getType( identifier, false );
IndexWriterConfig writerConfig = new IndexWriterConfig( LUCENE_VERSION, type.analyzer );
writerConfig.setIndexDeletionPolicy( new MultipleBackupDeletionPolicy() );
Similarity similarity = type.getSimilarity();
if ( similarity != null )
{
writerConfig.setSimilarity( similarity );
}
IndexWriter indexWriter = new IndexWriter( dir, writerConfig );
// TODO We should tamper with this value and see how it affects the
// general performance. Lucene docs says rather <10 for mixed
// reads/writes
// writer.setMergeFactor( 8 );
return indexWriter;
}
catch ( IOException e )
{
throw new RuntimeException( e );
}
}
/**
 * Returns {@code true} if the directory exists and contains at least one file.
 * NOTE(review): the return value is ignored by the only visible caller above —
 * the call looks like a leftover probe; confirm whether it can be removed.
 */
private boolean directoryExists( Directory dir )
{
try
{
String[] files = dir.listAll();
return files != null && files.length > 0;
}
catch ( IOException e )
{
return false;
}
}
/**
 * Looks up the single Lucene document for the given entity id, or
 * {@code null} if the entity has no document in this index.
 */
static Document findDocument( IndexType type, IndexSearcher searcher, long entityId )
{
try
{
TopDocs docs = searcher.search( type.idTermQuery( entityId ), 1 );
if ( docs.scoreDocs.length > 0 )
{
return searcher.doc( docs.scoreDocs[0].doc );
}
return null;
}
catch ( IOException e )
{
throw new RuntimeException( e );
}
}
/**
 * Returns {@code true} when the document carries no user data, i.e. every
 * remaining field is one of the internal bookkeeping fields (document id,
 * start/end node id).
 */
static boolean documentIsEmpty( Document document )
{
for ( Fieldable field : document.getFields() )
{
String fieldName = field.name();
boolean internal = LuceneIndex.KEY_DOC_ID.equals( fieldName )
|| LuceneIndex.KEY_END_NODE_ID.equals( fieldName )
|| LuceneIndex.KEY_START_NODE_ID.equals( fieldName );
if ( !internal )
{
return false;
}
}
return true;
}
/**
 * Deletes all documents matching {@code query} from {@code writer}, wrapping
 * any {@link IOException} in a {@link RuntimeException}.
 */
static void remove( IndexWriter writer, Query query )
{
try
{
// TODO
writer.deleteDocuments( query );
}
catch ( IOException e )
{
throw new RuntimeException( "Unable to delete for " + query + " using" + writer, e );
}
}
// Removes the index's reference and disposes it (closing searcher and writer).
private synchronized void closeIndex( IndexIdentifier identifier )
{
try
{
IndexReference searcher = indexSearchers.remove( identifier );
if ( searcher != null )
{
searcher.dispose( true );
}
}
catch ( IOException e )
{
throw new RuntimeException( "Unable to close lucene writer " + identifier, e );
}
}
// Per-index, per-key LRU cache of query results, keyed by value string.
LruCache<String, Collection<Long>> getFromCache( IndexIdentifier identifier, String key )
{
return caching.get( identifier, key );
}
void setCacheCapacity( IndexIdentifier identifier, String key, int maxNumberOfCachedEntries )
{
this.caching.setCapacity( identifier, key, maxNumberOfCachedEntries );
}
// Returns the configured capacity, or null when no cache exists for the key.
Integer getCacheCapacity( IndexIdentifier identifier, String key )
{
LruCache<String, Collection<Long>> cache = this.caching.get( identifier, key );
return cache != null ? cache.maxSize() : null;
}
// Evicts the cached result for one specific key/value pair.
void invalidateCache( IndexIdentifier identifier, String key, Object value )
{
LruCache<String, Collection<Long>> cache = caching.get( identifier, key );
if ( cache != null )
{
cache.remove( value.toString() );
}
}
// Drops all caching for the given index (e.g. when it is deleted).
void invalidateCache( IndexIdentifier identifier )
{
this.caching.disable( identifier );
}
// The methods below delegate data-source metadata straight to the provider store.
@Override
public long getCreationTime()
{
return providerStore.getCreationTime();
}
@Override
public long getRandomIdentifier()
{
return providerStore.getRandomNumber();
}
@Override
public long getCurrentLogVersion()
{
return providerStore.getVersion();
}
@Override
public long getLastCommittedTxId()
{
return providerStore.getLastCommittedTx();
}
@Override
public void setLastCommittedTxId( long txId )
{
providerStore.setLastCommittedTx( txId );
}
@Override
public XaContainer getXaContainer()
{
return this.xaContainer;
}
/**
 * Lists all files needed to back up the Lucene indexes: a snapshot of every
 * index's current commit plus the provider store file. Logical logs are never
 * included. Closing the returned iterator releases the commit snapshots.
 */
@Override
public ResourceIterator<File> listStoreFiles( boolean includeLogicalLogs ) throws IOException
{ // Never include logical logs since they are of little importance
final Collection<File> files = new ArrayList<>();
final Collection<SnapshotDeletionPolicy> snapshots = new ArrayList<>();
makeSureAllIndexesAreInstantiated();
for ( IndexReference writer : getAllIndexes() )
{
SnapshotDeletionPolicy deletionPolicy = (SnapshotDeletionPolicy)
writer.getWriter().getConfig().getIndexDeletionPolicy();
File indexDirectory = getFileDirectory( baseStorePath, writer.getIdentifier() );
try
{
// Throws IllegalStateException if no commits yet
IndexCommit commit = deletionPolicy.snapshot( SNAPSHOT_ID );
for ( String fileName : commit.getFileNames() )
{
files.add( new File( indexDirectory, fileName ) );
}
snapshots.add( deletionPolicy );
}
catch ( IllegalStateException e )
{
// TODO Review this
/*
 * This is insane but happens if we try to snapshot an existing index
 * that has no commits. This is a bad API design - it should return null
 * or something. This is not exceptional.
 */
}
}
files.add( providerStore.getFile() );
return new PrefetchingResourceIterator<File>()
{
private final Iterator<File> filesIterator = files.iterator();
@Override
protected File fetchNextOrNull()
{
return filesIterator.hasNext() ? filesIterator.next() : null;
}
@Override
public void close()
{
// Release every snapshot so Lucene may garbage-collect those commits again.
for ( SnapshotDeletionPolicy deletionPolicy : snapshots )
{
try
{
deletionPolicy.release( SNAPSHOT_ID );
}
catch ( IOException e )
{
// TODO What to do?
e.printStackTrace();
}
}
}
};
}
@Override
public ResourceIterator<File> listStoreFiles() throws IOException
{
return listStoreFiles( false );
}
@Override
public ResourceIterator<File> listLogicalLogs() throws IOException
{
// This data source exposes no logical logs to list.
return IteratorUtil.emptyIterator();
}
@Override
public LogBufferFactory createLogBufferFactory()
{
return xaContainer.getLogicalLog().createLogWriter( new Function<Config, File>(){
@Override
public File apply( Config config )
{
return logBaseName(baseDirectory(config.get( GraphDatabaseSettings.store_dir )));
}
});
}
/**
 * Ensures every Lucene-provided index registered in the index store has an
 * instantiated searcher, so operations that iterate all indexes (such as
 * listing store files for backup) see them all.
 */
private void makeSureAllIndexesAreInstantiated()
{
for ( String name : indexStore.getNames( Node.class ) )
{
Map<String, String> config = indexStore.get( Node.class, name );
// Constant-first equals avoids an NPE when a config lacks the provider key.
if ( LuceneIndexImplementation.SERVICE_NAME.equals( config.get( IndexManager.PROVIDER ) ) )
{
IndexIdentifier identifier = new IndexIdentifier( LuceneCommand.NODE, nodeEntityType, name );
getIndexSearcher( identifier );
}
}
for ( String name : indexStore.getNames( Relationship.class ) )
{
Map<String, String> config = indexStore.get( Relationship.class, name );
if ( LuceneIndexImplementation.SERVICE_NAME.equals( config.get( IndexManager.PROVIDER ) ) )
{
IndexIdentifier identifier = new IndexIdentifier( LuceneCommand.RELATIONSHIP, relationshipEntityType,
name );
getIndexSearcher( identifier );
}
}
}
/**
 * Abstraction over where Lucene directories live: the real filesystem (FS)
 * or in memory (MEMORY, for ephemeral databases). Also handles creation of
 * directories and cleanup of leftover "write.lock" files.
 */
private static enum LuceneFilesystemFacade
{
FS
{
@Override
Directory getDirectory( File baseStorePath, IndexIdentifier identifier ) throws IOException
{
return FSDirectory.open( getFileDirectory( baseStorePath, identifier ) );
}
@Override
void cleanWriteLocks( File dir )
{
// Recursively delete stale Lucene "write.lock" files left behind by a crash.
if ( !dir.isDirectory() )
{
return;
}
for ( File file : dir.listFiles() )
{
if ( file.isDirectory() )
{
cleanWriteLocks( file );
}
else if ( file.getName().equals( "write.lock" ) )
{
boolean success = file.delete();
assert success;
}
}
}
@Override
File ensureDirectoryExists( FileSystemAbstraction fileSystem, File dir )
{
if ( !dir.exists() && !dir.mkdirs() )
{
String message = String.format( "Unable to create directory path[%s] for Neo4j store" +
".", dir.getAbsolutePath() );
throw new RuntimeException( message );
}
return dir;
}
},
MEMORY
{
@Override
Directory getDirectory( File baseStorePath, IndexIdentifier identifier )
{
return new RAMDirectory();
}
@Override
void cleanWriteLocks( File path )
{
// Nothing on disk to clean for in-memory directories.
}
@Override
File ensureDirectoryExists( FileSystemAbstraction fileSystem, File path )
{
try
{
fileSystem.mkdirs( path );
}
catch ( IOException e )
{
throw new RuntimeException( e );
}
return path;
}
};
abstract Directory getDirectory( File baseStorePath, IndexIdentifier identifier ) throws IOException;
abstract File ensureDirectoryExists( FileSystemAbstraction fileSystem, File path );
abstract void cleanWriteLocks( File path );
}
/**
 * Registers an index whose deletion is expected to be replayed later during
 * recovery; verified in {@code LuceneTransactionFactory#recoveryComplete()}.
 */
void addExpectedFutureDeletion( IndexIdentifier identifier )
{
expectedFutureRecoveryDeletions.add( identifier );
}
/** Clears a previously registered expected recovery deletion. */
void removeExpectedFutureDeletion( IndexIdentifier identifier )
{
expectedFutureRecoveryDeletions.remove( identifier );
}
}
| false
|
community_lucene-index_src_main_java_org_neo4j_index_impl_lucene_LuceneDataSource.java
|
5,759
|
/**
 * Logical-log command that removes a key (optionally a specific value) from
 * an entity's document in an index.
 */
static class RemoveCommand extends LuceneCommand
{
RemoveCommand( IndexIdentifier indexId, byte entityType, Object entityId, String key, Object value )
{
super( indexId, entityType, entityId, key, value, REMOVE_COMMAND );
}
@Override
void perform( CommitContext context )
{
context.ensureWriterInstantiated();
// 'false' presumably means "don't create a document if missing" — if the
// entity has no document there is nothing to remove; confirm against getDocument.
DocumentContext document = context.getDocument( entityId, false );
if ( document != null )
{
context.indexType.removeFromDocument( document.document, key, value );
// Drop any cached query results that may now be outdated.
context.dataSource.invalidateCache( context.identifier, key, value );
}
}
@Override
public boolean isConsideredNormalWriteCommand()
{
return true;
}
@Override
public String toString()
{
return "Remove[" + indexId + "," + entityId + "," + key + "," + value + "]";
}
}
| false
|
community_lucene-index_src_main_java_org_neo4j_index_impl_lucene_LuceneCommand.java
|
5,760
|
/**
 * Logical-log command that deletes an entire index, including its files.
 */
static class DeleteCommand extends LuceneCommand
{
DeleteCommand( IndexIdentifier indexId )
{
// Entity id/key/value are unused for a whole-index delete; placeholders only.
super( indexId, indexId.entityTypeByte, -1L, "", "", DELETE_COMMAND );
}
@Override
void perform( CommitContext context )
{
// Discard pending per-document changes; the whole index is going away.
context.documents.clear();
context.dataSource.deleteIndex( context.identifier, context.recovery );
}
@Override
public boolean isConsideredNormalWriteCommand()
{
return false;
}
@Override
public String toString()
{
return "Delete[" + indexId + "]";
}
}
| false
|
community_lucene-index_src_main_java_org_neo4j_index_impl_lucene_LuceneCommand.java
|
5,761
|
/**
 * {@code IndexHits} over a {@link Hits} result, fetching documents one by
 * one by their position in the hit list.
 */
public class HitsIterator extends AbstractIndexHits<Document>
{
private final int size;
private final Hits hits;
private int index;
public HitsIterator( Hits hits )
{
// Snapshot the hit count up front so size() stays stable during iteration.
this.size = hits.length();
this.hits = hits;
}
@Override
protected Document fetchNextOrNull()
{
int i = index++;
try
{
return i < size() ? hits.doc( i ) : null;
}
catch ( IOException e )
{
throw new RuntimeException( e );
}
}
/**
 * Score of the most recently returned document, or -1 if nothing has been
 * returned yet or iteration has moved past the last hit.
 */
public float currentScore()
{
int i = index-1;
try
{
return i >= 0 && i < size() ? hits.score( i ) : -1;
}
catch ( IOException e )
{
throw new RuntimeException( e );
}
}
public int size()
{
return this.size;
}
}
| false
|
community_lucene-index_src_main_java_org_neo4j_index_impl_lucene_HitsIterator.java
|
5,762
|
final class HitDoc {
float score;
int id;
Document doc = null;
org.neo4j.index.impl.lucene.HitDoc next; // in doubly-linked cache
org.neo4j.index.impl.lucene.HitDoc prev; // in doubly-linked cache
HitDoc(float s, int i) {
score = s;
id = i;
}
}
| false
|
community_lucene-index_src_main_java_org_neo4j_index_impl_lucene_Hits.java
|
5,763
|
/**
 * A {@link Path} stitched together from the two {@link TraversalBranch}es
 * that met in a bidirectional traversal: {@code start} grows from the start
 * node and {@code end} from the end node. Relationship/node sequences are
 * gathered on demand; the most used properties (end node, last relationship)
 * are cached eagerly in the constructor, and the start node is cached
 * opportunistically whenever a gathering pass reaches it.
 */
public class BidirectionalTraversalBranchPath implements Path
{
private final TraversalBranch start;
private final TraversalBranch end;
private final Node endNode;
private final Relationship lastRelationship;
private Node cachedStartNode;
private LinkedList<Relationship> cachedRelationships;
public BidirectionalTraversalBranchPath( TraversalBranch start, TraversalBranch end )
{
this.start = start;
this.end = end;
// Most used properties: endNode and lastRelationship, so cache them right away (semi-expensive though).
Iterator<PropertyContainer> endPathEntities = end.iterator();
this.endNode = (Node) endPathEntities.next();
this.lastRelationship = endPathEntities.hasNext() ?
(Relationship) endPathEntities.next() : start.lastRelationship();
}
@Override
public Node startNode()
{
// Getting the start node is expensive in some Path implementations, so cache it
if ( cachedStartNode == null )
cachedStartNode = start.startNode();
return cachedStartNode;
}
@Override
public Node endNode()
{
return this.endNode;
}
@Override
public Relationship lastRelationship()
{
return this.lastRelationship;
}
@Override
public Iterable<Relationship> relationships()
{
// Cache the relationships since we use them in hashCode/equals too.
if ( cachedRelationships == null )
cachedRelationships = gatherRelationships( start, end );
return cachedRelationships;
}
@Override
public Iterable<Relationship> reverseRelationships()
{
return gatherRelationships( end, start );
}
// Walks 'first' back to its root (prepending) then 'then' back to its root
// (appending), producing the relationships in first-to-then order.
private LinkedList<Relationship> gatherRelationships( TraversalBranch first, TraversalBranch then )
{
// TODO Don't loop through them all up front
LinkedList<Relationship> relationships = new LinkedList<Relationship>();
TraversalBranch branch = first;
while ( branch.length() > 0 )
{
relationships.addFirst( branch.lastRelationship() );
branch = branch.parent();
}
// We can might as well cache start node since we're right now there anyway
if ( cachedStartNode == null && first == start )
cachedStartNode = branch.endNode();
branch = then;
while ( branch.length() > 0 )
{
relationships.add( branch.lastRelationship() );
branch = branch.parent();
}
if ( cachedStartNode == null && then == start )
cachedStartNode = branch.endNode();
return relationships;
}
@Override
public Iterable<Node> nodes()
{
return gatherNodes( start, end );
}
@Override
public Iterable<Node> reverseNodes()
{
return gatherNodes( end, start );
}
// Same walking scheme as gatherRelationships, but collects the nodes,
// including the meeting point and both roots.
private Iterable<Node> gatherNodes( TraversalBranch first, TraversalBranch then )
{
// TODO Don't loop through them all up front
LinkedList<Node> nodes = new LinkedList<Node>();
TraversalBranch branch = first;
while ( branch.length() > 0 )
{
nodes.addFirst( branch.endNode() );
branch = branch.parent();
}
if ( cachedStartNode == null && first == start )
cachedStartNode = branch.endNode();
nodes.addFirst( branch.endNode() );
branch = then.parent();
if ( branch != null )
{
while ( branch.length() > 0 )
{
nodes.add( branch.endNode() );
branch = branch.parent();
}
// NOTE(review): length() >= 0 looks always true at this point — presumably
// intended to unconditionally include the root branch's node; confirm intent.
if ( branch.length() >= 0 )
{
nodes.add( branch.endNode() );
}
}
if ( cachedStartNode == null && then == start )
cachedStartNode = branch.endNode();
return nodes;
}
@Override
public int length()
{
return start.length() + end.length();
}
@Override
public Iterator<PropertyContainer> iterator()
{
// TODO Don't loop through them all up front
LinkedList<PropertyContainer> entities = new LinkedList<PropertyContainer>();
TraversalBranch branch = start;
while ( branch.length() > 0 )
{
entities.addFirst( branch.endNode() );
entities.addFirst( branch.lastRelationship() );
branch = branch.parent();
}
entities.addFirst( branch.endNode() );
if ( cachedStartNode == null )
cachedStartNode = branch.endNode();
if ( end.length() > 0 )
{
entities.add( end.lastRelationship() );
branch = end.parent();
while ( branch.length() > 0 )
{
entities.add( branch.endNode() );
entities.add( branch.lastRelationship() );
branch = branch.parent();
}
entities.add( branch.endNode() );
}
return entities.iterator();
}
@Override
public int hashCode()
{
return relationships().hashCode();
}
@Override
public boolean equals( Object obj )
{
if ( obj == this )
return true;
if ( !( obj instanceof Path ) )
return false;
Path other = (Path) obj;
// relationships() also populates cachedStartNode as a side effect.
return relationships().equals( other.relationships() ) && other.startNode().equals( cachedStartNode );
}
@Override
public String toString()
{
return Traversal.defaultPathToString( this );
}
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_BidirectionalTraversalBranchPath.java
|
5,764
|
/**
 * Small executable used by tests: adds a node to an index, deletes the index
 * in the same transaction, commits, then exits the JVM immediately.
 * {@code args[0]} is the store directory of the database to open.
 */
public class AddDeleteQuit
{
public static void main( String[] args )
{
GraphDatabaseService db = new GraphDatabaseFactory().newEmbeddedDatabase( args[0] );
Transaction tx = db.beginTx();
try
{
Index<Node> index = db.index().forNodes( "index" );
index.add( db.createNode(), "key", "value" );
index.delete();
tx.success();
}
finally
{
tx.finish();
}
// Hard exit without db.shutdown() — presumably intentional, simulating an
// abrupt termination for recovery testing; NOTE(review): confirm.
System.exit( 0 );
}
}
| false
|
community_lucene-index_src_test_java_org_neo4j_index_impl_lucene_AddDeleteQuit.java
|
5,765
|
{
public Relationship create( Object... properties )
{
Relationship rel = graphDb.createNode().createRelationshipTo( graphDb.createNode(), TEST_TYPE );
setProperties( rel, properties );
return rel;
}
public void delete( Relationship entity )
{
entity.delete();
}
};
| false
|
community_lucene-index_src_test_java_org_neo4j_index_impl_lucene_AbstractLuceneIndexTestIT.java
|
5,766
|
{
public Node create( Object... properties )
{
Node node = graphDb.createNode();
setProperties( node, properties );
return node;
}
public void delete( Node entity )
{
entity.delete();
}
};
| false
|
community_lucene-index_src_test_java_org_neo4j_index_impl_lucene_AbstractLuceneIndexTestIT.java
|
5,767
|
/**
 * Base class for Lucene index integration tests: manages one shared embedded
 * database per test class and keeps a transaction open around each test.
 * Also provides {@link EntityCreator} helpers for creating/deleting nodes
 * and relationships with properties.
 */
public abstract class AbstractLuceneIndexTestIT
{
protected static GraphDatabaseService graphDb;
protected Transaction tx;
@BeforeClass
public static void setUpStuff()
{
graphDb = new GraphDatabaseFactory().newEmbeddedDatabase(TargetDirectory.forTest( AbstractLuceneIndexTest.class ).cleanDirectory( "dir" ).getAbsolutePath() );
}
@AfterClass
public static void tearDownStuff()
{
graphDb.shutdown();
}
@After
public void commitTx()
{
finishTx( true );
}
public void rollbackTx()
{
finishTx( false );
}
// Ends the current transaction, committing when success is true; safe to
// call when no transaction is open.
public void finishTx( boolean success )
{
if ( tx != null )
{
if ( success )
{
tx.success();
}
tx.finish();
tx = null;
}
}
@Before
public void beginTx()
{
if ( tx == null )
{
tx = graphDb.beginTx();
}
}
// Commits the current transaction and immediately opens a new one.
void restartTx()
{
commitTx();
beginTx();
}
protected static abstract interface EntityCreator<T extends PropertyContainer>
{
T create( Object... properties );
void delete( T entity );
}
private static final RelationshipType TEST_TYPE =
DynamicRelationshipType.withName( "TEST_TYPE" );
// Creates plain nodes carrying the given alternating key/value properties.
protected static final EntityCreator<Node> NODE_CREATOR = new EntityCreator<Node>()
{
public Node create( Object... properties )
{
Node node = graphDb.createNode();
setProperties( node, properties );
return node;
}
public void delete( Node entity )
{
entity.delete();
}
};
// Creates a fresh node pair for every relationship (see FastRelationshipCreator
// for a cheaper variant that reuses one pair).
protected static final EntityCreator<Relationship> RELATIONSHIP_CREATOR =
new EntityCreator<Relationship>()
{
public Relationship create( Object... properties )
{
Relationship rel = graphDb.createNode().createRelationshipTo( graphDb.createNode(), TEST_TYPE );
setProperties( rel, properties );
return rel;
}
public void delete( Relationship entity )
{
entity.delete();
}
};
static class FastRelationshipCreator implements EntityCreator<Relationship>
{
private Node node, otherNode;
public Relationship create( Object... properties )
{
if ( node == null )
{
// Lazily create the shared endpoints on first use.
node = graphDb.createNode();
otherNode = graphDb.createNode();
}
Relationship rel = node.createRelationshipTo( otherNode, TEST_TYPE );
setProperties( rel, properties );
return rel;
}
public void delete( Relationship entity )
{
entity.delete();
}
}
// properties is an alternating key/value array, e.g. ("name", "x", "age", 1).
private static void setProperties( PropertyContainer entity, Object... properties )
{
for ( Map.Entry<String, Object> entry : MapUtil.map( properties ).entrySet() )
{
entity.setProperty( entry.getKey(), entry.getValue() );
}
}
protected Index<Node> nodeIndex( String name, Map<String, String> config )
{
return graphDb.index().forNodes( name, config );
}
protected RelationshipIndex relationshipIndex( String name, Map<String, String> config )
{
return graphDb.index().forRelationships( name, config );
}
}
| false
|
community_lucene-index_src_test_java_org_neo4j_index_impl_lucene_AbstractLuceneIndexTestIT.java
|
5,768
|
/**
 * EntityCreator that reuses a single node pair for all created relationships,
 * making bulk relationship creation cheap in tests.
 */
static class FastRelationshipCreator implements EntityCreator<Relationship>
{
private Node node, otherNode;
public Relationship create( Object... properties )
{
if ( node == null )
{
// Lazily create the shared endpoints on first use.
node = graphDb.createNode();
otherNode = graphDb.createNode();
}
Relationship rel = node.createRelationshipTo( otherNode, TEST_TYPE );
setProperties( rel, properties );
return rel;
}
public void delete( Relationship entity )
{
entity.delete();
}
}
| false
|
community_lucene-index_src_test_java_org_neo4j_index_impl_lucene_AbstractLuceneIndexTest.java
|
5,769
|
{
public Relationship create( Object... properties )
{
Relationship rel = graphDb.createNode().createRelationshipTo( graphDb.createNode(), TEST_TYPE );
setProperties( rel, properties );
return rel;
}
public void delete( Relationship entity )
{
entity.delete();
}
};
| false
|
community_lucene-index_src_test_java_org_neo4j_index_impl_lucene_AbstractLuceneIndexTest.java
|
5,770
|
{
public Node create( Object... properties )
{
Node node = graphDb.createNode();
setProperties( node, properties );
return node;
}
public void delete( Node entity )
{
entity.delete();
}
};
| false
|
community_lucene-index_src_test_java_org_neo4j_index_impl_lucene_AbstractLuceneIndexTest.java
|
5,771
|
/**
 * Base class for Lucene index unit tests: one impermanent database per test
 * class, a transaction kept open around each test, {@link EntityCreator}
 * helpers, and index factories that default the index name to the current
 * test method's name.
 */
public abstract class AbstractLuceneIndexTest
{
protected static GraphDatabaseService graphDb;
protected Transaction tx;
public final @Rule TestName testname = new TestName();
@BeforeClass
public static void setUpStuff()
{
graphDb = new TestGraphDatabaseFactory().newImpermanentDatabase();
}
@AfterClass
public static void tearDownStuff()
{
graphDb.shutdown();
}
@After
public void commitTx()
{
finishTx( true );
}
public void rollbackTx()
{
finishTx( false );
}
// Ends the current transaction, committing when success is true; safe to
// call when no transaction is open.
public void finishTx( boolean success )
{
if ( tx != null )
{
if ( success )
{
tx.success();
}
tx.close();
tx = null;
}
}
@Before
public void beginTx()
{
if ( tx == null )
{
tx = graphDb.beginTx();
}
}
// Commits the current transaction and immediately opens a new one.
void restartTx()
{
commitTx();
beginTx();
}
protected static abstract interface EntityCreator<T extends PropertyContainer>
{
T create( Object... properties );
void delete( T entity );
}
private static final RelationshipType TEST_TYPE =
DynamicRelationshipType.withName( "TEST_TYPE" );
// Creates plain nodes carrying the given alternating key/value properties.
protected static final EntityCreator<Node> NODE_CREATOR = new EntityCreator<Node>()
{
public Node create( Object... properties )
{
Node node = graphDb.createNode();
setProperties( node, properties );
return node;
}
public void delete( Node entity )
{
entity.delete();
}
};
// Creates a fresh node pair for every relationship; see FastRelationshipCreator
// for a cheaper variant that reuses one pair.
protected static final EntityCreator<Relationship> RELATIONSHIP_CREATOR =
new EntityCreator<Relationship>()
{
public Relationship create( Object... properties )
{
Relationship rel = graphDb.createNode().createRelationshipTo( graphDb.createNode(), TEST_TYPE );
setProperties( rel, properties );
return rel;
}
public void delete( Relationship entity )
{
entity.delete();
}
};
static class FastRelationshipCreator implements EntityCreator<Relationship>
{
private Node node, otherNode;
public Relationship create( Object... properties )
{
if ( node == null )
{
// Lazily create the shared endpoints on first use.
node = graphDb.createNode();
otherNode = graphDb.createNode();
}
Relationship rel = node.createRelationshipTo( otherNode, TEST_TYPE );
setProperties( rel, properties );
return rel;
}
public void delete( Relationship entity )
{
entity.delete();
}
}
// properties is an alternating key/value array, e.g. ("name", "x", "age", 1).
private static void setProperties( PropertyContainer entity, Object... properties )
{
for ( Map.Entry<String, Object> entry : MapUtil.map( properties ).entrySet() )
{
entity.setProperty( entry.getKey(), entry.getValue() );
}
}
protected Index<Node> nodeIndex( Map<String, String> config )
{
return nodeIndex( currentIndexName(), config );
}
protected Index<Node> nodeIndex( String name, Map<String, String> config )
{
return graphDb.index().forNodes( name, config );
}
protected RelationshipIndex relationshipIndex( Map<String, String> config )
{
return relationshipIndex( currentIndexName(), config );
}
protected RelationshipIndex relationshipIndex( String name, Map<String, String> config )
{
return graphDb.index().forRelationships( name, config );
}
// Default index name: the currently executing test method's name.
protected String currentIndexName()
{
return testname.getMethodName();
}
}
| false
|
community_lucene-index_src_test_java_org_neo4j_index_impl_lucene_AbstractLuceneIndexTest.java
|
5,772
|
public abstract class AbstractIndexHits<T> extends PrefetchingIterator<T> implements IndexHits<T>
{
public IndexHits<T> iterator()
{
return this;
}
public void close()
{
}
public T getSingle()
{
try
{
return IteratorUtil.singleOrNull( (Iterator<T>) this );
}
finally
{
close();
}
}
}
| false
|
community_lucene-index_src_main_java_org_neo4j_index_impl_lucene_AbstractIndexHits.java
|
5,773
|
/**
 * Base test case managing a shared impermanent database and a per-test
 * transaction, plus assorted assertion helpers for collections/iterables.
 */
public abstract class Neo4jTestCase
{
private static GraphDatabaseService graphDb;
private Transaction tx;
@BeforeClass
public static void setUpDb() throws Exception
{
graphDb = new TestGraphDatabaseFactory().newImpermanentDatabase();
}
@Before
public void setUpTest()
{
tx = graphDb.beginTx();
}
@After
public void tearDownTest()
{
// Subclasses that commit/rollback themselves override manageMyOwnTxFinish().
if ( !manageMyOwnTxFinish() )
{
finishTx( true );
}
}
protected boolean manageMyOwnTxFinish()
{
return false;
}
// Ends the current transaction, committing when commit is true; no-op if
// no transaction is open.
protected void finishTx( boolean commit )
{
if ( tx == null )
{
return;
}
if ( commit )
{
tx.success();
}
tx.finish();
tx = null;
}
protected Transaction beginTx()
{
if ( tx == null )
{
tx = graphDb.beginTx();
}
return tx;
}
@AfterClass
public static void tearDownDb() throws Exception
{
graphDb.shutdown();
}
// Recursively deletes a file/directory tree, asserting each delete succeeds.
public static void deleteFileOrDirectory( File file )
{
if ( !file.exists() )
{
return;
}
if ( file.isDirectory() )
{
for ( File child : nonNull( file.listFiles() ) )
{
deleteFileOrDirectory( child );
}
}
assertTrue( "delete " + file, file.delete() );
}
protected static GraphDatabaseService graphDb()
{
return graphDb;
}
// public static <T> void assertContains( IndexHits<T> hits, T... expectedItems )
// {
// assertEquals( expectedItems.length, hits.size() );
// assertContains( (Iterable<T>) hits, expectedItems );
// }
// Asserts the collection contains exactly the expected items, in any order.
public static <T> void assertContains( Collection<T> collection,
T... expectedItems )
{
String collectionString = join( ", ", collection.toArray() );
assertEquals( collectionString, expectedItems.length,
collection.size() );
for ( T item : expectedItems )
{
assertTrue( collection.contains( item ) );
}
}
public static <T> void assertContains( Iterable<T> items, T... expectedItems )
{
assertContains( asCollection( items ), expectedItems );
}
// Asserts the collection contains exactly the expected items, in order.
public static <T> void assertContainsInOrder( Collection<T> collection,
T... expectedItems )
{
String collectionString = join( ", ", collection.toArray() );
assertEquals( collectionString, expectedItems.length, collection.size() );
Iterator<T> itr = collection.iterator();
for ( int i = 0; itr.hasNext(); i++ )
{
assertEquals( expectedItems[i], itr.next() );
}
}
public static <T> void assertContainsInOrder( Iterable<T> collection,
T... expectedItems )
{
assertContainsInOrder( asCollection( collection ), expectedItems );
}
// Materializes an Iterable into a List.
public static <T> Collection<T> asCollection( Iterable<T> iterable )
{
List<T> list = new ArrayList<T>();
for ( T item : iterable )
{
list.add( item );
}
return list;
}
// Joins items' toString() values with the given delimiter.
public static <T> String join( String delimiter, T... items )
{
StringBuilder buffer = new StringBuilder();
for ( T item : items )
{
if ( buffer.length() > 0 )
{
buffer.append( delimiter );
}
buffer.append( item.toString() );
}
return buffer.toString();
}
}
| false
|
community_lucene-index_src_test_java_org_neo4j_index_Neo4jTestCase.java
|
5,774
|
{
public Node call() throws Exception
{
try ( Transaction tx = graphDb.beginTx() )
{
final Node node = graphDb.createNode();
// Acquire lock
tx.acquireWriteLock( commonNode );
Index<Node> index = graphDb.index().forNodes( "uuids" );
final Node existing = index.get( "uuid", uuid ).getSingle();
if ( existing != null )
{
throw new RuntimeException( "Node already exists" );
}
node.setProperty( "uuid", uuid );
index.add( node, "uuid", uuid );
tx.success();
return node;
}
}
} );
| false
|
community_lucene-index_src_test_java_org_neo4j_index_IndexConstraintsTest.java
|
5,775
|
public class IndexConstraintsTest
{
private static final Label LABEL = DynamicLabel.label( "Label" );
private static final String PROPERTY_KEY = "x";
private GraphDatabaseService graphDb;
@Before
public void setup() throws IOException
{
this.graphDb = new TestGraphDatabaseFactory().newImpermanentDatabase();
}
@After
public void shutdown() throws IOException
{
this.graphDb.shutdown();
}
@Test
public void testMultipleCreate() throws InterruptedException
{
final int numThreads = 25;
final String uuid = UUID.randomUUID().toString();
final Node commonNode;
try(Transaction tx = graphDb.beginTx())
{
commonNode = graphDb.createNode();
tx.success();
}
ExecutorCompletionService<Node> ecs = new ExecutorCompletionService<>(
Executors.newFixedThreadPool( numThreads ) );
for ( int i = 0; i < numThreads; i++ )
{
ecs.submit( new Callable<Node>()
{
public Node call() throws Exception
{
try ( Transaction tx = graphDb.beginTx() )
{
final Node node = graphDb.createNode();
// Acquire lock
tx.acquireWriteLock( commonNode );
Index<Node> index = graphDb.index().forNodes( "uuids" );
final Node existing = index.get( "uuid", uuid ).getSingle();
if ( existing != null )
{
throw new RuntimeException( "Node already exists" );
}
node.setProperty( "uuid", uuid );
index.add( node, "uuid", uuid );
tx.success();
return node;
}
}
} );
}
int numSucceeded = 0;
for ( int i = 0; i < numThreads; i++ )
{
try
{
ecs.take().get();
++numSucceeded;
}
catch ( ExecutionException e )
{
}
}
assertEquals( 1, numSucceeded );
}
// The following tests verify that multiple interacting schema commands can be applied in the same transaction.
@Test
public void convertIndexToConstraint()
{
try( Transaction tx = graphDb.beginTx() )
{
graphDb.schema().indexFor( LABEL ).on( PROPERTY_KEY ).create();
tx.success();
}
try( Transaction tx = graphDb.beginTx() )
{
IndexDefinition index = first( graphDb.schema().getIndexes( LABEL ) );
index.drop();
graphDb.schema().constraintFor( LABEL ).assertPropertyIsUnique( PROPERTY_KEY ).create();
tx.success();
}
// assert no exception is thrown
}
@Test
public void convertIndexToConstraintWithExistingData()
{
try( Transaction tx = graphDb.beginTx() )
{
for ( int i = 0; i < 2000; i++)
{
Node node = graphDb.createNode( LABEL );
node.setProperty( PROPERTY_KEY, i );
}
tx.success();
}
try( Transaction tx = graphDb.beginTx() )
{
graphDb.schema().indexFor( LABEL ).on( PROPERTY_KEY ).create();
tx.success();
}
try( Transaction tx = graphDb.beginTx() )
{
IndexDefinition index = first( graphDb.schema().getIndexes( LABEL ) );
index.drop();
graphDb.schema().constraintFor( LABEL ).assertPropertyIsUnique( PROPERTY_KEY ).create();
tx.success();
}
// assert no exception is thrown
}
@Test
public void convertConstraintToIndex()
{
try( Transaction tx = graphDb.beginTx() )
{
graphDb.schema().constraintFor( LABEL ).assertPropertyIsUnique( PROPERTY_KEY ).create();
tx.success();
}
try( Transaction tx = graphDb.beginTx() )
{
ConstraintDefinition constraint = first( graphDb.schema().getConstraints( LABEL ) );
constraint.drop();
graphDb.schema().indexFor( LABEL ).on( PROPERTY_KEY ).create();
tx.success();
}
// assert no exception is thrown
}
@Test
public void creatingAndDroppingAndCreatingIndexInSameTransaction()
{
// go increasingly meaner
for ( int times = 1; times <= 4; times++ )
{
try
{
// when: CREATE, DROP, CREATE => effect: CREATE
try ( Transaction tx = graphDb.beginTx() )
{
recreate( graphDb.schema().indexFor( LABEL ).on( PROPERTY_KEY ).create(), times );
tx.success();
}
// then
assertNotNull( "Index should exist", getIndex( LABEL, PROPERTY_KEY ) );
// when: DROP, CREATE => effect: <none>
try ( Transaction tx = graphDb.beginTx() )
{
recreate( getIndex( LABEL, PROPERTY_KEY ), times );
tx.success();
}
// then
assertNotNull( "Index should exist", getIndex( LABEL, PROPERTY_KEY ) );
// when: DROP, CREATE, DROP => effect: DROP
try ( Transaction tx = graphDb.beginTx() )
{
recreate( getIndex( LABEL, PROPERTY_KEY ), times )
.drop();
tx.success();
}
// then
assertNull( "Index should be removed", getIndex( LABEL, PROPERTY_KEY ) );
}
catch ( Throwable e )
{
throw new AssertionError( "times=" + times, e );
}
}
}
    /**
     * Drops and re-creates the given index {@code times} times inside the
     * caller's open transaction, returning the latest definition. Used to
     * stress interleaved create/drop of the same schema rule.
     */
    private IndexDefinition recreate( IndexDefinition index, int times )
    {
        for ( int i = 0; i < times; i++ )
        {
            index.drop();
            index = graphDb.schema()
                    .indexFor( index.getLabel() )
                    .on( single( index.getPropertyKeys() ) )
                    .create();
        }
        return index;
    }
    /**
     * Finds the index on {@code label} keyed by {@code propertyKey}, or
     * returns {@code null} when no such index exists. Fails the test if more
     * than one matching index is found.
     */
    private IndexDefinition getIndex( Label label, String propertyKey )
    {
        try ( Transaction tx = graphDb.beginTx() )
        {
            IndexDefinition found = null;
            for ( IndexDefinition index : graphDb.schema().getIndexes( label ) )
            {
                if ( propertyKey.equals( single( index.getPropertyKeys() ) ) )
                {
                    assertNull( "Found multiple indexes.", found );
                    found = index;
                }
            }
            tx.success();
            return found;
        }
    }
@Test
public void shouldRemoveIndexForConstraintEvenIfDroppedInCreatingTransaction()
{
try ( Transaction tx = graphDb.beginTx() )
{
// given
graphDb.schema()
.constraintFor( LABEL ).assertPropertyIsUnique( PROPERTY_KEY )
.create()
.drop();
// when - rolling back
tx.failure();
}
// then
assertNull( "Should not have constraint index", getIndex( LABEL, PROPERTY_KEY ) );
}
@Test
public void creatingAndDroppingAndCreatingConstraintInSameTransaction()
{
// go increasingly meaner
for ( int times = 1; times <= 4; times++ )
{
try
{
// when: CREATE, DROP, CREATE => effect: CREATE
try ( Transaction tx = graphDb.beginTx() )
{
recreate( graphDb.schema().constraintFor( LABEL ).assertPropertyIsUnique( PROPERTY_KEY ).create(), times );
tx.success();
}
// then
assertNotNull( "Constraint should exist", getConstraint( LABEL, PROPERTY_KEY ) );
assertNotNull( "Should have constraint index", getIndex( LABEL, PROPERTY_KEY ) );
// when: DROP, CREATE => effect: <none>
try ( Transaction tx = graphDb.beginTx() )
{
recreate( getConstraint( LABEL, PROPERTY_KEY ), times );
tx.success();
}
// then
assertNotNull( "Constraint should exist", getConstraint( LABEL, PROPERTY_KEY ) );
assertNotNull( "Should have constraint index", getIndex( LABEL, PROPERTY_KEY ) );
// when: DROP, CREATE, DROP => effect: DROP
try ( Transaction tx = graphDb.beginTx() )
{
recreate( getConstraint( LABEL, PROPERTY_KEY ), times )
.drop();
tx.success();
}
// then
assertNull( "Constraint should be removed", getConstraint( LABEL, PROPERTY_KEY ) );
assertNull( "Should not have constraint index", getIndex( LABEL, PROPERTY_KEY ) );
}
catch ( Throwable e )
{
throw new AssertionError( "times=" + times, e );
}
}
}
    /**
     * Drops and re-creates the given uniqueness constraint {@code times} times
     * inside the caller's open transaction, returning the latest definition.
     */
    private ConstraintDefinition recreate( ConstraintDefinition constraint, int times )
    {
        for ( int i = 0; i < times; i++ )
        {
            constraint.drop();
            constraint = graphDb.schema()
                    .constraintFor( constraint.getLabel() )
                    .assertPropertyIsUnique( single( constraint.getPropertyKeys() ) )
                    .create();
        }
        return constraint;
    }
    /**
     * Finds the constraint on {@code label} keyed by {@code propertyKey}, or
     * returns {@code null} when no such constraint exists. Fails the test if
     * more than one matching constraint is found.
     */
    private ConstraintDefinition getConstraint( Label label, String propertyKey )
    {
        try ( Transaction tx = graphDb.beginTx() )
        {
            ConstraintDefinition found = null;
            for ( ConstraintDefinition constraint : graphDb.schema().getConstraints( label ) )
            {
                if ( propertyKey.equals( single( constraint.getPropertyKeys() ) ) )
                {
                    assertNull( "Found multiple constraints.", found );
                    found = constraint;
                }
            }
            tx.success();
            return found;
        }
    }
}
| false
|
community_lucene-index_src_test_java_org_neo4j_index_IndexConstraintsTest.java
|
5,776
|
{
@Override
public void evaluate() throws Throwable
{
base.evaluate();
for ( Map.Entry<ProgressMonitorFactory,Boolean> factoryMock : factoryMocks.entrySet() )
{
if ( factoryMock.getValue() )
{
verify( factoryMock.getKey(), times( 1 ) ).newOpenEndedIndicator( any( String.class ),
anyInt() );
}
else
{
verify( factoryMock.getKey(), times( 1 ) ).newIndicator( any( String.class ) );
}
}
}
};
| false
|
community_kernel_src_test_java_org_neo4j_helpers_progress_ProgressMonitorTest.java
|
5,777
|
{
@Override
public Indicator answer( InvocationOnMock invocation ) throws Throwable
{
when( indicatorMock.reportResolution() ).thenReturn( (Integer) invocation.getArguments()[1] );
return indicatorMock;
}
} );
| false
|
community_kernel_src_test_java_org_neo4j_helpers_progress_ProgressMonitorTest.java
|
5,778
|
private static class SingleIndicator implements TestRule
{
ProgressMonitorFactory mock( Indicator indicatorMock, int indicatorSteps )
{
when( indicatorMock.reportResolution() ).thenReturn( indicatorSteps );
ProgressMonitorFactory factory = Mockito.mock( ProgressMonitorFactory.class );
when( factory.newIndicator( any( String.class ) ) ).thenReturn( indicatorMock );
factoryMocks.put( factory, false );
return factory;
}
ProgressMonitorFactory mock( final Indicator.OpenEnded indicatorMock )
{
ProgressMonitorFactory factory = Mockito.mock( ProgressMonitorFactory.class );
when( factory.newOpenEndedIndicator( any( String.class ), anyInt() ) ).thenAnswer( new Answer<Indicator>()
{
@Override
public Indicator answer( InvocationOnMock invocation ) throws Throwable
{
when( indicatorMock.reportResolution() ).thenReturn( (Integer) invocation.getArguments()[1] );
return indicatorMock;
}
} );
factoryMocks.put( factory, true );
return factory;
}
private final Map<ProgressMonitorFactory,Boolean> factoryMocks = new HashMap<ProgressMonitorFactory,Boolean>();
@Override
public Statement apply( final Statement base, Description description )
{
return new Statement()
{
@Override
public void evaluate() throws Throwable
{
base.evaluate();
for ( Map.Entry<ProgressMonitorFactory,Boolean> factoryMock : factoryMocks.entrySet() )
{
if ( factoryMock.getValue() )
{
verify( factoryMock.getKey(), times( 1 ) ).newOpenEndedIndicator( any( String.class ),
anyInt() );
}
else
{
verify( factoryMock.getKey(), times( 1 ) ).newIndicator( any( String.class ) );
}
}
}
};
}
}
| false
|
community_kernel_src_test_java_org_neo4j_helpers_progress_ProgressMonitorTest.java
|
5,779
|
{
@Override
public void run()
{
begin.countDown();
try
{
completion.await( 1, SECONDS );
}
catch ( Exception e )
{
return; // do not count down the end latch
}
end.countDown();
}
}.start();
| false
|
community_kernel_src_test_java_org_neo4j_helpers_progress_ProgressMonitorTest.java
|
5,780
|
{
};
| false
|
community_kernel_src_test_java_org_neo4j_helpers_progress_ProgressMonitorTest.java
|
5,781
|
public class ProgressMonitorTest
{
public static final String LINE_SEPARATOR = System.getProperty( "line.separator" );
@Test
public void shouldReportProgressInTheSpecifiedIntervals() throws Exception
{
// given
Indicator indicator = indicatorMock();
ProgressListener progressListener = factory.mock( indicator, 10 ).singlePart( testName.getMethodName(), 16 );
// when
progressListener.started();
for ( int i = 0; i < 16; i++ )
{
progressListener.add( 1 );
}
progressListener.done();
// then
InOrder order = inOrder( indicator );
order.verify( indicator ).startProcess( 16 );
for ( int i = 0; i < 10; i++ )
{
order.verify( indicator ).progress( i, i + 1 );
}
order.verify( indicator ).completeProcess();
order.verifyNoMoreInteractions();
}
@Test
public void shouldAggregateProgressFromMultipleProcesses() throws Exception
{
// given
Indicator indicator = indicatorMock();
ProgressMonitorFactory.MultiPartBuilder builder = factory.mock( indicator, 10 ).multipleParts( testName.getMethodName() );
ProgressListener first = builder.progressForPart( "first", 5 );
ProgressListener other = builder.progressForPart( "other", 5 );
builder.build();
InOrder order = inOrder( indicator );
order.verify( indicator ).startProcess( 10 );
order.verifyNoMoreInteractions();
// when
first.started();
for ( int i = 0; i < 5; i++ )
{
first.add( 1 );
}
first.done();
// then
order.verify( indicator ).startPart( "first", 5 );
for ( int i = 0; i < 5; i++ )
{
order.verify( indicator ).progress( i, i + 1 );
}
order.verify( indicator ).completePart( "first" );
order.verifyNoMoreInteractions();
// when
other.started();
for ( int i = 0; i < 5; i++ )
{
other.add( 1 );
}
other.done();
// then
order.verify( indicator ).startPart( "other", 5 );
for ( int i = 5; i < 10; i++ )
{
order.verify( indicator ).progress( i, i + 1 );
}
order.verify( indicator ).completePart( "other" );
order.verify( indicator ).completeProcess();
order.verifyNoMoreInteractions();
}
@Test
public void shouldNotAllowAddingPartsAfterCompletingMultiPartBuilder() throws Exception
{
// given
ProgressMonitorFactory.MultiPartBuilder builder = factory.mock( indicatorMock(), 10 )
.multipleParts( testName.getMethodName() );
builder.progressForPart( "first", 10 );
builder.build();
// when
try
{
builder.progressForPart( "other", 10 );
fail( "should have thrown exception" );
}
// then
catch ( IllegalStateException expected )
{
assertEquals( "Builder has been completed.", expected.getMessage() );
}
}
@Test
public void shouldNotAllowAddingMultiplePartsWithSameIdentifier() throws Exception
{
// given
ProgressMonitorFactory.MultiPartBuilder builder = Mockito.mock( ProgressMonitorFactory.class )
.multipleParts( testName.getMethodName() );
builder.progressForPart( "first", 10 );
// when
try
{
builder.progressForPart( "first", 10 );
fail( "should have thrown exception" );
}
// then
catch ( IllegalArgumentException expected )
{
assertEquals( "Part 'first' has already been defined.", expected.getMessage() );
}
}
@Test
public void shouldStartProcessAutomaticallyIfNotDoneBefore() throws Exception
{
// given
Indicator indicator = indicatorMock();
ProgressListener progressListener = factory.mock( indicator, 10 ).singlePart( testName.getMethodName(), 16 );
// when
for ( int i = 0; i < 16; i++ )
{
progressListener.add( 1 );
}
progressListener.done();
// then
InOrder order = inOrder( indicator );
order.verify( indicator, times( 1 ) ).startProcess( 16 );
for ( int i = 0; i < 10; i++ )
{
order.verify( indicator ).progress( i, i + 1 );
}
order.verify( indicator ).completeProcess();
order.verifyNoMoreInteractions();
}
@Test
public void shouldStartMultiPartProcessAutomaticallyIfNotDoneBefore() throws Exception
{
// given
Indicator indicator = indicatorMock();
ProgressMonitorFactory.MultiPartBuilder builder = factory.mock( indicator, 10 ).multipleParts( testName.getMethodName() );
ProgressListener first = builder.progressForPart( "first", 5 );
ProgressListener other = builder.progressForPart( "other", 5 );
builder.build();
InOrder order = inOrder( indicator );
order.verify( indicator ).startProcess( 10 );
order.verifyNoMoreInteractions();
// when
for ( int i = 0; i < 5; i++ )
{
first.add( 1 );
}
first.done();
// then
order.verify( indicator ).startPart( "first", 5 );
for ( int i = 0; i < 5; i++ )
{
order.verify( indicator ).progress( i, i + 1 );
}
order.verify( indicator ).completePart( "first" );
order.verifyNoMoreInteractions();
// when
for ( int i = 0; i < 5; i++ )
{
other.add( 1 );
}
other.done();
// then
order.verify( indicator ).startPart( "other", 5 );
for ( int i = 5; i < 10; i++ )
{
order.verify( indicator ).progress( i, i + 1 );
}
order.verify( indicator ).completePart( "other" );
order.verify( indicator ).completeProcess();
order.verifyNoMoreInteractions();
}
@Test
public void shouldCompleteMultiPartProgressWithNoPartsImmediately() throws Exception
{
// given
Indicator indicator = indicatorMock();
ProgressMonitorFactory.MultiPartBuilder builder = factory.mock( indicator, 10 ).multipleParts( testName.getMethodName() );
// when
builder.build();
// then
InOrder order = inOrder( indicator );
order.verify( indicator ).startProcess( 0 );
order.verify( indicator ).progress( 0, 10 );
order.verify( indicator ).completeProcess();
order.verifyNoMoreInteractions();
}
    /**
     * Creates an {@link Indicator} mock that calls real (default) methods but
     * turns {@code progress(int, int)} into a no-op, so tests can verify
     * interactions without any actual output.
     */
    private static Indicator indicatorMock()
    {
        Indicator indicator = mock( Indicator.class, Mockito.CALLS_REAL_METHODS );
        doNothing().when( indicator ).progress( anyInt(), anyInt() );
        return indicator;
    }
private static final String EXPECTED_TEXTUAL_OUTPUT;
static
{
StringWriter expectedTextualOutput = new StringWriter();
for ( int i = 0; i < 10; )
{
for ( int j = 0; j < 20; j++ )
{
expectedTextualOutput.write( '.' );
}
expectedTextualOutput.write( String.format( " %3d%%%n", (++i) * 10 ) );
}
EXPECTED_TEXTUAL_OUTPUT = expectedTextualOutput.toString();
}
@Test
public void shouldPrintADotEveryHalfPercentAndFullPercentageEveryTenPercentWithTextualIndicator() throws Exception
{
// given
ByteArrayOutputStream stream = new ByteArrayOutputStream();
ProgressListener progressListener = ProgressMonitorFactory.textual( stream ).singlePart( testName.getMethodName(), 1000 );
// when
for ( int i = 0; i < 1000; i++ )
{
progressListener.add( 1 );
}
// then
assertEquals( testName.getMethodName() + LINE_SEPARATOR + EXPECTED_TEXTUAL_OUTPUT,
stream.toString( Charset.defaultCharset().name() ) );
}
@Test
public void shouldPrintADotEveryHalfPercentAndFullPercentageEveryTenPercentEvenWhenStepResolutionIsLower()
throws Exception
{
// given
StringWriter writer = new StringWriter();
ProgressListener progressListener = ProgressMonitorFactory.textual( writer ).singlePart( testName.getMethodName(), 50 );
// when
for ( int i = 0; i < 50; i++ )
{
progressListener.add( 1 );
}
// then
assertEquals( testName.getMethodName() + LINE_SEPARATOR + EXPECTED_TEXTUAL_OUTPUT,
writer.toString() );
}
@Test
public void shouldPassThroughAllInvocationsOnDecorator() throws Exception
{
// given
Indicator decorated = mock( Indicator.class );
Indicator decorator = new Indicator.Decorator( decorated )
{
};
// when
decorator.startProcess( 4 );
// then
verify( decorated ).startProcess( 4 );
// when
decorator.startPart( "part1", 2 );
// then
verify( decorated ).startPart( "part1", 2 );
// when
decorator.progress( 0, 1 );
// then
verify( decorated ).progress( 0, 1 );
// when
decorator.startPart( "part2", 2 );
// then
verify( decorated ).startPart( "part2", 2 );
// when
decorator.progress( 1, 2 );
// then
verify( decorated ).progress( 1, 2 );
// when
decorator.completePart( "part1" );
// then
verify( decorated ).completePart( "part1" );
// when
decorator.progress( 2, 3 );
// then
verify( decorated ).progress( 2, 3 );
// when
decorator.completePart( "part2" );
// then
verify( decorated ).completePart( "part2" );
// when
decorator.progress( 3, 4 );
// then
verify( decorated ).progress( 3, 4 );
// when
decorator.completeProcess();
// then
verify( decorated ).completeProcess();
}
@Test
public void shouldBeAbleToAwaitCompletionOfMultiPartProgress() throws Exception
{
// given
ProgressMonitorFactory.MultiPartBuilder builder = ProgressMonitorFactory.NONE.multipleParts( testName.getMethodName() );
ProgressListener part1 = builder.progressForPart( "part1", 1 );
ProgressListener part2 = builder.progressForPart( "part2", 1 );
final Completion completion = builder.build();
// when
final CountDownLatch begin = new CountDownLatch( 1 ), end = new CountDownLatch( 1 );
new Thread()
{
@Override
public void run()
{
begin.countDown();
try
{
completion.await( 1, SECONDS );
}
catch ( Exception e )
{
return; // do not count down the end latch
}
end.countDown();
}
}.start();
Runnable callback = mock( Runnable.class );
completion.notify( callback );
assertTrue( begin.await( 1, SECONDS ) );
// then
verifyZeroInteractions( callback );
// when
try
{
completion.await( 1, TimeUnit.MILLISECONDS );
fail( "should have thrown exception" );
}
// then
catch ( TimeoutException expected )
{
assertEquals( "Process did not complete within 1 MILLISECONDS.", expected.getMessage() );
}
// when
part1.done();
// then
verifyZeroInteractions( callback );
// when
part2.done();
// then
verify( callback ).run();
completion.await( 0, TimeUnit.NANOSECONDS ); // should not have to wait
assertTrue( end.await( 1, SECONDS ) ); // should have been completed
// when
callback = mock( Runnable.class );
completion.notify( callback );
verify( callback ).run();
}
@Test
public void shouldReturnToCompletionWaiterWhenFirstJobFails() throws Exception
{
// given
ProgressMonitorFactory.MultiPartBuilder builder = ProgressMonitorFactory.NONE.multipleParts( testName.getMethodName() );
ProgressListener part1 = builder.progressForPart( "part1", 1 );
ProgressListener part2 = builder.progressForPart( "part2", 1 );
final Completion completion = builder.build();
// when
part1.started();
part2.started();
part2.failed( new RuntimeException( "failure in one of the jobs" ) );
// neither job completes
try
{
completion.await( 1, TimeUnit.MILLISECONDS );
fail( "should have thrown exception" );
}
// then
catch ( ProcessFailureException expected )
{
assertEquals( "failure in one of the jobs", expected.getCause().getMessage() );
}
}
@Test
public void shouldNotAllowNullCompletionCallbacks() throws Exception
{
ProgressMonitorFactory.MultiPartBuilder builder = ProgressMonitorFactory.NONE.multipleParts( testName.getMethodName() );
Completion completion = builder.build();
// when
try
{
completion.notify( null );
fail( "should have thrown exception" );
}
// then
catch ( IllegalArgumentException expected )
{
assertEquals( "callback may not be null", expected.getMessage() );
}
}
@Test
public void shouldInvokeAllCallbacksEvenWhenOneThrowsException() throws Exception
{
// given
ProgressMonitorFactory.MultiPartBuilder builder = ProgressMonitorFactory.NONE.multipleParts( testName.getMethodName() );
ProgressListener progressListener = builder.progressForPart( "only part", 1 );
Completion completion = builder.build();
Runnable callback = mock( Runnable.class );
doThrow( new RuntimeException()).doNothing().when( callback ).run();
completion.notify( callback );
completion.notify( callback );
ByteArrayOutputStream out = new ByteArrayOutputStream();
PrintStream sysErr = System.out;
try
{
System.setErr(new PrintStream(out));
// when
progressListener.done();
}
finally
{
System.setOut( sysErr );
}
// then
verify( callback, times( 2 ) ).run();
String printedOutput = out.toString( Charset.defaultCharset().name() );
assertTrue( printedOutput, printedOutput.startsWith( RuntimeException.class.getName() ) );
assertTrue( printedOutput, printedOutput
.contains( "\n\tat " + getClass().getName() + "." + testName.getMethodName() ) );
}
@Test
public void shouldAllowStartingAPartBeforeCompletionOfMultiPartBuilder() throws Exception
{
// given
Indicator indicator = mock( Indicator.class );
ProgressMonitorFactory.MultiPartBuilder builder = factory.mock( indicator, 10 ).multipleParts( testName.getMethodName() );
ProgressListener part1 = builder.progressForPart( "part1", 1 );
ProgressListener part2 = builder.progressForPart( "part2", 1 );
// when
part1.add( 1 );
builder.build();
part2.add( 1 );
part1.done();
part2.done();
// then
InOrder order = inOrder( indicator );
order.verify( indicator ).startPart( "part1", 1 );
order.verify( indicator ).startProcess( 2 );
order.verify( indicator ).startPart( "part2", 1 );
order.verify( indicator ).completePart( "part1" );
order.verify( indicator ).completePart( "part2" );
order.verify( indicator ).completeProcess();
}
@Test
public void shouldAllowOpenEndedProgressListeners() throws Exception
{
// given
Indicator.OpenEnded indicator = mock( Indicator.OpenEnded.class );
ProgressListener progress = factory.mock( indicator ).openEnded( testName.getMethodName(), 10 );
// when
for ( int i = 0; i < 20; i++ )
{
progress.add( 5 );
}
progress.done();
// then
verify( indicator, atLeast( 1 ) ).reportResolution();
InOrder order = inOrder( indicator );
order.verify( indicator ).startProcess( 0 );
for ( int i = 0; i < 10; i++ )
{
order.verify( indicator ).progress( i, i + 1 );
}
order.verify( indicator ).completeProcess();
verifyNoMoreInteractions( indicator );
}
@Test
public void shouldReportOpenEndedProgressInANiceWay() throws Exception
{
// given
StringWriter buffer = new StringWriter();
ProgressListener progress = ProgressMonitorFactory.textual( buffer ).openEnded( testName.getMethodName(), 10 );
// when
for ( int i = 0; i < 25; i++ )
{
progress.add( 50 );
}
progress.done();
// then
assertEquals( String.format(
testName.getMethodName() + "%n" +
".................... 200%n" +
".................... 400%n" +
".................... 600%n" +
".................... 800%n" +
".................... 1000%n" +
".................... 1200%n" +
"..... done%n" ), buffer.toString() );
}
@Rule
public final TestName testName = new TestName();
@Rule
public final SingleIndicator factory = new SingleIndicator();
    /**
     * JUnit rule that hands out mocked {@link ProgressMonitorFactory}s and,
     * after the test body has run, verifies each factory was asked for exactly
     * one indicator of the expected kind (open-ended vs. fixed-resolution,
     * tracked by the boolean in {@code factoryMocks}).
     */
    private static class SingleIndicator implements TestRule
    {
        // Fixed-resolution variant: the factory always returns the given mock.
        ProgressMonitorFactory mock( Indicator indicatorMock, int indicatorSteps )
        {
            when( indicatorMock.reportResolution() ).thenReturn( indicatorSteps );
            ProgressMonitorFactory factory = Mockito.mock( ProgressMonitorFactory.class );
            when( factory.newIndicator( any( String.class ) ) ).thenReturn( indicatorMock );
            factoryMocks.put( factory, false );
            return factory;
        }
        // Open-ended variant: resolution is taken from the factory-call
        // argument, so it is wired up lazily inside the Answer.
        ProgressMonitorFactory mock( final Indicator.OpenEnded indicatorMock )
        {
            ProgressMonitorFactory factory = Mockito.mock( ProgressMonitorFactory.class );
            when( factory.newOpenEndedIndicator( any( String.class ), anyInt() ) ).thenAnswer( new Answer<Indicator>()
            {
                @Override
                public Indicator answer( InvocationOnMock invocation ) throws Throwable
                {
                    when( indicatorMock.reportResolution() ).thenReturn( (Integer) invocation.getArguments()[1] );
                    return indicatorMock;
                }
            } );
            factoryMocks.put( factory, true );
            return factory;
        }
        // Maps each created factory mock to whether it is open-ended (true).
        private final Map<ProgressMonitorFactory,Boolean> factoryMocks = new HashMap<ProgressMonitorFactory,Boolean>();
        @Override
        public Statement apply( final Statement base, Description description )
        {
            return new Statement()
            {
                @Override
                public void evaluate() throws Throwable
                {
                    base.evaluate();
                    // Post-condition: each factory handed out exactly one
                    // indicator of the kind it was registered for.
                    for ( Map.Entry<ProgressMonitorFactory,Boolean> factoryMock : factoryMocks.entrySet() )
                    {
                        if ( factoryMock.getValue() )
                        {
                            verify( factoryMock.getKey(), times( 1 ) ).newOpenEndedIndicator( any( String.class ),
                                    anyInt() );
                        }
                        else
                        {
                            verify( factoryMock.getKey(), times( 1 ) ).newIndicator( any( String.class ) );
                        }
                    }
                }
            };
        }
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_helpers_progress_ProgressMonitorTest.java
|
5,782
|
    /**
     * Builder for a progress monitor aggregating several named parts into one
     * {@link Indicator}. Register parts with {@link #progressForPart(String, long)},
     * then call {@link #build()} to start the aggregate; after that no further
     * parts may be added.
     */
    public static class MultiPartBuilder
    {
        // Nulled by build() so later progressForPart() calls fail fast.
        private Aggregator aggregator;
        private Set<String> parts = new HashSet<String>();
        private Completion completion = null;
        private MultiPartBuilder( ProgressMonitorFactory factory, String process )
        {
            this.aggregator = new Aggregator(factory.newIndicator( process ));
        }
        /**
         * Registers a new part with the given unique name and total count.
         *
         * @throws IllegalStateException if {@link #build()} was already called.
         * @throws IllegalArgumentException if {@code part} was already defined.
         */
        public ProgressListener progressForPart( String part, long totalCount )
        {
            if ( aggregator == null )
            {
                throw new IllegalStateException( "Builder has been completed." );
            }
            if ( !parts.add( part ) )
            {
                throw new IllegalArgumentException( String.format( "Part '%s' has already been defined.", part ) );
            }
            ProgressListener.MultiPartProgressListener progress = new ProgressListener.MultiPartProgressListener( aggregator, part, totalCount );
            aggregator.add( progress );
            return progress;
        }
        /**
         * Starts the aggregated process and seals the builder. Idempotent:
         * repeated calls return the same {@link Completion}.
         */
        public Completion build()
        {
            if ( aggregator != null )
            {
                completion = aggregator.initialize();
            }
            aggregator = null;
            parts = null;
            return completion;
        }
    }
| false
|
community_kernel_src_main_java_org_neo4j_helpers_progress_ProgressMonitorFactory.java
|
5,783
|
{
@Override
protected Indicator newIndicator( String process )
{
return new Indicator.Textual( process, writer() );
}
@Override
protected Indicator.OpenEnded newOpenEndedIndicator( String process, int resolution )
{
return new Indicator.OpenEndedTextual( process, writer(), resolution );
}
private PrintWriter writer()
{
return out instanceof PrintWriter ? (PrintWriter) out : new PrintWriter( out );
}
};
| false
|
community_kernel_src_main_java_org_neo4j_helpers_progress_ProgressMonitorFactory.java
|
5,784
|
    /**
     * {@code EntityCreator} that creates every relationship between the same
     * two lazily-created nodes, avoiding per-relationship node creation.
     */
    static class FastRelationshipCreator implements EntityCreator<Relationship>
    {
        // Shared endpoints, created on the first create() call.
        private Node node, otherNode;
        public Relationship create( Object... properties )
        {
            if ( node == null )
            {
                node = graphDb.createNode();
                otherNode = graphDb.createNode();
            }
            Relationship rel = node.createRelationshipTo( otherNode, TEST_TYPE );
            setProperties( rel, properties );
            return rel;
        }
        public void delete( Relationship entity )
        {
            entity.delete();
        }
    }
| false
|
community_lucene-index_src_test_java_org_neo4j_index_impl_lucene_AbstractLuceneIndexTestIT.java
|
5,785
|
public class AddRelToIndex
{
    /**
     * Test fixture process: adds a relationship to a relationship index and
     * exits WITHOUT shutting the database down, simulating a crashed process
     * so recovery can be exercised by the parent test.
     *
     * @param args {@code args[0]} is the path to the database store directory.
     */
    public static void main( String[] args )
    {
        String path = args[0];
        String indexName = "myIndex";
        GraphDatabaseService db = new GraphDatabaseFactory().newEmbeddedDatabase( path );
        // try-with-resources guarantees the transaction is closed even if an
        // index/create operation throws (the original leaked it on failure).
        try ( Transaction tx = db.beginTx() )
        {
            Index<Relationship> index = db.index().forRelationships( indexName );
            Node node = db.createNode();
            Relationship relationship = db.createNode().createRelationshipTo(
                    node, DynamicRelationshipType.withName( "KNOWS" ) );
            index.add( relationship, "key", "value" );
            tx.success();
        }
        // Deliberately skip db.shutdown() - unclean exit is the point.
    }
}
| false
|
community_lucene-index_src_test_java_org_neo4j_index_impl_lucene_AddRelToIndex.java
|
5,786
|
class FullTxData extends TxData
{
/*
* The concept of orphan exists to find entities when querying where the transaction state
* (i.e. a FullTxData object) has seen removed entities w/o key and potentially also w/o value.
* A TxData instance receiving "add" calls with null key/value is an instance used to track removals.
* A Lucene document storing state about e.g. {@code index.remove( myNode, "name" )}
* <pre>
* {
* __all__: "name"
* }
* </pre>
*
* A Lucene document storing state about e.g. {@code index.remove( myNode )}
* <pre>
* {
* __all__: "1"
* }
* where queries would (if there are any orphans at all stored) include the "all orphans" value ("1") as
* well as any specific key which is pulled out from the incoming query.
*/
private static final String ORPHANS_KEY = "__all__";
private static final String ORPHANS_VALUE = "1";
private Directory directory;
private IndexWriter writer;
private boolean modified;
private IndexReader reader;
private IndexSearcher searcher;
private final Map<Long, Document> cachedDocuments = new HashMap<Long, Document>();
private Set<String> orphans;
FullTxData( LuceneIndex index )
{
super( index );
}
@Override
void add( TxDataHolder holder, Object entityId, String key, Object value )
{
try
{
ensureLuceneDataInstantiated();
long id = entityId instanceof Long ? (Long) entityId : ((RelationshipId)entityId).id;
Document document = findDocument( id );
boolean add = false;
if ( document == null )
{
document = index.getIdentifier().entityType.newDocument( entityId );
cachedDocuments.put( id, document );
add = true;
}
if ( key == null && value == null )
{
// Set a special "always hit" flag
document.add( new Field( ORPHANS_KEY, ORPHANS_VALUE, Store.NO, Index.NOT_ANALYZED ) );
addOrphan( null );
}
else if ( value == null )
{
// Set a special "always hit" flag
document.add( new Field( ORPHANS_KEY, key, Store.NO, Index.NOT_ANALYZED ) );
addOrphan( key );
}
else
{
index.type.addToDocument( document, key, value );
}
if ( add )
{
writer.addDocument( document );
}
else
{
writer.updateDocument( index.type.idTerm( id ), document );
}
invalidateSearcher();
}
catch ( IOException e )
{
throw new RuntimeException( e );
}
}
private void addOrphan( String key )
{
if ( orphans == null )
{
orphans = new HashSet<String>();
}
orphans.add( key );
}
private Document findDocument( long id )
{
return cachedDocuments.get( id );
}
/**
 * Lazily creates the in-memory ({@link RAMDirectory}) index and its writer.
 * Called on first modification so read-only tx data pays no allocation cost.
 */
private void ensureLuceneDataInstantiated()
{
    if ( this.directory == null )
    {
        try
        {
            this.directory = new RAMDirectory();
            // Use the same analyzer as the backing index so queries tokenize identically.
            IndexWriterConfig writerConfig = new IndexWriterConfig( LUCENE_VERSION, index.type.analyzer );
            this.writer = new IndexWriter( directory, writerConfig );
        }
        catch ( IOException e )
        {
            throw new RuntimeException( e );
        }
    }
}
/**
 * Removes {@code key=value} for {@code entityId} from this tx's in-memory index.
 * Only affects additions made earlier in the same transaction: if the entity has
 * no cached document here, there is nothing to undo.
 */
@Override
void remove( TxDataHolder holder, Object entityId, String key, Object value )
{
    try
    {
        ensureLuceneDataInstantiated();
        long id = entityId instanceof Long ? (Long) entityId : ((RelationshipId)entityId).id;
        Document document = findDocument( id );
        if ( document != null )
        {
            index.type.removeFromDocument( document, key, value );
            if ( LuceneDataSource.documentIsEmpty( document ) )
            {
                // Nothing left for this entity: drop the document entirely.
                writer.deleteDocuments( index.type.idTerm( id ) );
            }
            else
            {
                writer.updateDocument( index.type.idTerm( id ), document );
            }
        }
        invalidateSearcher();
    }
    catch ( IOException e )
    {
        throw new RuntimeException( e );
    }
}
// Runs the query against this tx's in-memory index; see internalQuery.
@Override
Collection<Long> query( TxDataHolder holder, Query query, QueryContext contextOrNull )
{
    return internalQuery( query, contextOrNull );
}
/**
 * Executes {@code query} (widened to also hit orphan markers) against the
 * in-memory index and returns the matching entity ids. Returns an empty set
 * if nothing has been written in this tx yet.
 */
private Collection<Long> internalQuery( Query query, QueryContext contextOrNull )
{
    if ( this.directory == null )
    {
        return Collections.<Long>emptySet();
    }
    try
    {
        Sort sorting = contextOrNull != null ? contextOrNull.getSorting() : null;
        // Unless the caller explicitly trades correctness for speed, refresh
        // the searcher so it sees the latest writes.
        boolean prioritizeCorrectness = contextOrNull == null || !contextOrNull.getTradeCorrectnessForSpeed();
        IndexSearcher theSearcher = searcher( prioritizeCorrectness );
        query = includeOrphans( query );
        Hits hits = new Hits( theSearcher, query, null, sorting, prioritizeCorrectness );
        Collection<Long> result = new ArrayList<Long>();
        for ( int i = 0; i < hits.length(); i++ )
        {
            result.add( Long.valueOf( hits.doc( i ).get( KEY_DOC_ID ) ) );
        }
        return result;
    }
    catch ( IOException e )
    {
        throw new RuntimeException( e );
    }
}
/**
 * Widens {@code query} so that orphan removal markers are also matched:
 * key-specific markers via {@link #injectOrphans(Query)} and the entity-wide
 * marker via an explicit ORPHANS_KEY=ORPHANS_VALUE term. No-op when this tx
 * has produced no orphans.
 */
private Query includeOrphans( Query query )
{
    if ( orphans == null )
    {
        return query;
    }
    BooleanQuery result = new BooleanQuery();
    result.add( injectOrphans( query ), Occur.SHOULD );
    result.add( new TermQuery( new Term( ORPHANS_KEY, ORPHANS_VALUE ) ), Occur.SHOULD );
    return result;
}
/**
 * Recursively rewrites {@code query} so each leaf clause also matches the
 * key-wide orphan marker for the field it targets. Leaves whose field cannot
 * be determined are returned unchanged.
 */
private Query injectOrphans( Query query )
{
    if ( query instanceof BooleanQuery )
    {
        // Rebuild the boolean tree, injecting into each sub-clause while
        // preserving its MUST/SHOULD/MUST_NOT occurrence.
        BooleanQuery source = (BooleanQuery) query;
        BooleanQuery result = new BooleanQuery();
        for ( BooleanClause clause : source.clauses() )
        {
            result.add( injectOrphans( clause.getQuery() ), clause.getOccur() );
        }
        return result;
    }
    String orphanField = extractTermField( query );
    if ( orphanField == null )
    {
        return query;
    }
    BooleanQuery result = new BooleanQuery();
    result.add( query, Occur.SHOULD );
    result.add( new TermQuery( new Term( ORPHANS_KEY, orphanField ) ), Occur.SHOULD );
    return result;
}
/**
 * Best-effort extraction of the field name a leaf query targets, or null if
 * it cannot be determined (e.g. MatchAllDocsQuery). Tries well-known query
 * types first, then term extraction, then reflection as a last resort.
 */
private String extractTermField( Query query )
{
    // Try common types of queries
    if ( query instanceof TermQuery )
    {
        return ((TermQuery)query).getTerm().field();
    }
    else if ( query instanceof WildcardQuery )
    {
        return ((WildcardQuery)query).getTerm().field();
    }
    else if ( query instanceof PrefixQuery )
    {
        return ((PrefixQuery)query).getPrefix().field();
    }
    else if ( query instanceof MatchAllDocsQuery )
    {
        // Matches everything: no single field to attribute orphans to.
        return null;
    }
    // Try to extract terms and get it that way
    String field = getFieldFromExtractTerms( query );
    if ( field != null )
    {
        return field;
    }
    // Last resort: since Query doesn't have a common interface for getting
    // the term/field of its query this is one option.
    return getFieldViaReflection( query );
}
/**
 * Reflection fallback for query types without a common accessor: tries a
 * {@code getTerm()} method first, then {@code getField()}. Any other
 * reflective failure is wrapped in a RuntimeException.
 */
private String getFieldViaReflection( Query query )
{
    try
    {
        try
        {
            Term term = (Term) query.getClass().getMethod( "getTerm" ).invoke( query );
            return term.field();
        }
        catch ( NoSuchMethodException e )
        {
            return (String) query.getClass().getMethod( "getField" ).invoke( query );
        }
    }
    catch ( Exception e )
    {
        throw new RuntimeException( e );
    }
}
/**
 * Attempts to determine the query's field by extracting its terms, rewriting
 * the query against the reader when direct extraction is unsupported (e.g.
 * wildcard/range queries). Returns the field of the first extracted term,
 * or null if no terms could be obtained.
 */
private String getFieldFromExtractTerms( Query query )
{
    Set<Term> terms = new HashSet<Term>();
    try
    {
        query.extractTerms( terms );
    }
    catch ( UnsupportedOperationException e )
    {
        // In case of wildcard/range queries try to rewrite the query
        // i.e. get the terms from the reader.
        try
        {
            query.rewrite( reader ).extractTerms( terms );
        }
        catch ( IOException ioe )
        {
            throw new UnsupportedOperationException( ioe );
        }
        catch ( UnsupportedOperationException ue )
        {
            // TODO This is for "*" queries and such. Lucene doesn't seem
            // to be able/willing to rewrite such queries.
            // Just ignore the orphans then... OK?
        }
    }
    return terms.isEmpty() ? null : terms.iterator().next().field();
}
/**
 * Releases all Lucene resources held by this tx data. Close failures are
 * swallowed by {@link #safeClose(Object)}.
 */
@Override
void close()
{
    for ( Object resource : new Object[] { this.writer, this.reader, this.searcher } )
    {
        safeClose( resource );
    }
}
// Marks the cached searcher stale; the next correctness-prioritizing call to
// searcher(...) will reopen the reader to pick up new writes.
private void invalidateSearcher()
{
    this.modified = true;
}
/**
 * Returns a searcher over the in-memory index. When the index is unmodified,
 * or the caller allows a stale view ({@code allowRefreshSearcher == false}),
 * the cached searcher is reused; otherwise the reader is (re)opened against
 * the writer and a fresh searcher is created.
 */
private IndexSearcher searcher( boolean allowRefreshSearcher )
{
    if ( this.searcher != null && (!modified || !allowRefreshSearcher) )
    {
        return this.searcher;
    }
    try
    {
        // First call opens a near-real-time reader from the writer; later
        // calls reopen, which returns the same reader if nothing changed.
        IndexReader newReader = this.reader == null ? IndexReader.open( this.writer, true ) : this.reader.reopen();
        if ( newReader == this.reader )
        {
            return this.searcher;
        }
        safeClose( reader );
        this.reader = newReader;
        safeClose( searcher );
        searcher = new IndexSearcher( reader );
    }
    catch ( IOException e )
    {
        throw new RuntimeException( e );
    }
    finally
    {
        // Only a refreshing call may clear the dirty flag; a stale read must
        // leave it set so a later refresh still happens.
        if ( allowRefreshSearcher )
        {
            this.modified = false;
        }
    }
    return this.searcher;
}
/**
 * Closes a Lucene writer/searcher/reader, ignoring null and swallowing any
 * IOException (best-effort cleanup). Note: {@code null instanceof X} is
 * false, so no explicit null check is needed.
 */
private static void safeClose( Object object )
{
    try
    {
        if ( object instanceof IndexWriter )
        {
            ((IndexWriter) object).close();
        }
        else if ( object instanceof IndexSearcher )
        {
            ((IndexSearcher) object).close();
        }
        else if ( object instanceof IndexReader )
        {
            ((IndexReader) object).close();
        }
    }
    catch ( IOException e )
    {
        // Best effort: ignore close failures.
    }
}
/**
 * Exposes this tx data as a Lucene searcher. The searcher is refreshed
 * unless the context explicitly trades correctness for speed.
 */
@Override
IndexSearcher asSearcher( TxDataHolder holder, QueryContext context )
{
    boolean staleViewAllowed = context != null && context.getTradeCorrectnessForSpeed();
    return searcher( !staleViewAllowed );
}
// Exact lookup: delegates to internalQuery with the index type's exact-match query.
@Override
Collection<Long> get( TxDataHolder holder, String key, Object value )
{
    return internalQuery( index.type.get( key, value ), null );
}
// Always empty here: FullTxData folds orphan markers directly into its queries
// (see includeOrphans), so there is nothing separate to report.
@Override
Collection<Long> getOrphans( String key )
{
    return emptyList();
}
}
| false
|
community_lucene-index_src_main_java_org_neo4j_index_impl_lucene_FullTxData.java
|
5,787
|
/**
 * Test fixture process: creates an index entry in one transaction, then in a
 * second transaction deletes the first index and adds to a second one, and
 * finally exits abruptly via System.exit without a clean database shutdown.
 * Presumably used to exercise recovery of index-deletion log records —
 * confirm against the test that spawns this process.
 */
public class AddThenDeleteInAnotherTxAndQuit
{
    public static void main( String[] args )
    {
        // args[0] is the store directory to open.
        GraphDatabaseService db = new GraphDatabaseFactory().newEmbeddedDatabase( args[0] );
        Transaction tx = db.beginTx();
        Index<Node> index;
        Index<Node> index2;
        try
        {
            index = db.index().forNodes( "index" );
            index2 = db.index().forNodes( "index2" );
            Node node = db.createNode();
            index.add( node, "key", "value" );
            tx.success();
        }
        finally
        {
            tx.finish();
        }
        tx = db.beginTx();
        try
        {
            index.delete();
            index2.add( db.createNode(), "key", "value" );
            tx.success();
        }
        finally
        {
            tx.finish();
        }
        // Intentionally no db.shutdown(): quit hard so recovery has work to do.
        System.exit( 0 );
    }
}
| false
|
community_lucene-index_src_test_java_org_neo4j_index_impl_lucene_AddThenDeleteInAnotherTxAndQuit.java
|
5,788
|
/**
 * Transaction state for an index kept as plain maps (key -> value -> entity ids),
 * which is cheap for exact add/remove/get. As soon as a real Lucene query or a
 * searcher is needed, it converts itself into a {@link FullTxData} (an in-memory
 * Lucene index) and swaps itself out via the {@link TxDataHolder}.
 */
public class ExactTxData extends TxData
{
    // key -> value -> ids added in this tx. A null key or null value represents
    // an orphan ("remove all") marker, mirroring FullTxData's orphan handling.
    private Map<String, Map<Object, Set<Object>>> data;
    // Set once any null key or null value has been stored.
    private boolean hasOrphans;

    ExactTxData( LuceneIndex index )
    {
        super( index );
    }

    // Records entityId as added under key=value.
    @Override
    void add( TxDataHolder holder, Object entityId, String key, Object value )
    {
        idCollection( key, value, true ).add( entityId );
    }

    /**
     * Returns the id set for key/value, creating it (and flagging orphans for a
     * null value) when {@code create} is true; otherwise may return null.
     */
    private Set<Object> idCollection( String key, Object value, boolean create )
    {
        Map<Object, Set<Object>> keyMap = keyMap( key, create );
        if ( keyMap == null )
        {
            return null;
        }
        Set<Object> ids = keyMap.get( value );
        if ( ids == null && create )
        {
            ids = new HashSet<Object>();
            keyMap.put( value, ids );
            if ( value == null )
            {
                hasOrphans = true;
            }
        }
        return ids;
    }

    /**
     * Returns the value map for key, lazily creating the outer map and the
     * per-key map (and flagging orphans for a null key) when {@code create}
     * is true; otherwise may return null.
     */
    private Map<Object, Set<Object>> keyMap( String key, boolean create )
    {
        if ( data == null )
        {
            if ( create )
            {
                data = new HashMap<String, Map<Object,Set<Object>>>();
            }
            else
            {
                return null;
            }
        }
        Map<Object, Set<Object>> inner = data.get( key );
        if ( inner == null && create )
        {
            inner = new HashMap<Object, Set<Object>>();
            data.put( key, inner );
            if ( key == null )
            {
                hasOrphans = true;
            }
        }
        return inner;
    }

    // Replays every recorded addition into a FullTxData (in-memory Lucene index).
    private TxData toFullTxData()
    {
        FullTxData data = new FullTxData( index );
        if ( this.data != null )
        {
            for ( Map.Entry<String, Map<Object, Set<Object>>> entry : this.data.entrySet() )
            {
                String key = entry.getKey();
                for ( Map.Entry<Object, Set<Object>> valueEntry : entry.getValue().entrySet() )
                {
                    Object value = valueEntry.getKey();
                    for ( Object id : valueEntry.getValue() )
                    {
                        data.add( null, id, key, value );
                    }
                }
            }
        }
        return data;
    }

    // Nothing to release: state is plain in-heap maps.
    @Override
    void close()
    {
    }

    /**
     * Arbitrary queries can't be answered from the maps: escalate to FullTxData,
     * install it in the holder so subsequent calls go there, and delegate.
     * When speed is explicitly preferred over correctness, skip tx state entirely.
     */
    @Override
    Collection<Long> query( TxDataHolder holder, Query query, QueryContext contextOrNull )
    {
        if ( contextOrNull != null && contextOrNull.getTradeCorrectnessForSpeed() )
        {
            return Collections.<Long>emptyList();
        }
        TxData fullTxData = toFullTxData();
        holder.set( fullTxData );
        return fullTxData.query( holder, query, contextOrNull );
    }

    /**
     * Removes entityId from key=value. Key-wide or entity-wide removals (null
     * key/value) can't be expressed in the maps, so those escalate to FullTxData.
     */
    @Override
    void remove( TxDataHolder holder, Object entityId, String key, Object value )
    {
        if ( data == null )
        {
            return;
        }
        if ( key == null || value == null )
        {
            TxData fullData = toFullTxData();
            fullData.remove( holder, entityId, key, value );
            holder.set( fullData );
        }
        else
        {
            Collection<Object> ids = idCollection( key, value, false );
            if ( ids != null )
            {
                ids.remove( entityId );
            }
        }
    }

    /**
     * Exact lookup. Non-ValueContext values are matched by their string
     * representation — presumably mirroring how values were stored; confirm
     * against the add path in LuceneIndex.
     */
    @Override
    Collection<Long> get( TxDataHolder holder, String key, Object value )
    {
        value = value instanceof ValueContext ? ((ValueContext) value).getCorrectValue() : value.toString();
        Set<Object> ids = idCollection( key, value, false );
        if ( ids == null || ids.isEmpty() )
        {
            return Collections.<Long>emptySet();
        }
        return toLongs( ids );
    }

    /**
     * Returns ids with entity-wide orphan markers merged with key-wide markers
     * for {@code key}, or null when no orphans exist at all.
     */
    @Override
    Collection<Long> getOrphans( String key )
    {
        if ( !hasOrphans )
        {
            return null;
        }
        Set<Object> orphans = idCollection( null, null, false );
        Set<Object> keyOrphans = idCollection( key, null, false );
        Collection<Long> orphanLongs = orphans != null ? toLongs( orphans ) : null;
        Collection<Long> keyOrphanLongs = keyOrphans != null ? toLongs( keyOrphans ) : null;
        return LuceneTransaction.merge( orphanLongs, keyOrphanLongs );
    }

    /**
     * Normalizes an id set to longs. Ids are homogeneous per index: either
     * node ids (Long) or RelationshipId wrappers, so probing the first
     * element is sufficient.
     */
    @SuppressWarnings( { "unchecked", "rawtypes" } )
    private Collection<Long> toLongs( Set<Object> ids )
    {
        if (ids.isEmpty()) return Collections.emptySet();
        if ( ids.iterator().next() instanceof Long )
        {
            return (Collection) ids;
        }
        else
        {
            Collection<Long> longs = new ArrayList<Long>(ids.size());
            for ( Object id : ids )
            {
                longs.add( ((RelationshipId) id).id );
            }
            return longs;
        }
    }

    /**
     * A searcher requires a real Lucene index: escalate to FullTxData and
     * delegate, unless speed is explicitly preferred (then return null).
     */
    @Override
    IndexSearcher asSearcher( TxDataHolder holder, QueryContext context )
    {
        if ( context != null && context.getTradeCorrectnessForSpeed() )
        {
            return null;
        }
        TxData fullTxData = toFullTxData();
        holder.set( fullTxData );
        return fullTxData.asSearcher( holder, context );
    }
}
| false
|
community_lucene-index_src_main_java_org_neo4j_index_impl_lucene_ExactTxData.java
|
5,789
|
// XA command factory that deserializes Lucene index commands from a logical log.
private static class CommandFactory extends XaCommandFactory
{
    @Override
    public XaCommand readCommand( ReadableByteChannel byteChannel, ByteBuffer buffer )
        throws IOException
    {
        // Null data source: commands are only read (for dumping), never applied.
        return LuceneCommand.readCommand( byteChannel, buffer, null );
    }
}
| false
|
community_lucene-index_src_main_java_org_neo4j_index_impl_lucene_DumpLogicalLog.java
|
5,790
|
/**
 * Command-line tool that dumps Lucene index logical logs ("lucene.log" files)
 * in human-readable form, specializing the kernel's generic log dumper with
 * a Lucene command factory.
 */
public class DumpLogicalLog extends org.neo4j.kernel.impl.util.DumpLogicalLog
{
    public DumpLogicalLog( FileSystemAbstraction fileSystem )
    {
        super( fileSystem );
    }

    /**
     * Dumps each path given as a non-option argument. If a path yields no log
     * files but looks like a graph database directory, its "index" subfolder
     * is tried as well.
     */
    public static void main( String[] args ) throws IOException
    {
        FileSystemAbstraction fs = new DefaultFileSystemAbstraction();
        Args arguments = new Args( args );
        TimeZone timeZome = parseTimeZoneConfig( arguments );
        try ( Printer printer = getPrinter( arguments ) )
        {
            for ( String file : arguments.orphans() )
            {
                int dumped = new DumpLogicalLog( fs ).dump( file, printer.getFor( file ), timeZome );
                if ( dumped == 0 && isAGraphDatabaseDirectory( file ) )
                { // If none were found and we really pointed to a neodb directory
                  // then go to its index folder and try there.
                    new DumpLogicalLog( fs ).dump( new File( file, "index" ).getAbsolutePath(),
                            printer.getFor( file ), timeZome );
                }
            }
        }
    }

    @Override
    protected XaCommandFactory instantiateCommandFactory()
    {
        return new CommandFactory();
    }

    // Filename prefix identifying Lucene index log files.
    @Override
    protected String getLogPrefix()
    {
        return "lucene.log";
    }

    // Deserializes Lucene commands from the log; null data source since
    // commands are only read for display, never applied.
    private static class CommandFactory extends XaCommandFactory
    {
        @Override
        public XaCommand readCommand( ReadableByteChannel byteChannel, ByteBuffer buffer )
            throws IOException
        {
            return LuceneCommand.readCommand( byteChannel, buffer, null );
        }
    }
}
| false
|
community_lucene-index_src_main_java_org_neo4j_index_impl_lucene_DumpLogicalLog.java
|
5,791
|
/**
 * Adapts an iterator of Lucene {@link Document}s into an iterator of entity
 * ids, filtering out ids in {@code exclude} (e.g. removed in the current tx)
 * and releasing the underlying searcher reference when exhausted or closed.
 */
public class DocToIdIterator extends AbstractIndexHits<Long>
{
    // Ids to skip (e.g. deleted within the current transaction).
    private final Collection<Long> exclude;
    // Searcher reference to release; null once closed.
    private IndexReference searcherOrNull;
    private final IndexHits<Document> source;

    public DocToIdIterator( IndexHits<Document> source, Collection<Long> exclude, IndexReference searcherOrNull )
    {
        this.source = source;
        this.exclude = exclude;
        this.searcherOrNull = searcherOrNull;
        // No hits at all: release the searcher immediately.
        if ( source.size() == 0 )
        {
            close();
        }
    }

    /**
     * Returns the next non-excluded id, or null when the source is exhausted
     * (in which case the searcher is released).
     */
    @Override
    protected Long fetchNextOrNull()
    {
        Long result = null;
        while ( result == null )
        {
            if ( !source.hasNext() )
            {
                endReached();
                break;
            }
            Document doc = source.next();
            Long id = Long.valueOf( doc.get( LuceneIndex.KEY_DOC_ID ) );
            if ( !exclude.contains( id ) )
            {
                result = id;
            }
        }
        return result;
    }

    protected void endReached()
    {
        close();
    }

    // Idempotent: releases the searcher reference once.
    @Override
    public void close()
    {
        if ( !isClosed() )
        {
            this.searcherOrNull.close();
            this.searcherOrNull = null;
        }
    }

    @Override
    public int size()
    {
        /*
         * If stuff was removed from the index during this tx and during the same tx a query that matches them is
         * issued, then it is possible to get negative size from the IndexHits result, if exclude is larger than source.
         * To avoid such weirdness, we return at least 0. Note that the iterator will return no results, as it should.
         */
        return Math.max( 0, source.size() - exclude.size() );
    }

    private boolean isClosed()
    {
        return searcherOrNull==null;
    }

    @Override
    public float currentScore()
    {
        return source.currentScore();
    }

    // Safety net: make sure the searcher reference is released even if the
    // caller forgot to close/exhaust this iterator.
    @Override
    protected void finalize() throws Throwable
    {
        close();
        super.finalize();
    }
}
| false
|
community_lucene-index_src_main_java_org_neo4j_index_impl_lucene_DocToIdIterator.java
|
5,792
|
// Test worker command that flags the worker's shared state as no longer alive.
public class DieCommand implements WorkerCommand<CommandState, Void>
{
    @Override
    public Void doWork( CommandState state )
    {
        state.alive = false;
        return null;
    }
}
| false
|
community_lucene-index_src_test_java_org_neo4j_index_impl_lucene_DieCommand.java
|
5,793
|
// Test worker command that deletes the index held in the worker's shared state.
public class DeleteIndexCommand implements WorkerCommand<CommandState, Void>
{
    @Override
    public Void doWork( CommandState state )
    {
        state.index.delete();
        return null;
    }
}
| false
|
community_lucene-index_src_test_java_org_neo4j_index_impl_lucene_DeleteIndexCommand.java
|
5,794
|
// Test analyzer: whitespace tokenization + lower-casing, recording via the
// static `called` flag that the index actually invoked it.
final class CustomAnalyzer extends Analyzer
{
    static boolean called;

    @Override
    public final TokenStream tokenStream( String fieldName, Reader reader )
    {
        called = true;
        return new LowerCaseFilter( LUCENE_VERSION, new WhitespaceTokenizer( LUCENE_VERSION, reader ) );
    }
}
| false
|
community_lucene-index_src_test_java_org_neo4j_index_impl_lucene_CustomAnalyzer.java
|
5,795
|
/**
 * Test worker command that creates a node, stores it in the worker's shared
 * state, and indexes it under the given key/value.
 */
public class CreateNodeAndIndexByCommand implements WorkerCommand<CommandState, Void>
{
    // Made final: both are assigned only in the constructor.
    private final String key;
    private final String value;

    public CreateNodeAndIndexByCommand( String key, String value )
    {
        this.key = key;
        this.value = value;
    }

    @Override
    public Void doWork( CommandState state )
    {
        state.node = state.graphDb.createNode();
        state.index.add( state.node, key, value );
        return null;
    }
}
| false
|
community_lucene-index_src_test_java_org_neo4j_index_impl_lucene_CreateNodeAndIndexByCommand.java
|
5,796
|
/**
 * Hamcrest matcher asserting that an {@link IndexHits} yields exactly the
 * expected items (same size, order-insensitive membership). Not thread-safe:
 * the mismatch message is stored in a field between matching and describing.
 */
public class Contains<T> extends TypeSafeMatcher<IndexHits<T>>
{
    private final T[] expectedItems;
    // Mismatch description captured during matchesSafely, reported in describeTo.
    private String message;

    public Contains( T... expectedItems )
    {
        this.expectedItems = expectedItems;
    }

    @Override
    public boolean matchesSafely( IndexHits<T> indexHits )
    {
        // Drain the hits into a collection; consumes the iterator.
        Collection<T> collection = IteratorUtil.asCollection( indexHits.iterator() );
        if ( expectedItems.length != collection.size() )
        {
            message = "IndexHits with a size of " + expectedItems.length + ", got one with " + collection.size();
            message += collection.toString();
            return false;
        }
        for ( T item : expectedItems )
        {
            if ( !collection.contains( item ) )
            {
                message = "Item (" + item + ") not found.";
                return false;
            }
        }
        return true;
    }

    @Override
    public void describeTo( Description description )
    {
        if (message != null)
        {
            description.appendText( message );
        }
    }

    // Factory method for readable test assertions: assertThat(hits, contains(a, b)).
    @Factory
    public static <T> Contains<T> contains( T... expectedItems )
    {
        return new Contains<T>( expectedItems );
    }
}
| false
|
community_lucene-index_src_test_java_org_neo4j_index_impl_lucene_Contains.java
|
5,797
|
/**
 * Index hits over a fixed collection where every item carries the same,
 * constant score. Size is snapshotted at construction time.
 */
class ConstantScoreIterator<T> extends AbstractIndexHits<T>
{
    private final Iterator<T> items;
    private final int size;
    private final float score;

    ConstantScoreIterator( Collection<T> items, float score )
    {
        this.items = items.iterator();
        this.score = score;
        this.size = items.size();
    }

    // Added @Override (these implement IndexHits), consistent with the
    // annotations used on fetchNextOrNull and elsewhere in this module.
    @Override
    public float currentScore()
    {
        return this.score;
    }

    @Override
    public int size()
    {
        return this.size;
    }

    @Override
    protected T fetchNextOrNull()
    {
        return items.hasNext() ? items.next() : null;
    }
}
| false
|
community_lucene-index_src_main_java_org_neo4j_index_impl_lucene_ConstantScoreIterator.java
|
5,798
|
/**
 * Hands out XA connections to the Lucene data source, with per-transaction
 * tracking handled by the IndexConnectionBroker base class.
 */
public class ConnectionBroker extends IndexConnectionBroker<LuceneXaConnection>
{
    private final LuceneDataSource xaDs;

    public ConnectionBroker( TransactionManager transactionManager,
            LuceneDataSource dataSource )
    {
        super( transactionManager );
        this.xaDs = dataSource;
    }

    @Override
    protected LuceneXaConnection newConnection()
    {
        return (LuceneXaConnection) xaDs.getXaConnection();
    }
}
| false
|
community_lucene-index_src_main_java_org_neo4j_index_impl_lucene_ConnectionBroker.java
|
5,799
|
// Immutable pairing of a Lucene document with the entity id it belongs to and
// whether the document already existed in the index (update vs. fresh add).
static class DocumentContext
{
    final Document document;
    final boolean exists;
    final long entityId;

    DocumentContext( Document document, boolean exists, long entityId )
    {
        this.document = document;
        this.exists = exists;
        this.entityId = entityId;
    }
}
| false
|
community_lucene-index_src_main_java_org_neo4j_index_impl_lucene_CommitContext.java
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.