Unnamed: 0
int64 0
6.7k
| func
stringlengths 12
89.6k
| target
bool 2
classes | project
stringlengths 45
151
|
|---|---|---|---|
1,000
|
{
@Override
public Void perform( Statement state )
throws KernelException
{
for ( Iterator<IndexDescriptor> indexes = state.readOperations().uniqueIndexesGetAll();
indexes.hasNext(); )
{
IndexDescriptor index = indexes.next();
if ( state.readOperations().indexGetOwningUniquenessConstraintId( index ) == null )
{
state.schemaWriteOperations().uniqueIndexDrop( index );
}
}
return null;
}
} );
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_api_index_RemoveOrphanConstraintIndexesOnStartup.java
|
1,001
|
/**
 * Startup task that drops unique indexes which have no owning uniqueness
 * constraint ("orphans" — presumably left behind by an interrupted constraint
 * creation; confirm against the constraint-creation code path).
 */
public class RemoveOrphanConstraintIndexesOnStartup
{
    private final StringLogger log;
    private final Transactor transactor;

    public RemoveOrphanConstraintIndexesOnStartup( Transactor transactor, Logging logging )
    {
        this.transactor = transactor;
        this.log = logging.getMessagesLog( getClass() );
    }

    /**
     * Scans all unique indexes in a single transaction and drops every one
     * whose owning-constraint id is null. Failures are logged and swallowed so
     * startup is never aborted by this cleanup.
     */
    public void perform()
    {
        try
        {
            transactor.execute( new Transactor.Work<Void, KernelException>()
            {
                @Override
                public Void perform( Statement state ) throws KernelException
                {
                    Iterator<IndexDescriptor> indexes = state.readOperations().uniqueIndexesGetAll();
                    while ( indexes.hasNext() )
                    {
                        IndexDescriptor index = indexes.next();
                        // A null owning-constraint id marks this unique index as an orphan.
                        if ( state.readOperations().indexGetOwningUniquenessConstraintId( index ) == null )
                        {
                            state.schemaWriteOperations().uniqueIndexDrop( index );
                        }
                    }
                    return null;
                }
            } );
        }
        catch ( KernelException | TransactionalException e )
        {
            log.error( "Failed to execute orphan index checking transaction.", e );
        }
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_api_index_RemoveOrphanConstraintIndexesOnStartup.java
|
1,002
|
/**
 * {@link IndexProxy} used while an index is being recovered: it always reports
 * {@link InternalIndexState#POPULATING}, and rejects every operation that is
 * not meaningful during recovery. Update-swallowing behavior is inherited from
 * {@code AbstractSwallowingIndexProxy}.
 */
public class RecoveringIndexProxy extends AbstractSwallowingIndexProxy
{
    public RecoveringIndexProxy( IndexDescriptor descriptor, SchemaIndexProvider.Descriptor providerDescriptor )
    {
        // Third super argument is passed as null here — presumably "no
        // population failure" during recovery; confirm with
        // AbstractSwallowingIndexProxy's constructor contract.
        super( descriptor, providerDescriptor, null );
    }

    @Override
    public InternalIndexState getState()
    {
        // A recovering index is always considered to still be populating.
        return InternalIndexState.POPULATING;
    }

    @Override
    public boolean awaitStoreScanCompleted() throws IndexPopulationFailedKernelException, InterruptedException
    {
        throw unsupportedOperation( "Cannot await population on a recovering index." );
    }

    @Override
    public void activate()
    {
        throw unsupportedOperation( "Cannot activate recovering index." );
    }

    @Override
    public void validate()
    {
        throw unsupportedOperation( "Cannot validate recovering index." );
    }

    @Override
    public ResourceIterator<File> snapshotFiles()
    {
        throw unsupportedOperation( "Cannot snapshot a recovering index." );
    }

    @Override
    public Future<Void> drop()
    {
        // Dropping is allowed and is a no-op; VOID is an already-completed future.
        return VOID;
    }

    @Override
    public IndexPopulationFailure getPopulationFailure() throws IllegalStateException
    {
        // A recovering index has no failure to report.
        throw new IllegalStateException( this + " is recovering" );
    }

    /**
     * Builds the UnsupportedOperationException thrown by the rejecting
     * operations above, appending this proxy's descriptor for context.
     */
    private UnsupportedOperationException unsupportedOperation( String message )
    {
        return new UnsupportedOperationException( message + " Recovering Index" + getDescriptor() );
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_api_index_RecoveringIndexProxy.java
|
1,003
|
{
@Override
public void create()
{
try
{
latch.await();
}
catch ( InterruptedException e )
{
// fall through and return early
}
throw new RuntimeException( "this is expected" );
}
};
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_api_index_IndexRecoveryIT.java
|
1,004
|
{
@Override
public Void call() throws Exception
{
killDb();
return null;
}
} );
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_api_index_IndexRecoveryIT.java
|
1,005
|
{
@Override
public void run()
{
db.shutdown();
db = null;
}
} );
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_api_index_IndexRecoveryIT.java
|
1,006
|
/**
 * Integration test verifying that index state (POPULATING / ONLINE / FAILED)
 * is correctly restored when the database is killed and recovered. A mocked
 * {@code SchemaIndexProvider} lets each test control population timing (via a
 * latch-blocked populator) and verify how often the provider is consulted.
 */
public class IndexRecoveryIT
{
    @Test
    public void shouldBeAbleToRecoverInTheMiddleOfPopulatingAnIndex() throws Exception
    {
        // Given: an index whose population blocks on a latch.
        startDb();
        CountDownLatch latch = new CountDownLatch( 1 );
        when( mockedIndexProvider.getPopulator( anyLong(), any( IndexDescriptor.class ), any( IndexConfiguration.class ) ) )
                .thenReturn( indexPopulatorWithControlledCompletionTiming( latch ) );
        createIndex( myLabel );
        // And Given: the db is killed while population is in flight.
        Future<Void> killFuture = killDbInSeparateThread();
        latch.countDown();
        killFuture.get();
        // When: the db restarts with the provider reporting POPULATING.
        when( mockedIndexProvider.getInitialState( anyLong() ) ).thenReturn( InternalIndexState.POPULATING );
        latch = new CountDownLatch( 1 );
        when( mockedIndexProvider.getPopulator( anyLong(), any( IndexDescriptor.class ), any( IndexConfiguration.class ) ) )
                .thenReturn( indexPopulatorWithControlledCompletionTiming( latch ) );
        startDb();
        // Then: recovery re-populates (second getPopulator call) and never
        // asks for an online accessor.
        assertThat( getIndexes( db, myLabel ), inTx( db, hasSize( 1 ) ) );
        assertThat( getIndexes( db, myLabel ), inTx( db, haveState( db, Schema.IndexState.POPULATING ) ) );
        verify( mockedIndexProvider, times( 2 ) ).getPopulator( anyLong(), any( IndexDescriptor.class ), any( IndexConfiguration.class ) );
        verify( mockedIndexProvider, times( 0 ) ).getOnlineAccessor( anyLong(), any( IndexConfiguration.class ) );
        latch.countDown();
    }

    @Test
    public void shouldBeAbleToRecoverInTheMiddleOfPopulatingAnIndexWhereLogHasRotated() throws Exception
    {
        // Given: same as above, but the logical log rotates before the kill,
        // so recovery must cross a log-rotation boundary.
        startDb();
        CountDownLatch latch = new CountDownLatch( 1 );
        when( mockedIndexProvider.getPopulator( anyLong(), any( IndexDescriptor.class ), any( IndexConfiguration.class ) ) )
                .thenReturn( indexPopulatorWithControlledCompletionTiming( latch ) );
        createIndex( myLabel );
        rotateLogs();
        // And Given
        Future<Void> killFuture = killDbInSeparateThread();
        latch.countDown();
        killFuture.get();
        latch = new CountDownLatch( 1 );
        when( mockedIndexProvider.getPopulator( anyLong(), any( IndexDescriptor.class ), any( IndexConfiguration.class ) ) )
                .thenReturn( indexPopulatorWithControlledCompletionTiming( latch ) );
        when( mockedIndexProvider.getInitialState( anyLong() ) ).thenReturn( InternalIndexState.POPULATING );
        // When
        startDb();
        // Then
        assertThat( getIndexes( db, myLabel ), inTx( db, hasSize( 1 ) ) );
        assertThat( getIndexes( db, myLabel ), inTx( db, haveState( db, Schema.IndexState.POPULATING ) ) );
        verify( mockedIndexProvider, times( 2 ) ).getPopulator( anyLong(), any( IndexDescriptor.class ), any( IndexConfiguration.class ) );
        verify( mockedIndexProvider, times( 0 ) ).getOnlineAccessor( anyLong(), any( IndexConfiguration.class ) );
        latch.countDown();
    }

    @Test
    public void shouldBeAbleToRecoverAndUpdateOnlineIndex() throws Exception
    {
        // Given: an index brought fully online, then some property updates.
        startDb();
        when( mockedIndexProvider.getPopulator( anyLong(), any( IndexDescriptor.class ), any( IndexConfiguration.class ) ) )
                .thenReturn( mock( IndexPopulator.class ) );
        IndexAccessor mockedAccessor = mock( IndexAccessor.class );
        when( mockedAccessor.newUpdater( any( IndexUpdateMode.class ) ) ).thenReturn( SwallowingIndexUpdater.INSTANCE );
        when( mockedIndexProvider.getOnlineAccessor( anyLong(), any( IndexConfiguration.class ) ) )
                .thenReturn( mockedAccessor );
        createIndexAndAwaitPopulation( myLabel );
        Set<NodePropertyUpdate> expectedUpdates = createSomeBananas( myLabel );
        // And Given: a crash, with the provider reporting ONLINE on restart
        // and a gathering writer to capture recovered updates.
        killDb();
        when( mockedIndexProvider.getInitialState( anyLong() ) ).thenReturn( InternalIndexState.ONLINE );
        GatheringIndexWriter writer = new GatheringIndexWriter();
        when( mockedIndexProvider.getOnlineAccessor( anyLong(), any( IndexConfiguration.class ) ) )
                .thenReturn( writer );
        // When
        startDb();
        // Then: recovery replays the pre-crash updates into the online index.
        assertThat( getIndexes( db, myLabel ), inTx( db, hasSize( 1 ) ) );
        assertThat( getIndexes( db, myLabel ), inTx( db, haveState( db, Schema.IndexState.ONLINE ) ) );
        verify( mockedIndexProvider, times( 1 ) ).getPopulator( anyLong(), any( IndexDescriptor.class ), any( IndexConfiguration.class ) );
        int onlineAccessorInvocationCount = 2; // once when we create the index, and once when we restart the db
        verify( mockedIndexProvider, times( onlineAccessorInvocationCount ) )
                .getOnlineAccessor( anyLong(), any( IndexConfiguration.class ) );
        assertEquals( expectedUpdates, writer.recoveredUpdates );
        for ( NodePropertyUpdate update : writer.recoveredUpdates )
        {
            assertTrue( writer.recoveredNodes.contains( update.getNodeId() ) );
        }
    }

    @Test
    public void shouldKeepFailedIndexesAsFailedAfterRestart() throws Exception
    {
        // Given: a normally-created index.
        when( mockedIndexProvider.getPopulator( anyLong(), any( IndexDescriptor.class ), any( IndexConfiguration.class ) ) )
                .thenReturn( mock( IndexPopulator.class ) );
        when( mockedIndexProvider.getOnlineAccessor( anyLong(), any( IndexConfiguration.class ) ) )
                .thenReturn( mock( IndexAccessor.class ) );
        startDb();
        createIndex( myLabel );
        // And Given: the provider reports FAILED after a crash.
        killDb();
        when( mockedIndexProvider.getInitialState( anyLong() ) ).thenReturn( InternalIndexState.FAILED );
        // When
        startDb();
        // Then: the index stays FAILED; recovery does not retry population
        // beyond the second getPopulator call.
        assertThat( getIndexes( db, myLabel ), inTx( db, hasSize( 1 ) ) );
        assertThat( getIndexes( db, myLabel ), inTx( db, haveState( db, Schema.IndexState.FAILED ) ) );
        verify( mockedIndexProvider, times( 2 ) ).getPopulator( anyLong(), any( IndexDescriptor.class ), any( IndexConfiguration.class ) );
    }

    // Test fixtures. db is replaced across restarts; fs is an in-memory
    // filesystem so kills/snapshots never touch disk.
    @SuppressWarnings("deprecation") private GraphDatabaseAPI db;
    @Rule public EphemeralFileSystemRule fs = new EphemeralFileSystemRule();
    private final SchemaIndexProvider mockedIndexProvider = mock( SchemaIndexProvider.class );
    private final KernelExtensionFactory<?> mockedIndexProviderFactory =
            singleInstanceSchemaIndexProviderFactory( TestSchemaIndexProviderDescriptor.PROVIDER_DESCRIPTOR.getKey(),
                    mockedIndexProvider );
    private final String key = "number_of_bananas_owned";
    private final Label myLabel = label( "MyLabel" );

    @Before
    public void setUp()
    {
        when( mockedIndexProvider.getProviderDescriptor() )
                .thenReturn( TestSchemaIndexProviderDescriptor.PROVIDER_DESCRIPTOR );
        when( mockedIndexProvider.compareTo( any( SchemaIndexProvider.class ) ) )
                .thenReturn( 1 ); // always pretend to have highest priority
    }

    /** (Re)starts an impermanent db backed by the ephemeral fs and the mocked provider. */
    @SuppressWarnings("deprecation")
    private void startDb()
    {
        if ( db != null )
        {
            db.shutdown();
        }
        TestGraphDatabaseFactory factory = new TestGraphDatabaseFactory();
        factory.setFileSystem( fs.get() );
        factory.addKernelExtensions( Arrays.<KernelExtensionFactory<?>>asList( mockedIndexProviderFactory ) );
        db = (GraphDatabaseAPI) factory.newImpermanentDatabase();
    }

    /**
     * Simulates a crash: snapshots the filesystem state from before a clean
     * shutdown, so the next startDb() must run recovery.
     */
    private void killDb()
    {
        if ( db != null )
        {
            fs.snapshot( new Runnable()
            {
                @Override
                public void run()
                {
                    db.shutdown();
                    db = null;
                }
            } );
        }
    }

    /** Runs killDb() on a worker thread so the test thread can release latches. */
    private Future<Void> killDbInSeparateThread()
    {
        ExecutorService executor = newSingleThreadExecutor();
        Future<Void> result = executor.submit( new Callable<Void>()
        {
            @Override
            public Void call() throws Exception
            {
                killDb();
                return null;
            }
        } );
        executor.shutdown();
        return result;
    }

    @After
    public void after()
    {
        if ( db != null )
        {
            db.shutdown();
        }
    }

    private void rotateLogs()
    {
        db.getDependencyResolver().resolveDependency( XaDataSourceManager.class ).rotateLogicalLogs();
    }

    private void createIndexAndAwaitPopulation( Label label )
    {
        IndexDefinition index = createIndex( label );
        try ( Transaction tx = db.beginTx() )
        {
            db.schema().awaitIndexOnline( index, 10, SECONDS );
            tx.success();
        }
    }

    private IndexDefinition createIndex( Label label )
    {
        try ( Transaction tx = db.beginTx() )
        {
            IndexDefinition index = db.schema().indexFor( label ).on( key ).create();
            tx.success();
            return index;
        }
    }

    /**
     * Creates two nodes with the banana-count property and returns the
     * NodePropertyUpdates that recovery is expected to replay for them.
     */
    private Set<NodePropertyUpdate> createSomeBananas( Label label )
    {
        Set<NodePropertyUpdate> updates = new HashSet<>();
        try ( Transaction tx = db.beginTx() )
        {
            ThreadToStatementContextBridge ctxProvider = db.getDependencyResolver().resolveDependency(
                    ThreadToStatementContextBridge.class );
            try ( Statement statement = ctxProvider.instance() )
            {
                for ( int number : new int[] {4, 10} )
                {
                    Node node = db.createNode( label );
                    node.setProperty( key, number );
                    updates.add( NodePropertyUpdate.add( node.getId(),
                            statement.readOperations().propertyKeyGetForName( key ), number,
                            new long[]{statement.readOperations().labelGetForName( label.name() )} ) );
                }
            }
            tx.success();
            return updates;
        }
    }

    /**
     * IndexAccessor stub that records updates by mode: ONLINE updates go to
     * regularUpdates, RECOVERY updates to recoveredUpdates, and removed node
     * ids to recoveredNodes.
     */
    public static class GatheringIndexWriter extends IndexAccessor.Adapter
    {
        private final Set<NodePropertyUpdate> regularUpdates = new HashSet<>();
        private final Set<NodePropertyUpdate> recoveredUpdates = new HashSet<>();
        private final Set<Long> recoveredNodes = new HashSet<>();

        @Override
        public IndexUpdater newUpdater( final IndexUpdateMode mode )
        {
            return new CollectingIndexUpdater()
            {
                @Override
                public void close() throws IOException, IndexEntryConflictException
                {
                    switch (mode)
                    {
                        case ONLINE:
                            regularUpdates.addAll( updates );
                            break;
                        case RECOVERY:
                            recoveredUpdates.addAll( updates );
                            break;
                        default:
                            throw new UnsupportedOperationException( );
                    }
                }

                @Override
                public void remove( Iterable<Long> nodeIds ) throws IOException
                {
                    for ( Long nodeId : nodeIds )
                    {
                        recoveredNodes.add( nodeId );
                    }
                }
            };
        }
    }

    /**
     * Returns a populator whose create() blocks on the given latch and then
     * throws; the exception is expected (and swallowed) by the population job.
     */
    private IndexPopulator indexPopulatorWithControlledCompletionTiming( final CountDownLatch latch )
    {
        return new IndexPopulator.Adapter()
        {
            @Override
            public void create()
            {
                try
                {
                    latch.await();
                }
                catch ( InterruptedException e )
                {
                    // fall through and return early
                }
                throw new RuntimeException( "this is expected" );
            }
        };
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_api_index_IndexRecoveryIT.java
|
1,007
|
/**
 * No-op {@link IndexProxy} base class for tests: lifecycle methods do
 * nothing, queries return neutral values (VOID futures, empty reader/iterator,
 * null descriptors), and state accessors are unsupported. Subclasses override
 * only what a given test needs.
 */
public class IndexProxyAdapter implements IndexProxy
{
    @Override
    public void start()
    {
    }

    @Override
    public IndexUpdater newUpdater( IndexUpdateMode mode )
    {
        // Updates are silently discarded.
        return SwallowingIndexUpdater.INSTANCE;
    }

    @Override
    public Future<Void> drop()
    {
        return VOID;
    }

    @Override
    public InternalIndexState getState()
    {
        // Tests that care about state must override this.
        throw new UnsupportedOperationException();
    }

    @Override
    public void force()
    {
    }

    @Override
    public Future<Void> close()
    {
        return VOID;
    }

    @Override
    public IndexDescriptor getDescriptor()
    {
        return null;
    }

    @Override
    public SchemaIndexProvider.Descriptor getProviderDescriptor()
    {
        return null;
    }

    @Override
    public IndexReader newReader()
    {
        return IndexReader.EMPTY;
    }

    @Override
    public boolean awaitStoreScanCompleted()
    {
        throw new UnsupportedOperationException();
    }

    @Override
    public void activate()
    {
    }

    @Override
    public void validate()
    {
    }

    @Override
    public ResourceIterator<File> snapshotFiles()
    {
        return emptyIterator();
    }

    @Override
    public IndexPopulationFailure getPopulationFailure() throws IllegalStateException
    {
        // This adapter never represents a failed index.
        throw new IllegalStateException( "This index isn't failed" );
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_api_index_IndexProxyAdapter.java
|
1,008
|
/**
 * Thrown when no index provider can be found for a given index rule id.
 */
public class IndexProviderNotFoundException extends RuntimeException
{
    /**
     * @param ruleId id of the index rule for which no provider was found
     */
    public IndexProviderNotFoundException( long ruleId )
    {
        this( ruleId, null );
    }

    /**
     * @param ruleId id of the index rule for which no provider was found
     * @param cause underlying failure, or null if none
     */
    public IndexProviderNotFoundException( long ruleId, Throwable cause )
    {
        super( buildMessage( ruleId ), cause );
    }

    // Single place the message format lives.
    private static String buildMessage( long ruleId )
    {
        return "Did not find index provider for index rule: " + ruleId;
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_api_index_IndexProviderNotFoundException.java
|
1,009
|
{
@Override
public void process( NodePropertyUpdate update ) throws IOException, IndexEntryConflictException
{
switch ( update.getUpdateMode() )
{
case ADDED:
case CHANGED:
added.put( update.getNodeId(), update.getValueAfter() );
break;
case REMOVED:
removed.put( update.getNodeId(), update.getValueBefore() );
break;
default:
throw new IllegalArgumentException( update.getUpdateMode().name() );
}
}
@Override
public void close() throws IOException, IndexEntryConflictException
{
}
@Override
public void remove( Iterable<Long> nodeIds )
{
throw new UnsupportedOperationException( "not expected" );
}
};
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_api_index_IndexPopulationJobTest.java
|
1,010
|
/**
 * IndexPopulator test double that injects a concurrent "remove" update for a
 * chosen node into the running population job (triggered when node id 2 is
 * added), and records every value added or removed so the test can assert on
 * the final picture.
 */
private class NodeDeletingWriter extends IndexPopulator.Adapter
{
    // nodeId -> property value, as observed via add() and the updater.
    private final Map<Long, Object> added = new HashMap<>();
    private final Map<Long, Object> removed = new HashMap<>();
    private final long nodeToDelete;
    private IndexPopulationJob job; // set after construction via setJob()
    private final int propertyKeyId;
    private final Object valueToDelete;
    private final int label;

    public NodeDeletingWriter( long nodeToDelete, int propertyKeyId, Object valueToDelete, int label )
    {
        this.nodeToDelete = nodeToDelete;
        this.propertyKeyId = propertyKeyId;
        this.valueToDelete = valueToDelete;
        this.label = label;
    }

    public void setJob( IndexPopulationJob job )
    {
        this.job = job;
    }

    @Override
    public void add( long nodeId, Object propertyValue )
    {
        // When the scan reaches node 2, fire the removal of nodeToDelete so it
        // races with the ongoing population.
        if ( nodeId == 2 )
        {
            job.update( remove( nodeToDelete, propertyKeyId, valueToDelete, new long[]{label} ) );
        }
        added.put( nodeId, propertyValue );
    }

    @Override
    public IndexUpdater newPopulatingUpdater( PropertyAccessor propertyAccessor )
    {
        return new IndexUpdater()
        {
            @Override
            public void process( NodePropertyUpdate update ) throws IOException, IndexEntryConflictException
            {
                switch ( update.getUpdateMode() )
                {
                    case ADDED:
                    case CHANGED:
                        added.put( update.getNodeId(), update.getValueAfter() );
                        break;
                    case REMOVED:
                        removed.put( update.getNodeId(), update.getValueBefore() );
                        break;
                    default:
                        throw new IllegalArgumentException( update.getUpdateMode().name() );
                }
            }

            @Override
            public void close() throws IOException, IndexEntryConflictException
            {
            }

            @Override
            public void remove( Iterable<Long> nodeIds )
            {
                // Bulk removal is not part of this test's expectations.
                throw new UnsupportedOperationException( "not expected" );
            }
        };
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_api_index_IndexPopulationJobTest.java
|
1,011
|
{
@Override
public void process( NodePropertyUpdate update ) throws IOException, IndexEntryConflictException
{
switch ( update.getUpdateMode() )
{
case ADDED:
case CHANGED:
added.add( Pair.of( update.getNodeId(), update.getValueAfter() ) );
break;
default:
throw new IllegalArgumentException( update.getUpdateMode().name() );
}
}
@Override
public void close() throws IOException, IndexEntryConflictException
{
}
@Override
public void remove( Iterable<Long> nodeIds )
{
throw new UnsupportedOperationException( "not expected" );
}
};
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_api_index_IndexPopulationJobTest.java
|
1,012
|
/**
 * IndexPopulator test double that injects a concurrent "change" update for a
 * chosen node into the running population job (triggered when node id 2 is
 * added), and records every (nodeId, value) pair it sees.
 */
private class NodeChangingWriter extends IndexPopulator.Adapter
{
    // Pairs observed via add() and the populating updater.
    private final Set<Pair<Long, Object>> added = new HashSet<>();
    private IndexPopulationJob job; // set after construction via setJob()
    private final long nodeToChange;
    private final Object newValue;
    private final Object previousValue;
    private final int label, propertyKeyId;

    public NodeChangingWriter( long nodeToChange, int propertyKeyId, Object previousValue, Object newValue,
                               int label )
    {
        this.nodeToChange = nodeToChange;
        this.propertyKeyId = propertyKeyId;
        this.previousValue = previousValue;
        this.newValue = newValue;
        this.label = label;
    }

    @Override
    public void add( long nodeId, Object propertyValue )
    {
        // When the scan reaches node 2, fire the change of nodeToChange so it
        // races with the ongoing population.
        if ( nodeId == 2 )
        {
            long[] labels = new long[]{label};
            job.update( change( nodeToChange, propertyKeyId, previousValue, labels, newValue, labels ) );
        }
        added.add( Pair.of( nodeId, propertyValue ) );
    }

    @Override
    public IndexUpdater newPopulatingUpdater( PropertyAccessor propertyAccessor )
    {
        return new IndexUpdater()
        {
            @Override
            public void process( NodePropertyUpdate update ) throws IOException, IndexEntryConflictException
            {
                switch ( update.getUpdateMode() )
                {
                    case ADDED:
                    case CHANGED:
                        added.add( Pair.of( update.getNodeId(), update.getValueAfter() ) );
                        break;
                    default:
                        // REMOVED (and anything else) is not expected here.
                        throw new IllegalArgumentException( update.getUpdateMode().name() );
                }
            }

            @Override
            public void close() throws IOException, IndexEntryConflictException
            {
            }

            @Override
            public void remove( Iterable<Long> nodeIds )
            {
                throw new UnsupportedOperationException( "not expected" );
            }
        };
    }

    public void setJob( IndexPopulationJob job )
    {
        this.job = job;
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_api_index_IndexPopulationJobTest.java
|
1,013
|
/**
 * StoreScan stub whose run() blocks on a DoubleLatch until stop() is called,
 * letting a test pause a population job mid-scan and cancel it deterministically.
 */
private static class ControlledStoreScan implements StoreScan<RuntimeException>
{
    private final DoubleLatch latch = new DoubleLatch();

    @Override
    public void run()
    {
        // Signals that the scan has started, then blocks until finish()
        // — i.e. until stop() is invoked.
        latch.startAndAwaitFinish();
    }

    @Override
    public void stop()
    {
        latch.finish();
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_api_index_IndexPopulationJobTest.java
|
1,014
|
{
@Override
public Void doWork( Void state )
{
job.run();
return null;
}
} );
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_api_index_IndexPopulationJobTest.java
|
1,015
|
public class IndexPopulationJobTest
{
@Test
public void shouldPopulateIndexWithOneNode() throws Exception
{
// GIVEN
String value = "Taylor";
long nodeId = createNode( map( name, value ), FIRST );
IndexPopulationJob job = newIndexPopulationJob( FIRST, name, populator, new FlippableIndexProxy() );
// WHEN
job.run();
// THEN
verify( populator ).create();
verify( populator ).add( nodeId, value );
verify( populator ).verifyDeferredConstraints( indexStoreView );
verify( populator ).close( true );
verifyNoMoreInteractions( populator );
}
@Test
public void shouldFlushSchemaStateAfterPopulation() throws Exception
{
// GIVEN
String value = "Taylor";
createNode( map( name, value ), FIRST );
stateHolder.apply( MapUtil.stringMap( "key", "original_value" ) );
IndexPopulationJob job = newIndexPopulationJob( FIRST, name, populator, new FlippableIndexProxy() );
// WHEN
job.run();
// THEN
String result = stateHolder.get( "key" );
assertEquals( null, result );
}
@Test
public void shouldPopulateIndexWithASmallDataset() throws Exception
{
// GIVEN
String value = "Mattias";
long node1 = createNode( map( name, value ), FIRST );
createNode( map( name, value ), SECOND );
createNode( map( age, 31 ), FIRST );
long node4 = createNode( map( age, 35, name, value ), FIRST );
IndexPopulationJob job = newIndexPopulationJob( FIRST, name, populator, new FlippableIndexProxy() );
// WHEN
job.run();
// THEN
verify( populator ).create();
verify( populator ).add( node1, value );
verify( populator ).add( node4, value );
verify( populator ).verifyDeferredConstraints( indexStoreView );
verify( populator ).close( true );
verifyNoMoreInteractions( populator );
}
@Test
public void shouldIndexUpdatesWhenDoingThePopulation() throws Exception
{
// GIVEN
Object value1 = "Mattias", value2 = "Jacob", value3 = "Stefan", changedValue = "changed";
long node1 = createNode( map( name, value1 ), FIRST );
long node2 = createNode( map( name, value2 ), FIRST );
long node3 = createNode( map( name, value3 ), FIRST );
@SuppressWarnings("UnnecessaryLocalVariable")
long changeNode = node1;
int propertyKeyId = getPropertyKeyForName( name );
NodeChangingWriter populator = new NodeChangingWriter( changeNode, propertyKeyId, value1, changedValue,
labelId );
IndexPopulationJob job = newIndexPopulationJob( FIRST, name, populator, new FlippableIndexProxy() );
populator.setJob( job );
// WHEN
job.run();
// THEN
Set<Pair<Long, Object>> expected = asSet(
Pair.of( node1, value1 ),
Pair.of( node2, value2 ),
Pair.of( node3, value3 ),
Pair.of( node1, changedValue ) );
assertEquals( expected, populator.added );
}
@Test
public void shouldRemoveViaIndexUpdatesWhenDoingThePopulation() throws Exception
{
// GIVEN
String value1 = "Mattias", value2 = "Jacob", value3 = "Stefan";
long node1 = createNode( map( name, value1 ), FIRST );
long node2 = createNode( map( name, value2 ), FIRST );
long node3 = createNode( map( name, value3 ), FIRST );
int propertyKeyId = getPropertyKeyForName( name );
NodeDeletingWriter populator = new NodeDeletingWriter( node2, propertyKeyId, value2, labelId );
IndexPopulationJob job = newIndexPopulationJob( FIRST, name, populator, new FlippableIndexProxy() );
populator.setJob( job );
// WHEN
job.run();
// THEN
Map<Long, Object> expectedAdded = genericMap( node1, value1, node2, value2, node3, value3 );
assertEquals( expectedAdded, populator.added );
Map<Long, Object> expectedRemoved = genericMap( node2, value2 );
assertEquals( expectedRemoved, populator.removed );
}
@Test
public void shouldTransitionToFailedStateIfPopulationJobCrashes() throws Exception
{
// GIVEN
IndexPopulator failingPopulator = mock( IndexPopulator.class );
doThrow( new RuntimeException( "BORK BORK" ) ).when( failingPopulator ).add( anyLong(), any() );
FlippableIndexProxy index = new FlippableIndexProxy();
createNode( map( name, "Taylor" ), FIRST );
IndexPopulationJob job = newIndexPopulationJob( FIRST, name, failingPopulator, index );
// WHEN
job.run();
// THEN
assertThat( index.getState(), equalTo( InternalIndexState.FAILED ) );
}
@Test
public void shouldBeAbleToCancelPopulationJob() throws Exception
{
// GIVEN
createNode( map( name, "Mattias" ), FIRST );
IndexPopulator populator = mock( IndexPopulator.class );
FlippableIndexProxy index = mock( FlippableIndexProxy.class );
IndexStoreView storeView = mock( IndexStoreView.class );
ControlledStoreScan storeScan = new ControlledStoreScan();
when( storeView.visitNodesWithPropertyAndLabel( any( IndexDescriptor.class ),
Matchers.<Visitor<NodePropertyUpdate, RuntimeException>>any() ) ).thenReturn( storeScan );
final IndexPopulationJob job = newIndexPopulationJob( FIRST, name, populator, index, storeView,
StringLogger.DEV_NULL );
OtherThreadExecutor<Void> populationJobRunner = new OtherThreadExecutor<>(
"Population job test runner", null );
Future<Void> runFuture = populationJobRunner.executeDontWait( new WorkerCommand<Void, Void>()
{
@Override
public Void doWork( Void state )
{
job.run();
return null;
}
} );
storeScan.latch.awaitStart();
job.cancel().get();
storeScan.latch.awaitFinish();
// WHEN
runFuture.get();
// THEN
verify( populator, times( 1 ) ).close( false );
verify( index, times( 0 ) ).flip( Matchers.<Callable<Void>>any(), Matchers.<FailedIndexProxyFactory>any() );
}
@Test
public void shouldLogJobProgress() throws Exception
{
// Given
createNode( map( name, "irrelephant" ), FIRST );
TestLogger logger = new TestLogger();
FlippableIndexProxy index = mock( FlippableIndexProxy.class );
IndexPopulationJob job = newIndexPopulationJob( FIRST, name, populator, index, indexStoreView, logger );
// When
job.run();
// Then
logger.assertExactly(
info( "Index population started: [:FIRST(name)]" ),
info( "Index population completed. Index is now online: [:FIRST(name)]" )
);
}
@Test
public void shouldLogJobFailure() throws Exception
{
// Given
createNode( map( name, "irrelephant" ), FIRST );
TestLogger logger = new TestLogger();
FlippableIndexProxy index = mock( FlippableIndexProxy.class );
IndexPopulationJob job = newIndexPopulationJob( FIRST, name, populator, index, indexStoreView, logger );
IllegalStateException failure = new IllegalStateException( "not successful" );
doThrow( failure ).when( populator ).create();
// When
job.run();
// Then
logger.assertAtLeastOnce( error( "Failed to populate index: [:FIRST(name)]", failure ) );
}
@Test
public void shouldFlipToFailedUsingFailedIndexProxyFactory() throws Exception
{
// Given
FailedIndexProxyFactory failureDelegateFactory = mock( FailedIndexProxyFactory.class );
IndexPopulationJob job =
newIndexPopulationJob( FIRST, name, failureDelegateFactory, populator,
new FlippableIndexProxy(), indexStoreView, new TestLogger() );
IllegalStateException failure = new IllegalStateException( "not successful" );
doThrow( failure ).when( populator ).close( true );
// When
job.run();
// Then
verify( failureDelegateFactory ).create( any( Throwable.class ) );
}
@Test
public void shouldCloseAndFailOnFailure() throws Exception
{
createNode( map( name, "irrelephant" ), FIRST );
TestLogger logger = new TestLogger();
FlippableIndexProxy index = mock( FlippableIndexProxy.class );
IndexPopulationJob job = newIndexPopulationJob( FIRST, name, populator, index, indexStoreView, logger );
String failureMessage = "not successful";
IllegalStateException failure = new IllegalStateException( failureMessage );
doThrow( failure ).when( populator ).create();
// When
job.run();
// Then
verify( populator ).markAsFailed( Matchers.contains( failureMessage ) );
}
@Test
public void shouldFailIfDeferredConstraintViolated() throws Exception
{
createNode( map( name, "irrelephant" ), FIRST );
TestLogger logger = new TestLogger();
FlippableIndexProxy index = mock( FlippableIndexProxy.class );
IndexPopulationJob job = newIndexPopulationJob( FIRST, name, populator, index, indexStoreView, logger );
IndexEntryConflictException failure = new PreexistingIndexEntryConflictException( "duplicate value", 0, 1 );
doThrow( failure ).when( populator ).verifyDeferredConstraints( indexStoreView );
// When
job.run();
// Then
verify( populator ).markAsFailed( Matchers.contains( "duplicate value" ) );
}
private static class ControlledStoreScan implements StoreScan<RuntimeException>
{
private final DoubleLatch latch = new DoubleLatch();
@Override
public void run()
{
latch.startAndAwaitFinish();
}
@Override
public void stop()
{
latch.finish();
}
}
private class NodeChangingWriter extends IndexPopulator.Adapter
{
private final Set<Pair<Long, Object>> added = new HashSet<>();
private IndexPopulationJob job;
private final long nodeToChange;
private final Object newValue;
private final Object previousValue;
private final int label, propertyKeyId;
public NodeChangingWriter( long nodeToChange, int propertyKeyId, Object previousValue, Object newValue,
int label )
{
this.nodeToChange = nodeToChange;
this.propertyKeyId = propertyKeyId;
this.previousValue = previousValue;
this.newValue = newValue;
this.label = label;
}
@Override
public void add( long nodeId, Object propertyValue )
{
if ( nodeId == 2 )
{
long[] labels = new long[]{label};
job.update( change( nodeToChange, propertyKeyId, previousValue, labels, newValue, labels ) );
}
added.add( Pair.of( nodeId, propertyValue ) );
}
@Override
public IndexUpdater newPopulatingUpdater( PropertyAccessor propertyAccessor )
{
return new IndexUpdater()
{
@Override
public void process( NodePropertyUpdate update ) throws IOException, IndexEntryConflictException
{
switch ( update.getUpdateMode() )
{
case ADDED:
case CHANGED:
added.add( Pair.of( update.getNodeId(), update.getValueAfter() ) );
break;
default:
throw new IllegalArgumentException( update.getUpdateMode().name() );
}
}
@Override
public void close() throws IOException, IndexEntryConflictException
{
}
@Override
public void remove( Iterable<Long> nodeIds )
{
throw new UnsupportedOperationException( "not expected" );
}
};
}
public void setJob( IndexPopulationJob job )
{
this.job = job;
}
}
private class NodeDeletingWriter extends IndexPopulator.Adapter
{
private final Map<Long, Object> added = new HashMap<>();
private final Map<Long, Object> removed = new HashMap<>();
private final long nodeToDelete;
private IndexPopulationJob job;
private final int propertyKeyId;
private final Object valueToDelete;
private final int label;
public NodeDeletingWriter( long nodeToDelete, int propertyKeyId, Object valueToDelete, int label )
{
this.nodeToDelete = nodeToDelete;
this.propertyKeyId = propertyKeyId;
this.valueToDelete = valueToDelete;
this.label = label;
}
public void setJob( IndexPopulationJob job )
{
this.job = job;
}
@Override
public void add( long nodeId, Object propertyValue )
{
if ( nodeId == 2 )
{
job.update( remove( nodeToDelete, propertyKeyId, valueToDelete, new long[]{label} ) );
}
added.put( nodeId, propertyValue );
}
@Override
public IndexUpdater newPopulatingUpdater( PropertyAccessor propertyAccessor )
{
return new IndexUpdater()
{
@Override
public void process( NodePropertyUpdate update ) throws IOException, IndexEntryConflictException
{
switch ( update.getUpdateMode() )
{
case ADDED:
case CHANGED:
added.put( update.getNodeId(), update.getValueAfter() );
break;
case REMOVED:
removed.put( update.getNodeId(), update.getValueBefore() );
break;
default:
throw new IllegalArgumentException( update.getUpdateMode().name() );
}
}
@Override
public void close() throws IOException, IndexEntryConflictException
{
}
@Override
public void remove( Iterable<Long> nodeIds )
{
throw new UnsupportedOperationException( "not expected" );
}
};
}
}
private ImpermanentGraphDatabase db;
private final Label FIRST = DynamicLabel.label( "FIRST" );
private final Label SECOND = DynamicLabel.label( "SECOND" );
private final String name = "name";
private final String age = "age";
private ThreadToStatementContextBridge ctxProvider;
private NeoStoreIndexStoreView indexStoreView;
private IndexPopulator populator;
private KernelSchemaStateStore stateHolder;
private int labelId;
/**
 * Sets up an impermanent test database plus mocks, and pre-creates the
 * FIRST/SECOND label tokens so tests can resolve their ids.
 */
@Before
public void before() throws Exception
{
db = (ImpermanentGraphDatabase) new TestGraphDatabaseFactory().newImpermanentDatabase();
ctxProvider = db.getDependencyResolver().resolveDependency( ThreadToStatementContextBridge.class );
populator = mock( IndexPopulator.class );
stateHolder = new KernelSchemaStateStore();
indexStoreView = newStoreView();
try ( Transaction tx = db.beginTx() )
{
Statement statement = ctxProvider.instance();
// Token for FIRST is kept; SECOND is created only so the token exists.
labelId = statement.schemaWriteOperations().labelGetOrCreateForName( FIRST.name() );
statement.schemaWriteOperations().labelGetOrCreateForName( SECOND.name() );
statement.close();
tx.success();
}
}
/** Shuts the test database down after each test. */
@After
public void after() throws Exception
{
db.shutdown();
}
/**
 * Convenience overload using the shared store view and a no-op logger.
 */
private IndexPopulationJob newIndexPopulationJob( Label label, String propertyKey, IndexPopulator populator,
FlippableIndexProxy flipper )
{
return newIndexPopulationJob( label, propertyKey, populator, flipper, indexStoreView,
StringLogger.DEV_NULL );
}
/**
 * Convenience overload that plugs in a mocked {@link FailedIndexProxyFactory}.
 */
private IndexPopulationJob newIndexPopulationJob( Label label, String propertyKey,
IndexPopulator populator,
FlippableIndexProxy flipper, IndexStoreView storeView,
StringLogger logger )
{
return newIndexPopulationJob( label, propertyKey,
mock( FailedIndexProxyFactory.class ), populator, flipper, storeView, logger );
}
/**
 * Creates an {@link IndexPopulationJob} for the given label/property combination.
 * Resolves the token ids in a read transaction and wires the flipper with a
 * mocked flip target.
 */
private IndexPopulationJob newIndexPopulationJob( Label label, String propertyKey,
FailedIndexProxyFactory failureDelegateFactory,
IndexPopulator populator,
FlippableIndexProxy flipper, IndexStoreView storeView,
StringLogger logger )
{
IndexDescriptor descriptor;
try ( Transaction tx = db.beginTx() )
{
ReadOperations statement = ctxProvider.instance().readOperations();
descriptor = new IndexDescriptor( statement.labelGetForName( label.name() ),
statement.propertyKeyGetForName( propertyKey ) );
tx.success();
}
flipper.setFlipTarget( mock( IndexProxyFactory.class ) );
return new IndexPopulationJob(
descriptor, PROVIDER_DESCRIPTOR,
format( ":%s(%s)", label.name(), propertyKey ),
failureDelegateFactory,
populator, flipper, storeView,
stateHolder, new SingleLoggingService( logger ) );
}
/**
 * Creates a node with the given properties and labels in its own transaction.
 *
 * @return the id of the created node.
 */
private long createNode( Map<String, Object> properties, Label... labels )
{
try ( Transaction tx = db.beginTx() )
{
Node node = db.createNode( labels );
for ( Map.Entry<String, Object> property : properties.entrySet() )
{
node.setProperty( property.getKey(), property.getValue() );
}
tx.success();
return node.getId();
}
}
/**
 * Looks up the property key token id for the given name in a read transaction.
 */
private int getPropertyKeyForName( String name )
{
try ( Transaction tx = db.beginTx() )
{
int result = ctxProvider.instance().readOperations().propertyKeyGetForName( name );
tx.success();
return result;
}
}
/**
 * Creates a store view over the underlying neo store, with a permissive
 * (mocked) lock service so tests don't block on locking.
 */
private NeoStoreIndexStoreView newStoreView()
{
return new NeoStoreIndexStoreView( mock( LockService.class, RETURNS_MOCKS ),
db.getDependencyResolver().resolveDependency( XaDataSourceManager.class )
.getNeoStoreDataSource().getNeoStore() );
}
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_api_index_IndexPopulationJobTest.java
|
1,016
|
{
@Override
public boolean visit( NodePropertyUpdate update ) throws IndexPopulationFailedKernelException
{
try
{
populator.add( update.getNodeId(), update.getValueAfter() );
populateFromQueueIfAvailable( update.getNodeId() );
}
catch ( IndexEntryConflictException | IOException conflict )
{
throw new IndexPopulationFailedKernelException( descriptor, indexUserDescription, conflict );
}
return false;
}
});
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_api_index_IndexPopulationJob.java
|
1,017
|
{
@Override
public Void call() throws Exception
{
populateFromQueueIfAvailable( Long.MAX_VALUE );
populator.close( true );
updateableSchemaState.clear();
return null;
}
};
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_api_index_IndexPopulationJob.java
|
1,018
|
public class IndexPopulationJob implements Runnable
{
private final IndexStoreView storeView;
private final String indexUserDescription;
// NOTE: unbounded queue expected here
private final Queue<NodePropertyUpdate> queue = new ConcurrentLinkedQueue<>();
private final IndexDescriptor descriptor;
private final FailedIndexProxyFactory failureDelegate;
private final IndexPopulator populator;
private final FlippableIndexProxy flipper;
private final UpdateableSchemaState updateableSchemaState;
private final StringLogger log;
private final CountDownLatch doneSignal = new CountDownLatch( 1 );
private volatile StoreScan<IndexPopulationFailedKernelException> storeScan;
private volatile boolean cancelled;
private final SchemaIndexProvider.Descriptor providerDescriptor;
public IndexPopulationJob(IndexDescriptor descriptor, SchemaIndexProvider.Descriptor providerDescriptor,
String indexUserDescription,
FailedIndexProxyFactory failureDelegateFactory,
IndexPopulator populator, FlippableIndexProxy flipper,
IndexStoreView storeView, UpdateableSchemaState updateableSchemaState,
Logging logging)
{
this.descriptor = descriptor;
this.providerDescriptor = providerDescriptor;
this.populator = populator;
this.flipper = flipper;
this.storeView = storeView;
this.updateableSchemaState = updateableSchemaState;
this.indexUserDescription = indexUserDescription;
this.failureDelegate = failureDelegateFactory;
this.log = logging.getMessagesLog( getClass() );
}
@Override
public void run()
{
String oldThreadName = currentThread().getName();
currentThread().setName( format( "Index populator on %s [runs on: %s]", indexUserDescription, oldThreadName ) );
boolean success = false;
Throwable failureCause = null;
try
{
try
{
log.info( format("Index population started: [%s]", indexUserDescription) );
log.flush();
populator.create();
indexAllNodes();
if ( cancelled )
{
// We remain in POPULATING state
return;
}
Callable<Void> duringFlip = new Callable<Void>()
{
@Override
public Void call() throws Exception
{
populateFromQueueIfAvailable( Long.MAX_VALUE );
populator.close( true );
updateableSchemaState.clear();
return null;
}
};
flipper.flip( duringFlip, failureDelegate );
success = true;
log.info( format("Index population completed. Index is now online: [%s]", indexUserDescription) );
log.flush();
}
catch ( Throwable t )
{
// If the cause of index population failure is a conflict in a (unique) index, the conflict is the
// failure
if ( t instanceof IndexPopulationFailedKernelException )
{
Throwable cause = t.getCause();
if ( cause instanceof IndexEntryConflictException )
{
t = cause;
}
}
// Index conflicts are expected (for unique indexes) so we don't need to log them.
if ( !(t instanceof IndexEntryConflictException) /*TODO: && this is a unique index...*/ )
{
log.error( format("Failed to populate index: [%s]", indexUserDescription), t );
log.flush();
}
// Set failure cause to be stored persistently
failureCause = t;
// The flipper will have already flipped to a failed index context here, but
// it will not include the cause of failure, so we do another flip to a failed
// context that does.
// The reason for having the flipper transition to the failed index context in the first
// place is that we would otherwise introduce a race condition where updates could come
// in to the old context, if something failed in the job we send to the flipper.
flipper.flipTo( new FailedIndexProxy( descriptor, providerDescriptor, indexUserDescription,
populator, failure( t ) ) );
}
finally
{
try
{
if ( !success )
{
if ( failureCause != null )
{
populator.markAsFailed( failure( failureCause ).asString() );
}
populator.close( false );
}
}
catch ( Throwable e )
{
log.error( format("Unable to close failed populator for index: [%s]", indexUserDescription), e );
log.flush();
}
}
}
finally
{
doneSignal.countDown();
currentThread().setName( oldThreadName );
}
}
private void indexAllNodes() throws IndexPopulationFailedKernelException
{
storeScan = storeView.visitNodesWithPropertyAndLabel( descriptor, new Visitor<NodePropertyUpdate,
IndexPopulationFailedKernelException>()
{
@Override
public boolean visit( NodePropertyUpdate update ) throws IndexPopulationFailedKernelException
{
try
{
populator.add( update.getNodeId(), update.getValueAfter() );
populateFromQueueIfAvailable( update.getNodeId() );
}
catch ( IndexEntryConflictException | IOException conflict )
{
throw new IndexPopulationFailedKernelException( descriptor, indexUserDescription, conflict );
}
return false;
}
});
storeScan.run();
try
{
populator.verifyDeferredConstraints( storeView );
}
catch ( Exception conflict )
{
throw new IndexPopulationFailedKernelException( descriptor, indexUserDescription, conflict );
}
}
private void populateFromQueueIfAvailable( final long highestIndexedNodeId )
throws IndexEntryConflictException, IOException
{
if ( !queue.isEmpty() )
{
try ( IndexUpdater updater = populator.newPopulatingUpdater( storeView ) )
{
for ( NodePropertyUpdate update : queue )
{
if ( update.getNodeId() <= highestIndexedNodeId )
{
updater.process( update );
}
}
}
}
}
public Future<Void> cancel()
{
// Stop the population
if ( storeScan != null )
{
cancelled = true;
storeScan.stop();
}
return latchGuardedValue( NO_VALUE, doneSignal );
}
/**
* A transaction happened that produced the given updates. Let this job incorporate its data,
* feeding it to the {@link IndexPopulator}.
*/
public void update( NodePropertyUpdate update )
{
queue.add( update );
}
@Override
public String toString()
{
return getClass().getSimpleName() + "[populator:" + populator + ", descriptor:" + indexUserDescription + "]";
}
public void awaitCompletion() throws InterruptedException
{
doneSignal.await();
}
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_api_index_IndexPopulationJob.java
|
1,019
|
{
@Override
public String asString()
{
return failure;
}
@Override
public IndexPopulationFailedKernelException asIndexPopulationFailure(
IndexDescriptor descriptor, String indexUserDescription )
{
return new IndexPopulationFailedKernelException( descriptor, indexUserDescription, failure );
}
};
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_api_index_IndexPopulationFailure.java
|
1,020
|
{
@Override
public String asString()
{
return Exceptions.stringify( failure );
}
@Override
public IndexPopulationFailedKernelException asIndexPopulationFailure(
IndexDescriptor descriptor, String indexUserDescription )
{
return new IndexPopulationFailedKernelException( descriptor, indexUserDescription, failure );
}
};
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_api_index_IndexPopulationFailure.java
|
1,021
|
/**
 * The cause of an index population failure, captured either as a live
 * {@link Throwable} or as an already-stringified failure message (e.g. one
 * read back from the index's persisted failure state).
 */
public abstract class IndexPopulationFailure
{
    /** @return this failure rendered as a string, suitable for persisting. */
    public abstract String asString();

    /**
     * Wraps this failure in an {@link IndexPopulationFailedKernelException} for the given index.
     *
     * @param descriptor descriptor of the failed index.
     * @param indexUserDescription user-facing description of the failed index.
     */
    // Parameter renamed from "indexUserDescriptor" to "indexUserDescription" for
    // consistency with both implementations below (no effect on callers).
    public abstract IndexPopulationFailedKernelException asIndexPopulationFailure(
            IndexDescriptor descriptor, String indexUserDescription );

    /**
     * Creates a failure backed by a {@link Throwable}; {@link #asString()}
     * renders its full stack trace.
     */
    public static IndexPopulationFailure failure( final Throwable failure )
    {
        return new IndexPopulationFailure()
        {
            @Override
            public String asString()
            {
                return Exceptions.stringify( failure );
            }

            @Override
            public IndexPopulationFailedKernelException asIndexPopulationFailure(
                    IndexDescriptor descriptor, String indexUserDescription )
            {
                return new IndexPopulationFailedKernelException( descriptor, indexUserDescription, failure );
            }
        };
    }

    /** Creates a failure backed by an already-stringified failure message. */
    public static IndexPopulationFailure failure( final String failure )
    {
        return new IndexPopulationFailure()
        {
            @Override
            public String asString()
            {
                return failure;
            }

            @Override
            public IndexPopulationFailedKernelException asIndexPopulationFailure(
                    IndexDescriptor descriptor, String indexUserDescription )
            {
                return new IndexPopulationFailedKernelException( descriptor, indexUserDescription, failure );
            }
        };
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_api_index_IndexPopulationFailure.java
|
1,022
|
/**
 * Test double {@link IndexAccessor} that records which updates were applied
 * online vs. during recovery, plus which node ids were removed during recovery.
 */
public static class GatheringIndexWriter extends IndexAccessor.Adapter
{
// Updates applied with IndexUpdateMode.ONLINE.
private final Set<NodePropertyUpdate> regularUpdates = new HashSet<>();
// Updates applied with IndexUpdateMode.RECOVERY.
private final Set<NodePropertyUpdate> recoveredUpdates = new HashSet<>();
// Node ids passed to remove().
private final Set<Long> recoveredNodes = new HashSet<>();
@Override
public IndexUpdater newUpdater( final IndexUpdateMode mode )
{
return new CollectingIndexUpdater()
{
@Override
public void close() throws IOException, IndexEntryConflictException
{
// On close, route the collected updates to the bucket matching the mode.
switch (mode)
{
case ONLINE:
regularUpdates.addAll( updates );
break;
case RECOVERY:
recoveredUpdates.addAll( updates );
break;
default:
throw new UnsupportedOperationException( );
}
}
@Override
public void remove( Iterable<Long> nodeIds ) throws IOException
{
for ( Long nodeId : nodeIds )
{
recoveredNodes.add( nodeId );
}
}
};
}
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_api_index_IndexRecoveryIT.java
|
1,023
|
{
@Override
public void close() throws IOException, IndexEntryConflictException
{
switch (mode)
{
case ONLINE:
regularUpdates.addAll( updates );
break;
case RECOVERY:
recoveredUpdates.addAll( updates );
break;
default:
throw new UnsupportedOperationException( );
}
}
@Override
public void remove( Iterable<Long> nodeIds ) throws IOException
{
for ( Long nodeId : nodeIds )
{
recoveredNodes.add( nodeId );
}
}
};
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_api_index_IndexRecoveryIT.java
|
1,024
|
/**
 * Restart/lifecycle tests for schema indexes. A controlled index provider is
 * used to steer population timing and the initial index state across database
 * restarts.
 */
public class IndexRestartIt
{
    /* This is somewhat difficult to test since dropping an index while it's populating forces it to be cancelled
     * first (and also awaiting cancellation to complete). So this is a best-effort to have the timing as close
     * as possible. If this proves to be flaky, remove it right away.
     */
    @Test
    public void shouldBeAbleToDropIndexWhileItIsPopulating() throws Exception
    {
        // GIVEN
        startDb();
        DoubleLatch populationCompletionLatch = provider.installPopulationJobCompletionLatch();
        IndexDefinition index = createIndex();
        populationCompletionLatch.awaitStart(); // await population job to start

        // WHEN
        dropIndex( index, populationCompletionLatch );

        // THEN
        assertThat( getIndexes( db, myLabel ), inTx( db, hasSize( 0 ) ) );
        try
        {
            getIndexState( db, index );
            fail( "This index should have been deleted" );
        }
        catch ( NotFoundException e )
        {
            assertThat( e.getMessage(), CoreMatchers.containsString( myLabel.name() ) );
        }
    }

    @Test
    public void shouldHandleRestartOfOnlineIndex() throws Exception
    {
        // Given
        startDb();
        createIndex();
        provider.awaitFullyPopulated();

        // And Given
        stopDb();
        provider.setInitialIndexState( ONLINE );

        // When
        startDb();

        // Then: an online index is re-opened for writing, not repopulated.
        assertThat( getIndexes( db, myLabel ), inTx( db, haveState( db, Schema.IndexState.ONLINE ) ) );
        assertEquals( 1, provider.populatorCallCount.get() );
        assertEquals( 2, provider.writerCallCount.get() );
    }

    @Test
    public void shouldHandleRestartIndexThatHasNotComeOnlineYet() throws Exception
    {
        // Given
        startDb();
        createIndex();

        // And Given
        stopDb();
        provider.setInitialIndexState( POPULATING );

        // When
        startDb();

        // Then: population is retried on restart rather than marked FAILED.
        assertThat( getIndexes( db, myLabel ), inTx( db, not( haveState( db, Schema.IndexState.FAILED ) ) ) );
        assertEquals( 2, provider.populatorCallCount.get() );
    }

    private GraphDatabaseAPI db;
    @Rule public EphemeralFileSystemRule fs = new EphemeralFileSystemRule();
    private TestGraphDatabaseFactory factory;
    private final ControlledPopulationSchemaIndexProvider provider = new ControlledPopulationSchemaIndexProvider();
    private final Label myLabel = label( "MyLabel" );

    /** (Re)starts the database, shutting down any previous instance first. */
    private void startDb()
    {
        if ( db != null )
        {
            db.shutdown();
        }
        db = (GraphDatabaseAPI) factory.newImpermanentDatabase();
    }

    private void stopDb()
    {
        if ( db != null )
        {
            db.shutdown();
        }
    }

    @Before
    public void before() throws Exception
    {
        factory = new TestGraphDatabaseFactory();
        factory.setFileSystem( fs.get() );
        factory.addKernelExtensions( Arrays.<KernelExtensionFactory<?>>asList(
                singleInstanceSchemaIndexProviderFactory( "test", provider ) ) );
    }

    @After
    public void after() throws Exception
    {
        db.shutdown();
    }

    /**
     * Creates an index on {@link #myLabel} in its own transaction.
     * FIX: the transaction is now closed in a finally block, matching
     * {@link #dropIndex}; previously {@code tx.finish()} was skipped if
     * {@code create()} threw, leaking the transaction.
     */
    private IndexDefinition createIndex()
    {
        Transaction tx = db.beginTx();
        try
        {
            IndexDefinition index = db.schema().indexFor( myLabel ).on( "number_of_bananas_owned" ).create();
            tx.success();
            return index;
        }
        finally
        {
            tx.finish();
        }
    }

    private void dropIndex( IndexDefinition index, DoubleLatch populationCompletionLatch )
    {
        Transaction tx = db.beginTx();
        try
        {
            index.drop();
            // Let the (cancelled) population job complete so the drop can finish.
            populationCompletionLatch.finish();
            tx.success();
        }
        finally
        {
            tx.finish();
        }
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_api_index_IndexRestartIt.java
|
1,025
|
return new Answer<ResourceIterator<File>>(){
@Override
public ResourceIterator<File> answer( InvocationOnMock invocationOnMock ) throws Throwable
{
return asResourceIterator(iterator( theFile ));
}
};
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_api_index_IndexingServiceTest.java
|
1,026
|
{
@Override
public PropertyRecord getBefore()
{
return before;
}
@Override
public PropertyRecord getAfter()
{
return after;
}
};
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_api_index_PropertyPhysicalToLogicalConverterTest.java
|
1,027
|
public class PropertyPhysicalToLogicalConverterTest
{
@Test
public void shouldConvertInlinedAddedProperty() throws Exception
{
// GIVEN
long key = 10;
int value = 12345;
PropertyRecord before = propertyRecord();
PropertyRecord after = propertyRecord( property( key, value ) );
// WHEN
NodePropertyUpdate update = single( convert( none, none, change( before, after ) ) );
// THEN
assertEquals( UpdateMode.ADDED, update.getUpdateMode() );
}
@Test
public void shouldConvertInlinedChangedProperty() throws Exception
{
// GIVEN
long key = 10;
int valueBefore = 12341, valueAfter = 738;
PropertyRecord before = propertyRecord( property( key, valueBefore ) );
PropertyRecord after = propertyRecord( property( key, valueAfter ) );
// WHEN
NodePropertyUpdate update = single( convert( none, none, change( before, after ) ) );
// THEN
assertEquals( UpdateMode.CHANGED, update.getUpdateMode() );
}
@Test
public void shouldIgnoreInlinedUnchangedProperty() throws Exception
{
// GIVEN
long key = 10;
int value = 12341;
PropertyRecord before = propertyRecord( property( key, value ) );
PropertyRecord after = propertyRecord( property( key, value ) );
// WHEN
assertEquals( 0, count( convert( none, none, change( before, after ) ) ) );
}
@Test
public void shouldConvertInlinedRemovedProperty() throws Exception
{
// GIVEN
long key = 10;
int value = 12341;
PropertyRecord before = propertyRecord( property( key, value ) );
PropertyRecord after = propertyRecord();
// WHEN
NodePropertyUpdate update = single( convert( none, none, change( before, after ) ) );
// THEN
assertEquals( UpdateMode.REMOVED, update.getUpdateMode() );
}
@Test
public void shouldConvertDynamicAddedProperty() throws Exception
{
// GIVEN
long key = 10;
PropertyRecord before = propertyRecord();
PropertyRecord after = propertyRecord( property( key, longString ) );
// WHEN
NodePropertyUpdate update = single( convert( none, none, change( before, after ) ) );
// THEN
assertEquals( UpdateMode.ADDED, update.getUpdateMode() );
}
@Test
public void shouldConvertDynamicChangedProperty() throws Exception
{
// GIVEN
long key = 10;
PropertyRecord before = propertyRecord( property( key, longString ) );
PropertyRecord after = propertyRecord( property( key, longerString ) );
// WHEN
NodePropertyUpdate update = single( convert( none, none, change( before, after ) ) );
// THEN
assertEquals( UpdateMode.CHANGED, update.getUpdateMode() );
}
@Test
public void shouldConvertDynamicInlinedRemovedProperty() throws Exception
{
// GIVEN
long key = 10;
PropertyRecord before = propertyRecord( property( key, longString ) );
PropertyRecord after = propertyRecord();
// WHEN
NodePropertyUpdate update = single( convert( none, none, change( before, after ) ) );
// THEN
assertEquals( UpdateMode.REMOVED, update.getUpdateMode() );
}
@Test
public void shouldTreatPropertyThatMovedToAnotherRecordAsChange() throws Exception
{
// GIVEN
long key = 12;
String oldValue = "value1";
String newValue = "value two";
PropertyRecordChange movedFrom = change(
propertyRecord( property( key, oldValue ) ),
propertyRecord() );
PropertyRecordChange movedTo = change(
propertyRecord(),
propertyRecord( property( key, newValue ) ) );
// WHEN
NodePropertyUpdate update = single( convert( none, none, movedFrom, movedTo ) );
// THEN
assertEquals( UpdateMode.CHANGED, update.getUpdateMode() );
assertEquals( oldValue, update.getValueBefore() );
assertEquals( newValue, update.getValueAfter() );
}
private PropertyRecord propertyRecord( PropertyBlock... propertyBlocks )
{
PropertyRecord record = new PropertyRecord( 0 );
if ( propertyBlocks != null )
{
record.setInUse( true );
for ( PropertyBlock propertyBlock : propertyBlocks )
{
record.addPropertyBlock( propertyBlock );
}
}
return record;
}
private PropertyBlock property( long key, Object value )
{
PropertyBlock block = new PropertyBlock();
store.encodeValue( block, (int) key, value );
return block;
}
@Rule public EphemeralFileSystemRule fs = new EphemeralFileSystemRule();
private PropertyStore store;
private final String longString = "my super looooooooooooooooooooooooooooooooooooooong striiiiiiiiiiiiiiiiiiiiiiing";
private final String longerString = "my super looooooooooooooooooooooooooooooooooooooong striiiiiiiiiiiiiiiiiiiiiiingdd";
private PropertyPhysicalToLogicalConverter converter;
private final long[] none = new long[0];
@Before
public void before() throws Exception
{
StoreFactory storeFactory = new StoreFactory( new Config(), new DefaultIdGeneratorFactory(),
new DefaultWindowPoolFactory(), fs.get(), StringLogger.DEV_NULL, new DefaultTxHook() );
File storeFile = new File( "propertystore" );
storeFactory.createPropertyStore( storeFile );
store = storeFactory.newPropertyStore( storeFile );
converter = new PropertyPhysicalToLogicalConverter( store );
}
@After
public void after() throws Exception
{
store.close();
}
private Iterable<NodePropertyUpdate> convert( long[] labelsBefore,
long[] labelsAfter, PropertyRecordChange change )
{
return convert( labelsBefore, labelsAfter, new PropertyRecordChange[] {change} );
}
private Iterable<NodePropertyUpdate> convert( long[] labelsBefore,
long[] labelsAfter, PropertyRecordChange... changes )
{
Collection<NodePropertyUpdate> updates = new ArrayList<>();
converter.apply( updates, Iterables.<PropertyRecordChange,PropertyRecordChange>iterable( changes ),
labelsBefore, labelsAfter );
return updates;
}
private PropertyRecordChange change( final PropertyRecord before, final PropertyRecord after )
{
return new PropertyRecordChange()
{
@Override
public PropertyRecord getBefore()
{
return before;
}
@Override
public PropertyRecord getAfter()
{
return after;
}
};
}
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_api_index_PropertyPhysicalToLogicalConverterTest.java
|
1,028
|
/**
 * Converts physical {@link PropertyRecord} changes for a single node into
 * logical {@link NodePropertyUpdate}s (one add/change/remove per property key).
 */
public class PropertyPhysicalToLogicalConverter
{
private final PropertyStore propertyStore;
public PropertyPhysicalToLogicalConverter( PropertyStore propertyStore )
{
this.propertyStore = propertyStore;
}
/**
 * Converts physical changes to PropertyRecords for a node into logical updates,
 * adding them to the supplied target collection.
 *
 * @param target collection the resulting logical updates are added to.
 * @param changes before/after states of the affected property records for one node.
 * @param labelsBefore labels that the node had before the change.
 * @param labelsAfter labels that the node has after the change.
 */
public void apply( Collection<NodePropertyUpdate> target, Iterable<PropertyRecordChange> changes,
long[] labelsBefore, long[] labelsAfter )
{
// Index every property block by its key, for the before and after states separately.
Map<Integer, PropertyBlock> beforeMap = new HashMap<>(), afterMap = new HashMap<>();
long nodeId = mapBlocks( changes, beforeMap, afterMap );
for ( int key : union( beforeMap.keySet(), afterMap.keySet() ) )
{
PropertyBlock beforeBlock = beforeMap.get( key );
PropertyBlock afterBlock = afterMap.get( key );
NodePropertyUpdate update = null;
if ( beforeBlock != null && afterBlock != null )
{
// CHANGE: key present on both sides; emit only if the value actually differs.
if ( !beforeBlock.hasSameContentsAs( afterBlock ) )
{
Object beforeVal = valueOf( beforeBlock );
Object afterVal = valueOf( afterBlock );
update = NodePropertyUpdate.change( nodeId, key, beforeVal, labelsBefore, afterVal, labelsAfter );
}
}
else
{
// ADD/REMOVE: key present on exactly one side.
if ( afterBlock != null )
{
update = NodePropertyUpdate.add( nodeId, key, valueOf( afterBlock ), labelsAfter );
}
else if ( beforeBlock != null )
{
update = NodePropertyUpdate.remove( nodeId, key, valueOf( beforeBlock ), labelsBefore );
}
else
{
throw new IllegalStateException( "Weird, an update with no property value for before or after" );
}
}
if ( update != null)
{
target.add( update );
}
}
}
// Returns the union of both key sets without mutating either input set.
private <T> Set<T> union( Set<T> first, Set<T> other )
{
Set<T> union = new HashSet<>( first );
union.addAll( other );
return union;
}
/**
 * Fills the key-to-block maps from the record changes and returns the node id
 * shared by all records (asserting that they all belong to the same node).
 */
private long mapBlocks( Iterable<PropertyRecordChange> changes,
Map<Integer,PropertyBlock> beforeMap, Map<Integer,PropertyBlock> afterMap )
{
long nodeId = -1;
for ( PropertyRecordChange change : changes )
{
nodeId = equalCheck( change.getBefore().getNodeId(), nodeId );
nodeId = equalCheck( change.getAfter().getNodeId(), nodeId );
mapBlocks( change.getBefore(), beforeMap );
mapBlocks( change.getAfter(), afterMap );
}
return nodeId;
}
// Checks (only when assertions are enabled) that all records refer to the same node.
private long equalCheck( long nodeId, long expectedNodeId )
{
assert expectedNodeId == -1 || nodeId == expectedNodeId : "Node id differs expected " + expectedNodeId + ", but was " + nodeId;
return nodeId;
}
private void mapBlocks( PropertyRecord record, Map<Integer, PropertyBlock> blocks )
{
for ( PropertyBlock block : record.getPropertyBlocks() )
{
blocks.put( block.getKeyIndexId(), block );
}
}
// Resolves a block's value, loading dynamic records from the property store if needed.
private Object valueOf( PropertyBlock block )
{
if ( block == null )
{
return null;
}
return block.getType().getValue( block, propertyStore );
}
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_api_index_PropertyPhysicalToLogicalConverter.java
|
1,029
|
{
@Override
public void process( NodePropertyUpdate update ) throws IOException, IndexEntryConflictException
{
switch( mode )
{
case ONLINE:
job.update( update );
break;
case RECOVERY:
throw new UnsupportedOperationException( "Recovered updates shouldn't reach this place" );
default:
throw new ThisShouldNotHappenError( "Stefan", "Unsupported IndexUpdateMode" );
}
}
@Override
public void close() throws IOException, IndexEntryConflictException
{
}
@Override
public void remove( Iterable<Long> nodeIds )
{
throw new UnsupportedOperationException( "Should not remove() from populating index." );
}
};
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_api_index_PopulatingIndexProxy.java
|
1,030
|
public class PopulatingIndexProxy implements IndexProxy
{
private final JobScheduler scheduler;
private final IndexDescriptor descriptor;
private final SchemaIndexProvider.Descriptor providerDescriptor;
private final IndexPopulationJob job;
public PopulatingIndexProxy( JobScheduler scheduler,
final IndexDescriptor descriptor,
final SchemaIndexProvider.Descriptor providerDescriptor,
final FailedIndexProxyFactory failureDelegateFactory,
final IndexPopulator writer,
FlippableIndexProxy flipper,
IndexStoreView storeView, final String indexUserDescription,
UpdateableSchemaState updateableSchemaState, Logging logging )
{
this.scheduler = scheduler;
this.descriptor = descriptor;
this.providerDescriptor = providerDescriptor;
this.job = new IndexPopulationJob( descriptor, providerDescriptor,
indexUserDescription, failureDelegateFactory, writer, flipper, storeView,
updateableSchemaState, logging );
}
@Override
public void start()
{
scheduler.schedule( indexPopulation, job );
}
@Override
public IndexUpdater newUpdater( final IndexUpdateMode mode )
{
return new IndexUpdater()
{
@Override
public void process( NodePropertyUpdate update ) throws IOException, IndexEntryConflictException
{
switch( mode )
{
case ONLINE:
job.update( update );
break;
case RECOVERY:
throw new UnsupportedOperationException( "Recovered updates shouldn't reach this place" );
default:
throw new ThisShouldNotHappenError( "Stefan", "Unsupported IndexUpdateMode" );
}
}
@Override
public void close() throws IOException, IndexEntryConflictException
{
}
@Override
public void remove( Iterable<Long> nodeIds )
{
throw new UnsupportedOperationException( "Should not remove() from populating index." );
}
};
}
@Override
public Future<Void> drop()
{
return job.cancel();
}
@Override
public IndexDescriptor getDescriptor()
{
return descriptor;
}
@Override
public SchemaIndexProvider.Descriptor getProviderDescriptor()
{
return providerDescriptor;
}
@Override
public InternalIndexState getState()
{
return InternalIndexState.POPULATING;
}
@Override
public void force()
{
// Ignored... I think
}
@Override
public Future<Void> close()
{
return job.cancel();
}
@Override
public IndexReader newReader() throws IndexNotFoundKernelException
{
throw new IndexNotFoundKernelException( "Index is still populating: " + job );
}
@Override
public boolean awaitStoreScanCompleted() throws IndexPopulationFailedKernelException, InterruptedException
{
job.awaitCompletion();
return true;
}
@Override
public void activate() throws IndexActivationFailedKernelException
{
throw new IllegalStateException( "Cannot activate index while it is still populating: " + job );
}
@Override
public void validate()
{
throw new IllegalStateException( "Cannot validate index while it is still populating: " + job );
}
@Override
public ResourceIterator<File> snapshotFiles()
{
return emptyIterator();
}
@Override
public IndexPopulationFailure getPopulationFailure() throws IllegalStateException
{
throw new IllegalStateException( this + " is POPULATING" );
}
@Override
public String toString()
{
return getClass().getSimpleName() + "[job:" + job + "]";
}
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_api_index_PopulatingIndexProxy.java
|
1,031
|
/**
 * {@link IndexProxy} for an index in the ONLINE state: population is complete
 * and all operations delegate straight to the underlying {@link IndexAccessor}.
 */
public class OnlineIndexProxy implements IndexProxy
{
private final IndexDescriptor descriptor;
final IndexAccessor accessor;
private final SchemaIndexProvider.Descriptor providerDescriptor;
public OnlineIndexProxy( IndexDescriptor descriptor, SchemaIndexProvider.Descriptor providerDescriptor,
IndexAccessor accessor )
{
this.descriptor = descriptor;
this.providerDescriptor = providerDescriptor;
this.accessor = accessor;
}
@Override
public void start()
{
}
@Override
public IndexUpdater newUpdater( final IndexUpdateMode mode )
{
return accessor.newUpdater( mode );
}
@Override
public Future<Void> drop() throws IOException
{
// Dropping an online index is synchronous; return an already-completed future.
accessor.drop();
return VOID;
}
@Override
public IndexDescriptor getDescriptor()
{
return descriptor;
}
@Override
public SchemaIndexProvider.Descriptor getProviderDescriptor()
{
return providerDescriptor;
}
@Override
public InternalIndexState getState()
{
return InternalIndexState.ONLINE;
}
@Override
public void force() throws IOException
{
accessor.force();
}
@Override
public Future<Void> close() throws IOException
{
// Closing is synchronous as well; return an already-completed future.
accessor.close();
return VOID;
}
@Override
public IndexReader newReader()
{
return accessor.newReader();
}
@Override
public boolean awaitStoreScanCompleted() throws IndexPopulationFailedKernelException, InterruptedException
{
return false; // the store scan is already completed
}
@Override
public void activate()
{
// ok, already active
}
@Override
public void validate()
{
// ok, it's online so it's valid
}
@Override
public IndexPopulationFailure getPopulationFailure() throws IllegalStateException
{
// An online index has no population failure by definition.
throw new IllegalStateException( this + " is ONLINE" );
}
@Override
public ResourceIterator<File> snapshotFiles() throws IOException
{
return accessor.snapshotFiles();
}
@Override
public String toString()
{
return getClass().getSimpleName() + "[accessor:" + accessor + ", descriptor:" + descriptor + "]";
}
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_api_index_OnlineIndexProxy.java
|
1,032
|
{
@Override
public void run()
{
for ( NodePropertyUpdate update : updates )
{
visitor.visit( update );
}
}
@Override
public void stop()
{
// throw new UnsupportedOperationException( "not implemented" );
}
};
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_api_index_IndexingServiceTest.java
|
1,033
|
/**
 * Test fixture: a canned array of {@link NodePropertyUpdate}s that a mocked
 * {@link IndexStoreView} replays into the visitor passed to
 * {@code visitNodesWithPropertyAndLabel(..)}, simulating a store scan.
 */
private static class DataUpdates implements Answer<StoreScan<RuntimeException>>, Iterable<NodePropertyUpdate>
{
    private final NodePropertyUpdate[] updates;

    DataUpdates( NodePropertyUpdate[] updates )
    {
        this.updates = updates;
    }

    /** Stubs the given store view mock so that scanning it replays these updates. */
    void getsProcessedByStoreScanFrom( IndexStoreView mock )
    {
        when( mock.visitNodesWithPropertyAndLabel( any( IndexDescriptor.class ), visitor( any( Visitor.class ) ) ) )
                .thenAnswer( this );
    }

    @Override
    public StoreScan<RuntimeException> answer( InvocationOnMock invocation ) throws Throwable
    {
        // Second argument of visitNodesWithPropertyAndLabel(..) is the visitor to feed.
        final Visitor<NodePropertyUpdate, RuntimeException> target = visitor( invocation.getArguments()[1] );
        return new StoreScan<RuntimeException>()
        {
            @Override
            public void run()
            {
                for ( int i = 0; i < updates.length; i++ )
                {
                    target.visit( updates[i] );
                }
            }

            @Override
            public void stop()
            {
                // no-op: the canned scan cannot be interrupted
            }
        };
    }

    /** Unsafe cast helper: Mockito matchers hand back plain Objects. */
    @SuppressWarnings({ "unchecked", "rawtypes" })
    private static Visitor<NodePropertyUpdate, RuntimeException> visitor( Object v )
    {
        return (Visitor) v;
    }

    @Override
    public Iterator<NodePropertyUpdate> iterator()
    {
        return new ArrayIterator<>( updates );
    }

    @Override
    public String toString()
    {
        return Arrays.toString( updates );
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_api_index_IndexingServiceTest.java
|
1,034
|
return new Answer() {
@Override
public Object answer(InvocationOnMock invocationOnMock) throws Throwable {
latch.await();
return null;
}
};
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_api_index_IndexingServiceTest.java
|
1,035
|
/**
 * Lazily creates and caches one {@link IndexUpdater} per index touched during an
 * update round, and closes them all together on {@link #close()}. Failures while
 * closing individual updaters are collected and re-thrown as a single
 * {@link MultipleUnderlyingStorageExceptions} after every updater has been closed.
 */
public class IndexUpdaterMap implements AutoCloseable, Iterable<IndexUpdater>
{
    private final IndexUpdateMode indexUpdateMode;
    private final IndexMap indexMap;
    private final Map<IndexDescriptor, IndexUpdater> updaterMap;

    public IndexUpdaterMap( IndexUpdateMode indexUpdateMode, IndexMap indexMap )
    {
        this.indexUpdateMode = indexUpdateMode;
        this.indexMap = indexMap;
        this.updaterMap = new HashMap<>();
    }

    /**
     * @return a cached or freshly created updater for the given index, or
     *         {@code null} when the index map holds no proxy for the descriptor.
     */
    public IndexUpdater getUpdater( IndexDescriptor descriptor )
    {
        IndexUpdater cached = updaterMap.get( descriptor );
        if ( null != cached )
        {
            return cached;
        }
        IndexProxy proxy = indexMap.getIndexProxy( descriptor );
        if ( null == proxy )
        {
            return null;
        }
        IndexUpdater created = proxy.newUpdater( indexUpdateMode );
        updaterMap.put( descriptor, created );
        return created;
    }

    /**
     * Closes every cached updater, continuing past individual failures, then clears
     * the cache. All failures are aggregated into one exception thrown at the end.
     */
    @Override
    public void close() throws UnderlyingStorageException
    {
        Set<Pair<IndexDescriptor, UnderlyingStorageException>> failures = null;
        for ( Map.Entry<IndexDescriptor, IndexUpdater> entry : updaterMap.entrySet() )
        {
            try
            {
                entry.getValue().close();
            }
            catch ( IOException | IndexEntryConflictException e )
            {
                if ( null == failures )
                {
                    failures = new HashSet<>(); // allocated lazily: failures are rare
                }
                failures.add( Pair.of( entry.getKey(), new UnderlyingStorageException( e ) ) );
            }
        }
        clear();
        if ( null != failures )
        {
            throw new MultipleUnderlyingStorageExceptions( failures );
        }
    }

    /** Drops all cached updaters without closing them. */
    public void clear()
    {
        updaterMap.clear();
    }

    public boolean isEmpty()
    {
        return updaterMap.isEmpty();
    }

    public int size()
    {
        return updaterMap.size();
    }

    /**
     * Iterates lazily over an updater for every index known to the index map,
     * creating (and caching) each updater on demand.
     */
    @Override
    public Iterator<IndexUpdater> iterator()
    {
        final Iterator<IndexDescriptor> descriptors = indexMap.descriptors();
        return new PrefetchingIterator<IndexUpdater>()
        {
            @Override
            protected IndexUpdater fetchNextOrNull()
            {
                return descriptors.hasNext() ? getUpdater( descriptors.next() ) : null;
            }
        };
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_api_index_IndexUpdaterMap.java
|
1,036
|
/**
 * Unit tests for {@code IndexingService}: index lifecycle (create/populate/flip to
 * online), routing of updates during and after population, state logging on
 * init/start, store-file snapshotting, and recovery-time behaviour. All
 * collaborators (provider, populator, accessor, store view) are Mockito mocks, and
 * several tests pin exact interaction order via {@code InOrder}.
 */
public class IndexingServiceTest
{
    @Rule
    public final LifeRule life = new LifeRule();

    // Arbitrary token ids and shared mocks, reset per test in setUp().
    private int labelId;
    private int propertyKeyId;
    private IndexPopulator populator;
    private SchemaIndexProvider indexProvider;
    private IndexUpdater updater;
    private IndexAccessor accessor;
    private IndexStoreView storeView;

    @Before
    public void setUp()
    {
        labelId = 7;
        propertyKeyId = 15;
        populator = mock( IndexPopulator.class );
        updater = mock( IndexUpdater.class );
        indexProvider = mock( SchemaIndexProvider.class );
        accessor = mock( IndexAccessor.class );
        storeView = mock( IndexStoreView.class );
    }

    /** After population finishes, updates must flow through the online accessor's updater. */
    @Test
    public void shouldBringIndexOnlineAndFlipOverToIndexAccessor() throws Exception
    {
        // given
        when( accessor.newUpdater( any( IndexUpdateMode.class ) ) ).thenReturn(updater);

        IndexingService indexingService = newIndexingServiceWithMockedDependencies( populator, accessor, withData() );

        life.start();
        indexingService.startIndexes();

        // when
        indexingService.createIndex( indexRule( 0, labelId, propertyKeyId, PROVIDER_DESCRIPTOR ) );
        IndexProxy proxy = indexingService.getProxyForRule( 0 );

        // populator.close( true ) marks successful population; wait for the async job
        verify( populator, timeout( 1000 ) ).close( true );

        try (IndexUpdater updater = proxy.newUpdater( IndexUpdateMode.ONLINE ))
        {
            updater.process( add( 10, "foo" ) );
        }

        // then
        assertEquals( InternalIndexState.ONLINE, proxy.getState() );
        InOrder order = inOrder( populator, accessor, updater);
        order.verify( populator ).create();
        order.verify( populator ).close( true );
        order.verify( accessor ).newUpdater( IndexUpdateMode.ONLINE );
        order.verify( updater ).process( add( 10, "foo" ) );
        order.verify( updater ).close();
    }

    /** Creating the same index rule twice must be a no-op, not an error. */
    @Test
    public void indexCreationShouldBeIdempotent() throws Exception
    {
        // given
        when( accessor.newUpdater( any( IndexUpdateMode.class ) ) ).thenReturn(updater);

        IndexingService indexingService = newIndexingServiceWithMockedDependencies( populator, accessor, withData() );

        life.start();

        // when
        indexingService.createIndex( IndexRule.indexRule( 0, labelId, propertyKeyId, PROVIDER_DESCRIPTOR ) );
        indexingService.createIndex( IndexRule.indexRule( 0, labelId, propertyKeyId, PROVIDER_DESCRIPTOR ) );

        // We are asserting that the second call to createIndex does not throw an exception.
    }

    /** Updates arriving while a population scan is in flight must reach the populating updater. */
    @Test
    public void shouldDeliverUpdatesThatOccurDuringPopulationToPopulator() throws Exception
    {
        // given
        when( populator.newPopulatingUpdater( storeView ) ).thenReturn( updater );

        CountDownLatch latch = new CountDownLatch( 1 );
        // afterAwaiting: presumably a static-imported test helper returning an Answer that
        // blocks until the latch opens, keeping the index in POPULATING — TODO confirm.
        doAnswer( afterAwaiting( latch ) ).when( populator ).add( anyLong(), any() );

        IndexingService indexingService = newIndexingServiceWithMockedDependencies( populator, accessor, withData(
                add( 1, "value1" )
        ) );

        life.start();
        indexingService.startIndexes();

        // when
        indexingService.createIndex( indexRule( 0, labelId, propertyKeyId, PROVIDER_DESCRIPTOR ) );
        IndexProxy proxy = indexingService.getProxyForRule( 0 );
        assertEquals( InternalIndexState.POPULATING, proxy.getState() );

        try (IndexUpdater updater = proxy.newUpdater( IndexUpdateMode.ONLINE ))
        {
            updater.process( add( 2, "value2" ) );
        }

        latch.countDown();

        verify( populator, timeout( 1000 ) ).close( true );

        // then
        assertEquals( InternalIndexState.ONLINE, proxy.getState() );
        InOrder order = inOrder( populator, accessor, updater);
        order.verify( populator ).create();
        order.verify( populator ).add( 1, "value1" );

        // this is invoked from indexAllNodes(),
        // empty because the id we added (2) is bigger than the one we indexed (1)
        order.verify( populator ).newPopulatingUpdater( storeView );
        order.verify( updater ).close();
        order.verify( populator ).verifyDeferredConstraints( storeView );
        order.verify( populator ).newPopulatingUpdater( storeView );
        order.verify( updater ).process( add( 2, "value2" ) );
        order.verify( updater ).close();
        order.verify( populator ).close( true );
        verifyNoMoreInteractions(updater);
        verifyNoMoreInteractions( populator );

        verifyZeroInteractions( accessor );
    }

    /** A constraint index stays POPULATING externally until explicitly activated. */
    @Test
    public void shouldStillReportInternalIndexStateAsPopulatingWhenConstraintIndexIsDonePopulating() throws Exception
    {
        // given
        when( accessor.newUpdater( any( IndexUpdateMode.class ) ) ).thenReturn(updater);

        IndexingService indexingService = newIndexingServiceWithMockedDependencies( populator, accessor, withData() );

        life.start();
        indexingService.startIndexes();

        // when
        indexingService.createIndex( IndexRule.constraintIndexRule( 0, labelId, propertyKeyId, PROVIDER_DESCRIPTOR,
                null ) );
        IndexProxy proxy = indexingService.getProxyForRule( 0 );

        verify( populator, timeout( 1000 ) ).close( true );

        try (IndexUpdater updater = proxy.newUpdater( IndexUpdateMode.ONLINE ))
        {
            updater.process( add( 10, "foo" ) );
        }

        // then
        assertEquals( InternalIndexState.POPULATING, proxy.getState() );
        InOrder order = inOrder( populator, accessor, updater);
        order.verify( populator ).create();
        order.verify( populator ).close( true );
        order.verify( accessor ).newUpdater( IndexUpdateMode.ONLINE );
        order.verify(updater).process( add( 10, "foo") );
        order.verify(updater).close();
    }

    /** activateIndex(..) flips a finished constraint index from POPULATING to ONLINE. */
    @Test
    public void shouldBringConstraintIndexOnlineWhenExplicitlyToldTo() throws Exception
    {
        // given
        IndexingService indexingService = newIndexingServiceWithMockedDependencies( populator, accessor, withData() );

        life.start();
        indexingService.startIndexes();

        // when
        indexingService.createIndex( IndexRule.constraintIndexRule( 0, labelId, propertyKeyId, PROVIDER_DESCRIPTOR,
                null ) );
        IndexProxy proxy = indexingService.getProxyForRule( 0 );

        indexingService.activateIndex( 0 );

        // then
        assertEquals( ONLINE, proxy.getState() );
        InOrder order = inOrder( populator, accessor );
        order.verify( populator ).create();
        order.verify( populator ).close( true );
    }

    /** initIndexes(..) must log each index's initial state with human-readable token names. */
    @Test
    public void shouldLogIndexStateOnInit() throws Exception
    {
        // given
        TestLogger logger = new TestLogger();
        SchemaIndexProvider provider = mock( SchemaIndexProvider.class );
        when( provider.getProviderDescriptor() ).thenReturn( PROVIDER_DESCRIPTOR );
        SchemaIndexProviderMap providerMap = new DefaultSchemaIndexProviderMap( provider );
        TokenNameLookup mockLookup = mock( TokenNameLookup.class );

        IndexingService indexingService = new IndexingService(
                mock( JobScheduler.class ),
                providerMap,
                mock( IndexStoreView.class ),
                mockLookup,
                mock( UpdateableSchemaState.class ),
                mockLogging( logger ), IndexingService.NO_MONITOR );

        IndexRule onlineIndex = indexRule( 1, 1, 1, PROVIDER_DESCRIPTOR );
        IndexRule populatingIndex = indexRule( 2, 1, 2, PROVIDER_DESCRIPTOR );
        IndexRule failedIndex = indexRule( 3, 2, 2, PROVIDER_DESCRIPTOR );

        when( provider.getInitialState( onlineIndex.getId() ) ).thenReturn( ONLINE );
        when( provider.getInitialState( populatingIndex.getId() ) ).thenReturn( InternalIndexState.POPULATING );
        when( provider.getInitialState( failedIndex.getId() ) ).thenReturn( InternalIndexState.FAILED );

        when(mockLookup.labelGetName( 1 )).thenReturn( "LabelOne" );
        when(mockLookup.labelGetName( 2 )).thenReturn( "LabelTwo" );
        when(mockLookup.propertyKeyGetName( 1 )).thenReturn( "propertyOne" );
        when(mockLookup.propertyKeyGetName( 2 )).thenReturn( "propertyTwo" );

        // when
        indexingService.initIndexes( asList( onlineIndex, populatingIndex, failedIndex ).iterator() );

        // then
        logger.assertExactly(
                info( "IndexingService.initIndexes: index on :LabelOne(propertyOne) is ONLINE" ),
                info( "IndexingService.initIndexes: index on :LabelOne(propertyTwo) is POPULATING" ),
                info( "IndexingService.initIndexes: index on :LabelTwo(propertyTwo) is FAILED" )
        );
    }

    /** startIndexes() must log each index's state, and fetch the failure message of FAILED indexes. */
    @Test
    public void shouldLogIndexStateOnStart() throws Exception
    {
        // given
        TestLogger logger = new TestLogger();
        SchemaIndexProvider provider = mock( SchemaIndexProvider.class );
        when( provider.getProviderDescriptor() ).thenReturn( PROVIDER_DESCRIPTOR );
        SchemaIndexProviderMap providerMap = new DefaultSchemaIndexProviderMap( provider );
        TokenNameLookup mockLookup = mock( TokenNameLookup.class );

        IndexingService indexingService = new IndexingService(
                mock( JobScheduler.class ),
                providerMap,
                mock( IndexStoreView.class ),
                mockLookup,
                mock( UpdateableSchemaState.class ),
                mockLogging( logger ), IndexingService.NO_MONITOR );

        IndexRule onlineIndex = indexRule( 1, 1, 1, PROVIDER_DESCRIPTOR );
        IndexRule populatingIndex = indexRule( 2, 1, 2, PROVIDER_DESCRIPTOR );
        IndexRule failedIndex = indexRule( 3, 2, 2, PROVIDER_DESCRIPTOR );

        when( provider.getInitialState( onlineIndex.getId() ) ).thenReturn( ONLINE );
        when( provider.getInitialState( populatingIndex.getId() ) ).thenReturn( InternalIndexState.POPULATING );
        when( provider.getInitialState( failedIndex.getId() ) ).thenReturn( InternalIndexState.FAILED );

        indexingService.initIndexes( asList( onlineIndex, populatingIndex, failedIndex ).iterator() );

        when(mockLookup.labelGetName( 1 )).thenReturn( "LabelOne" );
        when(mockLookup.labelGetName( 2 )).thenReturn( "LabelTwo" );
        when(mockLookup.propertyKeyGetName( 1 )).thenReturn( "propertyOne" );
        when(mockLookup.propertyKeyGetName( 2 )).thenReturn( "propertyTwo" );

        // drop the init-time log lines so only start-time logging is asserted below
        logger.clear();

        // when
        indexingService.startIndexes();

        // then
        verify( provider ).getPopulationFailure( 3 );
        logger.assertAtLeastOnce(
                info( "IndexingService.start: index on :LabelOne(propertyOne) is ONLINE" ) );
        logger.assertAtLeastOnce(
                info( "IndexingService.start: index on :LabelOne(propertyTwo) is POPULATING" ) );
        logger.assertAtLeastOnce(
                info( "IndexingService.start: index on :LabelTwo(propertyTwo) is FAILED" ) );
    }

    /** Initializing with an index from an unknown provider must fail fast with a clear message. */
    @Test
    public void shouldFailToStartIfMissingIndexProvider() throws Exception
    {
        // GIVEN an indexing service that has a schema index provider X
        IndexingService indexing = newIndexingServiceWithMockedDependencies(
                mock( IndexPopulator.class ), mock( IndexAccessor.class ),
                new DataUpdates( new NodePropertyUpdate[0] ) );
        String otherProviderKey = "something-completely-different";
        SchemaIndexProvider.Descriptor otherDescriptor = new SchemaIndexProvider.Descriptor(
                otherProviderKey, "no-version" );
        IndexRule rule = indexRule( 1, 2, 3, otherDescriptor );

        // WHEN trying to start up and initialize it with an index from provider Y
        try
        {
            indexing.initIndexes( iterator( rule ) );
            fail( "initIndexes with mismatching index provider should fail" );
        }
        catch ( IllegalArgumentException e )
        {   // THEN starting up should fail
            assertThat( e.getMessage(), containsString( "existing index" ) );
            assertThat( e.getMessage(), containsString( otherProviderKey ) );
        }
    }

    /** snapshotStoreFiles() must include the files of every ONLINE index. */
    @Test
    public void shouldSnapshotOnlineIndexes() throws Exception
    {
        // GIVEN
        IndexAccessor indexAccessor = mock(IndexAccessor.class);
        IndexingService indexing = newIndexingServiceWithMockedDependencies(
                mock( IndexPopulator.class ), indexAccessor,
                new DataUpdates( new NodePropertyUpdate[0] ) );
        int indexId = 1;
        int indexId2 = 2;
        File theFile = new File( "Blah" );

        IndexRule rule1 = indexRule( indexId, 2, 3, PROVIDER_DESCRIPTOR );
        IndexRule rule2 = indexRule( indexId2, 4, 5, PROVIDER_DESCRIPTOR );

        when( indexAccessor.snapshotFiles()).thenAnswer( newResourceIterator( theFile ) );
        when( indexProvider.getInitialState( indexId ) ).thenReturn( ONLINE );
        when( indexProvider.getInitialState( indexId2 ) ).thenReturn( ONLINE );

        indexing.initIndexes( iterator(rule1, rule2) );
        life.start();

        // WHEN
        ResourceIterator<File> files = indexing.snapshotStoreFiles();

        // THEN
        // We get a snapshot per online index
        assertThat( asCollection( files ), equalTo( asCollection( iterator( theFile, theFile ) ) ) );
    }

    /** Indexes still populating (held back by a latch here) must be excluded from snapshots. */
    @Test
    public void shouldNotSnapshotPopulatingIndexes() throws Exception
    {
        // GIVEN
        CountDownLatch populatorLatch = new CountDownLatch(1);
        IndexAccessor indexAccessor = mock(IndexAccessor.class);
        IndexingService indexing = newIndexingServiceWithMockedDependencies(
                populator, indexAccessor,
                new DataUpdates( new NodePropertyUpdate[0] ) );
        int indexId = 1;
        int indexId2 = 2;
        File theFile = new File( "Blah" );

        IndexRule rule1 = indexRule( indexId, 2, 3, PROVIDER_DESCRIPTOR );
        IndexRule rule2 = indexRule( indexId2, 4, 5, PROVIDER_DESCRIPTOR );

        doAnswer( waitForLatch( populatorLatch ) ).when( populator ).create();
        when(indexAccessor.snapshotFiles()).thenAnswer( newResourceIterator( theFile ) );
        when( indexProvider.getInitialState( indexId ) ).thenReturn( POPULATING );
        when( indexProvider.getInitialState( indexId2 ) ).thenReturn( ONLINE );

        indexing.initIndexes( iterator(rule1, rule2) );
        life.start();
        indexing.startIndexes();

        // WHEN
        ResourceIterator<File> files = indexing.snapshotStoreFiles();
        populatorLatch.countDown(); // only now, after the snapshot, is the population job allowed to finish

        // THEN
        // We get a snapshot from the online index, but no snapshot from the populating one
        assertThat( asCollection( files ), equalTo( asCollection( iterator( theFile ) ) ) );
    }

    /** During recovery (before startIndexes), activateIndex must be silently ignored. */
    @Test
    public void shouldIgnoreActivateCallDuringRecovery() throws Exception
    {
        // given
        IndexingService indexingService = newIndexingServiceWithMockedDependencies( populator, accessor, withData() );

        life.start();

        // when
        indexingService.activateIndex( 0 );

        // then no exception should be thrown.
    }

    /** Mockito Answer that blocks the stubbed call until the latch is released. */
    private Answer waitForLatch( final CountDownLatch latch ) {
        return new Answer() {
            @Override
            public Object answer(InvocationOnMock invocationOnMock) throws Throwable {
                latch.await();
                return null;
            }
        };
    }

    /** Answer producing a fresh single-file resource iterator per invocation. */
    private Answer<ResourceIterator<File>> newResourceIterator( final File theFile )
    {
        return new Answer<ResourceIterator<File>>(){

            @Override
            public ResourceIterator<File> answer( InvocationOnMock invocationOnMock ) throws Throwable
            {
                return asResourceIterator(iterator( theFile ));
            }
        };
    }

    /** Wraps a StringLogger in a Logging mock that always returns it. */
    private static Logging mockLogging( StringLogger logger )
    {
        Logging logging = mock( Logging.class );
        when( logging.getMessagesLog( any( Class.class ) ) ).thenReturn( logger );
        return logging;
    }

    /** Shorthand for an ADD update on this test's label/property-key pair. */
    private NodePropertyUpdate add( long nodeId, Object propertyValue )
    {
        return NodePropertyUpdate.add( nodeId, propertyKeyId, propertyValue, new long[]{labelId} );
    }

    /**
     * Builds an IndexingService whose provider hands out the given populator/accessor
     * and whose store scan replays {@code data}.
     */
    private IndexingService newIndexingServiceWithMockedDependencies( IndexPopulator populator,
                                                                      IndexAccessor accessor,
                                                                      DataUpdates data ) throws IOException
    {
        StringLogger logger = mock( StringLogger.class );
        UpdateableSchemaState schemaState = mock( UpdateableSchemaState.class );

        when( indexProvider.getProviderDescriptor() ).thenReturn( PROVIDER_DESCRIPTOR );
        when( indexProvider.getPopulator( anyLong(), any( IndexDescriptor.class ), any( IndexConfiguration.class ) ) ).thenReturn( populator );
        data.getsProcessedByStoreScanFrom( storeView );
        when( indexProvider.getOnlineAccessor( anyLong(), any( IndexConfiguration.class ) ) ).thenReturn( accessor );

        return life.add( new IndexingService(
                life.add( new Neo4jJobScheduler( logger ) ), new DefaultSchemaIndexProviderMap( indexProvider ),
                storeView, mock( TokenNameLookup.class ), schemaState, mockLogging( logger ), IndexingService.NO_MONITOR ) );
    }

    private DataUpdates withData( NodePropertyUpdate... updates )
    {
        return new DataUpdates( updates );
    }

    /**
     * Test fixture: a canned array of updates that a mocked IndexStoreView replays
     * into the visitor passed to visitNodesWithPropertyAndLabel(..), simulating a
     * store scan.
     */
    private static class DataUpdates implements Answer<StoreScan<RuntimeException>>, Iterable<NodePropertyUpdate>
    {
        private final NodePropertyUpdate[] updates;

        DataUpdates( NodePropertyUpdate[] updates )
        {
            this.updates = updates;
        }

        /** Stubs the given store view mock so that scanning it replays these updates. */
        void getsProcessedByStoreScanFrom( IndexStoreView mock )
        {
            when( mock.visitNodesWithPropertyAndLabel( any( IndexDescriptor.class ), visitor( any( Visitor.class ) ) ) )
                    .thenAnswer( this );
        }

        @Override
        public StoreScan<RuntimeException> answer( InvocationOnMock invocation ) throws Throwable
        {
            // Second argument of visitNodesWithPropertyAndLabel(..) is the visitor to feed.
            final Visitor<NodePropertyUpdate, RuntimeException> visitor = visitor( invocation.getArguments()[1] );
            return new StoreScan<RuntimeException>()
            {
                @Override
                public void run()
                {
                    for ( NodePropertyUpdate update : updates )
                    {
                        visitor.visit( update );
                    }
                }

                @Override
                public void stop()
                {
                    // throw new UnsupportedOperationException( "not implemented" );
                }
            };
        }

        /** Unsafe cast helper: Mockito matchers hand back plain Objects. */
        @SuppressWarnings({ "unchecked", "rawtypes" })
        private static Visitor<NodePropertyUpdate, RuntimeException> visitor( Object v )
        {
            return (Visitor) v;
        }

        @Override
        public Iterator<NodePropertyUpdate> iterator()
        {
            return new ArrayIterator<>( updates );
        }

        @Override
        public String toString()
        {
            return Arrays.toString( updates );
        }
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_api_index_IndexingServiceTest.java
|
1,037
|
/**
 * Convenience {@code Monitor} base class with no-op callbacks; subclass and
 * override only the notifications of interest.
 */
public static abstract class MonitorAdapter implements Monitor
{
    @Override
    public void appliedRecoveredData( Iterable<NodePropertyUpdate> updates )
    {   // Do nothing
    }

    @Override
    public void applyingRecoveredData( Collection<Long> nodeIds )
    {   // Do nothing
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_api_index_IndexingService.java
|
1,038
|
{
@Override
public IndexProxy create()
{
try
{
OnlineIndexProxy onlineProxy = new OnlineIndexProxy(
descriptor, providerDescriptor,
getOnlineAccessorFromProvider( providerDescriptor, ruleId,
new IndexConfiguration( constraint ) ) );
if ( constraint )
{
return new TentativeConstraintIndexProxy( flipper, onlineProxy );
}
return onlineProxy;
}
catch ( IOException e )
{
return
createAndStartFailedIndexProxy( ruleId, descriptor, providerDescriptor, constraint, failure( e ) );
}
}
} );
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_api_index_IndexingService.java
|
1,039
|
{
@Override
public void accept( Long indexId, IndexProxy indexProxy )
{
InternalIndexState state = indexProxy.getState();
logger.info( String.format( "IndexingService.start: index on %s is %s",
indexProxy.getDescriptor().userDescription( tokenNameLookup ), state.name() ) );
switch ( state )
{
case ONLINE:
// Don't do anything, index is ok.
break;
case POPULATING:
// Remember for rebuilding
rebuildingDescriptors.put( indexId, getIndexProxyDescriptors( indexProxy ) );
break;
case FAILED:
// Don't do anything, the user needs to drop the index and re-create
break;
}
}
} );
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_api_index_IndexingService.java
|
1,040
|
{
};
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_api_index_IndexingService.java
|
1,041
|
public class IndexingService extends LifecycleAdapter
{
private final IndexMapReference indexMapReference = new IndexMapReference();
private final JobScheduler scheduler;
private final SchemaIndexProviderMap providerMap;
private final IndexStoreView storeView;
private final TokenNameLookup tokenNameLookup;
private final Logging logging;
private final StringLogger logger;
private final UpdateableSchemaState updateableSchemaState;
private final Set<Long> recoveredNodeIds = new HashSet<>();
private final Monitor monitor;
enum State
{
NOT_STARTED,
STARTING,
RUNNING,
STOPPED
}
public interface Monitor
{
void applyingRecoveredData( Collection<Long> nodeIds );
void appliedRecoveredData( Iterable<NodePropertyUpdate> updates );
}
public static abstract class MonitorAdapter implements Monitor
{
@Override
public void appliedRecoveredData( Iterable<NodePropertyUpdate> updates )
{ // Do nothing
}
@Override
public void applyingRecoveredData( Collection<Long> nodeIds )
{ // Do nothing
}
}
public static final Monitor NO_MONITOR = new MonitorAdapter()
{
};
private volatile State state = State.NOT_STARTED;
public IndexingService( JobScheduler scheduler,
SchemaIndexProviderMap providerMap,
IndexStoreView storeView,
TokenNameLookup tokenNameLookup,
UpdateableSchemaState updateableSchemaState,
Logging logging, Monitor monitor )
{
this.scheduler = scheduler;
this.providerMap = providerMap;
this.storeView = storeView;
this.logging = logging;
this.monitor = monitor;
this.logger = logging.getMessagesLog( getClass() );
this.updateableSchemaState = updateableSchemaState;
this.tokenNameLookup = tokenNameLookup;
if ( providerMap == null || providerMap.getDefaultProvider() == null )
{
// For now
throw new IllegalStateException( "You cannot run the database without an index provider, " +
"please make sure that a valid provider (subclass of " + SchemaIndexProvider.class.getName() +
") is on your classpath." );
}
}
/**
* Called while the database starts up, before recovery.
*
* @param indexRules Known index rules before recovery.
*/
public void initIndexes( Iterator<IndexRule> indexRules )
{
IndexMap indexMap = indexMapReference.getIndexMapCopy();
for ( IndexRule indexRule : loop( indexRules ) )
{
IndexProxy indexProxy;
long indexId = indexRule.getId();
IndexDescriptor descriptor = createDescriptor( indexRule );
SchemaIndexProvider.Descriptor providerDescriptor = indexRule.getProviderDescriptor();
SchemaIndexProvider provider = providerMap.apply( providerDescriptor );
InternalIndexState initialState = provider.getInitialState( indexId );
logger.info( format( "IndexingService.initIndexes: index on %s is %s",
descriptor.userDescription( tokenNameLookup ), initialState ) );
boolean constraint = indexRule.isConstraintIndex();
switch ( initialState )
{
case ONLINE:
indexProxy =
createAndStartOnlineIndexProxy( indexId, descriptor, providerDescriptor, constraint );
break;
case POPULATING:
// The database was shut down during population, or a crash has occurred, or some other sad thing.
indexProxy = createAndStartRecoveringIndexProxy( descriptor, providerDescriptor );
break;
case FAILED:
IndexPopulationFailure failure = failure( provider.getPopulationFailure( indexId ) );
indexProxy =
createAndStartFailedIndexProxy( indexId, descriptor, providerDescriptor, constraint, failure );
break;
default:
throw new IllegalArgumentException( "" + initialState );
}
indexMap.putIndexProxy( indexId, indexProxy );
}
indexMapReference.setIndexMap( indexMap );
}
// Recovery semantics: This is to be called after initIndexes, and after the database has run recovery.
public void startIndexes() throws IOException
{
state = State.STARTING;
applyRecoveredUpdates();
IndexMap indexMap = indexMapReference.getIndexMapCopy();
final Map<Long, Pair<IndexDescriptor, SchemaIndexProvider.Descriptor>> rebuildingDescriptors = new HashMap<>();
// Find all indexes that are not already online, do not require rebuilding, and create them
indexMap.foreachIndexProxy( new BiConsumer<Long, IndexProxy>()
{
@Override
public void accept( Long indexId, IndexProxy indexProxy )
{
InternalIndexState state = indexProxy.getState();
logger.info( String.format( "IndexingService.start: index on %s is %s",
indexProxy.getDescriptor().userDescription( tokenNameLookup ), state.name() ) );
switch ( state )
{
case ONLINE:
// Don't do anything, index is ok.
break;
case POPULATING:
// Remember for rebuilding
rebuildingDescriptors.put( indexId, getIndexProxyDescriptors( indexProxy ) );
break;
case FAILED:
// Don't do anything, the user needs to drop the index and re-create
break;
}
}
} );
// Drop placeholder proxies for indexes that need to be rebuilt
dropRecoveringIndexes( indexMap, rebuildingDescriptors );
// Rebuild indexes by recreating and repopulating them
for ( Map.Entry<Long, Pair<IndexDescriptor, SchemaIndexProvider.Descriptor>> entry :
rebuildingDescriptors.entrySet() )
{
long indexId = entry.getKey();
Pair<IndexDescriptor, SchemaIndexProvider.Descriptor> descriptors = entry.getValue();
IndexDescriptor indexDescriptor = descriptors.first();
SchemaIndexProvider.Descriptor providerDescriptor = descriptors.other();
/*
* Passing in "false" for unique here may seem surprising, and.. well, yes, it is, I was surprised too.
* However, it is actually perfectly safe, because whenever we have constraint indexes here, they will
* be in a state where they didn't finish populating, and despite the fact that we re-create them here,
* they will get dropped as soon as recovery is completed by the constraint system.
*/
IndexProxy indexProxy =
createAndStartPopulatingIndexProxy( indexId, indexDescriptor, providerDescriptor, false );
indexMap.putIndexProxy( indexId, indexProxy );
}
indexMapReference.setIndexMap( indexMap );
state = State.RUNNING;
}
@Override
public void stop()
{
state = State.STOPPED;
closeAllIndexes();
}
public IndexProxy getProxyForRule( long indexId ) throws IndexNotFoundKernelException
{
IndexProxy indexProxy = indexMapReference.getIndexProxy( indexId );
if ( indexProxy == null )
{
throw new IndexNotFoundKernelException( "No index with id " + indexId + " exists." );
}
return indexProxy;
}
/*
* Creates an index.
*
* This code is called from the transaction infrastructure during transaction commits, which means that
* it is *vital* that it is stable, and handles errors very well. Failing here means that the entire db
* will shut down.
*/
public void createIndex( IndexRule rule )
{
IndexMap indexMap = indexMapReference.getIndexMapCopy();
long ruleId = rule.getId();
IndexProxy index = indexMap.getIndexProxy( ruleId );
if (index != null)
{
// We already have this index
return;
}
final IndexDescriptor descriptor = createDescriptor( rule );
SchemaIndexProvider.Descriptor providerDescriptor = rule.getProviderDescriptor();
boolean constraint = rule.isConstraintIndex();
if ( state == State.RUNNING )
{
try
{
index = createAndStartPopulatingIndexProxy( ruleId, descriptor, providerDescriptor, constraint );
}
catch ( IOException e )
{
throw new RuntimeException( e );
}
}
else
{
index = createAndStartRecoveringIndexProxy( descriptor, providerDescriptor );
}
indexMap.putIndexProxy( rule.getId(), index );
indexMapReference.setIndexMap( indexMap );
}
private String indexUserDescription( final IndexDescriptor descriptor,
final SchemaIndexProvider.Descriptor providerDescriptor )
{
String userDescription = descriptor.userDescription( tokenNameLookup );
return String.format( "%s [provider: %s]", userDescription, providerDescriptor.toString() );
}
public void updateIndexes( IndexUpdates updates )
{
if ( state == State.RUNNING )
{
try ( IndexUpdaterMap updaterMap = indexMapReference.getIndexUpdaterMap( IndexUpdateMode.ONLINE ) )
{
applyUpdates( updates, updaterMap );
}
}
else
{
if( state == State.NOT_STARTED )
{
recoveredNodeIds.addAll( updates.changedNodeIds() );
}
else
{
// This is a temporary measure to resolve a corruption bug. We believe that it's caused by stray
// HA transactions, and we know that this measure will fix it. It appears, however, that the correct
// fix will be, as it is for several other issues, to modify the system to allow us to kill running
// transactions before state switches.
throw new IllegalStateException( "Cannot queue index updates while index service is " + state );
}
}
}
protected void applyRecoveredUpdates() throws IOException
{
logger.debug( "Applying recovered updates: " + recoveredNodeIds );
monitor.applyingRecoveredData( recoveredNodeIds );
if ( !recoveredNodeIds.isEmpty() )
{
try ( IndexUpdaterMap updaterMap = indexMapReference.getIndexUpdaterMap( IndexUpdateMode.RECOVERY ) )
{
for ( IndexUpdater updater : updaterMap )
{
updater.remove( recoveredNodeIds );
}
for ( long nodeId : recoveredNodeIds )
{
Iterable<NodePropertyUpdate> updates = storeView.nodeAsUpdates( nodeId );
applyUpdates( updates, updaterMap );
monitor.appliedRecoveredData( updates );
}
}
}
recoveredNodeIds.clear();
}
private void applyUpdates( Iterable<NodePropertyUpdate> updates, IndexUpdaterMap updaterMap )
{
for ( NodePropertyUpdate update : updates )
{
int propertyKeyId = update.getPropertyKeyId();
switch ( update.getUpdateMode() )
{
case ADDED:
for ( int len = update.getNumberOfLabelsAfter(), i = 0; i < len; i++ )
{
processUpdateIfIndexExists( updaterMap, update, propertyKeyId, update.getLabelAfter( i ) );
}
break;
case REMOVED:
for ( int len = update.getNumberOfLabelsBefore(), i = 0; i < len; i++ )
{
processUpdateIfIndexExists( updaterMap, update, propertyKeyId, update.getLabelBefore( i ) );
}
break;
case CHANGED:
int lenBefore = update.getNumberOfLabelsBefore();
int lenAfter = update.getNumberOfLabelsAfter();
for ( int i = 0, j = 0; i < lenBefore && j < lenAfter; )
{
int labelBefore = update.getLabelBefore( i );
int labelAfter = update.getLabelAfter( j );
if ( labelBefore == labelAfter )
{
processUpdateIfIndexExists( updaterMap, update, propertyKeyId, labelAfter );
i++;
j++;
}
else
{
if ( labelBefore < labelAfter )
{
i++;
}
else /* labelBefore > labelAfter */
{
j++;
}
}
}
break;
}
}
}
private void processUpdateIfIndexExists( IndexUpdaterMap updaterMap, NodePropertyUpdate update,
int propertyKeyId, int labelId )
{
IndexDescriptor descriptor = new IndexDescriptor( labelId, propertyKeyId );
try
{
IndexUpdater updater = updaterMap.getUpdater( descriptor );
if ( null != updater )
{
updater.process( update );
}
}
catch ( IOException | IndexEntryConflictException e )
{
throw new UnderlyingStorageException( e );
}
}
public void dropIndex( IndexRule rule )
{
long indexId = rule.getId();
IndexProxy index = indexMapReference.removeIndexProxy( indexId );
if ( state == State.RUNNING )
{
assert index != null : "Index " + rule + " doesn't exists";
try
{
Future<Void> dropFuture = index.drop();
awaitIndexFuture( dropFuture );
}
catch ( Exception e )
{
throw launderedException( e );
}
}
}
/**
 * Creates and starts a proxy for an index that still needs to be populated.
 * The returned proxy is a {@link FlippableIndexProxy} that initially delegates to a
 * {@link PopulatingIndexProxy}; once population completes it flips to an online proxy
 * (wrapped in a {@link TentativeConstraintIndexProxy} for constraint indexes).
 *
 * @param constraint whether this index backs a uniqueness constraint
 */
private IndexProxy createAndStartPopulatingIndexProxy( final long ruleId,
final IndexDescriptor descriptor,
final SchemaIndexProvider.Descriptor providerDescriptor,
final boolean constraint ) throws IOException
{
final FlippableIndexProxy flipper = new FlippableIndexProxy();
// TODO: This is here because there is a circular dependency from PopulatingIndexProxy to FlippableIndexProxy
final String indexUserDescription = indexUserDescription( descriptor, providerDescriptor );
IndexPopulator populator =
getPopulatorFromProvider( providerDescriptor, ruleId, descriptor, new IndexConfiguration( constraint ) );
FailedIndexProxyFactory failureDelegateFactory =
new FailedPopulatingIndexProxyFactory( descriptor, providerDescriptor, populator, indexUserDescription );
PopulatingIndexProxy populatingIndex =
new PopulatingIndexProxy( scheduler, descriptor, providerDescriptor,
failureDelegateFactory, populator, flipper, storeView,
indexUserDescription, updateableSchemaState, logging );
flipper.flipTo( populatingIndex );
// Prepare for flipping to online mode
flipper.setFlipTarget( new IndexProxyFactory()
{
@Override
public IndexProxy create()
{
try
{
OnlineIndexProxy onlineProxy = new OnlineIndexProxy(
descriptor, providerDescriptor,
getOnlineAccessorFromProvider( providerDescriptor, ruleId,
new IndexConfiguration( constraint ) ) );
if ( constraint )
{
// Constraint indexes stay "tentative" until the constraint itself is activated.
return new TentativeConstraintIndexProxy( flipper, onlineProxy );
}
return onlineProxy;
}
catch ( IOException e )
{
// Opening the online accessor failed: surface the index as FAILED instead.
return
createAndStartFailedIndexProxy( ruleId, descriptor, providerDescriptor, constraint, failure( e ) );
}
}
} );
// started = false: start() is invoked explicitly below.
IndexProxy result = contractCheckedProxy( flipper, false );
result.start();
return result;
}
/**
 * Creates a proxy for an index that is already ONLINE on disk. If its accessor
 * cannot be opened, the index is started in the FAILED state instead.
 */
private IndexProxy createAndStartOnlineIndexProxy( long ruleId,
        IndexDescriptor descriptor,
        SchemaIndexProvider.Descriptor providerDescriptor,
        boolean unique )
{
    // TODO Hook in version verification/migration calls to the SchemaIndexProvider here
    try
    {
        IndexAccessor accessor =
                getOnlineAccessorFromProvider( providerDescriptor, ruleId, new IndexConfiguration( unique ) );
        OnlineIndexProxy onlineProxy = new OnlineIndexProxy( descriptor, providerDescriptor, accessor );
        return contractCheckedProxy( onlineProxy, true );
    }
    catch ( IOException e )
    {
        return createAndStartFailedIndexProxy( ruleId, descriptor, providerDescriptor, unique, failure( e ) );
    }
}
/**
 * Creates a proxy for an index whose population has failed. The proxy carries the
 * failure so it can be reported to the user, and still allows dropping the index.
 */
private IndexProxy createAndStartFailedIndexProxy( long ruleId,
        IndexDescriptor descriptor,
        SchemaIndexProvider.Descriptor providerDescriptor,
        boolean unique,
        IndexPopulationFailure populationFailure )
{
    IndexPopulator populator =
            getPopulatorFromProvider( providerDescriptor, ruleId, descriptor, new IndexConfiguration( unique ) );
    FailedIndexProxy failed = new FailedIndexProxy( descriptor, providerDescriptor,
            indexUserDescription( descriptor, providerDescriptor ), populator, populationFailure );
    return contractCheckedProxy( failed, true );
}
/**
 * Creates a proxy for an index encountered during recovery; it swallows updates
 * until recovery decides what to do with the index.
 */
private IndexProxy createAndStartRecoveringIndexProxy( IndexDescriptor descriptor,
SchemaIndexProvider.Descriptor providerDescriptor )
{
IndexProxy result = new RecoveringIndexProxy( descriptor, providerDescriptor );
result = contractCheckedProxy( result, true );
return result;
}
/** Resolves the provider for the given descriptor and asks it for a populator. */
private IndexPopulator getPopulatorFromProvider( SchemaIndexProvider.Descriptor providerDescriptor, long ruleId,
IndexDescriptor descriptor, IndexConfiguration config )
{
SchemaIndexProvider indexProvider = providerMap.apply( providerDescriptor );
return indexProvider.getPopulator( ruleId, descriptor, config );
}
/** Resolves the provider for the given descriptor and asks it for an online accessor. */
private IndexAccessor getOnlineAccessorFromProvider( SchemaIndexProvider.Descriptor providerDescriptor,
long ruleId, IndexConfiguration config ) throws IOException
{
SchemaIndexProvider indexProvider = providerMap.apply( providerDescriptor );
return indexProvider.getOnlineAccessor( ruleId, config );
}
/** Wraps a proxy so that lifecycle-contract violations (e.g. double start) are caught. */
private IndexProxy contractCheckedProxy( IndexProxy result, boolean started )
{
result = new ContractCheckingIndexProxy( result, started );
return result;
}
/** Translates a stored {@link IndexRule} into its in-memory {@link IndexDescriptor}. */
private IndexDescriptor createDescriptor( IndexRule rule )
{
    int labelId = rule.getLabel();
    int propertyKeyId = rule.getPropertyKey();
    return new IndexDescriptor( labelId, propertyKeyId );
}
/**
 * Waits up to one minute for the given index operation to complete.
 *
 * @throws InterruptedException if the waiting thread is interrupted; the thread's
 *         interrupt status is restored before the exception propagates.
 */
private void awaitIndexFuture( Future<Void> future ) throws Exception
{
    try
    {
        future.get( 1, MINUTES );
    }
    catch ( InterruptedException e )
    {
        // Restore the interrupt flag for callers further up the stack.
        // (The previous Thread.interrupted() call CLEARED the flag, losing the interrupt.)
        Thread.currentThread().interrupt();
        throw e;
    }
}
/**
 * Drops every index that was found populating during recovery, removing each one
 * from the given index map before dropping it.
 */
private void dropRecoveringIndexes(
        IndexMap indexMap, Map<Long, Pair<IndexDescriptor,SchemaIndexProvider.Descriptor>> recoveringIndexes )
        throws IOException
{
    for ( Long indexId : recoveringIndexes.keySet() )
    {
        indexMap.removeIndexProxy( indexId ).drop();
    }
}
/**
 * Brings a (tentative constraint) index fully online once its store scan has
 * completed. A no-op during recovery.
 */
public void activateIndex( long indexId ) throws
        IndexNotFoundKernelException, IndexActivationFailedKernelException, IndexPopulationFailedKernelException
{
    try
    {
        if ( state == State.RUNNING ) // don't do this during recovery.
        {
            IndexProxy index = getProxyForRule( indexId );
            index.awaitStoreScanCompleted();
            index.activate();
        }
    }
    catch ( InterruptedException e )
    {
        // Restore the interrupt flag instead of clearing it (Thread.interrupted() clears it),
        // so callers can still observe that this thread was interrupted.
        Thread.currentThread().interrupt();
        throw new IndexActivationFailedKernelException( e, "Unable to activate index, thread was interrupted." );
    }
}
/**
 * Validates the index behind the given id, surfacing any deferred population or
 * constraint-verification failures.
 */
public void validateIndex( long indexId )
        throws IndexNotFoundKernelException, ConstraintVerificationFailedKernelException,
        IndexPopulationFailedKernelException
{
    IndexProxy proxy = getProxyForRule( indexId );
    proxy.validate();
}
/**
 * Forces all known indexes to disk. An I/O failure is rethrown as an
 * {@link UnderlyingStorageException}.
 */
public void flushAll()
{
    for ( IndexProxy proxy : indexMapReference.getAllIndexProxies() )
    {
        try
        {
            proxy.force();
        }
        catch ( IOException e )
        {
            throw new UnderlyingStorageException( "Unable to force " + proxy, e );
        }
    }
}
/**
 * Closes every index: first triggers close on each proxy (collecting the futures),
 * then awaits all of them, so closes proceed in parallel. Failures are logged and
 * do not stop the remaining indexes from being closed.
 */
private void closeAllIndexes()
{
Iterable<IndexProxy> indexesToStop = indexMapReference.clear();
Collection<Future<Void>> indexStopFutures = new ArrayList<>();
for ( IndexProxy index : indexesToStop )
{
try
{
indexStopFutures.add( index.close() );
}
catch ( IOException e )
{
logger.error( "Unable to close index", e );
}
}
// Await all closes after initiating them, rather than one at a time.
for ( Future<Void> future : indexStopFutures )
{
try
{
awaitIndexFuture( future );
}
catch ( Exception e )
{
logger.error( "Error awaiting index to close", e );
}
}
}
/** Bundles an index proxy's descriptor together with its provider descriptor. */
private Pair<IndexDescriptor, SchemaIndexProvider.Descriptor> getIndexProxyDescriptors( IndexProxy indexProxy )
{
return Pair.of( indexProxy.getDescriptor(), indexProxy.getProviderDescriptor() );
}
/**
 * Returns an iterator over snapshots of all index store files, e.g. for online backup.
 * The caller is responsible for closing the returned iterator.
 */
public ResourceIterator<File> snapshotStoreFiles() throws IOException
{
Collection<ResourceIterator<File>> snapshots = new ArrayList<>();
for ( IndexProxy indexProxy : indexMapReference.getAllIndexProxies() )
{
snapshots.add(indexProxy.snapshotFiles());
}
return concatResourceIterators( snapshots.iterator() );
}
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_api_index_IndexingService.java
|
1,042
|
/**
 * Unit tests for {@code IndexUpdaterMap}: verifies lazy creation, caching and closing
 * of {@code IndexUpdater}s obtained from an {@code IndexMap} of mocked proxies.
 */
public class IndexUpdaterMapTest
{
private IndexMap indexMap;
private IndexProxy indexProxy1;
private IndexDescriptor indexDescriptor1;
private IndexUpdater indexUpdater1;
private IndexProxy indexProxy2;
private IndexDescriptor indexDescriptor2;
private IndexUpdaterMap updaterMap;
@Before
public void before() throws IOException
{
indexMap = new IndexMap();
indexProxy1 = mock( IndexProxy.class );
indexDescriptor1 = new IndexDescriptor( 2, 3 );
indexUpdater1 = mock( IndexUpdater.class );
when( indexProxy1.getDescriptor() ).thenReturn( indexDescriptor1 );
when( indexProxy1.newUpdater( any( IndexUpdateMode.class ) ) ).thenReturn( indexUpdater1 );
indexProxy2 = mock( IndexProxy.class );
indexDescriptor2 = new IndexDescriptor( 5, 6 );
IndexUpdater indexUpdater2 = mock( IndexUpdater.class );
when( indexProxy2.getDescriptor() ).thenReturn( indexDescriptor2 );
when( indexProxy2.newUpdater( any( IndexUpdateMode.class ) ) ).thenReturn( indexUpdater2 );
updaterMap = new IndexUpdaterMap( IndexUpdateMode.ONLINE, indexMap );
}
@Test
public void shouldRetrieveUpdaterFromIndexMapForExistingIndex() throws Exception
{
// given
indexMap.putIndexProxy( 0, indexProxy1 );
// when
IndexUpdater updater = updaterMap.getUpdater( indexDescriptor1 );
// then
assertEquals( indexUpdater1, updater );
assertEquals( 1, updaterMap.size() );
}
@Test
public void shouldRetrieveSameUpdaterFromIndexMapForExistingIndexWhenCalledTwice() throws Exception
{
// given
indexMap.putIndexProxy( 0, indexProxy1 );
// when
IndexUpdater updater1 = updaterMap.getUpdater( indexDescriptor1 );
IndexUpdater updater2 = updaterMap.getUpdater( indexDescriptor1 );
// then
assertEquals( updater1, updater2 );
assertEquals( 1, updaterMap.size() );
}
@Test
public void shouldRetrieveNoUpdaterForNonExistingIndex() throws Exception
{
// when
IndexUpdater updater = updaterMap.getUpdater( indexDescriptor1 );
// then
assertNull( updater );
assertTrue( "updater map must be empty", updaterMap.isEmpty() );
}
@Test
public void shouldCloseAllUpdaters() throws Exception
{
// given
indexMap.putIndexProxy( 0, indexProxy1 );
indexMap.putIndexProxy( 1, indexProxy2 );
IndexUpdater updater1 = updaterMap.getUpdater( indexDescriptor1 );
IndexUpdater updater2 = updaterMap.getUpdater( indexDescriptor2 );
// when
updaterMap.close();
// then
verify( updater1 ).close();
verify( updater2 ).close();
assertTrue( "updater map must be empty", updaterMap.isEmpty() );
}
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_api_index_IndexUpdaterMapTest.java
|
1,043
|
{
Iterator<IndexDescriptor> descriptors = indexMap.descriptors();
@Override
protected IndexUpdater fetchNextOrNull()
{
if ( descriptors.hasNext() )
{
return getUpdater( descriptors.next() );
}
return null;
}
};
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_api_index_IndexUpdaterMap.java
|
1,044
|
{
@Override
public Future<Boolean> apply( GraphDatabaseService db )
{
db.createNode( label( "Label1" ) ).setProperty( "key1", "value1" );
try
{
return otherThread.execute( createNode( db, "Label1", "key1", "value1" ) );
}
finally
{
assertThat( otherThread, isWaiting() );
}
}
} );
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_api_integrationtest_UniquenessConstraintValidationConcurrencyIT.java
|
1,045
|
{
@Override
public Boolean doWork( Void nothing ) throws Exception
{
Transaction tx = db.beginTx();
try
{
db.createNode( label( label ) ).setProperty( propertyKey, propertyValue );
tx.success();
return true;
}
catch ( ConstraintViolationException e )
{
return false;
}
finally
{
tx.finish();
}
}
};
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_api_integrationtest_UniquenessConstraintValidationConcurrencyIT.java
|
1,046
|
/**
 * A simple synchronized LRU cache backed by an access-ordered {@link LinkedHashMap}.
 * When the cache grows beyond {@link #maxSize()} the least recently used entry is
 * evicted and {@link #elementCleaned(Object)} is invoked for it.
 */
public class LruCache<K,E>
{
    private final String name;
    int maxSize = 1000;
    private boolean resizing = false;
    private boolean adaptive = false;

    // Access-ordered map; eviction happens in removeEldestEntry on put.
    private final Map<K,E> cache = new LinkedHashMap<K,E>( 500, 0.75f, true )
    {
        @Override
        protected boolean removeEldestEntry( Map.Entry<K,E> eldest )
        {
            // synchronization miss with old value on maxSize here is ok
            if ( super.size() > maxSize )
            {
                // Remove explicitly (and return false) so elementCleaned is invoked.
                super.remove( eldest.getKey() );
                elementCleaned( eldest.getValue() );
            }
            return false;
        }
    };

    /**
     * Creates a LRU cache. If {@code maxSize < 1} or {@code name == null} an
     * {@link IllegalArgumentException} is thrown.
     *
     * @param name name of cache
     * @param maxSize maximum size of this cache
     */
    public LruCache( String name, int maxSize )
    {
        if ( name == null || maxSize < 1 )
        {
            throw new IllegalArgumentException( "maxSize=" + maxSize
                + ", name=" + name );
        }
        this.name = name;
        this.maxSize = maxSize;
    }

    public String getName()
    {
        return this.name;
    }

    public synchronized void put( K key, E element )
    {
        if ( key == null || element == null )
        {
            throw new IllegalArgumentException( "key=" + key + ", element="
                + element );
        }
        cache.put( key, element );
    }

    public synchronized E remove( K key )
    {
        if ( key == null )
        {
            throw new IllegalArgumentException( "Null parameter" );
        }
        return cache.remove( key );
    }

    public synchronized E get( K key )
    {
        if ( key == null )
        {
            throw new IllegalArgumentException();
        }
        // HitCounter records hit/miss statistics while passing the value through.
        return counter.count( cache.get( key ) );
    }

    public synchronized void clear()
    {
        resizeInternal( 0 );
    }

    public synchronized int size()
    {
        return cache.size();
    }

    public synchronized Set<K> keySet()
    {
        return cache.keySet();
    }

    public synchronized Collection<E> values()
    {
        return cache.values();
    }

    public synchronized Set<Map.Entry<K,E>> entrySet()
    {
        return cache.entrySet();
    }

    /**
     * Returns the maximum size of this cache.
     *
     * @return maximum size
     */
    public int maxSize()
    {
        return maxSize;
    }

    /**
     * Changes the max size of the cache. If {@code newMaxSize} is
     * greater than {@code maxSize()} the next invocation of {@code maxSize()}
     * will return {@code newMaxSize} and the entries in the cache are not modified.
     * <p>
     * If {@code newMaxSize} is less than {@code size()} the cache will shrink,
     * removing least recently used elements until {@code size()} equals
     * {@code newMaxSize}. For each element removed {@link #elementCleaned} is invoked.
     * <p>
     * If {@code newMaxSize} is less than {@code 1} an
     * {@link IllegalArgumentException} is thrown.
     *
     * @param newMaxSize the new maximum size of the cache
     */
    public synchronized void resize( int newMaxSize )
    {
        if ( newMaxSize < 1 )
        {
            throw new IllegalArgumentException( "newMaxSize=" + newMaxSize );
        }
        resizeInternal( newMaxSize );
    }

    private void resizeInternal( int newMaxSize )
    {
        resizing = true;
        try
        {
            if ( newMaxSize == 0 )
            {
                // clear(): clean out every element but leave maxSize untouched.
                // This case must be checked BEFORE the "newMaxSize >= size()" branch,
                // otherwise clearing an EMPTY cache would permanently set maxSize to 0,
                // after which removeEldestEntry would evict every subsequent put.
                java.util.Iterator<Map.Entry<K,E>> itr = cache.entrySet().iterator();
                while ( itr.hasNext() )
                {
                    E element = itr.next().getValue();
                    elementCleaned( element );
                }
                cache.clear();
            }
            else if ( newMaxSize >= size() )
            {
                maxSize = newMaxSize;
            }
            else
            {
                maxSize = newMaxSize;
                java.util.Iterator<Map.Entry<K,E>> itr = cache.entrySet().iterator();
                while ( itr.hasNext() && cache.size() > maxSize )
                {
                    E element = itr.next().getValue();
                    itr.remove();
                    elementCleaned( element );
                }
            }
        }
        finally
        {
            resizing = false;
        }
    }

    synchronized boolean isResizing()
    {
        return resizing;
    }

    /**
     * Hook invoked for each element evicted or cleared from the cache;
     * subclasses may override to release resources. Default: no-op.
     */
    public void elementCleaned( E element )
    {
    }

    public boolean isAdaptive()
    {
        return adaptive;
    }

    public void setAdaptiveStatus( boolean status )
    {
        this.adaptive = status;
    }

    // Synchronized for consistency with the other mutators (put/remove/clear).
    public synchronized void putAll( Map<K, E> map )
    {
        cache.putAll( map );
    }

    private final HitCounter counter = new HitCounter();

    public long hitCount()
    {
        return counter.getHitsCount();
    }

    public long missCount()
    {
        return counter.getMissCount();
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_cache_LruCache.java
|
1,047
|
/**
 * Integration tests for {@code DiskLayer}: exercises label, property-key, index and
 * property reads directly against a real (impermanent) store, bypassing the kernel API.
 */
public class DiskLayerTest
{
@Test
public void should_be_able_to_list_labels_for_node() throws Exception
{
// GIVEN
long nodeId;
int labelId1, labelId2;
try ( Transaction tx = db.beginTx() )
{
nodeId = db.createNode( label1, label2 ).getId();
String labelName1 = label1.name(), labelName2 = label2.name();
labelId1 = statement.labelGetForName( labelName1 );
labelId2 = statement.labelGetOrCreateForName( labelName2 );
tx.success();
}
// THEN
PrimitiveIntIterator readLabels = statement.nodeGetLabels( nodeId );
assertEquals( new HashSet<>( asList( labelId1, labelId2 ) ),
addToCollection( readLabels, new HashSet<Integer>() ) );
}
@Test
public void should_be_able_to_get_label_name_for_label() throws Exception
{
// GIVEN
String labelName = label1.name();
int labelId = statement.labelGetOrCreateForName( labelName );
// WHEN
String readLabelName = statement.labelGetName( labelId );
// THEN
assertEquals( labelName, readLabelName );
}
/*
* This test doesn't really belong here, but OTOH it does, as it has to do with this specific
* store solution. It creates its own IGD with cache_type:none to try reproduce to trigger the problem.
*/
@Test
public void labels_should_not_leak_out_as_properties() throws Exception
{
// GIVEN
GraphDatabaseService db = new TestGraphDatabaseFactory().newImpermanentDatabaseBuilder()
.setConfig( cache_type, "none" ).newGraphDatabase();
Node node = createLabeledNode( db, map( "name", "Node" ), label1 );
// WHEN THEN
assertThat( getPropertyKeys( db, node ), containsOnly( "name" ) );
}
@Test
public void should_return_all_nodes_with_label() throws Exception
{
// GIVEN
Node node1 = createLabeledNode( db, map( "name", "First", "age", 1L ), label1 );
Node node2 = createLabeledNode( db, map( "type", "Node", "count", 10 ), label1, label2 );
int labelId1 = statement.labelGetForName( label1.name() );
int labelId2 = statement.labelGetForName( label2.name() );
// WHEN
PrimitiveLongIterator nodesForLabel1 = statement.nodesGetForLabel( state, labelId1 );
PrimitiveLongIterator nodesForLabel2 = statement.nodesGetForLabel( state, labelId2 );
// THEN
assertEquals( asSet( node1.getId(), node2.getId() ), IteratorUtil.asSet( nodesForLabel1 ) );
assertEquals( asSet( node2.getId() ), IteratorUtil.asSet( nodesForLabel2 ) );
}
@Test
public void should_get_all_node_properties() throws Exception
{
// GIVEN a value of every supported property type, as scalars and arrays of sizes 0/1/256
String longString =
"AlalalalalongAlalalalalongAlalalalalongAlalalalalongAlalalalalongAlalalalalongAlalalalalongAlalalalalong";
Object[] properties = {
longString,
gimme( String.class ),
gimme( long.class ),
gimme( int.class ),
gimme( byte.class ),
gimme( short.class ),
gimme( boolean.class ),
gimme( char.class ),
gimme( float.class ),
gimme( double.class ),
array( 0, String.class ),
array( 0, long.class ),
array( 0, int.class ),
array( 0, byte.class ),
array( 0, short.class ),
array( 0, boolean.class ),
array( 0, char.class ),
array( 0, float.class ),
array( 0, double.class ),
array( 1, String.class ),
array( 1, long.class ),
array( 1, int.class ),
array( 1, byte.class ),
array( 1, short.class ),
array( 1, boolean.class ),
array( 1, char.class ),
array( 1, float.class ),
array( 1, double.class ),
array( 256, String.class ),
array( 256, long.class ),
array( 256, int.class ),
array( 256, byte.class ),
array( 256, short.class ),
array( 256, boolean.class ),
array( 256, char.class ),
array( 256, float.class ),
array( 256, double.class ),
};
for ( Object value : properties )
{
// given
long nodeId = createLabeledNode( db, singletonMap( "prop", value ), label1 ).getId();
// when
Property property = single( statement.nodeGetAllProperties( nodeId ) );
//then
assertTrue( property + ".valueEquals(" + value + ")", property.valueEquals( value ) );
}
}
@Test
public void should_create_property_key_if_not_exists() throws Exception
{
// WHEN
long id = statement.propertyKeyGetOrCreateForName( propertyKey );
// THEN
assertTrue( "Should have created a non-negative id", id >= 0 );
}
@Test
public void should_get_previously_created_property_key() throws Exception
{
// GIVEN
long id = statement.propertyKeyGetOrCreateForName( propertyKey );
// WHEN
long secondId = statement.propertyKeyGetForName( propertyKey );
// THEN
assertEquals( id, secondId );
}
@Test
public void should_be_able_to_get_or_create_previously_created_property_key() throws Exception
{
// GIVEN
long id = statement.propertyKeyGetOrCreateForName( propertyKey );
// WHEN
long secondId = statement.propertyKeyGetOrCreateForName( propertyKey );
// THEN
assertEquals( id, secondId );
}
@Test
public void should_fail_if_get_non_existent_property_key() throws Exception
{
// WHEN
int propertyKey = statement.propertyKeyGetForName( "non-existent-property-key" );
// THEN
assertEquals( KeyReadOperations.NO_SUCH_PROPERTY_KEY, propertyKey );
}
@Test
public void should_find_nodes_with_given_label_and_property_via_index() throws Exception
{
// GIVEN
IndexDescriptor index = createIndexAndAwaitOnline( label1, propertyKey );
String name = "Mr. Taylor";
Node mrTaylor = createLabeledNode( db, map( propertyKey, name ), label1 );
try ( Transaction ignored = db.beginTx() )
{
// WHEN
Set<Long> foundNodes = asUniqueSet( statement.nodesGetFromIndexLookup( state, 1l, name ) );
// THEN
assertEquals( asSet( mrTaylor.getId() ), foundNodes );
}
}
@SuppressWarnings("deprecation") private GraphDatabaseAPI db;
private final Label label1 = label( "first-label" ), label2 = label( "second-label" );
private final String propertyKey = "name";
private KernelStatement state;
private DiskLayer statement;
@SuppressWarnings("deprecation")
@Before
public void before()
{
// Builds a DiskLayer directly on top of the database's internal stores.
db = (GraphDatabaseAPI) new TestGraphDatabaseFactory().newImpermanentDatabase();
DependencyResolver resolver = db.getDependencyResolver();
IndexingService indexingService = resolver.resolveDependency( IndexingService.class );
NeoStore neoStore = resolver.resolveDependency( XaDataSourceManager.class )
.getNeoStoreDataSource().getNeoStore();
this.statement = new DiskLayer(
resolver.resolveDependency( PropertyKeyTokenHolder.class ),
resolver.resolveDependency( LabelTokenHolder.class ),
resolver.resolveDependency( RelationshipTypeTokenHolder.class ),
new SchemaStorage( neoStore.getSchemaStore() ),
singletonProvider( neoStore ),
indexingService );
this.state = new KernelStatement( null, new IndexReaderFactory.Caching( indexingService ),
resolver.resolveDependency( LabelScanStore.class ), null,
null, null, null, null );
}
@After
public void after()
{
db.shutdown();
}
// Creates a node with the given labels and properties in its own transaction.
private static Node createLabeledNode( GraphDatabaseService db, Map<String, Object> properties, Label... labels )
{
try ( Transaction tx = db.beginTx() )
{
Node node = db.createNode( labels );
for ( Map.Entry<String, Object> property : properties.entrySet() )
{
node.setProperty( property.getKey(), property.getValue() );
}
tx.success();
return node;
}
}
// Creates an index and blocks (up to 10s) until it has come online.
private IndexDescriptor createIndexAndAwaitOnline( Label label, String propertyKey ) throws Exception
{
IndexDefinition index;
try ( Transaction tx = db.beginTx() )
{
index = db.schema().indexFor( label ).on( propertyKey ).create();
tx.success();
}
try ( Transaction ignored = db.beginTx() )
{
db.schema().awaitIndexOnline( index, 10, SECONDS );
return statement.indexesGetForLabelAndPropertyKey( statement.labelGetForName( label.name() ),
statement.propertyKeyGetForName( propertyKey ) );
}
}
// Builds an array of the given component type filled with gimme() sample values.
private Object array( int length, Class<?> componentType )
{
Object array = Array.newInstance( componentType, length );
for ( int i = 0; i < length; i++ )
{
Array.set( array, i, gimme( componentType ) );
}
return array;
}
// Returns a representative sample value for the given primitive/String type.
private Object gimme( Class<?> type )
{
if ( type == int.class )
{
return 666;
}
if ( type == long.class )
{
return 17l;
}
if ( type == double.class )
{
return 6.28318530717958647692d;
}
if ( type == float.class )
{
return 3.14f;
}
if ( type == short.class )
{
return (short) 8733;
}
if ( type == byte.class )
{
return (byte) 123;
}
if ( type == boolean.class )
{
return false;
}
if ( type == char.class )
{
return 'Z';
}
if ( type == String.class )
{
return "hello world";
}
throw new IllegalArgumentException( type.getName() );
}
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_api_store_DiskLayerTest.java
|
1,048
|
{
@Override
public IndexDescriptor apply( SchemaRule from )
{
return descriptor( (IndexRule) from );
}
}, filtered );
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_api_store_DiskLayer.java
|
1,049
|
{
@Override
public boolean accept( SchemaRule rule )
{
return rule.getKind() == SchemaRule.Kind.CONSTRAINT_INDEX_RULE;
}
};
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_api_store_DiskLayer.java
|
1,050
|
{
@Override
public boolean accept( SchemaRule rule )
{
return rule.getKind() == SchemaRule.Kind.INDEX_RULE;
}
}, CONSTRAINT_INDEX_RULES = new Predicate<SchemaRule>()
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_api_store_DiskLayer.java
|
1,051
|
{
@Override
public boolean accept( SchemaRule rule )
{
return rule.getLabel() == labelId && rule.getKind() == SchemaRule.Kind.CONSTRAINT_INDEX_RULE;
}
};
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_api_store_DiskLayer.java
|
1,052
|
{
@Override
public boolean accept( SchemaRule rule )
{
return rule.getLabel() == labelId && rule.getKind() == SchemaRule.Kind.INDEX_RULE;
}
};
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_api_store_DiskLayer.java
|
1,053
|
{
private int cursor;
@Override
public boolean hasNext()
{
return cursor < labels.length;
}
@Override
public int next()
{
if ( !hasNext() )
{
throw new NoSuchElementException();
}
return safeCastLongToInt( labels[cursor++] );
}
};
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_api_store_DiskLayer.java
|
1,054
|
{
@Override
public PropertyStore instance()
{
return neoStore.instance().getPropertyStore();
}
};
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_api_store_DiskLayer.java
|
1,055
|
{
@Override
public UniquenessConstraint apply( UniquenessConstraintRule rule )
{
// We can use propertyKeyId straight up here, without reading from the record, since we have
// verified that it has that propertyKeyId in the predicate. And since we currently only support
// uniqueness on single properties, there is nothing else to pass in to UniquenessConstraint.
return new UniquenessConstraint( rule.getLabel(), rule.getPropertyKey() );
}
};
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_api_store_DiskLayer.java
|
1,056
|
public class DiskLayer
{
// Converts a stored uniqueness-constraint rule into its in-memory representation.
private static final Function<UniquenessConstraintRule, UniquenessConstraint> UNIQUENESS_CONSTRAINT_TO_RULE =
new Function<UniquenessConstraintRule, UniquenessConstraint>()
{
@Override
public UniquenessConstraint apply( UniquenessConstraintRule rule )
{
// We can use propertyKeyId straight up here, without reading from the record, since we have
// verified that it has that propertyKeyId in the predicate. And since we currently only support
// uniqueness on single properties, there is nothing else to pass in to UniquenessConstraint.
return new UniquenessConstraint( rule.getLabel(), rule.getPropertyKey() );
}
};
// These token holders should perhaps move to the cache layer.. not really any reason to have them here?
private final PropertyKeyTokenHolder propertyKeyTokenHolder;
private final LabelTokenHolder labelTokenHolder;
private final RelationshipTypeTokenHolder relationshipTokenHolder;
private final NeoStore neoStore;
private final IndexingService indexService;
// Store handles resolved once from neoStore in the constructor.
private final NodeStore nodeStore;
private final RelationshipStore relationshipStore;
private final PropertyStore propertyStore;
private final SchemaStorage schemaStorage;
private final Provider<PropertyStore> propertyStoreProvider;
/**
 * A note on this taking Provider<NeoStore> rather than just neo store: This is a workaround until the cache is
 * removed. Because the neostore may be restarted while the database is running, and because lazy properties keep
 * a reference to the property store, we need a way to resolve the property store on demand for properties in the
 * cache. As such, this takes a provider, and uses that provider to provide property store references when resolving
 * lazy properties.
 */
public DiskLayer( PropertyKeyTokenHolder propertyKeyTokenHolder, LabelTokenHolder labelTokenHolder,
RelationshipTypeTokenHolder relationshipTokenHolder, SchemaStorage schemaStorage,
final Provider<NeoStore> neoStore, IndexingService indexService )
{
this.relationshipTokenHolder = relationshipTokenHolder;
this.schemaStorage = schemaStorage;
assert neoStore != null : "No neoStore provided";
this.indexService = indexService;
this.propertyKeyTokenHolder = propertyKeyTokenHolder;
this.labelTokenHolder = labelTokenHolder;
this.neoStore = neoStore.instance();
this.nodeStore = this.neoStore.getNodeStore();
this.relationshipStore = this.neoStore.getRelationshipStore();
this.propertyStore = this.neoStore.getPropertyStore();
// Re-resolves the property store on each call; see class comment on why a provider is needed.
this.propertyStoreProvider = new Provider<PropertyStore>()
{
@Override
public PropertyStore instance()
{
return neoStore.instance().getPropertyStore();
}
};
}
/**
 * Returns the id of the label token with the given name, creating the token if needed.
 *
 * @throws TooManyLabelsException if the label id space has been exhausted
 */
public int labelGetOrCreateForName( String label ) throws TooManyLabelsException
{
try
{
return labelTokenHolder.getOrCreateId( label );
}
catch ( TransactionFailureException e )
{
// Temporary workaround for the property store based label
// implementation. Actual
// implementation should not depend on internal kernel exception
// messages like this.
// NOTE(review): matching on the exact exception message string is fragile — see TODO above.
if ( e.getCause() instanceof UnderlyingStorageException
&& e.getCause().getMessage().equals( "Id capacity exceeded" ) )
{
throw new TooManyLabelsException( e );
}
else
{
throw e;
}
}
}
/** Looks up the id of an existing label token by name, without creating it. */
public int labelGetForName( String label )
{
    return labelTokenHolder.getIdByName( label );
}

/**
 * Returns whether the given node carries the given label. A missing or invalid
 * node record simply yields {@code false}.
 */
public boolean nodeHasLabel( long nodeId, int labelId )
{
    try
    {
        PrimitiveIntIterator labelIds = nodeGetLabels( nodeId );
        return IteratorUtil.contains( labelIds, labelId );
    }
    catch ( InvalidRecordException e )
    {
        return false;
    }
}
/**
 * Reads the label ids attached to the given node record and returns them as a
 * primitive int iterator. A missing/invalid node record yields an empty iterator.
 */
public PrimitiveIntIterator nodeGetLabels( long nodeId )
{
    try
    {
        final long[] labelIds = parseLabelsField( nodeStore.getRecord( nodeId ) ).get( nodeStore );
        return new PrimitiveIntIterator()
        {
            private int position;

            @Override
            public boolean hasNext()
            {
                return position < labelIds.length;
            }

            @Override
            public int next()
            {
                if ( position >= labelIds.length )
                {
                    throw new NoSuchElementException();
                }
                return safeCastLongToInt( labelIds[position++] );
            }
        };
    }
    catch ( InvalidRecordException e )
    {
        // TODO Might hide invalid dynamic record problem. It's here because this method
        // might get called with a nodeId that doesn't exist.
        return emptyPrimitiveIntIterator();
    }
}
/**
 * Resolves the name of the label token with the given id.
 *
 * @throws LabelNotFoundKernelException if no label token exists for the id
 */
public String labelGetName( int labelId ) throws LabelNotFoundKernelException
{
try
{
return labelTokenHolder.getTokenById( labelId ).name();
}
catch ( TokenNotFoundException e )
{
throw new LabelNotFoundKernelException( "Label by id " + labelId, e );
}
}
/** Streams the ids of all nodes carrying the given label, via the label scan store. */
public PrimitiveLongIterator nodesGetForLabel( KernelStatement state, int labelId )
{
return state.getLabelScanReader().nodesWithLabel( labelId );
}
/** Looks up the index on the given (label, property key) pair. */
public IndexDescriptor indexesGetForLabelAndPropertyKey( int labelId, int propertyKey )
throws SchemaRuleNotFoundException
{
return descriptor( schemaStorage.indexRule( labelId, propertyKey ) );
}
private static IndexDescriptor descriptor( IndexRule ruleRecord )
{
return new IndexDescriptor( ruleRecord.getLabel(), ruleRecord.getPropertyKey() );
}
/** All non-constraint indexes on the given label. */
public Iterator<IndexDescriptor> indexesGetForLabel( int labelId )
{
return getIndexDescriptorsFor( indexRules( labelId ) );
}
/** All non-constraint indexes in the store. */
public Iterator<IndexDescriptor> indexesGetAll()
{
return getIndexDescriptorsFor( INDEX_RULES );
}
/** All constraint-backing (unique) indexes on the given label. */
public Iterator<IndexDescriptor> uniqueIndexesGetForLabel( int labelId )
{
return getIndexDescriptorsFor( constraintIndexRules( labelId ) );
}
/** All constraint-backing (unique) indexes in the store. */
public Iterator<IndexDescriptor> uniqueIndexesGetAll()
{
return getIndexDescriptorsFor( CONSTRAINT_INDEX_RULES );
}
/** Matches non-constraint index rules on the given label. */
private static Predicate<SchemaRule> indexRules( final int labelId )
{
return new Predicate<SchemaRule>()
{
@Override
public boolean accept( SchemaRule rule )
{
return rule.getLabel() == labelId && rule.getKind() == SchemaRule.Kind.INDEX_RULE;
}
};
}
/** Matches constraint-backing index rules on the given label. */
private static Predicate<SchemaRule> constraintIndexRules( final int labelId )
{
return new Predicate<SchemaRule>()
{
@Override
public boolean accept( SchemaRule rule )
{
return rule.getLabel() == labelId && rule.getKind() == SchemaRule.Kind.CONSTRAINT_INDEX_RULE;
}
};
}
// Label-agnostic variants of the predicates above.
private static final Predicate<SchemaRule> INDEX_RULES = new Predicate<SchemaRule>()
{
@Override
public boolean accept( SchemaRule rule )
{
return rule.getKind() == SchemaRule.Kind.INDEX_RULE;
}
}, CONSTRAINT_INDEX_RULES = new Predicate<SchemaRule>()
{
@Override
public boolean accept( SchemaRule rule )
{
return rule.getKind() == SchemaRule.Kind.CONSTRAINT_INDEX_RULE;
}
};
/** Loads all schema rules, keeps those matching the filter, and maps them to descriptors. */
private Iterator<IndexDescriptor> getIndexDescriptorsFor( Predicate<SchemaRule> filter )
{
Iterator<SchemaRule> filtered = filter( filter, neoStore.getSchemaStore().loadAllSchemaRules() );
return map( new Function<SchemaRule, IndexDescriptor>()
{
@Override
public IndexDescriptor apply( SchemaRule from )
{
return descriptor( (IndexRule) from );
}
}, filtered );
}
/**
 * Returns the id of the uniqueness constraint owning the given index,
 * or {@code null} if the index is not (yet) owned by a constraint.
 */
public Long indexGetOwningUniquenessConstraintId( IndexDescriptor index )
throws SchemaRuleNotFoundException
{
return schemaStorage.indexRule( index.getLabelId(), index.getPropertyKeyId() ).getOwningConstraint();
}
/** Returns the committed store id of the rule backing the given index. */
public long indexGetCommittedId( IndexDescriptor index ) throws SchemaRuleNotFoundException
{
return schemaStorage.indexRule( index.getLabelId(), index.getPropertyKeyId() ).getId();
}
/** Returns the runtime state (POPULATING/ONLINE/FAILED) of the given index. */
public InternalIndexState indexGetState( IndexDescriptor descriptor )
throws IndexNotFoundKernelException
{
return indexService.getProxyForRule( indexId( descriptor ) ).getState();
}
/** Returns the population-failure message for the given index, if any. */
public String indexGetFailure( IndexDescriptor descriptor ) throws IndexNotFoundKernelException
{
return indexService.getProxyForRule( indexId( descriptor ) ).getPopulationFailure().asString();
}
// Translates "rule not found" into the kernel-level "index not found" exception.
private long indexId( IndexDescriptor descriptor ) throws IndexNotFoundKernelException
{
try
{
return schemaStorage.indexRule( descriptor.getLabelId(), descriptor.getPropertyKeyId() ).getId();
}
catch ( SchemaRuleNotFoundException e )
{
throw new IndexNotFoundKernelException( e.getMessage(), e );
}
}
    /**
     * Uniqueness constraints for the given label whose rule contains the given
     * property key.
     */
    public Iterator<UniquenessConstraint> constraintsGetForLabelAndPropertyKey( int labelId, final int propertyKeyId )
    {
        return schemaStorage.schemaRules( UNIQUENESS_CONSTRAINT_TO_RULE, UniquenessConstraintRule.class,
                labelId, new Predicate<UniquenessConstraintRule>()
        {
            @Override
            public boolean accept( UniquenessConstraintRule rule )
            {
                return rule.containsPropertyKeyId( propertyKeyId );
            }
        } );
    }

    /** All uniqueness constraints for the given label. */
    public Iterator<UniquenessConstraint> constraintsGetForLabel( int labelId )
    {
        return schemaStorage.schemaRules( UNIQUENESS_CONSTRAINT_TO_RULE, UniquenessConstraintRule.class,
                labelId, Predicates.<UniquenessConstraintRule>TRUE() );
    }

    /** All uniqueness constraints in the store, regardless of label. */
    public Iterator<UniquenessConstraint> constraintsGetAll()
    {
        return schemaStorage.schemaRules( UNIQUENESS_CONSTRAINT_TO_RULE, SchemaRule.Kind.UNIQUENESS_CONSTRAINT,
                Predicates.<UniquenessConstraintRule>TRUE() );
    }

    /** Resolves a property key token id, creating the token if it does not exist. */
    public int propertyKeyGetOrCreateForName( String propertyKey )
    {
        return propertyKeyTokenHolder.getOrCreateId( propertyKey );
    }

    /** Resolves the id of an existing property key token by name. */
    public int propertyKeyGetForName( String propertyKey )
    {
        return propertyKeyTokenHolder.getIdByName( propertyKey );
    }

    /**
     * Looks up the name of a property key token by id, translating a missing
     * token into the kernel-level exception callers expect.
     */
    public String propertyKeyGetName( int propertyKeyId )
            throws PropertyKeyIdNotFoundKernelException
    {
        try
        {
            return propertyKeyTokenHolder.getTokenById( propertyKeyId ).name();
        }
        catch ( TokenNotFoundException e )
        {
            throw new PropertyKeyIdNotFoundKernelException( propertyKeyId, e );
        }
    }

    /**
     * All committed properties of the given node, loaded from its on-disk
     * property chain.
     *
     * @throws EntityNotFoundException if the node record cannot be read.
     */
    public Iterator<DefinedProperty> nodeGetAllProperties( long nodeId )
            throws EntityNotFoundException
    {
        try
        {
            return loadAllPropertiesOf( nodeStore.getRecord( nodeId ) );
        }
        catch ( InvalidRecordException e )
        {
            throw new EntityNotFoundException( EntityType.NODE, nodeId, e );
        }
    }

    /**
     * All committed properties of the given relationship.
     *
     * @throws EntityNotFoundException if the relationship record cannot be read.
     */
    public Iterator<DefinedProperty> relationshipGetAllProperties( long relationshipId )
            throws EntityNotFoundException
    {
        try
        {
            return loadAllPropertiesOf( relationshipStore.getRecord( relationshipId ) );
        }
        catch ( InvalidRecordException e )
        {
            throw new EntityNotFoundException( EntityType.RELATIONSHIP, relationshipId, e );
        }
    }

    /** All committed graph-level (store-wide) properties. */
    public Iterator<DefinedProperty> graphGetAllProperties()
    {
        return loadAllPropertiesOf( neoStore.asRecord() );
    }
    /**
     * Unique-index lookup returning a resource iterator that owns a dedicated
     * index reader, so the reader can be closed when iteration finishes.
     */
    public PrimitiveLongResourceIterator nodeGetUniqueFromIndexLookup( KernelStatement state,
            long indexId, Object value )
            throws IndexNotFoundKernelException
    {
        /* Here we have an intricate scenario where we need to return the PrimitiveLongIterator
         * since subsequent filtering will happen outside, but at the same time have the ability to
         * close the IndexReader when done iterating over the lookup result. This is because we get
         * a fresh reader that isn't associated with the current transaction and hence will not be
         * automatically closed. */
        IndexReader reader = state.getFreshIndexReader( indexId );
        return resourceIterator( reader.lookup( value ), reader );
    }

    /** Plain index lookup using the statement's transaction-bound index reader. */
    public PrimitiveLongIterator nodesGetFromIndexLookup( KernelStatement state, long index, Object value )
            throws IndexNotFoundKernelException
    {
        return state.getIndexReader( index ).lookup( value );
    }
private Iterator<DefinedProperty> loadAllPropertiesOf( PrimitiveRecord primitiveRecord )
{
Collection<PropertyRecord> records = propertyStore.getPropertyRecordChain( primitiveRecord.getNextProp() );
if ( null == records )
{
return IteratorUtil.emptyIterator();
}
List<DefinedProperty> properties = new ArrayList<>();
for ( PropertyRecord record : records )
{
for ( PropertyBlock block : record.getPropertyBlocks() )
{
properties.add( block.getType().readProperty( block.getKeyIndexId(), block, propertyStoreProvider ) );
}
}
return properties.iterator();
}
    /** All property key tokens known to the store. */
    public Iterable<Token> propertyKeyGetAllTokens()
    {
        return propertyKeyTokenHolder.getAllTokens();
    }

    /** All label tokens known to the store. */
    public Iterable<Token> labelGetAllTokens()
    {
        return labelTokenHolder.getAllTokens();
    }

    /** Resolves the id of an existing relationship type token by name. */
    public int relationshipTypeGetForName( String relationshipTypeName )
    {
        return relationshipTokenHolder.getIdByName( relationshipTypeName );
    }

    /**
     * Looks up the name of a relationship type token by id, translating a
     * missing token into the kernel-level exception callers expect.
     */
    public String relationshipTypeGetName( int relationshipTypeId ) throws RelationshipTypeIdNotFoundKernelException
    {
        try
        {
            // NOTE(review): the cast to Token looks redundant -- confirm the token
            // holder's declared element type before removing it.
            return ((Token)relationshipTokenHolder.getTokenById( relationshipTypeId )).name();
        }
        catch ( TokenNotFoundException e )
        {
            throw new RelationshipTypeIdNotFoundKernelException( relationshipTypeId, e );
        }
    }

    /** Resolves a relationship type token id, creating the token if needed. */
    public int relationshipTypeGetOrCreateForName( String relationshipTypeName )
    {
        return relationshipTokenHolder.getOrCreateId( relationshipTypeName );
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_api_store_DiskLayer.java
|
1,057
|
{
@Override
public void newSize( Primitive entity, int size )
{
}
};
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_api_store_CacheUpdateListener.java
|
1,058
|
/**
 * Unit tests for {@code CacheLayer}: verifies that label and uniqueness
 * constraint reads are served from the injected caches rather than the disk
 * layer. All collaborators are Mockito mocks.
 */
public class CacheLayerTest
{
    private final DiskLayer diskLayer = mock( DiskLayer.class );
    private final PersistenceCache persistenceCache = mock( PersistenceCache.class );
    private final SchemaCache schemaCache = mock( SchemaCache.class );
    private final IndexingService indexingService = mock( IndexingService.class );
    // Subject under test, wired with the mocks above.
    private final CacheLayer context = new CacheLayer( diskLayer, persistenceCache, indexingService, schemaCache );

    @Test
    public void shouldGetCachedLabelsIfCached() throws EntityNotFoundException
    {
        // GIVEN
        long nodeId = 3;
        int[] labels = new int[] {1, 2, 3};
        when( persistenceCache.nodeGetLabels( any( KernelStatement.class ), eq( nodeId ), any( CacheLoader.class ) ) )
                .thenReturn( labels );
        // WHEN
        PrimitiveIntIterator receivedLabels = context.nodeGetLabels( mockedState(), nodeId );
        // THEN
        assertArrayEquals( labels, primitiveIntIteratorToIntArray( receivedLabels ) );
    }

    @Test
    public void shouldLoadAllConstraintsFromCache() throws Exception
    {
        // Given
        Set<UniquenessConstraint> constraints = asSet( new UniquenessConstraint( 0, 1 ) );
        when(schemaCache.constraints()).thenReturn( constraints.iterator() );
        // When & Then
        assertThat( asSet( context.constraintsGetAll( mockedState() ) ), equalTo( constraints ) );
    }

    @Test
    public void shouldLoadConstraintsByLabelFromCache() throws Exception
    {
        // Given
        int labelId = 0;
        Set<UniquenessConstraint> constraints = asSet( new UniquenessConstraint( labelId, 1 ) );
        when(schemaCache.constraintsForLabel(labelId)).thenReturn( constraints.iterator() );
        // When & Then
        assertThat( asSet( context.constraintsGetForLabel( mockedState(), labelId ) ), equalTo( constraints ) );
    }

    @Test
    public void shouldLoadConstraintsByLabelAndPropertyFromCache() throws Exception
    {
        // Given
        int labelId = 0, propertyId = 1;
        Set<UniquenessConstraint> constraints = asSet( new UniquenessConstraint( labelId, propertyId ) );
        when(schemaCache.constraintsForLabelAndProperty(labelId, propertyId)).thenReturn( constraints.iterator() );
        // When & Then
        assertThat( asSet( context.constraintsGetForLabelAndPropertyKey( mockedState(), labelId, propertyId ) ),
                equalTo( constraints ) );
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_api_store_CacheLayerTest.java
|
1,059
|
{
@Override
public boolean accept( SchemaRule item )
{
return item.getKind() == kind;
}
}, rules.iterator() );
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_api_store_CacheLayer.java
|
1,060
|
{
@Override
public int[] load( long id ) throws EntityNotFoundException
{
return primitiveIntIteratorToIntArray( diskLayer.nodeGetLabels( id ) );
}
};
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_api_store_CacheLayer.java
|
1,061
|
{
@Override
public Iterator<DefinedProperty> load( long id ) throws EntityNotFoundException
{
return diskLayer.graphGetAllProperties();
}
};
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_api_store_CacheLayer.java
|
1,062
|
{
@Override
public Iterator<DefinedProperty> load( long id ) throws EntityNotFoundException
{
return diskLayer.relationshipGetAllProperties( id );
}
};
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_api_store_CacheLayer.java
|
1,063
|
{
@Override
public Iterator<DefinedProperty> load( long id ) throws EntityNotFoundException
{
return diskLayer.nodeGetAllProperties( id );
}
};
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_api_store_CacheLayer.java
|
1,064
|
{
@Override
public IndexDescriptor apply( SchemaRule from )
{
IndexRule rule = (IndexRule) from;
// We know that we only have int range of property key ids.
return new IndexDescriptor( rule.getLabel(), rule.getPropertyKey() );
}
};
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_api_store_CacheLayer.java
|
1,065
|
/**
 * Unit tests for {@code TxStateImpl}: verifies that per-transaction diff sets
 * for node labels, index rules, uniqueness constraints and property changes
 * are tracked correctly. The legacy transaction-state bridge and persistence
 * manager are mocked out; see {@link #before()} at the bottom of the class.
 *
 * Note: judging by the assertions, {@code nodeDoAddLabel}/{@code nodeDoRemoveLabel}
 * take arguments in (labelId, nodeId) order -- TODO confirm against TxState.
 */
public class TxStateTest
{
    private PersistenceManager persistenceManager;

    @Test
    public void shouldGetAddedLabels() throws Exception
    {
        // GIVEN
        state.nodeDoAddLabel( 1, 0 );
        state.nodeDoAddLabel( 1, 1 );
        state.nodeDoAddLabel( 2, 1 );
        // WHEN
        Set<Integer> addedLabels = state.nodeStateLabelDiffSets( 1 ).getAdded();
        // THEN
        assertEquals( asSet( 1, 2 ), addedLabels );
    }

    @Test
    public void shouldGetRemovedLabels() throws Exception
    {
        // GIVEN
        state.nodeDoRemoveLabel( 1, 0 );
        state.nodeDoRemoveLabel( 1, 1 );
        state.nodeDoRemoveLabel( 2, 1 );
        // WHEN
        Set<Integer> removedLabels = state.nodeStateLabelDiffSets( 1 ).getRemoved();
        // THEN
        assertEquals( asSet( 1, 2 ), removedLabels );
    }

    @Test
    public void removeAddedLabelShouldRemoveFromAdded() throws Exception
    {
        // GIVEN
        state.nodeDoAddLabel( 1, 0 );
        state.nodeDoAddLabel( 1, 1 );
        state.nodeDoAddLabel( 2, 1 );
        // WHEN
        state.nodeDoRemoveLabel( 1, 1 );
        // THEN
        assertEquals( asSet( 2 ), state.nodeStateLabelDiffSets( 1 ).getAdded() );
    }

    @Test
    public void addRemovedLabelShouldRemoveFromRemoved() throws Exception
    {
        // GIVEN
        state.nodeDoRemoveLabel( 1, 0 );
        state.nodeDoRemoveLabel( 1, 1 );
        state.nodeDoRemoveLabel( 2, 1 );
        // WHEN
        state.nodeDoAddLabel( 1, 1 );
        // THEN
        assertEquals( asSet( 2 ), state.nodeStateLabelDiffSets( 1 ).getRemoved() );
    }

    @Test
    public void shouldMapFromAddedLabelToNodes() throws Exception
    {
        // GIVEN
        state.nodeDoAddLabel( 1, 0 );
        state.nodeDoAddLabel( 2, 0 );
        state.nodeDoAddLabel( 1, 1 );
        state.nodeDoAddLabel( 3, 1 );
        state.nodeDoAddLabel( 2, 2 );
        // WHEN
        Set<Long> nodes = state.nodesWithLabelAdded( 2 );
        // THEN
        assertEquals( asSet( 0L, 2L ), asSet( nodes ) );
    }

    @Test
    public void shouldMapFromRemovedLabelToNodes() throws Exception
    {
        // GIVEN
        state.nodeDoRemoveLabel( 1, 0 );
        state.nodeDoRemoveLabel( 2, 0 );
        state.nodeDoRemoveLabel( 1, 1 );
        state.nodeDoRemoveLabel( 3, 1 );
        state.nodeDoRemoveLabel( 2, 2 );
        // WHEN
        Set<Long> nodes = state.nodesWithLabelChanged( 2 ).getRemoved();
        // THEN
        assertEquals( asSet( 0L, 2L ), asSet( nodes ) );
    }

    @Test
    public void shouldAddAndGetByLabel() throws Exception
    {
        // GIVEN
        int labelId = 2, labelId2 = 5, propertyKey = 3;
        // WHEN
        IndexDescriptor rule = new IndexDescriptor( labelId, propertyKey );
        state.indexRuleDoAdd( rule );
        state.indexRuleDoAdd( new IndexDescriptor( labelId2, propertyKey ) );
        // THEN
        assertEquals( asSet( rule ), state.indexDiffSetsByLabel( labelId ).getAdded() );
    }

    @Test
    public void shouldAddAndGetByRuleId() throws Exception
    {
        // GIVEN
        int labelId = 2, propertyKey = 3;
        // WHEN
        IndexDescriptor rule = new IndexDescriptor( labelId, propertyKey );
        state.indexRuleDoAdd( rule );
        // THEN
        assertEquals( asSet( rule ), state.indexChanges().getAdded() );
    }

    @Test
    public void shouldIncludeAddedNodesWithCorrectProperty() throws Exception
    {
        // Given
        long nodeId = 1337l;
        int propertyKey = 2;
        int propValue = 42;
        DiffSets<Long> nodesWithChangedProp = new DiffSets<>( asSet( nodeId ), emptySet );
        when( legacyState.getNodesWithChangedProperty( propertyKey, propValue ) ).thenReturn( nodesWithChangedProp );
        // When
        DiffSets<Long> diff = state.nodesWithChangedProperty( propertyKey, propValue );
        // Then
        assertThat( diff.getAdded(), equalTo( asSet( nodeId ) ) );
        assertThat( diff.getRemoved(), equalTo( emptySet ) );
    }

    @Test
    public void shouldExcludeNodesWithCorrectPropertyRemoved() throws Exception
    {
        // Given
        long nodeId = 1337l;
        int propertyKey = 2;
        int propValue = 42;
        DiffSets<Long> nodesWithChangedProp = new DiffSets<>( emptySet, asSet( nodeId ) );
        when( legacyState.getNodesWithChangedProperty( propertyKey, propValue ) ).thenReturn( nodesWithChangedProp );
        // When
        DiffSets<Long> diff = state.nodesWithChangedProperty( propertyKey, propValue );
        // Then
        assertThat( diff.getAdded(), equalTo( emptySet ) );
        assertThat( diff.getRemoved(), equalTo( asSet( nodeId ) ) );
    }

    @Test
    public void shouldListNodeAsDeletedIfItIsDeleted() throws Exception
    {
        // Given
        // When
        long nodeId = 1337l;
        state.nodeDoDelete( nodeId );
        // Then
        verify( legacyState ).deleteNode( nodeId );
        verifyNoMoreInteractions( legacyState, persistenceManager );
        assertThat( asSet( state.nodesDeletedInTx().getRemoved() ), equalTo( asSet( nodeId ) ) );
    }

    @Test
    public void shouldAddUniquenessConstraint() throws Exception
    {
        // when
        UniquenessConstraint constraint = new UniquenessConstraint( 1, 17 );
        state.constraintDoAdd( constraint, 7 );
        // then
        DiffSets<UniquenessConstraint> diff = state.constraintsChangesForLabel( 1 );
        assertEquals( Collections.singleton( constraint ), diff.getAdded() );
        assertTrue( diff.getRemoved().isEmpty() );
    }

    @Test
    public void addingUniquenessConstraintShouldBeIdempotent() throws Exception
    {
        // given
        UniquenessConstraint constraint1 = new UniquenessConstraint( 1, 17 );
        state.constraintDoAdd( constraint1, 7 );
        // when: adding an equal constraint again, even with a different index id
        UniquenessConstraint constraint2 = new UniquenessConstraint( 1, 17 );
        state.constraintDoAdd( constraint2, 19 );
        // then
        assertEquals( constraint1, constraint2 );
        assertEquals( Collections.singleton( constraint1 ), state.constraintsChangesForLabel( 1 ).getAdded() );
    }

    @Test
    public void shouldDifferentiateBetweenUniquenessConstraintsForDifferentLabels() throws Exception
    {
        // when
        UniquenessConstraint constraint1 = new UniquenessConstraint( 1, 17 );
        state.constraintDoAdd( constraint1, 7 );
        UniquenessConstraint constraint2 = new UniquenessConstraint( 2, 17 );
        state.constraintDoAdd( constraint2, 19 );
        // then
        assertEquals( Collections.singleton( constraint1 ), state.constraintsChangesForLabel( 1 ).getAdded() );
        assertEquals( Collections.singleton( constraint2 ), state.constraintsChangesForLabel( 2 ).getAdded() );
    }

    // Test fixture: subject under test plus its mocked collaborators.
    private TxState state;
    private OldTxStateBridge legacyState;
    private final Set<Long> emptySet = Collections.emptySet();

    @Before
    public void before() throws Exception
    {
        legacyState = mock( OldTxStateBridge.class );
        persistenceManager = mock( PersistenceManager.class );
        state = new TxStateImpl( legacyState,
                persistenceManager, mock( TxState.IdGeneration.class )
        );
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_api_state_TxStateTest.java
|
1,066
|
{
@Override
public IndexDescriptor apply( UniquenessConstraint constraint )
{
return new IndexDescriptor( constraint.label(), constraint.propertyKeyId() );
}
}, constraintMap.keySet() );
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_api_state_TxStateImpl.java
|
1,067
|
{
@Override
public boolean accept( UniquenessConstraintRule rule )
{
return rule.containsPropertyKeyId( propertyKeyId );
}
} );
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_api_store_DiskLayer.java
|
1,068
|
/**
 * Read-through facade over the node and relationship object caches, with
 * access to graph-wide properties. Entities missing from a cache are reported
 * as {@link EntityNotFoundException}; property loading is delegated to the
 * supplied {@code CacheLoader}s, with size-change callbacks keeping the cache
 * accounting up to date.
 */
public class PersistenceCache
{
    // Propagates entity size changes into the node cache's size accounting.
    private final CacheUpdateListener NODE_CACHE_SIZE_LISTENER = new CacheUpdateListener()
    {
        @Override
        public void newSize( Primitive entity, int size )
        {
            nodeCache.updateSize( (NodeImpl) entity, size );
        }
    };

    // Same, for the relationship cache.
    private final CacheUpdateListener RELATIONSHIP_CACHE_SIZE_LISTENER = new CacheUpdateListener()
    {
        @Override
        public void newSize( Primitive entity, int size )
        {
            relationshipCache.updateSize( (RelationshipImpl) entity, size );
        }
    };

    private final AutoLoadingCache<NodeImpl> nodeCache;
    private final AutoLoadingCache<RelationshipImpl> relationshipCache;
    // Lazily evaluated holder for the graph-properties entity.
    private final Thunk<GraphPropertiesImpl> graphProperties;

    public PersistenceCache(
            AutoLoadingCache<NodeImpl> nodeCache,
            AutoLoadingCache<RelationshipImpl> relationshipCache,
            Thunk<GraphPropertiesImpl> graphProperties )
    {
        this.nodeCache = nodeCache;
        this.relationshipCache = relationshipCache;
        this.graphProperties = graphProperties;
    }

    /** Whether the node carries the given label, loading labels on demand. */
    public boolean nodeHasLabel( KernelStatement state, long nodeId, int labelId, CacheLoader<int[]> cacheLoader )
            throws EntityNotFoundException
    {
        return getNode( nodeId ).hasLabel( state, labelId, cacheLoader );
    }

    /** All label ids of the node, loading them on demand. */
    public int[] nodeGetLabels( KernelStatement state, long nodeId, CacheLoader<int[]> loader )
            throws EntityNotFoundException
    {
        return getNode( nodeId ).getLabels( state, loader );
    }

    // Fetches the node from the cache, translating a miss into the
    // kernel-level "entity not found" exception.
    private NodeImpl getNode( long nodeId ) throws EntityNotFoundException
    {
        NodeImpl node = nodeCache.get( nodeId );
        if ( node == null )
        {
            throw new EntityNotFoundException( EntityType.NODE, nodeId );
        }
        return node;
    }

    // Same as getNode, for relationships.
    private RelationshipImpl getRelationship( long relationshipId ) throws EntityNotFoundException
    {
        RelationshipImpl relationship = relationshipCache.get( relationshipId );
        if ( relationship == null )
        {
            throw new EntityNotFoundException( EntityType.RELATIONSHIP, relationshipId );
        }
        return relationship;
    }

    /**
     * Applies committed label changes to any nodes currently in the cache;
     * nodes not cached are skipped (they will be loaded fresh on next access).
     */
    public void apply( Collection<NodeLabelUpdate> updates )
    {
        for ( NodeLabelUpdate update : updates )
        {
            NodeImpl node = nodeCache.getIfCached( update.getNodeId() );
            if(node != null)
            {
                // TODO: This is because the labels are still longs in WriteTransaction, this should go away once
                // we make labels be ints everywhere.
                long[] labelsAfter = update.getLabelsAfter();
                int[] labels = new int[labelsAfter.length];
                for(int i=0;i<labels.length;i++)
                {
                    labels[i] = (int)labelsAfter[i];
                }
                node.commitLabels( labels );
            }
        }
    }

    /** Drops the node from the cache so its next read goes to the store. */
    public void evictNode( long nodeId )
    {
        nodeCache.remove( nodeId );
    }

    /** All properties of the node, loading them on demand. */
    public Iterator<DefinedProperty> nodeGetProperties( long nodeId,
            CacheLoader<Iterator<DefinedProperty>> cacheLoader )
            throws EntityNotFoundException
    {
        return getNode( nodeId ).getProperties( cacheLoader, NODE_CACHE_SIZE_LISTENER );
    }

    /** Property key ids present on the node, loading properties on demand. */
    public PrimitiveLongIterator nodeGetPropertyKeys( long nodeId,
            CacheLoader<Iterator<DefinedProperty>> cacheLoader )
            throws EntityNotFoundException
    {
        return getNode( nodeId ).getPropertyKeys( cacheLoader, NODE_CACHE_SIZE_LISTENER );
    }

    /** Single property of the node by key id, loading properties on demand. */
    public Property nodeGetProperty( long nodeId, int propertyKeyId,
            CacheLoader<Iterator<DefinedProperty>> cacheLoader )
            throws EntityNotFoundException
    {
        return getNode( nodeId ).getProperty( cacheLoader, NODE_CACHE_SIZE_LISTENER, propertyKeyId );
    }

    /** All properties of the relationship, loading them on demand. */
    public Iterator<DefinedProperty> relationshipGetProperties( long relationshipId,
            CacheLoader<Iterator<DefinedProperty>> cacheLoader ) throws EntityNotFoundException
    {
        return getRelationship( relationshipId ).getProperties( cacheLoader,
                RELATIONSHIP_CACHE_SIZE_LISTENER );
    }

    /** Single property of the relationship by key id. */
    public Property relationshipGetProperty( long relationshipId, int propertyKeyId,
            CacheLoader<Iterator<DefinedProperty>> cacheLoader ) throws EntityNotFoundException
    {
        return getRelationship( relationshipId ).getProperty( cacheLoader, RELATIONSHIP_CACHE_SIZE_LISTENER,
                propertyKeyId );
    }

    /** All graph-wide properties; graph properties are not size-accounted (NO_UPDATES). */
    public Iterator<DefinedProperty> graphGetProperties( CacheLoader<Iterator<DefinedProperty>> cacheLoader )
    {
        return graphProperties.evaluate().getProperties( cacheLoader, NO_UPDATES );
    }

    /** Property key ids present on the graph-properties entity. */
    public PrimitiveLongIterator graphGetPropertyKeys( CacheLoader<Iterator<DefinedProperty>> cacheLoader )
    {
        return graphProperties.evaluate().getPropertyKeys( cacheLoader, NO_UPDATES );
    }

    /** Single graph-wide property by key id. */
    public Property graphGetProperty( CacheLoader<Iterator<DefinedProperty>> cacheLoader,
            int propertyKeyId )
    {
        return graphProperties.evaluate().getProperty( cacheLoader, NO_UPDATES, propertyKeyId );
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_api_store_PersistenceCache.java
|
1,069
|
/**
 * Integration tests for uniqueness constraint enforcement: constraint
 * violations must be rejected whether introduced via property writes or label
 * additions, while removing the conflicting datum in the same transaction
 * must make the conflicting write legal again.
 */
public class UniquenessConstraintValidationIT extends KernelIntegrationTest
{
    @Test
    public void shouldEnforceUniquenessConstraintOnSetProperty() throws Exception
    {
        // given
        constrainedNode( "Label1", "key1", "value1" );
        dataWriteOperationsInNewTransaction();
        // when
        Node node = db.createNode( label( "Label1" ) );
        try
        {
            node.setProperty( "key1", "value1" );
            fail( "should have thrown exception" );
        }
        // then
        catch ( ConstraintViolationException e )
        {
            assertThat( e.getMessage(), containsString( "\"key1\"=[value1]" ) );
        }
    }

    @Test
    public void shouldEnforceUniquenessConstraintOnAddLabel() throws Exception
    {
        // given
        constrainedNode( "Label1", "key1", "value1" );
        dataWriteOperationsInNewTransaction();
        // when: the property is set first, the conflicting label added second
        Node node = db.createNode();
        node.setProperty( "key1", "value1" );
        try
        {
            node.addLabel( label( "Label1" ) );
            fail( "should have thrown exception" );
        }
        // then
        catch ( ConstraintViolationException e )
        {
            assertThat( e.getMessage(), containsString( "\"key1\"=[value1]" ) );
        }
    }

    @Test
    public void shouldAllowRemoveAndAddConflictingDataInOneTransaction_DeleteNode() throws Exception
    {
        // given
        Node node = constrainedNode( "Label1", "key1", "value1" );
        dataWriteOperationsInNewTransaction();
        // when
        node.delete();
        db.createNode( label( "Label1" ) ).setProperty( "key1", "value1" );
        commit();
    }

    @Test
    public void shouldAllowRemoveAndAddConflictingDataInOneTransaction_RemoveLabel() throws Exception
    {
        // given
        Node node = constrainedNode( "Label1", "key1", "value1" );
        dataWriteOperationsInNewTransaction();
        // when
        node.removeLabel( label( "Label1" ) );
        db.createNode( label( "Label1" ) ).setProperty( "key1", "value1" );
        commit();
    }

    @Test
    public void shouldAllowRemoveAndAddConflictingDataInOneTransaction_RemoveProperty() throws Exception
    {
        // given
        Node node = constrainedNode( "Label1", "key1", "value1" );
        dataWriteOperationsInNewTransaction();
        // when
        node.removeProperty( "key1" );
        db.createNode( label( "Label1" ) ).setProperty( "key1", "value1" );
        commit();
    }

    @Test
    public void shouldAllowRemoveAndAddConflictingDataInOneTransaction_ChangeProperty() throws Exception
    {
        // given
        Node node = constrainedNode( "Label1", "key1", "value1" );
        dataWriteOperationsInNewTransaction();
        // when
        node.setProperty( "key1", "value2" );
        db.createNode( label( "Label1" ) ).setProperty( "key1", "value1" );
        commit();
    }

    @Test
    public void shouldPreventConflictingDataInSameTransaction() throws Exception
    {
        // given
        constrainedNode( "Label1", "key1", "value1" );
        dataWriteOperationsInNewTransaction();
        // when: two uncommitted nodes conflict with each other
        db.createNode( label( "Label1" ) ).setProperty( "key1", "value2" );
        try
        {
            db.createNode( label( "Label1" ) ).setProperty( "key1", "value2" );
            fail( "expected exception" );
        }
        // then
        catch ( ConstraintViolationException e )
        {
            assertThat( e.getMessage(), containsString( "\"key1\"=[value2]" ) );
        }
    }

    @Test
    public void shouldAllowNoopPropertyUpdate() throws KernelException
    {
        // given
        Node node = constrainedNode( "Label1", "key1", "value1" );
        dataWriteOperationsInNewTransaction();
        // when: re-setting the same value on the same node
        node.setProperty( "key1", "value1" );
        // then should not throw exception
    }

    @Test
    public void shouldAllowNoopLabelUpdate() throws KernelException
    {
        // given
        Node node = constrainedNode( "Label1", "key1", "value1" );
        dataWriteOperationsInNewTransaction();
        // when: re-adding the label the node already has
        node.addLabel( label( "Label1" ) );
        // then should not throw exception
    }

    @Test
    public void shouldAllowCreationOfNonConflictingData() throws Exception
    {
        // given
        constrainedNode( "Label1", "key1", "value1" );
        dataWriteOperationsInNewTransaction();
        // when: each node differs from the constrained one in label, key or value
        db.createNode().setProperty( "key1", "value1" );
        db.createNode( label( "Label2" ) ).setProperty( "key1", "value1" );
        db.createNode( label( "Label1" ) ).setProperty( "key1", "value2" );
        db.createNode( label( "Label1" ) ).setProperty( "key2", "value1" );
        commit();
        // then: the four nodes above plus the original constrained node
        dataWriteOperationsInNewTransaction();
        assertEquals( "number of nodes", 5, count( GlobalGraphOperations.at( db ).getAllNodes() ) );
        rollback();
    }

    @Test
    public void unrelatedNodesWithSamePropertyShouldNotInterfereWithUniquenessCheck() throws Exception
    {
        // given
        createConstraint( "Person", "id" );
        Node ourNode;
        {
            dataWriteOperationsInNewTransaction();
            ourNode = db.createNode( label( "Person" ) );
            ourNode.setProperty( "id", 1 );
            db.createNode( label( "Item" ) ).setProperty( "id", 2 );
            commit();
        }
        DataWriteOperations statement = dataWriteOperationsInNewTransaction();
        IndexDescriptor idx = statement.uniqueIndexGetForLabelAndPropertyKey( statement
                .labelGetForName( "Person" ), statement.propertyKeyGetForName( "id" ) );
        // when: an unconstrained label reuses the same property value
        db.createNode( label( "Item" ) ).setProperty( "id", 2 );
        // then I should find the original node
        assertThat( statement.nodeGetUniqueFromIndexLookup( idx, 1 ), equalTo( ourNode.getId() ));
    }

    @Test
    public void addingUniqueNodeWithUnrelatedValueShouldNotAffectLookup() throws Exception
    {
        // given
        createConstraint( "Person", "id" );
        Node ourNode;
        {
            dataWriteOperationsInNewTransaction();
            ourNode = db.createNode( label( "Person" ) );
            ourNode.setProperty( "id", 1 );
            commit();
        }
        DataWriteOperations statement = dataWriteOperationsInNewTransaction();
        IndexDescriptor idx = statement.uniqueIndexGetForLabelAndPropertyKey( statement
                .labelGetForName( "Person" ), statement.propertyKeyGetForName( "id" ) );
        // when: a second constrained node with a different, non-conflicting value
        db.createNode( label( "Person" ) ).setProperty( "id", 2 );
        // then I should find the original node
        assertThat( statement.nodeGetUniqueFromIndexLookup( idx, 1 ), equalTo( ourNode.getId() ));
    }

    // Creates a committed node with the given label/property and then puts a
    // uniqueness constraint on that label/property combination.
    private Node constrainedNode( String labelName, String propertyKey, Object propertyValue )
            throws KernelException
    {
        Node node;
        {
            dataWriteOperationsInNewTransaction();
            node = db.createNode( label( labelName ) );
            node.setProperty( propertyKey, propertyValue );
            commit();
        }
        createConstraint( labelName, propertyKey );
        return node;
    }

    // Creates the uniqueness constraint in two transactions: tokens first
    // (data-write), then the constraint itself (schema-write).
    private void createConstraint( String label, String propertyKey ) throws KernelException
    {
        int labelId, propertyKeyId;
        {
            DataWriteOperations statement = dataWriteOperationsInNewTransaction();
            labelId = statement.labelGetOrCreateForName( label );
            propertyKeyId = statement.propertyKeyGetOrCreateForName( propertyKey );
            commit();
        }
        {
            SchemaWriteOperations statement = schemaWriteOperationsInNewTransaction();
            statement.uniquenessConstraintCreate( labelId, propertyKeyId );
            commit();
        }
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_api_integrationtest_UniquenessConstraintValidationIT.java
|
1,070
|
{
@Override
public void newSize( Primitive entity, int size )
{
nodeCache.updateSize( (NodeImpl) entity, size );
}
};
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_api_store_PersistenceCache.java
|
1,071
|
/**
 * Tallies cache lookups: a non-null result counts as a hit, a null result as
 * a miss.
 */
final class HitCounter
{
    private final Counter hitCounter;
    private final Counter missCounter;

    public HitCounter( )
    {
        this.hitCounter = new Counter();
        this.missCounter = new Counter();
    }

    /**
     * Records the outcome of a lookup and passes the looked-up item straight
     * through, so calls can be wrapped around existing expressions.
     */
    public <T> T count( T item )
    {
        if ( item == null )
        {
            missCounter.inc();
        }
        else
        {
            hitCounter.inc();
        }
        return item;
    }

    public long getHitsCount()
    {
        return hitCounter.count();
    }

    public long getMissCount()
    {
        return missCounter.count();
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_cache_HitCounter.java
|
1,072
|
/**
 * Configuration settings for the enterprise high-performance cache ("hpc").
 */
public class HighPerformanceCacheSettings
{
    // Maximum bytes of tracked entity size for the node / relationship caches.
    // No default: when unset the cache provider applies its own fallback.
    public static final Setting<Long> node_cache_size = setting( "node_cache_size", Settings.BYTES, NO_DEFAULT );

    public static final Setting<Long> relationship_cache_size =
            setting( "relationship_cache_size", Settings.BYTES,NO_DEFAULT );

    // Percentage (1.0-10.0) of the JVM heap used to size each cache's backing array.
    @SuppressWarnings("unchecked")
    public static final Setting<Float> node_cache_array_fraction =
            setting( "node_cache_array_fraction", FLOAT, "1.0",range( 1.0f, 10.0f ) );

    @SuppressWarnings("unchecked")
    public static final Setting<Float> relationship_cache_array_fraction =
            setting("relationship_cache_array_fraction", FLOAT, "1.0", range( 1.0f, 10.0f ) );

    // Minimum interval between cache statistics log lines.
    public static final Setting<Long> log_interval = setting( "high_performance_cache_min_log_interval", DURATION, "60s" );
}
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_impl_cache_HighPerformanceCacheSettings.java
|
1,073
|
/**
 * {@code CacheProvider} backed by {@code HighPerformanceCache} ("hpc"): builds
 * the enterprise node and relationship object caches from configuration,
 * defaulting each unset cache size to a quarter of the maximum heap.
 */
@Service.Implementation(CacheProvider.class)
public class HighPerformanceCacheProvider extends CacheProvider
{
    public static final String NAME = "hpc";

    public HighPerformanceCacheProvider()
    {
        super( NAME, "High-Performance Cache" );
    }

    @Override
    public Cache<NodeImpl> newNodeCache( StringLogger logger, Config config, Monitors monitors )
    {
        // Both sizes are resolved so the combined budget can be validated,
        // even though only the node size feeds this cache.
        long node = sizeOrDefault( config.get( HighPerformanceCacheSettings.node_cache_size ) );
        long rel = sizeOrDefault( config.get( HighPerformanceCacheSettings.relationship_cache_size ) );
        checkMemToUse( logger, node, rel, Runtime.getRuntime().maxMemory() );
        return new HighPerformanceCache<>( node, config.get( HighPerformanceCacheSettings.node_cache_array_fraction ),
                config.get( HighPerformanceCacheSettings.log_interval ),
                NODE_CACHE_NAME, logger, monitors.newMonitor( HighPerformanceCache.Monitor.class ) );
    }

    @Override
    public Cache<RelationshipImpl> newRelationshipCache( StringLogger logger, Config config, Monitors monitors )
    {
        long node = sizeOrDefault( config.get( HighPerformanceCacheSettings.node_cache_size ) );
        long rel = sizeOrDefault( config.get( HighPerformanceCacheSettings.relationship_cache_size ) );
        checkMemToUse( logger, node, rel, Runtime.getRuntime().maxMemory() );
        return new HighPerformanceCache<>( rel, config.get( HighPerformanceCacheSettings
                .relationship_cache_array_fraction ), config.get( HighPerformanceCacheSettings.log_interval ),
                RELATIONSHIP_CACHE_NAME, logger, monitors.newMonitor( HighPerformanceCache.Monitor.class ) );
    }

    /**
     * Resolves a configured cache size, falling back to a quarter of the
     * maximum heap when the setting is absent. Extracted to remove the
     * null-default logic previously duplicated in both factory methods.
     */
    private static long sizeOrDefault( Long configuredSize )
    {
        return configuredSize != null ? configuredSize : Runtime.getRuntime().maxMemory() / 4;
    }

    // Fails when the combined configured sizes exceed the available heap and
    // logs a warning when they exceed half of it.
    // TODO: Move into validation method of config setting?
    @SuppressWarnings("boxing")
    private void checkMemToUse( StringLogger logger, long node, long rel, long available )
    {
        long advisedMax = available / 2;
        long total = 0;
        // NOTE(review): the MIN_SIZE clamp only affects this validation; the
        // caches themselves are still constructed with the unclamped values.
        node = Math.max( HighPerformanceCache.MIN_SIZE, node );
        total += node;
        rel = Math.max( HighPerformanceCache.MIN_SIZE, rel );
        total += rel;
        if ( total > available )
        {
            throw new IllegalArgumentException(
                    String.format( "Configured cache memory limits (node=%s, relationship=%s, " +
                            "total=%s) exceeds available heap space (%s)",
                            node, rel, total, available ) );
        }
        if ( total > advisedMax )
        {
            logger.logMessage( String.format( "Configured cache memory limits(node=%s, relationship=%s, " +
                    "total=%s) exceeds recommended limit (%s)",
                    node, rel, total, advisedMax ) );
        }
    }

    @Override
    public Class getSettingsClass()
    {
        return HighPerformanceCacheSettings.class;
    }
}
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_impl_cache_HighPerformanceCacheProvider.java
|
1,074
|
public class HighPerformanceCache<E extends EntityWithSizeObject> implements Cache<E>, DiagnosticsProvider
{
public interface Monitor {
void purged( long sizeBefore, long sizeAfter, int numberOfEntitiesPurged );
}
public static final long MIN_SIZE = 1;
private final AtomicReferenceArray<E> cache;
private final long maxSize;
private long closeToMaxSize;
private long purgeStopSize;
private long purgeHandoffSize;
private final AtomicLong currentSize = new AtomicLong( 0 );
private final long minLogInterval;
private final String name;
private final AtomicLong highestIdSet = new AtomicLong();
// non thread safe, only ~statistics (atomic update will affect performance)
private long hitCount = 0;
private long missCount = 0;
private long totalPuts = 0;
private long collisions = 0;
private long purgeCount = 0;
private final StringLogger logger;
private final AtomicBoolean purging = new AtomicBoolean();
private final AtomicInteger avertedPurgeWaits = new AtomicInteger();
private final AtomicInteger forcedPurgeWaits = new AtomicInteger();
private long purgeTime;
private Monitor monitor;
HighPerformanceCache( AtomicReferenceArray<E> cache )
{
this.cache = cache;
this.minLogInterval = Long.MAX_VALUE;
this.maxSize = 1024l*1024*1024;
this.name = "test cache";
this.logger = null;
calculateSizes();
}
public HighPerformanceCache( long maxSizeInBytes, float arrayHeapFraction, long minLogInterval, String name,
StringLogger logger, Monitor monitor )
{
if ( logger == null )
{
throw new IllegalArgumentException( "Null logger" );
}
this.minLogInterval = minLogInterval;
if ( arrayHeapFraction < 1 || arrayHeapFraction > 10 )
{
throw new IllegalArgumentException(
"The heap fraction used by the High-Performance Cache must be between 1% and 10%, not "
+ arrayHeapFraction + "%" );
}
long memToUse = (long)(((double)arrayHeapFraction) * Runtime.getRuntime().maxMemory() / 100);
long maxElementCount = (int) ( memToUse / 8 );
if ( memToUse > Integer.MAX_VALUE )
{
maxElementCount = Integer.MAX_VALUE;
}
if ( maxSizeInBytes < MIN_SIZE )
{
throw new IllegalArgumentException( "Max size can not be " + maxSizeInBytes );
}
this.cache = new AtomicReferenceArray<>( (int) maxElementCount );
this.maxSize = maxSizeInBytes;
this.name = name == null ? super.toString() : name;
this.logger = logger;
this.monitor = monitor;
calculateSizes();
}
// Derives the purge watermarks from maxSize: start purging at 95%, stop at 90%,
// and force other threads to wait for an ongoing purge above 105%.
private void calculateSizes()
{
    this.closeToMaxSize = (long)(maxSize * 0.95d);
    this.purgeStopSize = (long)(maxSize * 0.90d);
    this.purgeHandoffSize = (long)(maxSize * 1.05d);
}
// Maps an entity to its slot in the backing array. Distinct ids can collide on the
// same position; collisions overwrite (see put()).
protected int getPosition( EntityWithSizeObject obj )
{
    return (int) ( obj.getId() % cache.length() );
}
// Same mapping as above, keyed directly by entity id.
private int getPosition( long id )
{
    return (int) ( id % cache.length() );
}
// Timestamp of the last periodic statistics print from put(); not thread safe, best effort.
private long putTimeStamp = 0;
/**
 * Puts an entity into the cache via lock-free CAS on its slot. If an element with the
 * same id already occupies the slot, that existing element is returned instead (so callers
 * always get the canonical cached instance). A different entity in the slot (a collision)
 * is overwritten. May trigger a purge if the perceived size passes the 95% watermark.
 */
@Override
public E put( E obj )
{
    long time = System.currentTimeMillis();
    if ( time - putTimeStamp > minLogInterval )
    {
        putTimeStamp = time;
        printStatistics();
    }
    int pos = getPosition( obj );
    E oldObj = cache.get( pos );
    while ( oldObj != obj )
    {
        if ( oldObj != null && oldObj.getId() == obj.getId() )
        { // There's an existing element representing the same entity at this position, return the existing
            return oldObj;
        }
        // Either we're trying to put a new element that doesn't exist at this location an element
        // that doesn't represent the same entity that is already here. In any case, overwrite what's there
        int objectSize = obj.sizeOfObjectInBytesIncludingOverhead();
        if ( cache.compareAndSet( pos, oldObj, obj ) )
        {
            setHighest( pos );
            int oldObjSize = 0;
            if ( oldObj != null )
            {
                oldObjSize = oldObj.getRegisteredSize();
            }
            long size = currentSize.addAndGet( objectSize - oldObjSize );
            obj.setRegisteredSize( objectSize );
            if ( oldObj != null )
            {
                collisions++;
            }
            totalPuts++;
            if ( size > closeToMaxSize )
            {
                purgeFrom( pos );
            }
            // We successfully updated the cache with our new element, break and have it returned.
            break;
        }
        // Someone else put an element here right in front of our very nose
        // Get the element that was just set and have another go
        oldObj = cache.get( pos );
    }
    return obj;
}
/**
 * Updates the highest set id if the given id is higher than any previously registered id.
 * Helps {@link #clear()} performance wise so that only the used part of the array is cleared.
 * @param id the id just put into the cache.
 */
private void setHighest( long id )
{
    long currentHighest;
    do
    {
        currentHighest = highestIdSet.get();
        if ( id <= currentHighest )
        {
            // Someone already registered an equal or higher id, nothing to do.
            return;
        }
    }
    while ( !highestIdSet.compareAndSet( currentHighest, id ) );
}
/**
 * Removes whatever entity currently occupies the slot mapped from {@code id} and adjusts
 * the perceived size. NOTE(review): unlike get(), this does NOT verify that the occupant's
 * id equals {@code id} — doPurge() relies on that, calling this with array *positions*
 * (which map to themselves since position < cache.length()). Do not add an id check.
 */
@Override
public E remove( long id )
{
    int pos = getPosition( id );
    E obj = cache.get(pos);
    if ( obj != null )
    {
        if ( cache.compareAndSet( pos, obj, null ) )
        {
            currentSize.addAndGet( obj.getRegisteredSize() * -1 );
        }
    }
    return obj;
}
/**
 * Looks up an entity by id. Collisions share a slot, so an occupied slot may hold a
 * different entity — the id is verified before counting a hit.
 */
@Override
public E get( long id )
{
    E candidate = cache.get( getPosition( id ) );
    if ( candidate == null || candidate.getId() != id )
    {
        missCount++;
        return null;
    }
    hitCount++;
    return candidate;
}
// Timestamp of the last purge log message; guarded by the doPurge monitor in practice.
private long lastPurgeLogTimestamp = 0;
/**
 * Entry point for purging: ensures at most one thread runs doPurge() at a time. Other
 * threads either return immediately (size below the 105% handoff watermark) and let the
 * purger work on their behalf, or block until the ongoing purge completes.
 */
private void purgeFrom( int pos )
{
    long myCurrentSize = currentSize.get();
    if ( myCurrentSize <= closeToMaxSize )
    {
        return;
    }
    // if we're within 0.95 < size < 1.05 and someone else is purging then just return and let
    // the other one purge for us. if we're above 1.05 then wait for the purger to finish before returning.
    if ( purging.compareAndSet( false, true ) )
    { // We're going to do the purge
        try
        {
            doPurge( pos );
        }
        finally
        {
            purging.set( false );
        }
    }
    else
    { // Someone else is currently doing a purge
        if ( myCurrentSize < purgeHandoffSize )
        { // It's safe to just return and let the purger do its thing
            avertedPurgeWaits.incrementAndGet();
        }
        else
        {
            // Wait for the current purge to complete. Some threads might slip through here before
            // A thread just entering doPurge above, but that's fine
            forcedPurgeWaits.incrementAndGet();
            waitForCurrentPurgeToComplete();
        }
    }
}
// Blocks on this object's monitor, which a purging thread holds for the duration of
// doPurge() (also synchronized) — the empty body is intentional.
private synchronized void waitForCurrentPurgeToComplete()
{
    // Just a nice way of saying "wait for the monitor on this object currently held by the thread doing a purge"
}
/**
 * Scans outwards from {@code pos}, removing cached entities until the perceived size drops
 * back below the 90% purge-stop watermark. Synchronized so at most one purge runs at a time;
 * {@link #waitForCurrentPurgeToComplete()} piggy-backs on this monitor. Logs statistics at
 * most every minLogInterval ms and notifies the monitor on exit.
 */
private synchronized void doPurge( int pos )
{
    long sizeBefore = currentSize.get();
    if ( sizeBefore <= closeToMaxSize )
    {
        // Another thread purged while we were waiting for the monitor.
        return;
    }
    long startTime = System.currentTimeMillis();
    purgeCount++;
    int numberOfEntitiesPurged = 0;
    try
    {
        int index = 1;
        do
        {
            int minusPos = pos - index;
            if ( minusPos >= 0 )
            {
                if ( remove( minusPos ) != null )
                {
                    numberOfEntitiesPurged++;
                    if ( currentSize.get() <= purgeStopSize )
                    {
                        return;
                    }
                }
            }
            int plusPos = pos + index;
            if ( plusPos < cache.length() )
            {
                if ( remove( plusPos ) != null )
                {
                    numberOfEntitiesPurged++;
                    if ( currentSize.get() <= purgeStopSize )
                    {
                        return;
                    }
                }
            }
            index++;
        }
        while ( ( pos - index ) >= 0 || ( pos + index ) < cache.length() );
        // current object larger than max size, clear it
        remove( pos );
    }
    finally
    {
        long timestamp = System.currentTimeMillis();
        purgeTime += (timestamp-startTime);
        if ( timestamp - lastPurgeLogTimestamp > minLogInterval )
        {
            lastPurgeLogTimestamp = timestamp;
            long sizeAfter = currentSize.get();
            // Fixed: the log previously rendered the purged-entity *count* as the "before"
            // size and diffed against it; report the actual byte sizes instead.
            String sizeBeforeStr = getSize( sizeBefore );
            String sizeAfterStr = getSize( sizeAfter );
            String diffStr = getSize( sizeBefore - sizeAfter );
            String missPercentage = ((float) missCount / (float) (hitCount+missCount) * 100.0f) + "%";
            String colPercentage = ((float) collisions / (float) totalPuts * 100.0f) + "%";
            logger.logMessage( name + " purge (nr " + purgeCount + ") " + sizeBeforeStr + " -> " + sizeAfterStr + " (" + diffStr +
                    ") " + missPercentage + " misses, " + colPercentage + " collisions (" + collisions + ").", true );
            printAccurateStatistics();
        }
        monitor.purged( sizeBefore, currentSize.get(), numberOfEntitiesPurged );
    }
}
// Walks the entire backing array counting elements and summing both their actual and
// registered sizes, then logs the result. O(array length) — only used from purge logging.
private void printAccurateStatistics()
{
    int elementCount = 0;
    long actualSize = 0;
    long registeredSize = 0;
    for ( int i = 0; i < cache.length(); i++ )
    {
        EntityWithSizeObject obj = cache.get( i );
        if ( obj != null )
        {
            elementCount++;
            actualSize += obj.sizeOfObjectInBytesIncludingOverhead();
            registeredSize += obj.getRegisteredSize();
        }
    }
    logger.logMessage( name + " purge (nr " + purgeCount + "): elementCount:" + elementCount + " and sizes actual:" + getSize( actualSize ) +
            ", perceived:" + getSize( currentSize.get() ) + " (diff:" + getSize(currentSize.get() - actualSize) + "), registered:" + getSize( registeredSize ), true );
}
// Logs the cheap summary statistics (see toString()); the accurate full-array scan
// is deliberately left out here because of its cost.
@Override
public void printStatistics()
{
    logStatistics( logger );
    // printAccurateStatistics();
}
/**
 * Identifies this cache in diagnostics output by its name.
 */
@Override
public String getDiagnosticsIdentifier()
{
    return getName();
}
/**
 * This cache exposes no sub-components, so visitors are ignored.
 */
@Override
public void acceptDiagnosticsVisitor( Object visitor )
{
    // accept no visitors.
}
/**
 * Dumps statistics, but only when diagnostics were explicitly requested.
 */
@Override
public void dump( DiagnosticsPhase phase, StringLogger log )
{
    if ( !phase.isExplicitlyRequested() )
    {
        return;
    }
    logStatistics( log );
}
// Writes the summary statistics line (toString()) at debug level.
private void logStatistics( StringLogger log )
{
    log.debug( this.toString() );
}
// One-line summary of size, hit/miss/collision ratios and purge behaviour; used both
// for logging and diagnostics dumps.
@Override
public String toString()
{
    String currentSizeStr = getSize( currentSize.get() );
    String missPercentage = ((float) missCount / (float) (hitCount+missCount) * 100.0f) + "%";
    String colPercentage = ((float) collisions / (float) totalPuts * 100.0f) + "%";
    return name + " array:" + cache.length() + " purge:" + purgeCount + " size:" + currentSizeStr +
            " misses:" + missPercentage + " collisions:" + colPercentage + " (" + collisions + ") av.purge waits:" +
            avertedPurgeWaits.get() + " purge waits:" + forcedPurgeWaits.get() + " avg. purge time:" + (purgeCount > 0 ? (purgeTime/purgeCount) + "ms" : "N/A");
}
/**
 * Renders a byte count as a human-readable string with Gb/Mb/kb/b units.
 *
 * @param size number of bytes; may be negative when used for size diffs.
 * @return the value scaled to the largest applicable unit.
 */
private String getSize( long size )
{
    if ( size > ( 1024 * 1024 * 1024 ) )
    {
        float value = size / 1024.0f / 1024.0f / 1024.0f;
        return value + "Gb";
    }
    if ( size > ( 1024 * 1024 ) )
    {
        float value = size / 1024.0f / 1024.0f;
        return value + "Mb";
    }
    if ( size > 1024 )
    {
        // Fixed: kilobytes need a single division by 1024 (previously divided twice,
        // under-reporting kb values by a factor of 1024).
        float value = size / 1024.0f;
        return value + "kb";
    }
    return size + "b";
}
// Empties the cache. Only the prefix up to the highest position ever written is nulled
// out (tracked via setHighest()), avoiding a full sweep of a mostly-empty array.
@Override
public void clear()
{
    for ( int i = 0; i <= highestIdSet.get() /*cache.length()*/; i++ )
    {
        cache.set( i, null );
    }
    currentSize.set( 0 );
    highestIdSet.set( 0 );
}
// Puts every element of the collection, one at a time, through put() — so the same
// collision/size/purge semantics apply per element.
@Override
public void putAll( Collection<E> objects )
{
    for ( E obj : objects )
    {
        put( obj );
    }
}
// Name given at construction (or Object.toString() if none was supplied).
@Override
public String getName()
{
    return name;
}
// Perceived total size in bytes — NOT the element count.
@Override
public long size()
{
    return currentSize.get();
}
// Approximate (not thread safe) hit counter.
@Override
public long hitCount()
{
    return hitCount;
}
// Approximate (not thread safe) miss counter.
@Override
public long missCount()
{
    return missCount;
}
/**
 * Re-registers the size of an entity that grew or shrank while cached. Only applied if
 * the exact instance still occupies its slot (it may have been evicted or overwritten).
 * May trigger a purge if the new total passes the 95% watermark.
 */
@Override
public void updateSize( E obj, int newSize )
{
    int pos = getPosition( obj );
    E existingObj = cache.get( pos );
    if ( existingObj != obj )
    {
        return;
    }
    long size = currentSize.addAndGet( newSize - existingObj.getRegisteredSize() );
    obj.setRegisteredSize( newSize );
    if ( size > closeToMaxSize )
    {
        purgeFrom( pos );
    }
}
}
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_impl_cache_HighPerformanceCache.java
|
1,075
|
// One slot of the clock-eviction queue: holds a value plus the "recently used" flag the
// clock hand inspects. Both fields are volatile; there is no atomicity between them.
// NOTE(review): equals/hashCode are based on the mutable value field, so a Page's hash
// can change over its lifetime — presumably these Pages are never used as hash keys.
private static class Page<E>
{
    volatile boolean flag = true;
    volatile E value;
    @Override
    public boolean equals( Object obj )
    {
        if ( obj == null )
        {
            return false;
        }
        if ( !( obj instanceof Page ) )
        {
            return false;
        }
        Page<?> other = (Page) obj;
        if ( value == null )
        {
            return other.value == null;
        }
        return value.equals( other.value );
    }
    @Override
    public int hashCode()
    {
        return value == null ? 0 : value.hashCode();
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_cache_ClockCache.java
|
1,076
|
/**
 * A bounded cache using the CLOCK (second-chance) eviction algorithm: each entry carries a
 * "recently used" flag; the eviction hand sweeps the queue, clearing flags and evicting the
 * first entry whose flag was already cleared. Null keys and values are rejected.
 */
public class ClockCache<K, V>
{
    // The clock hand sweeps this queue; entries are re-offered after each inspection.
    private final Queue<Page<V>> clock = new ConcurrentLinkedQueue<Page<V>>();
    // Declared as ConcurrentHashMap (not Map) so putIfAbsent is available below.
    private final ConcurrentHashMap<K, Page<V>> cache = new ConcurrentHashMap<K, Page<V>>();
    private final int maxSize;
    // Number of pages currently holding a non-null value.
    private final AtomicInteger currentSize = new AtomicInteger( 0 );
    private final String name;
    /**
     * @param name cache name, must not be null.
     * @param size maximum number of live entries, must be positive.
     */
    public ClockCache( String name, int size )
    {
        if ( name == null )
        {
            throw new IllegalArgumentException( "name cannot be null" );
        }
        if ( size <= 0 )
        {
            throw new IllegalArgumentException( size + " is not > 0" );
        }
        this.name = name;
        this.maxSize = size;
    }
    /**
     * Inserts or updates an entry, marking it recently used, then evicts as needed.
     */
    public void put( K key, V value )
    {
        if ( key == null )
        {
            throw new IllegalArgumentException( "null key not allowed" );
        }
        if ( value == null )
        {
            throw new IllegalArgumentException( "null value not allowed" );
        }
        Page<V> theValue = cache.get( key );
        if ( theValue == null )
        {
            // Fixed check-then-act race: with plain get/put two racing threads could each
            // create a Page, with the loser's Page lingering in the clock queue forever.
            // putIfAbsent guarantees only the winning Page enters the queue.
            Page<V> newPage = new Page<V>();
            Page<V> existing = cache.putIfAbsent( key, newPage );
            if ( existing == null )
            {
                clock.offer( newPage );
                theValue = newPage;
            }
            else
            {
                theValue = existing;
            }
        }
        if ( theValue.value == null )
        {
            currentSize.incrementAndGet();
        }
        theValue.flag = true;
        theValue.value = value;
        checkSize();
    }
    /**
     * Returns the cached value for the key (marking it recently used), or null.
     */
    public V get( K key )
    {
        if ( key == null )
        {
            throw new IllegalArgumentException( "cannot get null key" );
        }
        Page<V> theElement = cache.get( key );
        if ( theElement == null || theElement.value == null )
        {
            return null;
        }
        theElement.flag = true;
        return theElement.value;
    }
    // Evicts until the live-entry count is within bounds again.
    private void checkSize()
    {
        while ( currentSize.get() > maxSize )
        {
            evict();
        }
    }
    // One sweep of the clock hand: flagged pages get a second chance, the first
    // unflagged page with a value is emptied. Pages are always re-offered to the queue.
    private void evict()
    {
        Page<V> theElement = null;
        while ( ( theElement = clock.poll() ) != null )
        {
            try
            {
                if ( theElement.flag )
                {
                    theElement.flag = false;
                }
                else
                {
                    V valueCleaned = theElement.value;
                    elementCleaned( valueCleaned );
                    theElement.value = null;
                    currentSize.decrementAndGet();
                    return;
                }
            }
            finally
            {
                clock.offer( theElement );
            }
        }
    }
    // Eviction hook for subclasses (e.g. to release resources of the evicted value).
    protected void elementCleaned( V element )
    {
        // to be overridden as required
    }
    public synchronized Set<K> keySet()
    {
        return cache.keySet();
    }
    // Snapshot of the live (non-evicted) values.
    public Collection<V> values()
    {
        Set<V> toReturn = new HashSet<V>();
        for ( Page<V> page : cache.values() )
        {
            if ( page.value != null )
            {
                toReturn.add( page.value );
            }
        }
        return toReturn;
    }
    // Snapshot of the live entries; evicted (value == null) pages are skipped.
    public synchronized Set<Map.Entry<K, V>> entrySet()
    {
        Map<K, V> temp = new HashMap<K, V>();
        for ( Map.Entry<K, Page<V>> entry : cache.entrySet() )
        {
            if ( entry.getValue().value != null )
            {
                temp.put( entry.getKey(), entry.getValue().value );
            }
        }
        return temp.entrySet();
    }
    /**
     * Removes an entry, returning its value, or null if absent or already evicted.
     */
    public V remove( K key )
    {
        if ( key == null )
        {
            throw new IllegalArgumentException( "cannot remove null key" );
        }
        Page<V> toRemove = cache.remove( key );
        if ( toRemove == null || toRemove.value == null )
        {
            return null;
        }
        currentSize.decrementAndGet();
        V toReturn = toRemove.value;
        toRemove.value = null;
        toRemove.flag = false;
        return toReturn;
    }
    public String getName()
    {
        return name;
    }
    public void clear()
    {
        cache.clear();
        clock.clear();
        currentSize.set( 0 );
    }
    // Number of live entries (not the size of the internal map, which may hold evicted pages).
    public int size()
    {
        return currentSize.get();
    }
    // See the standalone Page javadoc: value + second-chance flag, both volatile.
    private static class Page<E>
    {
        volatile boolean flag = true;
        volatile E value;
        @Override
        public boolean equals( Object obj )
        {
            if ( obj == null )
            {
                return false;
            }
            if ( !( obj instanceof Page ) )
            {
                return false;
            }
            Page<?> other = (Page) obj;
            if ( value == null )
            {
                return other.value == null;
            }
            return value.equals( other.value );
        }
        @Override
        public int hashCode()
        {
            return value == null ? 0 : value.hashCode();
        }
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_cache_ClockCache.java
|
1,077
|
/**
 * Integration tests verifying that the {@code cache_type} setting selects the matching
 * {@link NodeManager} cache provider, and that an unknown type is rejected.
 */
public class CacheTypesIT extends AbstractNeo4jTestCase
{
    // Builds an impermanent database with the given cache type (null = default).
    private GraphDatabaseAPI newDb( String cacheType )
    {
        return (GraphDatabaseAPI) new TestGraphDatabaseFactory().newImpermanentDatabaseBuilder().setConfig( GraphDatabaseSettings.cache_type.name(), cacheType ).newGraphDatabase();
    }
    @Test
    public void testDefaultCache()
    {
        // No explicit setting should yield the soft-reference cache.
        GraphDatabaseAPI db = newDb( null );
        assertEquals( SoftCacheProvider.NAME, nodeManager( db ).getCacheType().getName() );
        db.shutdown();
    }
    @Test
    public void testWeakRefCache()
    {
        GraphDatabaseAPI db = newDb( WeakCacheProvider.NAME );
        assertEquals( WeakCacheProvider.NAME, nodeManager( db ).getCacheType().getName() );
        db.shutdown();
    }
    @Test
    public void testSoftRefCache()
    {
        GraphDatabaseAPI db = newDb( SoftCacheProvider.NAME );
        assertEquals( SoftCacheProvider.NAME, nodeManager( db ).getCacheType().getName() );
        db.shutdown();
    }
    @Test
    public void testNoCache()
    {
        GraphDatabaseAPI db = newDb( NoCacheProvider.NAME );
        assertEquals( NoCacheProvider.NAME, nodeManager( db ).getCacheType().getName() );
        db.shutdown();
    }
    @Test
    public void testStrongCache()
    {
        GraphDatabaseAPI db = newDb( StrongCacheProvider.NAME );
        assertEquals( StrongCacheProvider.NAME, nodeManager( db ).getCacheType().getName() );
        db.shutdown();
    }
    @Test
    public void testInvalidCache()
    {
        // invalid cache type should fail
        GraphDatabaseAPI db = null;
        try
        {
            db = newDb( "whatever" );
            fail( "Wrong cache type should not be allowed" );
        }
        catch( Exception e )
        {
            // Ok
        }
        finally
        {
            // Fixed resource leak: if creation unexpectedly succeeds, shut the db down
            // instead of leaking it into subsequent tests.
            if ( db != null )
            {
                db.shutdown();
            }
        }
    }
    private NodeManager nodeManager( GraphDatabaseAPI db )
    {
        return db.getDependencyResolver().resolveDependency( NodeManager.class );
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_cache_CacheTypesIT.java
|
1,078
|
/**
 * Service extension point for pluggable cache implementations: each provider is discovered
 * by its service key and can create node and relationship caches.
 */
public abstract class CacheProvider extends Service implements HasSettings
{
    // Cache names passed by implementations when constructing their caches.
    protected static final String NODE_CACHE_NAME = "NodeCache";
    protected static final String RELATIONSHIP_CACHE_NAME = "RelationshipCache";
    private final String name;
    private final String description;
    protected CacheProvider( String key, String description )
    {
        super( key );
        this.name = key;
        this.description = description;
    }
    // Factory methods implemented by each provider.
    public abstract Cache<NodeImpl> newNodeCache( StringLogger logger, Config config, Monitors monitors );
    public abstract Cache<RelationshipImpl> newRelationshipCache( StringLogger logger, Config config,
            Monitors monitors );
    public String getName()
    {
        return name;
    }
    public String getDescription()
    {
        return description;
    }
    // Providers with no configuration return null (no settings class).
    @Override
    public Class getSettingsClass()
    {
        return null;
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_cache_CacheProvider.java
|
1,079
|
/**
 * Back door for transaction application code to invalidate/update the various caches
 * (old NodeManager caches, schema cache, schema state and the kernel persistence cache)
 * without depending on them directly. Pure delegation — no logic of its own.
 */
public class BridgingCacheAccess implements CacheAccessBackDoor
{
    private final NodeManager nodeManager;
    private final SchemaCache schemaCache;
    private final SchemaState schemaState;
    private final PersistenceCache persistenceCache;
    public BridgingCacheAccess( NodeManager nodeManager, SchemaCache schemaCache, SchemaState schemaState,
            PersistenceCache persistenceCache )
    {
        this.nodeManager = nodeManager;
        this.schemaCache = schemaCache;
        this.schemaState = schemaState;
        this.persistenceCache = persistenceCache;
    }
    // Nodes live in two caches; both must be evicted together.
    @Override
    public void removeNodeFromCache( long nodeId )
    {
        nodeManager.removeNodeFromCache( nodeId );
        persistenceCache.evictNode( nodeId );
    }
    @Override
    public void removeRelationshipFromCache( long id )
    {
        nodeManager.removeRelationshipFromCache( id );
    }
    @Override
    public void removeRelationshipTypeFromCache( int id )
    {
        nodeManager.removeRelationshipTypeFromCache( id );
    }
    @Override
    public void removeGraphPropertiesFromCache()
    {
        nodeManager.removeGraphPropertiesFromCache();
    }
    @Override
    public void addSchemaRule( SchemaRule rule )
    {
        schemaCache.addSchemaRule( rule );
    }
    // Schema state is derived from the rules, so it must be cleared when a rule goes away.
    @Override
    public void removeSchemaRuleFromCache( long id )
    {
        schemaCache.removeSchemaRule( id );
        schemaState.clear();
    }
    @Override
    public void addRelationshipTypeToken( Token type )
    {
        nodeManager.addRelationshipTypeToken( type );
    }
    @Override
    public void addLabelToken( Token label )
    {
        nodeManager.addLabelToken( label );
    }
    @Override
    public void addPropertyKeyToken( Token propertyKey )
    {
        nodeManager.addPropertyKeyToken( propertyKey );
    }
    @Override
    public void patchDeletedRelationshipNodes( long relId, long firstNodeId, long firstNodeNextRelId,
            long secondNodeId, long secondNodeNextRelId )
    {
        nodeManager.patchDeletedRelationshipNodes( relId, firstNodeId, firstNodeNextRelId, secondNodeId,
                secondNodeNextRelId );
    }
    @Override
    public void applyLabelUpdates( Collection<NodeLabelUpdate> labelUpdates )
    {
        persistenceCache.apply( labelUpdates );
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_cache_BridgingCacheAccess.java
|
1,080
|
/**
 * Cache decorator that loads missing entities on demand through a {@link Loader},
 * delegating all other operations to the wrapped cache. Deliberately lock-free on load:
 * concurrent loads of the same entity may race, but only one version ends up cached
 * (see the inline note in {@link #get(long)}).
 */
public class AutoLoadingCache<E extends EntityWithSizeObject> implements Cache<E>
{
    private final Cache<E> actual;
    private final Loader<E> loader;
    public interface Loader<E>
    {
        /**
         * Load the entity, or null if no entity exists.
         */
        E loadById( long id );
    }
    public AutoLoadingCache( Cache<E> actual, Loader<E> loader )
    {
        this.loader = loader;
        this.actual = actual;
    }
    @Override
    public String getName()
    {
        return actual.getName();
    }
    @Override
    public E put( E value )
    {
        return actual.put( value );
    }
    @Override
    public E remove( long key )
    {
        return actual.remove( key );
    }
    /**
     * Returns the cached entity, loading (and caching) it on a miss.
     * Returns null only when the loader reports that no such entity exists.
     */
    @Override
    public E get( long key )
    {
        E result = actual.get( key );
        if ( result != null )
        {
            return result;
        }
        /* MP/JS | A note about locking:
         * Previously this block of code below was wrapped in a lock, striped on the key where there were
         * an arbitrary number of stripes. The lock would prevent multiple threads to load the same entity
         * at the same time, or more specifically prevent multiple threads to put two versions of the same entity
         * into the cache at the same time.
         * So without the lock there can be thread T1 loading an entity into an entity object E1 at the same time
         * as thread T2 loading the same entity into another entity object E2. E1 and E2 represent the same entity E.
         * This race would have one of the threads win and have its version put in the cache last,
         * the other overwritten. Consider the similarities of that race with cache eviction coming into play,
         * where T1 loads E1 and puts it in cache and returns it. After that and before T2 comes in
         * and wants that same entity there has been an eviction of E1. T2 would then load E2 and
         * put into cache resulting in two "live" versions of E as well. It would seem that having the lock would
         * reduce the chance for this happening, but not prevent it.
         * There doesn't seem to be any reason as to why having two versions of the same entity object would cause
         * problems (keep in mind that there will only be one version in the cache). Also, the overhead of
         * locking grows with the number of threads/cores to eventually become a bottle neck.
         *
         * Based on that the locking was removed. */
        result = loader.loadById( key );
        if ( result == null )
        {
            return null;
        }
        return actual.put( result );
    }
    // Peeks into the cache without triggering a load on miss.
    public E getIfCached( long key )
    {
        return actual.get( key );
    }
    @Override
    public void clear()
    {
        actual.clear();
    }
    @Override
    public long size()
    {
        return actual.size();
    }
    // Fixed raw type: Cache<E> declares putAll(Collection<E>), so the override
    // should be generic as well instead of the raw Collection it used before.
    @Override
    public void putAll( Collection<E> values )
    {
        actual.putAll( values );
    }
    @Override
    public long hitCount()
    {
        return actual.hitCount();
    }
    @Override
    public long missCount()
    {
        return actual.missCount();
    }
    @Override
    public void updateSize( E entity, int newSize )
    {
        actual.updateSize( entity, newSize );
    }
    @Override
    public void printStatistics()
    {
        actual.printStatistics();
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_cache_AutoLoadingCache.java
|
1,081
|
/**
 * Regression tests: a LazyProperty cached before the Neo data source is restarted must not
 * keep dangling references into the old property store — reads and writes must still work
 * after the restart.
 */
public class TestReferenceDangling
{
    @Rule
    public ImpermanentDatabaseRule dbRule = new ImpermanentDatabaseRule( );
    @Test
    public void testPropertyStoreReferencesOnRead() throws Throwable
    {
        // Given
        GraphDatabaseAPI db = dbRule.getGraphDatabaseAPI();
        // and Given the cache contains a LazyProperty
        long nId = ensurePropertyIsCachedLazyProperty( db, "some" );
        // When
        restartNeoDataSource( db );
        // Then reading the property is still possible
        try( Transaction tx = db.beginTx() )
        {
            db.getNodeById( nId ).getProperty( "some" );
            tx.success();
        }
    }
    @Test
    public void testPropertyStoreReferencesOnWrite() throws Throwable
    {
        // Given
        GraphDatabaseAPI db = dbRule.getGraphDatabaseAPI();
        // and Given the cache contains a LazyProperty
        long nId = ensurePropertyIsCachedLazyProperty( db, "some" );
        // When
        restartNeoDataSource( db );
        // Then it should still be possible to manipulate properties on this node
        try( Transaction tx = db.beginTx() )
        {
            db.getNodeById( nId ).setProperty( "some", new long[]{-1,2,2,3,4,5,5} );
            tx.success();
        }
    }
    // Creates a node with an array property, clears the cache, then reloads the node via
    // hasProperty() so the cache ends up holding a lazily-loaded property value.
    private long ensurePropertyIsCachedLazyProperty( GraphDatabaseAPI slave, String key )
    {
        long nId;
        try( Transaction tx = slave.beginTx() )
        {
            Node n = slave.createNode();
            nId = n.getId();
            n.setProperty( key, new long[]{-1,2,2,3,4,5,5} );
            tx.success();
        }
        slave.getDependencyResolver().resolveDependency( NodeManager.class ).clearCache();
        try( Transaction tx = slave.beginTx() )
        {
            slave.getNodeById( nId ).hasProperty( key );
            tx.success();
        }
        return nId;
    }
    // Stops and starts the Neo store data source, invalidating old store references.
    private void restartNeoDataSource( GraphDatabaseAPI slave ) throws Throwable
    {
        slave.getDependencyResolver().resolveDependency( XaDataSourceManager.class ).getXaDataSource(
                NeoStoreXaDataSource.DEFAULT_DATA_SOURCE_NAME ).stop();
        slave.getDependencyResolver().resolveDependency( XaDataSourceManager.class ).getXaDataSource(
                NeoStoreXaDataSource.DEFAULT_DATA_SOURCE_NAME ).start();
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_api_store_TestReferenceDangling.java
|
1,082
|
/**
 * Unit tests for {@link SchemaCache}: construction from initial rules, add/remove of
 * index rules and uniqueness constraints, and lookup of index ids/descriptors.
 */
public class SchemaCacheTest
{
    // Three index rules; hans and gretel share label 0, witch uses label 3.
    final SchemaRule hans = newIndexRule( 1, 0, 5 );
    final SchemaRule witch = newIndexRule( 2, 3, 6 );
    final SchemaRule gretel = newIndexRule( 3, 0, 7 );
    @Test
    public void should_construct_schema_cache()
    {
        // GIVEN
        Collection<SchemaRule> rules = asList( hans, witch, gretel );
        SchemaCache cache = new SchemaCache( rules );
        // THEN
        assertEquals( asSet( hans, gretel ), asSet( cache.schemaRulesForLabel( 0 ) ) );
        assertEquals( asSet( witch ), asSet( cache.schemaRulesForLabel( 3 ) ) );
        assertEquals( asSet( rules ), asSet( cache.schemaRules() ) );
    }
    @Test
    public void should_add_schema_rules_to_a_label() {
        // GIVEN
        Collection<SchemaRule> rules = asList();
        SchemaCache cache = new SchemaCache( rules );
        // WHEN
        cache.addSchemaRule( hans );
        cache.addSchemaRule( gretel );
        // THEN
        assertEquals( asSet( hans, gretel ), asSet( cache.schemaRulesForLabel( 0 ) ) );
    }
    @Test
    public void should_to_retrieve_all_schema_rules()
    {
        // GIVEN
        Collection<SchemaRule> rules = asList();
        SchemaCache cache = new SchemaCache( rules );
        // WHEN
        cache.addSchemaRule( hans );
        cache.addSchemaRule( gretel );
        // THEN
        assertEquals( asSet( hans, gretel ), asSet( cache.schemaRules() ) );
    }
    @Test
    public void should_list_constraints()
    {
        // GIVEN
        Collection<SchemaRule> rules = asList();
        SchemaCache cache = new SchemaCache( rules );
        // WHEN
        cache.addSchemaRule( uniquenessConstraintRule( 0l, 1, 2, 133l ) );
        cache.addSchemaRule( uniquenessConstraintRule( 1l, 3, 4, 133l ) );
        // THEN
        assertEquals(
                asSet( new UniquenessConstraint( 1, 2 ), new UniquenessConstraint( 3, 4 ) ),
                asSet( cache.constraints() ) );
        assertEquals(
                asSet( new UniquenessConstraint( 1, 2 ) ),
                asSet( cache.constraintsForLabel( 1 ) ) );
        assertEquals(
                asSet( new UniquenessConstraint( 1, 2 ) ),
                asSet( cache.constraintsForLabelAndProperty( 1, 2 ) ) );
        assertEquals(
                asSet( ),
                asSet( cache.constraintsForLabelAndProperty( 1, 3 ) ) );
    }
    @Test
    public void should_remove_constraints()
    {
        // GIVEN
        Collection<SchemaRule> rules = asList();
        SchemaCache cache = new SchemaCache( rules );
        cache.addSchemaRule( uniquenessConstraintRule( 0l, 1, 2, 133l ) );
        cache.addSchemaRule( uniquenessConstraintRule( 1l, 3, 4, 133l ) );
        // WHEN
        cache.removeSchemaRule( 0l );
        // THEN
        assertEquals(
                asSet( new UniquenessConstraint( 3, 4 ) ),
                asSet( cache.constraints() ) );
        assertEquals(
                asSet( ),
                asSet( cache.constraintsForLabel( 1 )) );
        assertEquals(
                asSet( ),
                asSet( cache.constraintsForLabelAndProperty( 1, 2 ) ) );
    }
    @Test
    public void adding_constraints_should_be_idempotent() throws Exception
    {
        // given
        Collection<SchemaRule> rules = asList();
        SchemaCache cache = new SchemaCache( rules );
        cache.addSchemaRule( uniquenessConstraintRule( 0l, 1, 2, 133l ) );
        // when
        cache.addSchemaRule( uniquenessConstraintRule( 0l, 1, 2, 133l ) );
        // then
        assertEquals(
                asList( new UniquenessConstraint( 1, 2 ) ),
                IteratorUtil.asList( cache.constraints() ) );
    }
    @Test
    public void shouldResolveIndexId() throws Exception
    {
        // Given
        Collection<SchemaRule> rules = asList();
        SchemaCache cache = new SchemaCache( rules );
        cache.addSchemaRule( newIndexRule( 1l, 1, 2 ) );
        cache.addSchemaRule( newIndexRule( 2l, 1, 3 ) );
        cache.addSchemaRule( newIndexRule( 3l, 2, 2 ) );
        // When
        long indexId = cache.indexId( new IndexDescriptor( 1, 3 ) );
        // Then
        assertThat(indexId, equalTo(2l));
    }
    @Test
    public void shouldResolveIndexDescriptor() throws Exception
    {
        // Given
        Collection<SchemaRule> rules = asList();
        SchemaCache cache = new SchemaCache( rules );
        cache.addSchemaRule( newIndexRule( 1l, 1, 2 ) );
        cache.addSchemaRule( newIndexRule( 2l, 1, 3 ) );
        cache.addSchemaRule( newIndexRule( 3l, 2, 2 ) );
        // When
        try
        {
            cache.indexDescriptor( 9, 9 );
            fail( "Should have thrown exception saying there's no index descriptor for that label/property" );
        }
        catch ( SchemaRuleNotFoundException e )
        { // Good
        }
        IndexDescriptor descriptor = cache.indexDescriptor( 1, 3 );
        // Then
        assertEquals( 1, descriptor.getLabelId() );
        assertEquals( 3, descriptor.getPropertyKeyId() );
    }
    // Convenience factory for an index rule with the stock provider descriptor.
    private IndexRule newIndexRule( long id, int label, int propertyKey )
    {
        return IndexRule.indexRule( id, label, propertyKey, PROVIDER_DESCRIPTOR );
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_api_store_SchemaCacheTest.java
|
1,083
|
/**
 * Immutable pairing of a committed index's descriptor (label + property key) with the id
 * of the schema rule it was created from.
 */
private static class CommittedIndexDescriptor
{
    private final IndexDescriptor descriptor;
    private final long id;
    public CommittedIndexDescriptor( int labelId, int propertyKey, long id )
    {
        this.id = id;
        this.descriptor = new IndexDescriptor( labelId, propertyKey );
    }
    public IndexDescriptor getDescriptor()
    {
        return descriptor;
    }
    public long getId()
    {
        return id;
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_api_store_SchemaCache.java
|
1,084
|
{
    @Override
    public boolean accept( UniquenessConstraint item )
    {
        // Keep only constraints on exactly this label/property-key pair.
        return item.label() == label && item.propertyKeyId() == property;
    }
}, constraints.iterator() );
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_api_store_SchemaCache.java
|
1,085
|
{
    @Override
    public boolean accept( UniquenessConstraint item )
    {
        // Keep only constraints declared on this label.
        return item.label() == label;
    }
}, constraints.iterator() );
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_api_store_SchemaCache.java
|
1,086
|
{
    @Override
    protected Iterator<SchemaRule> createNestedIterator( Map<Long,SchemaRule> item )
    {
        // Flatten each per-label rule map into a single stream of rules.
        return item.values().iterator();
    }
};
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_api_store_SchemaCache.java
|
1,087
|
public class SchemaCache
{
private final Map<Integer, Map<Long,SchemaRule>> rulesByLabelMap = new HashMap<>();
private final Map<Long, SchemaRule> ruleByIdMap = new HashMap<>();
private final Collection<UniquenessConstraint> constraints = new HashSet<>();
private final Map<Integer, Map<Integer, CommittedIndexDescriptor>> indexDescriptors = new HashMap<>();
public SchemaCache( Iterable<SchemaRule> initialRules )
{
splitUpInitialRules( initialRules );
}
private void splitUpInitialRules( Iterable<SchemaRule> initialRules )
{
for ( SchemaRule rule : initialRules )
{
addSchemaRule( rule );
}
}
private Map<Long,SchemaRule> getOrCreateSchemaRulesMapForLabel( int label )
{
Map<Long,SchemaRule> rulesForLabel = rulesByLabelMap.get( label );
if ( rulesForLabel == null )
{
rulesForLabel = new HashMap<>();
rulesByLabelMap.put( label, rulesForLabel );
}
return rulesForLabel;
}
public Iterable<SchemaRule> schemaRules()
{
return new NestingIterable<SchemaRule, Map<Long,SchemaRule>>( rulesByLabelMap.values() )
{
@Override
protected Iterator<SchemaRule> createNestedIterator( Map<Long,SchemaRule> item )
{
return item.values().iterator();
}
};
}
public Collection<SchemaRule> schemaRulesForLabel( int label )
{
Map<Long,SchemaRule> rulesForLabel = rulesByLabelMap.get( label );
return rulesForLabel != null ? unmodifiableCollection( rulesForLabel.values() ) :
Collections.<SchemaRule>emptyList();
}
public Iterator<UniquenessConstraint> constraints()
{
return constraints.iterator();
}
public Iterator<UniquenessConstraint> constraintsForLabel( final int label )
{
return filter( new Predicate<UniquenessConstraint>()
{
@Override
public boolean accept( UniquenessConstraint item )
{
return item.label() == label;
}
}, constraints.iterator() );
}
public Iterator<UniquenessConstraint> constraintsForLabelAndProperty( final int label, final int property )
{
return filter( new Predicate<UniquenessConstraint>()
{
@Override
public boolean accept( UniquenessConstraint item )
{
return item.label() == label && item.propertyKeyId() == property;
}
}, constraints.iterator() );
}
public void addSchemaRule( SchemaRule rule )
{
getOrCreateSchemaRulesMapForLabel( rule.getLabel() ).put( rule.getId(), rule );
ruleByIdMap.put( rule.getId(), rule );
// Note: If you start adding more unmarshalling of other types of things here,
// make this into a more generic thing rather than adding more branch statement.
if( rule instanceof UniquenessConstraintRule )
{
constraints.add( ruleToConstraint( (UniquenessConstraintRule) rule ) );
}
else if( rule instanceof IndexRule )
{
IndexRule indexRule = (IndexRule) rule;
Map<Integer, CommittedIndexDescriptor> byLabel = indexDescriptors.get( indexRule.getLabel() );
if ( byLabel == null )
{
indexDescriptors.put( indexRule.getLabel(), byLabel = new HashMap<>() );
}
byLabel.put( indexRule.getPropertyKey(), new CommittedIndexDescriptor( indexRule.getLabel(),
indexRule.getPropertyKey(), indexRule.getId() ) );
}
}
// We could have had this class extend IndexDescriptor instead. That way we could have gotten the id
// from an IndexDescriptor instance directly. The problem is that it would only work for index descriptors
// instantiated by a SchemaCache. Perhaps that is always the case. Anyways, doing it like that resulted
// in unit test failures regarding the schema cache, so this way (the wrapping way) is a more generic
// and stable way of doing it.
private static class CommittedIndexDescriptor
{
private final IndexDescriptor descriptor;
private final long id;
public CommittedIndexDescriptor( int labelId, int propertyKey, long id )
{
this.descriptor = new IndexDescriptor( labelId, propertyKey );
this.id = id;
}
public IndexDescriptor getDescriptor()
{
return descriptor;
}
public long getId()
{
return id;
}
}
public void removeSchemaRule( long id )
{
SchemaRule rule = ruleByIdMap.remove( id );
if ( rule == null )
{
return;
}
int labelId = rule.getLabel();
Map<Long, SchemaRule> rules = rulesByLabelMap.get( labelId );
if ( rules.remove( id ) != null && rules.isEmpty() )
{
rulesByLabelMap.remove( labelId );
}
if( rule instanceof UniquenessConstraintRule )
{
constraints.remove( ruleToConstraint( (UniquenessConstraintRule)rule ) );
}
else if( rule instanceof IndexRule )
{
IndexRule indexRule = (IndexRule) rule;
Map<Integer, CommittedIndexDescriptor> byLabel = indexDescriptors.get( indexRule.getLabel() );
byLabel.remove( indexRule.getPropertyKey() );
if ( byLabel.isEmpty() )
{
indexDescriptors.remove( indexRule.getLabel() );
}
}
}
public long indexId( IndexDescriptor index )
{
Map<Integer, CommittedIndexDescriptor> byLabel = indexDescriptors.get( index.getLabelId() );
if ( byLabel != null )
{
CommittedIndexDescriptor committed = byLabel.get( index.getPropertyKeyId() );
if ( committed != null )
{
return committed.getId();
}
}
throw new IllegalStateException( "Couldn't resolve index id for " + index +
" at this point. Schema rule not committed yet?" );
}
private UniquenessConstraint ruleToConstraint( UniquenessConstraintRule constraintRule )
{
return new UniquenessConstraint( constraintRule.getLabel(), constraintRule.getPropertyKey() );
}
/**
 * Looks up the cached index descriptor for the given label/property-key pair.
 *
 * @throws SchemaRuleNotFoundException if no index is cached for the pair.
 */
public IndexDescriptor indexDescriptor( int labelId, int propertyKey )
        throws SchemaRuleNotFoundException
{
    Map<Integer, CommittedIndexDescriptor> forLabel = indexDescriptors.get( labelId );
    CommittedIndexDescriptor committed = forLabel == null ? null : forLabel.get( propertyKey );
    if ( committed == null )
    {
        throw new SchemaRuleNotFoundException( labelId, propertyKey, "No such index found" );
    }
    return committed.getDescriptor();
}
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_api_store_SchemaCache.java
|
1,088
|
public class PersistenceCacheTest
{
    private final long nodeId = 1;
    private final KernelStatement state = mock( KernelStatement.class );
    private AutoLoadingCache<NodeImpl> nodeCache;
    private PersistenceCache persistenceCache;

    @SuppressWarnings( "unchecked" )
    @Before
    public void init()
    {
        nodeCache = mock( AutoLoadingCache.class );
        AutoLoadingCache<RelationshipImpl> relCache = mock( AutoLoadingCache.class );
        persistenceCache = new PersistenceCache( nodeCache, relCache, mock( Thunk.class ) );
    }

    @Test
    public void shouldLoadAndCacheNodeLabels() throws Exception
    {
        // GIVEN a loader that knows the node's labels, and the node present in the cache
        int[] labels = {1, 2, 3};
        @SuppressWarnings( "unchecked" )
        CacheLoader<int[]> loader = mock( CacheLoader.class );
        when( loader.load( nodeId ) ).thenReturn( labels );
        NodeImpl cachedNode = new NodeImpl( nodeId );
        when( nodeCache.get( nodeId ) ).thenReturn( cachedNode );

        // WHEN asking for two different labels on the same node
        boolean hasLabel1 = persistenceCache.nodeHasLabel( state, nodeId, 1, loader );
        boolean hasLabel2 = persistenceCache.nodeHasLabel( state, nodeId, 2, loader );

        // THEN both labels are found, but the loader was only consulted once
        assertTrue( hasLabel1 );
        assertTrue( hasLabel2 );
        verify( loader, times( 1 ) ).load( nodeId );
        verify( nodeCache, times( 2 ) ).get( nodeId );
    }

    @Test
    public void shouldEvictNode() throws Exception
    {
        // WHEN
        persistenceCache.evictNode( nodeId );

        // THEN eviction is delegated straight to the underlying node cache
        verify( nodeCache, times( 1 ) ).remove( nodeId );
    }

    @Test
    public void shouldApplyUpdates() throws Exception
    {
        // GIVEN a node that is currently cached
        NodeImpl cachedNode = mock( NodeImpl.class );
        when( nodeCache.getIfCached( nodeId ) ).thenReturn( cachedNode );

        // WHEN applying a label change for that node
        persistenceCache.apply( asList( labelChanges( nodeId, new long[]{2l}, new long[]{1l} ) ) );

        // THEN the cached node gets its committed labels updated
        verify( cachedNode ).commitLabels( new int[]{1} );
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_api_store_PersistenceCacheTest.java
|
1,089
|
{
    // Callback invoked when a cached entity's size accounting changes;
    // forwards the new size to the relationship cache so memory usage
    // tracking stays accurate.
    @Override
    public void newSize( Primitive entity, int size )
    {
        // Cast assumes this listener is only ever registered for relationship
        // entities — TODO confirm at the registration site.
        relationshipCache.updateSize( (RelationshipImpl) entity, size );
    }
};
|
community_kernel_src_main_java_org_neo4j_kernel_impl_api_store_PersistenceCache.java
|
1,090
|
{
    // Predicate selecting uniqueness constraints that target the given property key.
    @Override
    public boolean accept( UniquenessConstraint item )
    {
        return item.propertyKeyId() == propertyKey;
    }
} );
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_api_state_TxStateImpl.java
|
1,091
|
{
    // Adapter translating diff-set add/remove events for index descriptors into the
    // tx-state visitor's index callbacks, tagging each with whether the index backs
    // a uniqueness constraint.
    @Override
    public void visitAdded( IndexDescriptor element )
    {
        visitor.visitAddedIndex( element, forConstraint );
    }
    @Override
    public void visitRemoved( IndexDescriptor element )
    {
        visitor.visitRemovedIndex( element, forConstraint );
    }
};
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_api_state_TxStateImpl.java
|
1,092
|
{
    // Adapter translating diff-set add/remove events for uniqueness constraints
    // into the tx-state visitor's constraint callbacks.
    @Override
    public void visitAdded( UniquenessConstraint element )
    {
        visitor.visitAddedConstraint( element );
    }
    @Override
    public void visitRemoved( UniquenessConstraint element )
    {
        visitor.visitRemovedConstraint( element );
    }
} );
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_api_state_TxStateImpl.java
|
1,093
|
{
    // Factory creating fresh per-relationship transaction state on demand.
    @Override
    public RelationshipState newState( long id )
    {
        return new RelationshipState( id );
    }
};
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_api_state_TxStateImpl.java
|
1,094
|
/**
 * Immutable holder of per-entity transaction state, identified by the entity's id.
 */
public class EntityState
{
    private final long id;

    public EntityState( long id )
    {
        this.id = id;
    }

    /**
     * @return the id of the entity this state belongs to.
     */
    public long getId()
    {
        return this.id;
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_api_state_EntityState.java
|
1,095
|
{
    @Override
    public Void perform( Statement kernelStatement )
    {
        // NOTE: This drops the index in the inner transaction's state, but it DOES NOT
        // grab a schema write lock. (The original comment said "creates" — copy-paste
        // from the creation counterpart.) It is assumed that the transaction that
        // invoked this "inner" transaction holds a schema write lock, and that it will
        // wait for this inner transaction to do its work.
        // TODO (Ben+Jake): The Transactor is really part of the kernel internals, so it needs access to the
        // internal implementation of Statement. However it is currently used by the external
        // RemoveOrphanConstraintIndexesOnStartup job. This needs revisiting.
        ((KernelStatement) kernelStatement).txState().constraintIndexDoDrop( descriptor );
        return null;
    }
};
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_api_state_ConstraintIndexCreator.java
|
1,096
|
{
    @Override
    public IndexDescriptor perform( Statement kernelStatement )
    {
        // NOTE: This creates the index (obviously) but it DOES NOT grab a schema
        // write lock. It is assumed that the transaction that invoked this "inner" transaction
        // holds a schema write lock, and that it will wait for this inner transaction to do its
        // work.
        IndexDescriptor descriptor = new IndexDescriptor( labelId, propertyKeyId );
        // TODO (Ben+Jake): The Transactor is really part of the kernel internals, so it needs access to the
        // internal implementation of Statement. However it is currently used by the external
        // RemoveOrphanConstraintIndexesOnStartup job. This needs revisiting.
        ((KernelStatement) kernelStatement).txState().constraintIndexRuleDoAdd( descriptor );
        return descriptor;
    }
};
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_api_state_ConstraintIndexCreator.java
|
1,097
|
/**
 * Creates and drops the indexes that back uniqueness constraints, running the schema
 * mutations in nested transactions via a {@link Transactor}.
 */
public class ConstraintIndexCreator
{
    private final Transactor transactor;
    private final IndexingService indexingService;

    public ConstraintIndexCreator( Transactor transactor, IndexingService indexingService )
    {
        this.transactor = transactor;
        this.indexingService = indexingService;
    }

    /**
     * Creates the backing index for a uniqueness constraint, waits for its initial store
     * scan to complete (which verifies uniqueness of existing data), and returns the
     * committed rule id. On any failure the index is dropped again.
     *
     * You MUST hold a schema write lock before you call this method.
     */
    public long createUniquenessConstraintIndex( KernelStatement state, SchemaReadOperations schema,
                                                 int labelId, int propertyKeyId )
            throws ConstraintVerificationFailedKernelException, TransactionalException,
            CreateConstraintFailureException, DropIndexFailureException
    {
        IndexDescriptor descriptor = transactor.execute( createConstraintIndex( labelId, propertyKeyId ) );
        UniquenessConstraint constraint = new UniquenessConstraint( labelId, propertyKeyId );
        boolean success = false;
        try
        {
            long indexId = schema.indexGetCommittedId( state, descriptor, SchemaStorage.IndexRuleKind.CONSTRAINT );
            awaitIndexPopulation( constraint, indexId );
            success = true;
            return indexId;
        }
        catch ( SchemaRuleNotFoundException e )
        {
            // Chain the cause so the failure is diagnosable (the original dropped it).
            throw new IllegalStateException(
                    String.format( "Index (%s) that we just created does not exist.", descriptor ), e );
        }
        catch ( InterruptedException e )
        {
            Thread.currentThread().interrupt(); // restore the interrupt flag for callers
            throw new CreateConstraintFailureException( constraint, e );
        }
        finally
        {
            if ( !success )
            {
                // Best-effort clean-up. NOTE: an exception thrown here will mask the
                // exception that caused the failure in the first place.
                dropUniquenessConstraintIndex( descriptor );
            }
        }
    }

    /**
     * Drops the backing index of a uniqueness constraint.
     *
     * You MUST hold a schema write lock before you call this method.
     */
    public void dropUniquenessConstraintIndex( IndexDescriptor descriptor )
            throws TransactionalException, DropIndexFailureException
    {
        transactor.execute( dropConstraintIndex( descriptor ) );
    }

    // Blocks until the initial store scan of the constraint index has completed,
    // translating population failures into constraint-verification failures.
    private void awaitIndexPopulation( UniquenessConstraint constraint, long indexId )
            throws InterruptedException, ConstraintVerificationFailedKernelException
    {
        try
        {
            indexingService.getProxyForRule( indexId ).awaitStoreScanCompleted();
        }
        catch ( IndexNotFoundKernelException e )
        {
            // Chain the cause so the failure is diagnosable (the original dropped it).
            throw new IllegalStateException(
                    String.format( "Index (indexId=%d) that we just created does not exist.", indexId ), e );
        }
        catch ( IndexPopulationFailedKernelException e )
        {
            Throwable cause = e.getCause();
            if ( cause instanceof IndexEntryConflictException )
            {
                // A duplicate value was found during population: surface it as evidence.
                throw new ConstraintVerificationFailedKernelException( constraint, singleton(
                        new ConstraintVerificationFailedKernelException.Evidence(
                                (IndexEntryConflictException) cause ) ) );
            }
            else
            {
                throw new ConstraintVerificationFailedKernelException( constraint, cause );
            }
        }
    }

    /**
     * Work that records a new constraint index rule in the inner transaction's state.
     * NOTE: This creates the index (obviously) but it DOES NOT grab a schema write lock.
     * It is assumed that the transaction that invoked this "inner" transaction holds a
     * schema write lock, and that it will wait for this inner transaction to do its work.
     */
    public static Transactor.Work<IndexDescriptor, CreateConstraintFailureException> createConstraintIndex(
            final int labelId, final int propertyKeyId )
    {
        return new Transactor.Work<IndexDescriptor, CreateConstraintFailureException>()
        {
            @Override
            public IndexDescriptor perform( Statement kernelStatement )
            {
                IndexDescriptor descriptor = new IndexDescriptor( labelId, propertyKeyId );
                // TODO (Ben+Jake): The Transactor is really part of the kernel internals, so it needs access to
                // the internal implementation of Statement. However it is currently used by the external
                // RemoveOrphanConstraintIndexesOnStartup job. This needs revisiting.
                ((KernelStatement) kernelStatement).txState().constraintIndexRuleDoAdd( descriptor );
                return descriptor;
            }
        };
    }

    /**
     * Work that drops a constraint index from the inner transaction's state.
     * NOTE: same locking caveat as {@link #createConstraintIndex(int, int)} — no schema
     * write lock is taken here; the outer transaction is assumed to hold it.
     */
    private static Transactor.Work<Void, DropIndexFailureException> dropConstraintIndex(
            final IndexDescriptor descriptor )
    {
        return new Transactor.Work<Void, DropIndexFailureException>()
        {
            @Override
            public Void perform( Statement kernelStatement )
            {
                ((KernelStatement) kernelStatement).txState().constraintIndexDoDrop( descriptor );
                return null;
            }
        };
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_api_state_ConstraintIndexCreator.java
|
1,098
|
{
    // Index of the next label to emit.
    int index;

    // Instance initializer: prime the first value so the iterator is ready immediately.
    {
        computeNext();
    }

    @Override
    protected void computeNext()
    {
        // BUG FIX: the original condition was 'index <= labels.length', which reads one
        // element past the end of the array (labels[labels.length] throws
        // ArrayIndexOutOfBoundsException) instead of signalling end of iteration.
        if ( index < labels.length )
        {
            next( labels[index++].id() );
        }
        else
        {
            endReached();
        }
    }
};
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_api_scan_LabelScanStoreProvider.java
|
1,099
|
{
    private final long[] NO_LABELS = new long[0];
    private final NodeStore nodeStore = neoStoreProvider.evaluate().getNodeStore();
    private final long highId = nodeStore.getHighestPossibleIdInUse();
    // Id of the next node record to examine; advances monotonically through the store.
    private long current;

    // Scans node records in id order and emits one NodeLabelUpdate per in-use node
    // that carries at least one label, framed as "no labels -> current labels"
    // (suitable for an initial label-scan-store build). Returns null when the
    // store is exhausted.
    @Override
    protected NodeLabelUpdate fetchNextOrNull()
    {
        while ( current <= highId )
        {
            // forceGetRecord also returns records not in use; filtered below.
            NodeRecord node = nodeStore.forceGetRecord( current++ );
            if ( node.inUse() )
            {
                long[] labels = NodeLabelsField.parseLabelsField( node ).get( nodeStore );
                if ( labels.length > 0 )
                {
                    return NodeLabelUpdate.labelChanges( node.getId(), NO_LABELS, labels );
                }
            }
        }
        return null;
    }
};
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_api_scan_LabelScanStoreProvider.java
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.