Unnamed: 0
int64 0
6.7k
| func
stringlengths 12
89.6k
| target
bool 2
classes | project
stringlengths 45
151
|
|---|---|---|---|
4,200
|
/**
 * Verifies that {@link RecordScanner#run()} feeds every store record to the
 * processor, reports per-entry progress, and closes its collaborators.
 */
public class RecordScannerTest
{
    @Test
    public void shouldProcessRecordsAndUpdateProgress() throws Exception
    {
        // given: a mocked progress sink and a store that yields three records
        ProgressMonitorFactory.MultiPartBuilder builder = mock( ProgressMonitorFactory.MultiPartBuilder.class );
        ProgressListener listener = mock( ProgressListener.class );
        when( builder.progressForPart( anyString(), anyLong() ) ).thenReturn( listener );

        @SuppressWarnings("unchecked")
        BoundedIterable<Integer> store = mock( BoundedIterable.class );
        when( store.iterator() ).thenReturn( asList( 42, 75, 192 ).iterator() );

        @SuppressWarnings("unchecked")
        RecordProcessor<Integer> processor = mock( RecordProcessor.class );

        RecordScanner<Integer> scanner = new RecordScanner<>( store, "our test task", builder, processor );

        // when
        scanner.run();

        // then: every record was processed exactly once ...
        for ( int record : new int[]{42, 75, 192} )
        {
            verify( processor ).process( record );
        }
        // ... both collaborators were closed ...
        verify( processor ).close();
        verify( store ).close();
        // ... and progress was reported for each entry (0-based) before completion
        for ( int entry = 0; entry < 3; entry++ )
        {
            verify( listener ).set( entry );
        }
        verify( listener ).done();
    }
}
| false
|
enterprise_consistency-check_src_test_java_org_neo4j_consistency_checking_full_RecordScannerTest.java
|
4,201
|
/**
 * Runs a {@link RecordProcessor} over every record of a {@link BoundedIterable} store,
 * reporting progress for each processed entry and supporting cooperative cancellation
 * from another thread via {@link #stopScanning()}.
 */
public class RecordScanner<RECORD> implements StoppableRunnable
{
    private final ProgressListener progress;
    private final BoundedIterable<RECORD> store;
    private final RecordProcessor<RECORD> processor;
    // volatile so a stopScanning() call from another thread is visible to the scan loop
    private volatile boolean continueScanning = true;

    /**
     * @param store     source of records to scan; closed when the scan finishes.
     * @param taskName  name used when registering this scan as a part of the multi-part progress.
     * @param builder   progress builder the scan registers its part with, sized by {@code store.maxCount()}.
     * @param processor invoked once per record; closed when the scan finishes.
     */
    public RecordScanner( BoundedIterable<RECORD> store,
                          String taskName,
                          ProgressMonitorFactory.MultiPartBuilder builder,
                          RecordProcessor<RECORD> processor )
    {
        this.store = store;
        this.processor = processor;
        this.progress = builder.progressForPart( taskName, store.maxCount() );
    }

    @Override
    public void run()
    {
        try
        {
            int entryCount = 0;
            for ( RECORD record : store )
            {
                // Cooperative cancellation: checked before each record, so the record
                // currently being iterated is simply not processed once stopped.
                if ( !continueScanning )
                {
                    return;
                }
                processor.process( record );
                // Progress is reported with the 0-based index of the entry just processed
                // (post-increment), so the final reported value is count - 1 before done().
                progress.set( entryCount++ );
            }
        }
        finally
        {
            // Cleanup runs both on normal completion and on early return/exception.
            try
            {
                store.close();
            }
            catch ( IOException e )
            {
                progress.failed( e );
            }
            processor.close();
            // NOTE(review): if processor.close() throws, progress.done() is skipped —
            // confirm whether RecordProcessor.close() is expected to be non-throwing.
            progress.done();
        }
    }

    @Override
    public void stopScanning()
    {
        continueScanning = false;
    }
}
| false
|
enterprise_consistency-check_src_main_java_org_neo4j_consistency_checking_full_RecordScanner.java
|
4,202
|
/**
 * Reads property data for records out of a {@link PropertyStore}.
 */
public class PropertyReader
{
    private final PropertyStore propertyStore;

    public PropertyReader( PropertyStore propertyStore )
    {
        this.propertyStore = propertyStore;
    }

    /**
     * Collects every {@link PropertyBlock} reachable from the given node's
     * property record chain, in chain order.
     */
    public List<PropertyBlock> propertyBlocks( NodeRecord nodeRecord )
    {
        List<PropertyBlock> blocks = new ArrayList<>();
        for ( PropertyRecord propertyRecord : propertyStore.getPropertyRecordChain( nodeRecord.getNextProp() ) )
        {
            blocks.addAll( propertyRecord.getPropertyBlocks() );
        }
        return blocks;
    }

    /**
     * Decodes the value stored in a single property block.
     */
    public DefinedProperty propertyValue( PropertyBlock block )
    {
        return block.getType().readProperty( block.getKeyIndexId(), block, singletonProvider( propertyStore ) );
    }
}
| false
|
enterprise_consistency-check_src_main_java_org_neo4j_consistency_checking_full_PropertyReader.java
|
4,203
|
/**
 * Property owner used when the actual owner is not (yet) known. Acts as its own
 * {@link RecordReference} so that a pending orphan check can be resolved later:
 * either some real owner claims the property chain (see {@link #record(RecordAccess)})
 * or {@link #checkOrphanage()} reports it with a {@code null} reference.
 * The {@code reporter} hand-off is guarded by {@code synchronized} blocks on this
 * instance so that exactly one of skip/check happens.
 */
static class UnknownOwner extends PropertyOwner<PrimitiveRecord> implements RecordReference<PrimitiveRecord>
{
    // Pending check, set by dispatch(); consumed (and nulled) exactly once by
    // either markInCustody() or checkOrphanage().
    private PendingReferenceCheck<PrimitiveRecord> reporter;

    @Override
    RecordReference<PrimitiveRecord> record( RecordAccess records )
    {
        // Getting the record for this owner means that some other owner replaced it
        // that means that it isn't an orphan, so we skip this orphan check
        // and return a record for conflict check that always is ok (by skipping the check)
        this.markInCustody();
        return skipReference();
    }

    @Override
    public void checkOrphanage()
    {
        // Atomically take ownership of the pending reporter (local shadows the field
        // on purpose) so the actual check runs outside the synchronized block.
        PendingReferenceCheck<PrimitiveRecord> reporter;
        synchronized ( this )
        {
            reporter = this.reporter;
            this.reporter = null;
        }
        if ( reporter != null )
        {
            // Still unclaimed: report with null record/reference.
            reporter.checkReference( null, null );
        }
    }

    // Called when a real owner shows up; cancels the pending orphan check, if any.
    synchronized void markInCustody()
    {
        if ( reporter != null )
        {
            reporter.skip();
            reporter = null;
        }
    }

    @Override
    public synchronized void dispatch( PendingReferenceCheck<PrimitiveRecord> reporter )
    {
        this.reporter = reporter;
    }
}
| false
|
enterprise_consistency-check_src_main_java_org_neo4j_consistency_checking_full_PropertyOwner.java
|
4,204
|
/**
 * Property owner backed by a relationship record; resolves back to that
 * relationship (by id) through the given {@link RecordAccess}.
 */
static class OwningRelationship extends PropertyOwner<RelationshipRecord>
{
    // Only the id is retained; the record itself is re-read on demand.
    private final long id;

    OwningRelationship( RelationshipRecord record )
    {
        this.id = record.getId();
    }

    @Override
    RecordReference<RelationshipRecord> record( RecordAccess records )
    {
        return records.relationship( id );
    }
}
| false
|
enterprise_consistency-check_src_main_java_org_neo4j_consistency_checking_full_PropertyOwner.java
|
4,205
|
/**
 * Property owner backed by a node record; resolves back to that node (by id)
 * through the given {@link RecordAccess}.
 */
static class OwningNode extends PropertyOwner<NodeRecord>
{
    // Only the id is retained; the record itself is re-read on demand.
    private final long id;

    OwningNode( NodeRecord record )
    {
        this.id = record.getId();
    }

    @Override
    RecordReference<NodeRecord> record( RecordAccess records )
    {
        return records.node( id );
    }
}
| false
|
enterprise_consistency-check_src_main_java_org_neo4j_consistency_checking_full_PropertyOwner.java
|
4,206
|
/**
 * Comparative check verifying that a referenced node is in use and carries all
 * of a given set of expected label ids. Inline label fields are validated
 * directly; dynamic label chains are validated via a {@link LabelChainWalker}.
 */
public class NodeInUseWithCorrectLabelsCheck
        <RECORD extends AbstractBaseRecord, REPORT extends ConsistencyReport.NodeInUseWithCorrectLabelsReport>
        implements ComparativeRecordChecker<RECORD, NodeRecord, REPORT>
{
    // Label ids the node is expected to have.
    private final long[] expectedLabels;

    public NodeInUseWithCorrectLabelsCheck( long[] expectedLabels )
    {
        this.expectedLabels = expectedLabels;
    }

    @Override
    public void checkReference( RECORD record, NodeRecord nodeRecord,
                                CheckerEngine<RECORD, REPORT> engine, RecordAccess records )
    {
        if ( nodeRecord.inUse() )
        {
            NodeLabels nodeLabels = NodeLabelsField.parseLabelsField( nodeRecord );
            if ( nodeLabels instanceof DynamicNodeLabels )
            {
                // Labels live in a dynamic record chain: walk it asynchronously and
                // validate once the whole chain has been read (onWellFormedChain).
                DynamicNodeLabels dynamicNodeLabels = (DynamicNodeLabels) nodeLabels;
                long firstRecordId = dynamicNodeLabels.getFirstDynamicRecordId();
                RecordReference<DynamicRecord> firstRecordReference = records.nodeLabels( firstRecordId );
                engine.comparativeCheck( firstRecordReference,
                        new LabelChainWalker<RECORD, REPORT>
                                (new ExpectedNodeLabelsChecker( nodeRecord )) );
                // NOTE(review): the call below discards its result and the original
                // comment ("I think this is empty in production") suggests leftover
                // debugging — confirm it has no side effects before removing it.
                nodeRecord.getDynamicLabelRecords(); // I think this is empty in production
            }
            else
            {
                // Inline labels: can be read immediately.
                long[] actualLabels = nodeLabels.get( null );
                REPORT report = engine.report();
                validateLabelIds( nodeRecord, actualLabels, report );
            }
        }
        else
        {
            engine.report().nodeNotInUse( nodeRecord );
        }
    }

    /**
     * Reports every expected label id that is missing from {@code actualLabels}.
     * NOTE(review): sorts {@code actualLabels} in place (required by binarySearch),
     * mutating the caller's array — confirm callers don't rely on its order.
     */
    private void validateLabelIds( NodeRecord nodeRecord, long[] actualLabels, REPORT report )
    {
        sort(actualLabels);
        for ( long expectedLabel : expectedLabels )
        {
            int labelIndex = binarySearch( actualLabels, expectedLabel );
            if (labelIndex < 0)
            {
                report.nodeDoesNotHaveExpectedLabel( nodeRecord, expectedLabel );
            }
        }
    }

    /**
     * Walker callback validating a fully-read dynamic label chain against the
     * expected labels; broken/cyclic chains are reported by other checks.
     */
    private class ExpectedNodeLabelsChecker implements
            LabelChainWalker.Validator<RECORD, REPORT>
    {
        private final NodeRecord nodeRecord;

        public ExpectedNodeLabelsChecker( NodeRecord nodeRecord )
        {
            this.nodeRecord = nodeRecord;
        }

        @Override
        public void onRecordNotInUse( DynamicRecord dynamicRecord, CheckerEngine<RECORD, REPORT> engine )
        {
            // checked elsewhere
        }

        @Override
        public void onRecordChainCycle( DynamicRecord record, CheckerEngine<RECORD, REPORT> engine )
        {
            // checked elsewhere
        }

        @Override
        public void onWellFormedChain( long[] labelIds, CheckerEngine<RECORD, REPORT> engine, RecordAccess records )
        {
            validateLabelIds( nodeRecord, labelIds, engine.report() );
        }
    }
}
| false
|
enterprise_consistency-check_src_main_java_org_neo4j_consistency_checking_full_NodeInUseWithCorrectLabelsCheck.java
|
4,207
|
{
@Override
public PrimitiveLongIterator lookup( Object value )
{
if ( entries.containsKey( value ) )
{
return asPrimitiveIterator( entries.get( value ) );
}
return emptyPrimitiveLongIterator();
}
@Override
public void close()
{
}
};
| false
|
enterprise_consistency-check_src_test_java_org_neo4j_consistency_checking_full_NodeCorrectlyIndexedCheckTest.java
|
4,208
|
/**
 * Test stub of {@link IndexAccessor} backed by a fixed map from property value
 * to the node ids indexed under it. Only {@link #newReader()} and
 * {@link #close()} are functional; every other operation throws
 * {@link UnsupportedOperationException}.
 */
private static class IndexAccessorStub implements IndexAccessor
{
    private final Map<Object, long[]> entries;

    private IndexAccessorStub( Map<Object, long[]> entries )
    {
        this.entries = entries;
    }

    @Override
    public IndexReader newReader()
    {
        return new IndexReader()
        {
            @Override
            public PrimitiveLongIterator lookup( Object value )
            {
                // Return the preconfigured node ids for this value, or an empty
                // iterator when the value was never "indexed".
                if ( entries.containsKey( value ) )
                {
                    return asPrimitiveIterator( entries.get( value ) );
                }
                return emptyPrimitiveLongIterator();
            }

            @Override
            public void close()
            {
                // nothing to release
            }
        };
    }

    @Override
    public void close() throws IOException
    {
        // nothing to release
    }

    @Override
    public void drop() throws IOException
    {
        throw new UnsupportedOperationException();
    }

    @Override
    public IndexUpdater newUpdater( IndexUpdateMode mode )
    {
        throw new UnsupportedOperationException();
    }

    @Override
    public void force() throws IOException
    {
        throw new UnsupportedOperationException();
    }

    @Override
    public BoundedIterable<Long> newAllEntriesReader()
    {
        throw new UnsupportedOperationException();
    }

    @Override
    public ResourceIterator<File> snapshotFiles() throws IOException
    {
        throw new UnsupportedOperationException();
    }
}
| false
|
enterprise_consistency-check_src_test_java_org_neo4j_consistency_checking_full_NodeCorrectlyIndexedCheckTest.java
|
4,209
|
/**
 * Unit tests for {@code NodeCorrectlyIndexedCheck}, driving it with mocked
 * index accessors and property readers.
 */
public class NodeCorrectlyIndexedCheckTest
{
    // Arbitrary ids shared by all scenarios.
    private static final int indexId = 12;
    private static final int labelId = 34;
    private static final int propertyKeyId = 56;
    private static final long constraintId = 78;

    @Test
    public void shouldBeSilentWhenNodesCorrectlyIndexed() throws Exception
    {
        // given: the index maps the node's property value back to the node's own id
        IndexRule indexRule = indexRule( indexId, labelId, propertyKeyId, new Descriptor( "provider1", "version1" ) );
        NodeRecord nodeRecord = nodeWithLabels( labelId );
        NodeCorrectlyIndexedCheck check = new NodeCorrectlyIndexedCheck(
                indexContaining( indexRule, Collections.<Object, long[]>singletonMap(
                        "propertyValue", new long[]{nodeRecord.getId()} ) ),
                nodeHasProperty( nodeRecord, "propertyValue" ) );
        ConsistencyReport.NodeConsistencyReport report = mock( ConsistencyReport.NodeConsistencyReport.class );
        // when
        check.check( nodeRecord, engineFor( report ), null );
        // then: nothing to complain about
        verifyZeroInteractions( report );
    }

    @Test
    public void shouldReportNodeThatIsNotIndexed() throws Exception
    {
        // given: an applicable index that contains no entries at all
        IndexRule indexRule = indexRule( indexId, labelId, propertyKeyId, new Descriptor( "provider1", "version1" ) );
        NodeRecord nodeRecord = nodeWithLabels( labelId );
        NodeCorrectlyIndexedCheck check = new NodeCorrectlyIndexedCheck(
                indexContaining( indexRule, new HashMap<Object, long[]>() ),
                nodeHasProperty( nodeRecord, "propertyValue" ) );
        ConsistencyReport.NodeConsistencyReport report = mock( ConsistencyReport.NodeConsistencyReport.class );
        // when
        check.check( nodeRecord, engineFor( report ), null );
        // then
        verify( report ).notIndexed( indexRule, "propertyValue" );
    }

    @Test
    public void shouldReportDuplicateNode() throws Exception
    {
        // given: a uniqueness-constraint index whose entry maps the value to
        // the node plus two other ("duplicate") node ids
        IndexRule indexRule = constraintIndexRule( indexId, labelId, propertyKeyId,
                new Descriptor( "provider1", "version1" ), constraintId );
        NodeRecord nodeRecord = nodeWithLabels( labelId );
        long duplicateNodeId1 = 1;
        long duplicateNodeId2 = 2;
        NodeCorrectlyIndexedCheck check = new NodeCorrectlyIndexedCheck(
                indexContaining( indexRule, Collections.<Object, long[]>singletonMap(
                        "propertyValue", new long[]{nodeRecord.getId(), duplicateNodeId1, duplicateNodeId2} ) ),
                nodeHasProperty( nodeRecord, "propertyValue" ) );
        ConsistencyReport.NodeConsistencyReport report = mock( ConsistencyReport.NodeConsistencyReport.class );
        // when
        check.check( nodeRecord, engineFor( report ), null );
        // then: each extra id is reported as a uniqueness violation
        verify( report ).uniqueIndexNotUnique( indexRule, "propertyValue", duplicateNodeId1 );
        verify( report ).uniqueIndexNotUnique( indexRule, "propertyValue", duplicateNodeId2 );
    }

    // Builds an IndexAccessors mock that serves the given rule and whose
    // accessor reads from the supplied value -> node-ids map.
    private IndexAccessors indexContaining( IndexRule indexRule, Map<Object, long[]> entries ) throws IOException
    {
        IndexAccessorStub reader = new IndexAccessorStub( entries );
        IndexAccessors indexes = mock( IndexAccessors.class );
        when( indexes.accessorFor( any( IndexRule.class ) ) )
                .thenReturn( reader );
        when (indexes.rules() )
                .thenReturn( asList(indexRule) );
        return indexes;
    }

    // Builds a PropertyReader mock that reports one string property (with this
    // test's property key) on the given node.
    private PropertyReader nodeHasProperty( NodeRecord nodeRecord, String propertyValue )
    {
        PropertyReader propertyReader = mock( PropertyReader.class );
        PropertyBlock propertyBlock = mock( PropertyBlock.class );
        when( propertyBlock.getKeyIndexId() )
                .thenReturn( propertyKeyId );
        when( propertyReader.propertyBlocks( nodeRecord ) )
                .thenReturn( asList( propertyBlock ) );
        when( propertyReader.propertyValue( any( PropertyBlock.class ) ) )
                .thenReturn( stringProperty( propertyKeyId, propertyValue ) );
        return propertyReader;
    }

    // Creates a real NodeRecord (id 0) carrying the given label ids inline.
    private NodeRecord nodeWithLabels( long... labelIds )
    {
        NodeRecord nodeRecord = new NodeRecord( 0, 0, 0 );
        NodeLabelsField.parseLabelsField( nodeRecord ).put( labelIds, null );
        return nodeRecord;
    }

    // Wraps a report mock in a CheckerEngine mock.
    @SuppressWarnings("unchecked")
    private CheckerEngine<NodeRecord, ConsistencyReport.NodeConsistencyReport>
    engineFor( ConsistencyReport.NodeConsistencyReport report )
    {
        CheckerEngine engine = mock( CheckerEngine.class );
        when( engine.report() ).thenReturn( report );
        return engine;
    }

    /**
     * Stub IndexAccessor backed by a fixed map from property value to indexed
     * node ids; only newReader() and close() are functional, everything else
     * throws UnsupportedOperationException.
     */
    private static class IndexAccessorStub implements IndexAccessor
    {
        private final Map<Object, long[]> entries;

        private IndexAccessorStub( Map<Object, long[]> entries )
        {
            this.entries = entries;
        }

        @Override
        public IndexReader newReader()
        {
            return new IndexReader()
            {
                @Override
                public PrimitiveLongIterator lookup( Object value )
                {
                    // Preconfigured ids for this value, or empty when absent.
                    if ( entries.containsKey( value ) )
                    {
                        return asPrimitiveIterator( entries.get( value ) );
                    }
                    return emptyPrimitiveLongIterator();
                }

                @Override
                public void close()
                {
                    // nothing to release
                }
            };
        }

        @Override
        public void close() throws IOException
        {
            // nothing to release
        }

        @Override
        public void drop() throws IOException
        {
            throw new UnsupportedOperationException();
        }

        @Override
        public IndexUpdater newUpdater( IndexUpdateMode mode )
        {
            throw new UnsupportedOperationException();
        }

        @Override
        public void force() throws IOException
        {
            throw new UnsupportedOperationException();
        }

        @Override
        public BoundedIterable<Long> newAllEntriesReader()
        {
            throw new UnsupportedOperationException();
        }

        @Override
        public ResourceIterator<File> snapshotFiles() throws IOException
        {
            throw new UnsupportedOperationException();
        }
    }
}
| false
|
enterprise_consistency-check_src_test_java_org_neo4j_consistency_checking_full_NodeCorrectlyIndexedCheckTest.java
|
4,210
|
{
@Override
protected void transactionData( GraphStoreFixture.TransactionDataBuilder tx,
GraphStoreFixture.IdGenerator next )
{
NodeRecord nodeRecord = new NodeRecord( next.node(), -1, -1 );
DynamicRecord record = inUse( new DynamicRecord( next.nodeLabel() ) );
Collection<DynamicRecord> newRecords = allocateFromNumbers( prependNodeId( nodeRecord.getLongId(),
new long[]{42l} ),
iterator( record ), new PreAllocatedRecords( 60 ) );
nodeRecord.setLabelField( dynamicPointer( newRecords ), newRecords );
tx.create( nodeRecord );
}
} );
| false
|
enterprise_consistency-check_src_test_java_org_neo4j_consistency_checking_full_FullCheckIntegrationTest.java
|
4,211
|
{
@Override
protected void transactionData( GraphStoreFixture.TransactionDataBuilder tx,
GraphStoreFixture.IdGenerator next )
{
NodeRecord nodeRecord = new NodeRecord( next.node(), -1, -1 );
NodeLabelsField.parseLabelsField( nodeRecord ).add( 10, null );
tx.create( nodeRecord );
}
} );
| false
|
enterprise_consistency-check_src_test_java_org_neo4j_consistency_checking_full_FullCheckIntegrationTest.java
|
4,212
|
{
@Override
protected void transactionData( GraphStoreFixture.TransactionDataBuilder tx,
GraphStoreFixture.IdGenerator next )
{
tx.create( new NodeRecord( next.node(), next.relationship(), -1 ) );
}
} );
| false
|
enterprise_consistency-check_src_test_java_org_neo4j_consistency_checking_full_FullCheckIntegrationTest.java
|
4,213
|
{
@Override
protected void transactionData( GraphStoreFixture.TransactionDataBuilder tx,
GraphStoreFixture.IdGenerator next )
{
inconsistentKey.set( next.propertyKey() );
tx.propertyKey( inconsistentKey.get(), "FOO" );
}
} );
| false
|
enterprise_consistency-check_src_test_java_org_neo4j_consistency_checking_full_FullCheckIntegrationTest.java
|
4,214
|
{
@Override
protected void transactionData( GraphStoreFixture.TransactionDataBuilder tx,
GraphStoreFixture.IdGenerator next )
{
inconsistentName.set( next.propertyKey() );
tx.propertyKey( inconsistentName.get(), "FOO" );
}
} );
| false
|
enterprise_consistency-check_src_test_java_org_neo4j_consistency_checking_full_FullCheckIntegrationTest.java
|
4,215
|
{
@Override
protected void transactionData( GraphStoreFixture.TransactionDataBuilder tx,
GraphStoreFixture.IdGenerator next )
{
inconsistentName.set( next.relationshipType() );
tx.relationshipType( inconsistentName.get(), "FOO" );
}
} );
| false
|
enterprise_consistency-check_src_test_java_org_neo4j_consistency_checking_full_FullCheckIntegrationTest.java
|
4,216
|
{
@Override
protected void transactionData( GraphStoreFixture.TransactionDataBuilder tx,
GraphStoreFixture.IdGenerator next )
{
DynamicRecord array = new DynamicRecord( next.arrayProperty() );
array.setInUse( true );
array.setCreated();
array.setType( ARRAY.intValue() );
array.setNextBlock( next.arrayProperty() );
array.setData( UTF8.encode( "hello world" ) );
PropertyBlock block = new PropertyBlock();
block.setSingleBlock( (((long) ARRAY.intValue()) << 24) | (array.getId() << 28) );
block.addValueRecord( array );
PropertyRecord property = new PropertyRecord( next.property() );
property.addPropertyBlock( block );
tx.create( property );
}
} );
| false
|
enterprise_consistency-check_src_test_java_org_neo4j_consistency_checking_full_FullCheckIntegrationTest.java
|
4,217
|
{
@Override
protected void transactionData( GraphStoreFixture.TransactionDataBuilder tx,
GraphStoreFixture.IdGenerator next )
{
NeoStoreRecord record = new NeoStoreRecord();
record.setNextProp( next.property() );
tx.update( record );
// We get exceptions when only the above happens in a transaction...
tx.create( new NodeRecord( next.node(), -1, -1 ) );
}
} );
| false
|
enterprise_consistency-check_src_test_java_org_neo4j_consistency_checking_full_FullCheckIntegrationTest.java
|
4,218
|
{
@Override
protected void transactionData( GraphStoreFixture.TransactionDataBuilder tx,
GraphStoreFixture.IdGenerator next )
{
int ruleId1 = (int) next.schema();
int ruleId2 = (int) next.schema();
int labelId = next.label();
int propertyKeyId = next.propertyKey();
DynamicRecord record1 = new DynamicRecord( ruleId1 );
DynamicRecord record2 = new DynamicRecord( ruleId2 );
DynamicRecord record1Before = record1.clone();
DynamicRecord record2Before = record2.clone();
SchemaIndexProvider.Descriptor providerDescriptor = new SchemaIndexProvider.Descriptor( "lucene", "1.0" );
IndexRule rule1 = IndexRule.constraintIndexRule( ruleId1, labelId, propertyKeyId, providerDescriptor, (long) ruleId2 );
UniquenessConstraintRule rule2 = UniquenessConstraintRule.uniquenessConstraintRule( ruleId2, labelId, propertyKeyId, ruleId2 );
Collection<DynamicRecord> records1 = serializeRule( rule1, record1 );
Collection<DynamicRecord> records2 = serializeRule( rule2, record2 );
assertEquals( asList( record1 ), records1 );
assertEquals( asList( record2 ), records2 );
tx.nodeLabel( labelId, "label" );
tx.propertyKey( propertyKeyId, "property" );
tx.createSchema( asList(record1Before), records1 );
tx.createSchema( asList(record2Before), records2 );
}
} );
| false
|
enterprise_consistency-check_src_test_java_org_neo4j_consistency_checking_full_FullCheckIntegrationTest.java
|
4,219
|
{
@Override
protected void transactionData( GraphStoreFixture.TransactionDataBuilder tx,
GraphStoreFixture.IdGenerator next )
{
int ruleId1 = (int) next.schema();
int ruleId2 = (int) next.schema();
int labelId = next.label();
int propertyKeyId = next.propertyKey();
DynamicRecord record1 = new DynamicRecord( ruleId1 );
DynamicRecord record2 = new DynamicRecord( ruleId2 );
DynamicRecord record1Before = record1.clone();
DynamicRecord record2Before = record2.clone();
SchemaIndexProvider.Descriptor providerDescriptor = new SchemaIndexProvider.Descriptor( "lucene", "1.0" );
IndexRule rule1 = IndexRule.constraintIndexRule( ruleId1, labelId, propertyKeyId, providerDescriptor,
(long) ruleId1 );
IndexRule rule2 = IndexRule.constraintIndexRule( ruleId2, labelId, propertyKeyId, providerDescriptor, (long) ruleId1 );
Collection<DynamicRecord> records1 = serializeRule( rule1, record1 );
Collection<DynamicRecord> records2 = serializeRule( rule2, record2 );
assertEquals( asList( record1 ), records1 );
assertEquals( asList( record2 ), records2 );
tx.nodeLabel( labelId, "label" );
tx.propertyKey( propertyKeyId, "property" );
tx.createSchema( asList(record1Before), records1 );
tx.createSchema( asList(record2Before), records2 );
}
} );
| false
|
enterprise_consistency-check_src_test_java_org_neo4j_consistency_checking_full_FullCheckIntegrationTest.java
|
4,220
|
{
@Override
protected void transactionData( GraphStoreFixture.TransactionDataBuilder tx,
GraphStoreFixture.IdGenerator next )
{
DynamicRecord schema = new DynamicRecord( next.schema() );
DynamicRecord schemaBefore = schema.clone();
schema.setNextBlock( next.schema() ); // Point to a record that isn't in use.
IndexRule rule = IndexRule.indexRule( 1, 1, 1,
new SchemaIndexProvider.Descriptor( "lucene", "1.0" ) );
schema.setData( new RecordSerializer().append( rule ).serialize() );
tx.createSchema( asList( schemaBefore ), asList( schema ) );
}
} );
| false
|
enterprise_consistency-check_src_test_java_org_neo4j_consistency_checking_full_FullCheckIntegrationTest.java
|
4,221
|
{
@Override
protected void transactionData( GraphStoreFixture.TransactionDataBuilder tx,
GraphStoreFixture.IdGenerator next )
{
DynamicRecord string = new DynamicRecord( next.stringProperty() );
string.setInUse( true );
string.setCreated();
string.setType( PropertyType.STRING.intValue() );
string.setNextBlock( next.stringProperty() );
string.setData( UTF8.encode( "hello world" ) );
PropertyBlock block = new PropertyBlock();
block.setSingleBlock( (((long) PropertyType.STRING.intValue()) << 24) | (string.getId() << 28) );
block.addValueRecord( string );
PropertyRecord property = new PropertyRecord( next.property() );
property.addPropertyBlock( block );
tx.create( property );
}
} );
| false
|
enterprise_consistency-check_src_test_java_org_neo4j_consistency_checking_full_FullCheckIntegrationTest.java
|
4,222
|
{
@Override
protected void transactionData( GraphStoreFixture.TransactionDataBuilder tx,
GraphStoreFixture.IdGenerator next )
{
PropertyRecord property = new PropertyRecord( next.property() );
property.setPrevProp( next.property() );
PropertyBlock block = new PropertyBlock();
block.setSingleBlock( 1 | (((long) PropertyType.INT.intValue()) << 24) | (666 << 28) );
property.addPropertyBlock( block );
tx.create( property );
}
} );
| false
|
enterprise_consistency-check_src_test_java_org_neo4j_consistency_checking_full_FullCheckIntegrationTest.java
|
4,223
|
{
@Override
protected void transactionData( GraphStoreFixture.TransactionDataBuilder tx,
GraphStoreFixture.IdGenerator next )
{
tx.create( new RelationshipRecord( next.relationship(), 1, 2, 0 ) );
}
} );
| false
|
enterprise_consistency-check_src_test_java_org_neo4j_consistency_checking_full_FullCheckIntegrationTest.java
|
4,224
|
{
@Override
protected void transactionData( GraphStoreFixture.TransactionDataBuilder tx,
GraphStoreFixture.IdGenerator next )
{
tx.nodeLabel( 42, "Label" );
NodeRecord nodeRecord = new NodeRecord( next.node(), -1, -1 );
DynamicRecord record = inUse( new DynamicRecord( next.nodeLabel() ) );
Collection<DynamicRecord> newRecords = allocateFromNumbers( prependNodeId( next.node(), new long[]{42l} ),
iterator( record ), new PreAllocatedRecords( 60 ) );
nodeRecord.setLabelField( dynamicPointer( newRecords ), newRecords );
tx.create( nodeRecord );
}
} );
| false
|
enterprise_consistency-check_src_test_java_org_neo4j_consistency_checking_full_FullCheckIntegrationTest.java
|
4,225
|
{
@Override
protected void transactionData( GraphStoreFixture.TransactionDataBuilder tx,
GraphStoreFixture.IdGenerator next )
{
tx.nodeLabel( 42, "Label" );
NodeRecord nodeRecord = new NodeRecord( next.node(), -1, -1 );
DynamicRecord record = inUse( new DynamicRecord( next.nodeLabel() ) );
Collection<DynamicRecord> newRecords = allocateFromNumbers(
prependNodeId( nodeRecord.getLongId(), new long[]{42l, 42l} ),
iterator( record ), new PreAllocatedRecords( 60 ) );
nodeRecord.setLabelField( dynamicPointer( newRecords ), newRecords );
tx.create( nodeRecord );
}
} );
| false
|
enterprise_consistency-check_src_test_java_org_neo4j_consistency_checking_full_FullCheckIntegrationTest.java
|
4,226
|
{
@Override
protected void transactionData( GraphStoreFixture.TransactionDataBuilder tx,
GraphStoreFixture.IdGenerator next )
{
NodeRecord nodeRecord = new NodeRecord( next.node(), -1, -1 );
DynamicRecord record1 = inUse( new DynamicRecord( next.nodeLabel() ) );
DynamicRecord record2 = inUse( new DynamicRecord( next.nodeLabel() ) );
DynamicRecord record3 = inUse( new DynamicRecord( next.nodeLabel() ) );
labels[0] = nodeRecord.getLongId(); // the first id should not be a label id, but the id of the node
PreAllocatedRecords allocator = new PreAllocatedRecords( 60 );
chain.addAll( allocateFromNumbers(
labels, iterator( record1, record2, record3 ), allocator ) );
nodeRecord.setLabelField( dynamicPointer( chain ), chain );
tx.create( nodeRecord );
}
} );
| false
|
enterprise_consistency-check_src_test_java_org_neo4j_consistency_checking_full_FullCheckIntegrationTest.java
|
4,227
|
{ // Neo4j can create no more than one label per transaction...
@Override
protected void transactionData( GraphStoreFixture.TransactionDataBuilder tx,
GraphStoreFixture.IdGenerator next )
{
Integer label = next.label();
tx.nodeLabel( (int) (labels[offset] = label), "label:" + offset );
createdLabels.add( label );
}
} );
| false
|
enterprise_consistency-check_src_test_java_org_neo4j_consistency_checking_full_FullCheckIntegrationTest.java
|
4,228
|
{
@Override
protected void generateInitialData( GraphDatabaseService graphDb )
{
try ( org.neo4j.graphdb.Transaction tx = graphDb.beginTx())
{
graphDb.schema().indexFor( label("label3") ).on( "key" ).create();
graphDb.schema().constraintFor( label( "label4" ) ).assertPropertyIsUnique( "key" ).create();
tx.success();
}
try ( org.neo4j.graphdb.Transaction tx = graphDb.beginTx())
{
Node node1 = set( graphDb.createNode( label( "label1" ) ) );
Node node2 = set( graphDb.createNode( label( "label2" ) ), property( "key", "value" ) );
node1.createRelationshipTo( node2, withName( "C" ) );
indexedNodes.add( set( graphDb.createNode( label( "label3" ) ), property( "key", "value" ) ).getId() );
set( graphDb.createNode( label( "label4" ) ), property( "key", "value" ) );
tx.success();
}
}
};
| false
|
enterprise_consistency-check_src_test_java_org_neo4j_consistency_checking_full_FullCheckIntegrationTest.java
|
4,229
|
{
@Override
protected void transactionData( GraphStoreFixture.TransactionDataBuilder tx,
GraphStoreFixture.IdGenerator next )
{
DynamicRecord record1 = inUse( new DynamicRecord( chain.get( 0 ).getId() ) );
DynamicRecord record2 = notInUse( new DynamicRecord( chain.get( 1 ).getId() ) );
long[] data = (long[]) getRightArray( readFullByteArrayFromHeavyRecords( chain, ARRAY ) );
PreAllocatedRecords allocator = new PreAllocatedRecords( 60 );
allocateFromNumbers( Arrays.copyOf( data, 11 ), iterator( record1 ), allocator );
NodeRecord before = inUse( new NodeRecord( data[0], -1, -1 ) );
NodeRecord after = inUse( new NodeRecord( data[0], -1, -1 ) );
before.setLabelField( dynamicPointer( asList( record1 ) ), chain );
after.setLabelField( dynamicPointer( asList( record1 ) ), asList( record1, record2 ) );
tx.update( before, after );
}
} );
| false
|
enterprise_consistency-check_src_test_java_org_neo4j_consistency_checking_full_FullCheckIntegrationTest.java
|
4,230
|
{
@Override
protected void transactionData( GraphStoreFixture.TransactionDataBuilder tx,
GraphStoreFixture.IdGenerator next )
{
NodeRecord node = new NodeRecord( 42, -1, -1 );
node.setInUse( true );
List<DynamicRecord> dynamicRecords;
dynamicRecords = pair.first();
labels.addAll( pair.other() );
node.setLabelField( dynamicPointer( dynamicRecords ), dynamicRecords );
tx.create( node );
}
} );
| false
|
enterprise_consistency-check_src_test_java_org_neo4j_consistency_checking_full_FullCheckIntegrationTest.java
|
4,231
|
{
@Override
protected void transactionData( GraphStoreFixture.TransactionDataBuilder tx,
GraphStoreFixture.IdGenerator next )
{
NodeRecord node = new NodeRecord( 42, -1, -1 );
node.setInUse( true );
node.setLabelField( inlinedLabelsLongRepresentation( 1, 2 ), Collections.<DynamicRecord>emptySet() );
tx.create( node );
}
} );
| false
|
enterprise_consistency-check_src_test_java_org_neo4j_consistency_checking_full_FullCheckIntegrationTest.java
|
4,232
|
ARRAYS
{
@Override
RecordStore getRecordStore( StoreAccess storeAccess )
{
return storeAccess.getNodeStore();
}
};
| false
|
enterprise_consistency-check_src_main_java_org_neo4j_consistency_checking_full_MultiPassStore.java
|
4,233
|
/**
 * Checks that a node is correctly represented in every index that applies to it:
 * for each index rule whose label the node carries and whose property key the
 * node has a value for, the node's id must be among the index's results for
 * that value. For constraint (uniqueness) indexes, any other node id returned
 * for the same value is reported as a uniqueness violation.
 */
public class NodeCorrectlyIndexedCheck implements RecordCheck<NodeRecord, ConsistencyReport.NodeConsistencyReport>
{
    private final IndexAccessors indexes;
    private final PropertyReader propertyReader;

    public NodeCorrectlyIndexedCheck( IndexAccessors indexes,
                                      PropertyReader propertyReader )
    {
        this.indexes = indexes;
        this.propertyReader = propertyReader;
    }

    @Override
    public void check( NodeRecord record,
                       CheckerEngine<NodeRecord, ConsistencyReport.NodeConsistencyReport> engine,
                       RecordAccess records )
    {
        Set<Long> labels = NodeLabelReader.getListOfLabels( record, records, engine );
        for ( IndexRule indexRule : indexes.rules() )
        {
            // Skip indexes for labels this node doesn't have.
            if ( !labels.contains( (long) indexRule.getLabel() ) )
            {
                continue;
            }
            // Skip indexes whose property key the node has no value for.
            List<PropertyBlock> properties = propertyReader.propertyBlocks( record );
            PropertyBlock property = propertyWithKey( properties, indexRule.getPropertyKey() );
            if ( property == null )
            {
                continue;
            }
            // Reader is per-lookup and closed via try-with-resources.
            try ( IndexReader reader = indexes.accessorFor( indexRule ).newReader() )
            {
                Object propertyValue = propertyReader.propertyValue( property ).value();
                PrimitiveLongIterator indexedNodeIds = reader.lookup( propertyValue );
                verifyNodeCorrectlyIndexed( record, engine, indexRule, propertyValue, indexedNodeIds );
            }
        }
    }

    /**
     * Scans the index results for this value: reports notIndexed when the node's
     * own id never shows up, and (for constraint indexes) uniqueIndexNotUnique
     * for every foreign id returned.
     */
    private void verifyNodeCorrectlyIndexed(
            NodeRecord record,
            CheckerEngine<NodeRecord, ConsistencyReport.NodeConsistencyReport> engine,
            IndexRule indexRule,
            Object propertyValue,
            PrimitiveLongIterator indexedNodeIds )
    {
        boolean matched = false;
        while ( indexedNodeIds.hasNext() )
        {
            long nodeId = indexedNodeIds.next();
            if ( nodeId == record.getId() )
            {
                matched = true;
            }
            else
            {
                if ( indexRule.isConstraintIndex() )
                {
                    engine.report().uniqueIndexNotUnique( indexRule, propertyValue, nodeId );
                }
            }
        }
        if ( !matched )
        {
            engine.report().notIndexed( indexRule, propertyValue );
        }
    }

    /**
     * Returns the first block with the given property key, or null when the
     * node has no value for that key.
     */
    private PropertyBlock propertyWithKey( List<PropertyBlock> propertyBlocks, int propertyKey )
    {
        for ( PropertyBlock propertyBlock : propertyBlocks )
        {
            if ( propertyBlock.getKeyIndexId() == propertyKey )
            {
                return propertyBlock;
            }
        }
        return null;
    }

    @Override
    public void checkChange( NodeRecord oldRecord, NodeRecord newRecord,
                             CheckerEngine<NodeRecord, ConsistencyReport.NodeConsistencyReport> engine,
                             DiffRecordAccess records )
    {
        // Incremental check delegates to the full check of the new state.
        check( newRecord, engine, records );
    }
}
| false
|
enterprise_consistency-check_src_main_java_org_neo4j_consistency_checking_full_NodeCorrectlyIndexedCheck.java
|
4,234
|
/**
 * MultiPassStoreTest variant exercising {@link MultiPassStore#STRINGS}:
 * the record under test is a dynamic string record.
 */
@RunWith(JUnit4.class)
public static class Strings extends MultiPassStoreTest
{
    @Override
    protected MultiPassStore multiPassStore()
    {
        return MultiPassStore.STRINGS;
    }

    @Override
    protected RecordReference<DynamicRecord> record( DiffRecordAccess filter, long id )
    {
        return filter.string( id );
    }

    // Looks up the same id through every other store type.
    // NOTE(review): unlike record(), this method carries no @Override — confirm
    // whether the superclass declares otherRecords and the annotation was omitted.
    protected void otherRecords( DiffRecordAccess filter, long id )
    {
        filter.node( id );
        filter.relationship( id );
        filter.property( id );
        filter.array( id );
    }
}
| false
|
enterprise_consistency-check_src_test_java_org_neo4j_consistency_checking_full_MultiPassStoreTest.java
|
4,235
|
/**
 * MultiPassStoreTest variant exercising {@link MultiPassStore#RELATIONSHIPS}:
 * the record under test is a relationship record.
 */
@RunWith(JUnit4.class)
public static class Relationships extends MultiPassStoreTest
{
    @Override
    protected MultiPassStore multiPassStore()
    {
        return MultiPassStore.RELATIONSHIPS;
    }

    @Override
    protected RecordReference<RelationshipRecord> record( DiffRecordAccess filter, long id )
    {
        return filter.relationship( id );
    }

    // Looks up the same id through every other store type.
    // NOTE(review): no @Override here, unlike record() — confirm whether the
    // superclass declares otherRecords and the annotation was omitted.
    protected void otherRecords( DiffRecordAccess filter, long id )
    {
        filter.node( id );
        filter.property( id );
        filter.string( id );
        filter.array( id );
    }
}
| false
|
enterprise_consistency-check_src_test_java_org_neo4j_consistency_checking_full_MultiPassStoreTest.java
|
4,236
|
/** Multi-pass filtering test specialized for the property store. */
@RunWith(JUnit4.class)
public static class Properties extends MultiPassStoreTest
{
    @Override
    protected MultiPassStore multiPassStore()
    {
        return MultiPassStore.PROPERTIES;
    }

    @Override
    protected RecordReference<PropertyRecord> record( DiffRecordAccess filter, long id )
    {
        return filter.property( id );
    }

    @Override // was missing: this overrides the abstract hook in MultiPassStoreTest
    protected void otherRecords( DiffRecordAccess filter, long id )
    {
        filter.node( id );
        filter.relationship( id );
        filter.string( id );
        filter.array( id );
    }
}
| false
|
enterprise_consistency-check_src_test_java_org_neo4j_consistency_checking_full_MultiPassStoreTest.java
|
4,237
|
/** Multi-pass filtering test specialized for the node store. */
@RunWith(JUnit4.class)
public static class Nodes extends MultiPassStoreTest
{
    @Override
    protected MultiPassStore multiPassStore()
    {
        return MultiPassStore.NODES;
    }

    @Override
    protected RecordReference<NodeRecord> record( DiffRecordAccess filter, long id )
    {
        return filter.node( id );
    }

    @Override // was missing: this overrides the abstract hook in MultiPassStoreTest
    protected void otherRecords( DiffRecordAccess filter, long id )
    {
        filter.relationship( id );
        filter.property( id );
        filter.string( id );
        filter.array( id );
    }
}
| false
|
enterprise_consistency-check_src_test_java_org_neo4j_consistency_checking_full_MultiPassStoreTest.java
|
4,238
|
/** Multi-pass filtering test specialized for the dynamic array store. */
@RunWith(JUnit4.class)
public static class Arrays extends MultiPassStoreTest
{
    @Override
    protected MultiPassStore multiPassStore()
    {
        return MultiPassStore.ARRAYS;
    }

    @Override
    protected RecordReference<DynamicRecord> record( DiffRecordAccess filter, long id )
    {
        return filter.array( id );
    }

    @Override // was missing: this overrides the abstract hook in MultiPassStoreTest
    protected void otherRecords( DiffRecordAccess filter, long id )
    {
        filter.node( id );
        filter.relationship( id );
        filter.property( id );
        filter.string( id );
    }
}
| false
|
enterprise_consistency-check_src_test_java_org_neo4j_consistency_checking_full_MultiPassStoreTest.java
|
4,239
|
/**
 * Verifies that {@link MultiPassStore} slices a store's id space into per-pass
 * filters sized by available memory, and that each filter only resolves records
 * of its own store type inside its id window — every other lookup is skipped.
 */
@RunWith(Suite.class)
@Suite.SuiteClasses({
        MultiPassStoreTest.Nodes.class,
        MultiPassStoreTest.Relationships.class,
        MultiPassStoreTest.Properties.class,
        MultiPassStoreTest.Strings.class,
        MultiPassStoreTest.Arrays.class
})
public abstract class MultiPassStoreTest
{
    @Test
    public void createsListOfFiltersWhichEachSkipRecordsOutsideOfARangeOfMappableIds() throws Exception
    {
        // given: high id 1000 and 9-byte records with 900 bytes per pass
        // => 100 records per pass => 11 passes to cover ids 0..1000
        StoreAccess storeAccess = storeAccess( 1000L, 9 );
        DiffRecordAccess recordAccess = mock( DiffRecordAccess.class );
        long memoryPerPass = 900L;
        // when
        List<DiffRecordAccess> filters = multiPassStore().multiPassFilters(
                memoryPerPass, storeAccess, recordAccess, MultiPassStore.values() );
        // then
        assertEquals( 11, filters.size() );
        assertFinds( record( filters.get( 0 ), 0 ) );
        assertFinds( record( filters.get( 0 ), 99 ) );
        assertFinds( record( filters.get( 1 ), 100 ) );
        assertFinds( record( filters.get( 1 ), 199 ) );
        assertFinds( record( filters.get( 2 ), 200 ) );
        assertFinds( record( filters.get( 2 ), 299 ) );
        assertFinds( record( filters.get( 10 ), 1000 ) );
        assertSkips( record( filters.get( 1 ), 0 ) );
        assertSkips( record( filters.get( 1 ), 99 ) );
        assertSkips( record( filters.get( 2 ), 100 ) );
        assertSkips( record( filters.get( 2 ), 199 ) );
        assertSkips( record( filters.get( 0 ), 100 ) );
        assertSkips( record( filters.get( 0 ), 199 ) );
        assertSkips( record( filters.get( 1 ), 200 ) );
        assertSkips( record( filters.get( 1 ), 299 ) );
    }

    @Test
    public void shouldSkipOtherKindsOfRecords() throws Exception
    {
        // given
        StoreAccess storeAccess = storeAccess( 1000L, 9 );
        DiffRecordAccess recordAccess = mock( DiffRecordAccess.class );
        long memoryPerPass = 900L;
        // when
        List<DiffRecordAccess> filters = multiPassStore().multiPassFilters(
                memoryPerPass, storeAccess, recordAccess, MultiPassStore.values() );
        // then: lookups for other record types never reach the delegate
        for ( DiffRecordAccess filter : filters )
        {
            for ( long id : new long[] {0, 100, 200, 300, 400, 500, 600, 700, 800, 900} )
            {
                otherRecords( filter, id );
            }
        }
        verifyZeroInteractions( recordAccess );
    }

    private static <RECORD extends AbstractBaseRecord> void assertSkips( RecordReference<RECORD> recordReference )
    {
        assertSame( skipReference(), recordReference );
    }

    private static <RECORD extends AbstractBaseRecord> void assertFinds( RecordReference<RECORD> recordReference )
    {
        assertNotSame( skipReference(), recordReference );
    }

    @SuppressWarnings("unchecked")
    private StoreAccess storeAccess( long highId, int recordSize )
    {
        StoreAccess storeAccess = mock( StoreAccess.class );
        RecordStore recordStore = mock( RecordStore.class );
        when( multiPassStore().getRecordStore( storeAccess ) ).thenReturn( recordStore );
        when( recordStore.getHighId() ).thenReturn( highId );
        when( recordStore.getRecordSize() ).thenReturn( recordSize );
        return storeAccess;
    }

    /** Store type under test. */
    protected abstract MultiPassStore multiPassStore();

    /** Looks up a record of the tested store type through the given filter. */
    protected abstract RecordReference<? extends AbstractBaseRecord> record( DiffRecordAccess filter, long id );

    /** Performs lookups of every record type except the one under test. */
    protected abstract void otherRecords( DiffRecordAccess filter, long id );

    @RunWith(JUnit4.class)
    public static class Nodes extends MultiPassStoreTest
    {
        @Override
        protected MultiPassStore multiPassStore()
        {
            return MultiPassStore.NODES;
        }

        @Override
        protected RecordReference<NodeRecord> record( DiffRecordAccess filter, long id )
        {
            return filter.node( id );
        }

        @Override // was missing
        protected void otherRecords( DiffRecordAccess filter, long id )
        {
            filter.relationship( id );
            filter.property( id );
            filter.string( id );
            filter.array( id );
        }
    }

    @RunWith(JUnit4.class)
    public static class Relationships extends MultiPassStoreTest
    {
        @Override
        protected MultiPassStore multiPassStore()
        {
            return MultiPassStore.RELATIONSHIPS;
        }

        @Override
        protected RecordReference<RelationshipRecord> record( DiffRecordAccess filter, long id )
        {
            return filter.relationship( id );
        }

        @Override // was missing
        protected void otherRecords( DiffRecordAccess filter, long id )
        {
            filter.node( id );
            filter.property( id );
            filter.string( id );
            filter.array( id );
        }
    }

    @RunWith(JUnit4.class)
    public static class Properties extends MultiPassStoreTest
    {
        @Override
        protected MultiPassStore multiPassStore()
        {
            return MultiPassStore.PROPERTIES;
        }

        @Override
        protected RecordReference<PropertyRecord> record( DiffRecordAccess filter, long id )
        {
            return filter.property( id );
        }

        @Override // was missing
        protected void otherRecords( DiffRecordAccess filter, long id )
        {
            filter.node( id );
            filter.relationship( id );
            filter.string( id );
            filter.array( id );
        }
    }

    @RunWith(JUnit4.class)
    public static class Strings extends MultiPassStoreTest
    {
        @Override
        protected MultiPassStore multiPassStore()
        {
            return MultiPassStore.STRINGS;
        }

        @Override
        protected RecordReference<DynamicRecord> record( DiffRecordAccess filter, long id )
        {
            return filter.string( id );
        }

        @Override // was missing
        protected void otherRecords( DiffRecordAccess filter, long id )
        {
            filter.node( id );
            filter.relationship( id );
            filter.property( id );
            filter.array( id );
        }
    }

    @RunWith(JUnit4.class)
    public static class Arrays extends MultiPassStoreTest
    {
        @Override
        protected MultiPassStore multiPassStore()
        {
            return MultiPassStore.ARRAYS;
        }

        @Override
        protected RecordReference<DynamicRecord> record( DiffRecordAccess filter, long id )
        {
            return filter.array( id );
        }

        @Override // was missing
        protected void otherRecords( DiffRecordAccess filter, long id )
        {
            filter.node( id );
            filter.relationship( id );
            filter.property( id );
            filter.string( id );
        }
    }
}
| false
|
enterprise_consistency-check_src_test_java_org_neo4j_consistency_checking_full_MultiPassStoreTest.java
|
4,240
|
/**
 * Builds the set of {@link ConsistencyReporter}s / {@link StoreProcessor}s used
 * for a full check: either a single reporter over the raw record access, or one
 * reporter per memory-bounded pass when running in MULTI_PASS order.
 */
static class Factory
{
    private final CheckDecorator decorator;
    private final DiffRecordAccess recordAccess;
    private final long totalMappedMemory;
    private final StoreAccess storeAccess;
    private final InconsistencyReport report;

    Factory( CheckDecorator decorator, long totalMappedMemory,
             StoreAccess storeAccess, DiffRecordAccess recordAccess, InconsistencyReport report )
    {
        this.decorator = decorator;
        this.totalMappedMemory = totalMappedMemory;
        this.storeAccess = storeAccess;
        this.recordAccess = recordAccess;
        this.report = report;
    }

    ConsistencyReporter[] reporters( TaskExecutionOrder order, MultiPassStore... stores )
    {
        if ( order == TaskExecutionOrder.MULTI_PASS )
        {
            return reporters( stores );
        }
        else
        {
            // Single-pass: one reporter over the unfiltered record access.
            return new ConsistencyReporter[]{new ConsistencyReporter( recordAccess, report )};
        }
    }

    ConsistencyReporter[] reporters( MultiPassStore... stores )
    {
        List<ConsistencyReporter> result = new ArrayList<>();
        for ( MultiPassStore store : stores )
        {
            // One filter per pass; each filter confines lookups to one id window.
            List<DiffRecordAccess> filters = store.multiPassFilters( totalMappedMemory, storeAccess,
                    recordAccess, stores );
            for ( DiffRecordAccess filter : filters )
            {
                result.add( new ConsistencyReporter( filter, report ) );
            }
        }
        // Zero-length array: lets the JVM allocate the exact-size result.
        return result.toArray( new ConsistencyReporter[0] );
    }

    StoreProcessor[] processors( MultiPassStore... stores )
    {
        List<StoreProcessor> result = new ArrayList<>();
        for ( ConsistencyReporter reporter : reporters( stores ) )
        {
            result.add( new StoreProcessor( decorator, reporter ) );
        }
        return result.toArray( new StoreProcessor[0] );
    }
}
| false
|
enterprise_consistency-check_src_main_java_org_neo4j_consistency_checking_full_MultiPassStore.java
|
4,241
|
STRINGS
{
    @Override
    RecordStore getRecordStore( StoreAccess storeAccess )
    {
        // Bug fix: previously returned the node store; the STRINGS pass must
        // iterate over the dynamic string store.
        return storeAccess.getStringStore();
    }
},
| false
|
enterprise_consistency-check_src_main_java_org_neo4j_consistency_checking_full_MultiPassStore.java
|
4,242
|
{
    @Override
    protected void transactionData( GraphStoreFixture.TransactionDataBuilder tx,
                                    GraphStoreFixture.IdGenerator next )
    {
        // The owning node id is stored as the first element of the label array
        // held in this dynamic-record chain.
        long nodeId = ((long[]) getRightArray( readFullByteArrayFromHeavyRecords( chain, ARRAY ) ))[0];
        NodeRecord before = inUse( new NodeRecord( nodeId, -1, -1 ) );
        NodeRecord after = inUse( new NodeRecord( nodeId, -1, -1 ) );
        DynamicRecord record1 = chain.get( 0 ).clone();
        DynamicRecord record2 = chain.get( 1 ).clone();
        DynamicRecord record3 = chain.get( 2 ).clone();
        // Point the last record back at the middle one — presumably to create a
        // cyclic dynamic-label chain the consistency checker should flag.
        // NOTE(review): assumes record2 still links forward to record3; confirm
        // against the fixture that builds 'chain'.
        record3.setNextBlock( record2.getId() );
        before.setLabelField( dynamicPointer( chain ), chain );
        after.setLabelField( dynamicPointer( chain ), asList( record1, record2, record3 ) );
        tx.update( before, after );
    }
} );
| false
|
enterprise_consistency-check_src_test_java_org_neo4j_consistency_checking_full_FullCheckIntegrationTest.java
|
4,243
|
PROPERTIES
{
    @Override
    RecordStore getRecordStore( StoreAccess storeAccess )
    {
        // Pass over the property store.
        return storeAccess.getPropertyStore();
    }
},
| false
|
enterprise_consistency-check_src_main_java_org_neo4j_consistency_checking_full_MultiPassStore.java
|
4,244
|
RELATIONSHIPS
{
    @Override
    RecordStore getRecordStore( StoreAccess storeAccess )
    {
        // Pass over the relationship store.
        return storeAccess.getRelationshipStore();
    }
},
| false
|
enterprise_consistency-check_src_main_java_org_neo4j_consistency_checking_full_MultiPassStore.java
|
4,245
|
NODES
{
    @Override
    RecordStore getRecordStore( StoreAccess storeAccess )
    {
        // Pass over the node store.
        return storeAccess.getNodeStore();
    }
},
| false
|
enterprise_consistency-check_src_main_java_org_neo4j_consistency_checking_full_MultiPassStore.java
|
4,246
|
/**
 * Checks that every label stored on a node record is also present for that
 * node in the label scan store, reporting each label that is missing there.
 */
public class LabelsMatchCheck implements
        RecordCheck<NodeRecord, ConsistencyReport.LabelsMatchReport>
{
    private final LabelScanReader labelScanReader;

    public LabelsMatchCheck( LabelScanReader labelScanReader )
    {
        this.labelScanReader = labelScanReader;
    }

    @Override
    public void check( NodeRecord record, CheckerEngine<NodeRecord, ConsistencyReport.LabelsMatchReport> engine,
                       RecordAccess records )
    {
        // Start with every label the node itself claims, then cross off each
        // one the label scan store also knows about.
        Set<Long> missingFromIndex = NodeLabelReader.getListOfLabels( record, records, engine );
        for ( Iterator<Long> indexed = labelScanReader.labelsForNode( record.getId() ); indexed.hasNext(); )
        {
            missingFromIndex.remove( indexed.next() );
        }
        // Whatever remains was never indexed.
        for ( Long labelId : missingFromIndex )
        {
            engine.report().nodeLabelNotInIndex( record, labelId );
        }
    }

    @Override
    public void checkChange( NodeRecord oldRecord, NodeRecord newRecord, CheckerEngine<NodeRecord, ConsistencyReport.LabelsMatchReport> engine, DiffRecordAccess records )
    {
        check( newRecord, engine, records );
    }
}
| false
|
enterprise_consistency-check_src_main_java_org_neo4j_consistency_checking_full_LabelsMatchCheck.java
|
4,247
|
{
};
| false
|
enterprise_consistency-check_src_main_java_org_neo4j_consistency_checking_full_IterableStore.java
|
4,248
|
/**
 * Adapts a {@link RecordStore} to a {@link BoundedIterable} over its in-use records.
 */
public class IterableStore<RECORD extends AbstractBaseRecord> implements BoundedIterable<RECORD>
{
    private final RecordStore<RECORD> store;

    public IterableStore( RecordStore<RECORD> store )
    {
        this.store = store;
    }

    @Override
    public long maxCount()
    {
        // Upper bound only — the high id counts unused slots as well.
        return store.getHighId();
    }

    @Override
    public void close() throws IOException
    {
        // Nothing to release; the underlying store is owned elsewhere.
    }

    @Override
    public Iterator<RECORD> iterator()
    {
        // An empty Processor subclass gives access to the store's scan
        // machinery; IN_USE filters out unused record slots.
        RecordStore.Processor<RuntimeException> scanner = new RecordStore.Processor<RuntimeException>()
        {
        };
        return scanner.scan( store, IN_USE ).iterator();
    }
}
| false
|
enterprise_consistency-check_src_main_java_org_neo4j_consistency_checking_full_IterableStore.java
|
4,249
|
/**
 * Verifies that each index entry points at a node which is in use and actually
 * carries the label the index is defined for.
 */
public class IndexCheck implements RecordCheck<IndexEntry, ConsistencyReport.IndexConsistencyReport>
{
    private final IndexRule indexRule;

    public IndexCheck( IndexRule indexRule )
    {
        this.indexRule = indexRule;
    }

    @Override
    public void check( IndexEntry record, CheckerEngine<IndexEntry, ConsistencyReport.IndexConsistencyReport> engine, RecordAccess records )
    {
        long[] requiredLabels = new long[]{indexRule.getLabel()};
        NodeInUseWithCorrectLabelsCheck<IndexEntry, ConsistencyReport.IndexConsistencyReport> nodeCheck =
                new NodeInUseWithCorrectLabelsCheck<IndexEntry, ConsistencyReport.IndexConsistencyReport>( requiredLabels );
        engine.comparativeCheck( records.node( record.getId() ), nodeCheck );
    }

    @Override
    public void checkChange( IndexEntry oldRecord, IndexEntry newRecord, CheckerEngine<IndexEntry, ConsistencyReport
            .IndexConsistencyReport> engine, DiffRecordAccess records )
    {
        // A changed entry is validated exactly like a fresh one.
        check( newRecord, engine, records );
    }
}
| false
|
enterprise_consistency-check_src_main_java_org_neo4j_consistency_checking_full_IndexCheck.java
|
4,250
|
/**
 * Minimal mutable holder used to capture a value from inside a callback.
 */
private static class Reference<T>
{
    private T value; // current referent; may be null

    void set(T value)
    {
        this.value = value;
    }

    T get()
    {
        return value;
    }

    @Override
    public String toString()
    {
        // "null" when unset, per String.valueOf.
        return String.valueOf( value );
    }
}
| false
|
enterprise_consistency-check_src_test_java_org_neo4j_consistency_checking_full_FullCheckIntegrationTest.java
|
4,251
|
/**
 * Consistency check for dynamic records (string/array/schema blocks): validates
 * block length and follows the forward chain to the next block, reporting
 * empty, invalid, self-referential or dangling links.
 */
class DynamicRecordCheck
        implements RecordCheck<DynamicRecord, ConsistencyReport.DynamicConsistencyReport>,
        ComparativeRecordChecker<DynamicRecord, DynamicRecord, ConsistencyReport.DynamicConsistencyReport>
{
    private final int blockSize;             // max payload bytes per record (record size minus header)
    private final DynamicStore dereference;  // which dynamic store to resolve next-block references in
    private final RecordStore<DynamicRecord> store;

    DynamicRecordCheck( RecordStore<DynamicRecord> store, DynamicStore dereference )
    {
        this.blockSize = store.getRecordSize() - store.getRecordHeaderSize();
        this.dereference = dereference;
        this.store = store;
    }

    @Override
    public void checkChange( DynamicRecord oldRecord, DynamicRecord newRecord,
                             CheckerEngine<DynamicRecord, ConsistencyReport.DynamicConsistencyReport> engine,
                             DiffRecordAccess records )
    {
        // First validate the new record in full, then verify the old chain was
        // consistently unlinked: if the old record had a successor that is no
        // longer reachable the same way, that successor must appear as changed.
        check( newRecord, engine, records );
        if ( oldRecord.inUse() && !Record.NO_NEXT_BLOCK.is( oldRecord.getNextBlock() ) )
        {
            if ( !newRecord.inUse() || oldRecord.getNextBlock() != newRecord.getNextBlock() )
            {
                DynamicRecord next = dereference.changed( records, oldRecord.getNextBlock() );
                if ( next == null )
                {
                    engine.report().nextNotUpdated();
                }
                // TODO: how to check that the owner of 'next' is now a different property record.
                // TODO: implement previous logic? DynamicRecord must change from used to unused or from unused to used
            }
        }
    }

    @Override
    public void check( DynamicRecord record,
                       CheckerEngine<DynamicRecord, ConsistencyReport.DynamicConsistencyReport> engine,
                       RecordAccess records )
    {
        if ( !record.inUse() )
        {
            return; // unused records carry no constraints
        }
        if ( record.getLength() == 0 )
        {
            engine.report().emptyBlock();
        }
        else if ( record.getLength() < 0 )
        {
            engine.report().invalidLength();
        }
        if ( !Record.NO_NEXT_BLOCK.is( record.getNextBlock() ) )
        {
            if ( record.getNextBlock() == record.getId() )
            {
                engine.report().selfReferentialNext();
            }
            else
            {
                // Defer validation of the successor to checkReference below.
                engine.comparativeCheck( dereference.lookup( records, record.getNextBlock() ), this );
            }
            // A record that links onward should be filled to capacity.
            if ( record.getLength() < blockSize )
            {
                engine.report().recordNotFullReferencesNext();
            }
        }
    }

    @Override
    public void checkReference( DynamicRecord record, DynamicRecord next,
                                CheckerEngine<DynamicRecord, ConsistencyReport.DynamicConsistencyReport> engine,
                                RecordAccess records )
    {
        if ( !next.inUse() )
        {
            engine.report().nextNotInUse( next );
        }
        else
        {
            if ( next.getLength() <= 0 )
            {
                engine.report().emptyNextBlock( next );
            }
        }
    }
}
| false
|
enterprise_consistency-check_src_main_java_org_neo4j_consistency_checking_DynamicRecordCheck.java
|
4,252
|
/**
 * Base store processor that wires each record type to its (decorator-wrapped)
 * {@link RecordCheck} and dispatches to abstract check* hooks implemented by
 * concrete processors.
 */
public abstract class AbstractStoreProcessor extends RecordStore.Processor<RuntimeException>
{
    private final RecordCheck<NodeRecord, ConsistencyReport.NodeConsistencyReport> nodeChecker;
    private final RecordCheck<RelationshipRecord, ConsistencyReport.RelationshipConsistencyReport> relationshipChecker;
    private final RecordCheck<PropertyRecord, ConsistencyReport.PropertyConsistencyReport> propertyChecker;
    private final RecordCheck<PropertyKeyTokenRecord, ConsistencyReport.PropertyKeyTokenConsistencyReport> propertyKeyTokenChecker;
    private final RecordCheck<RelationshipTypeTokenRecord, ConsistencyReport.RelationshipTypeConsistencyReport> relationshipTypeTokenChecker;
    private final RecordCheck<LabelTokenRecord, ConsistencyReport.LabelTokenConsistencyReport> labelTokenChecker;

    public AbstractStoreProcessor()
    {
        this( CheckDecorator.NONE );
    }

    public AbstractStoreProcessor( CheckDecorator decorator )
    {
        // Each base checker is wrapped by the decorator, which may add
        // extra validation around the standard record checks.
        this.nodeChecker = decorator.decorateNodeChecker( new NodeRecordCheck() );
        this.relationshipChecker = decorator.decorateRelationshipChecker( new RelationshipRecordCheck() );
        this.propertyChecker = decorator.decoratePropertyChecker( new PropertyRecordCheck() );
        this.propertyKeyTokenChecker = decorator.decoratePropertyKeyTokenChecker( new PropertyKeyTokenRecordCheck() );
        this.relationshipTypeTokenChecker = decorator.decorateRelationshipTypeTokenChecker( new
                RelationshipTypeTokenRecordCheck() );
        this.labelTokenChecker = decorator.decorateLabelTokenChecker( new LabelTokenRecordCheck() );
    }

    // --- hooks for concrete processors -----------------------------------

    protected abstract void checkNode(
            RecordStore<NodeRecord> store, NodeRecord node,
            RecordCheck<NodeRecord, ConsistencyReport.NodeConsistencyReport> checker );

    protected abstract void checkRelationship(
            RecordStore<RelationshipRecord> store, RelationshipRecord rel,
            RecordCheck<RelationshipRecord, ConsistencyReport.RelationshipConsistencyReport> checker );

    protected abstract void checkProperty(
            RecordStore<PropertyRecord> store, PropertyRecord property,
            RecordCheck<PropertyRecord, ConsistencyReport.PropertyConsistencyReport> checker );

    protected abstract void checkRelationshipTypeToken(
            RecordStore<RelationshipTypeTokenRecord> store,
            RelationshipTypeTokenRecord record,
            RecordCheck<RelationshipTypeTokenRecord, ConsistencyReport.RelationshipTypeConsistencyReport> checker );

    protected abstract void checkLabelToken(
            RecordStore<LabelTokenRecord> store,
            LabelTokenRecord record,
            RecordCheck<LabelTokenRecord, ConsistencyReport.LabelTokenConsistencyReport> checker );

    protected abstract void checkPropertyKeyToken(
            RecordStore<PropertyKeyTokenRecord> store, PropertyKeyTokenRecord record,
            RecordCheck<PropertyKeyTokenRecord,
                    ConsistencyReport.PropertyKeyTokenConsistencyReport> checker );

    protected abstract void checkDynamic(
            RecordType type, RecordStore<DynamicRecord> store, DynamicRecord string,
            RecordCheck<DynamicRecord, ConsistencyReport.DynamicConsistencyReport> checker );

    protected abstract void checkDynamicLabel(
            RecordType type, RecordStore<DynamicRecord> store, DynamicRecord string,
            RecordCheck<DynamicRecord, ConsistencyReport.DynamicLabelConsistencyReport> checker );

    public void processSchema( RecordStore<DynamicRecord> store, DynamicRecord schema )
    {
        // cf. StoreProcessor
        checkDynamic( RecordType.SCHEMA, store, schema, new DynamicRecordCheck( store, SCHEMA ) );
    }

    @Override
    public final void processNode( RecordStore<NodeRecord> store, NodeRecord node )
    {
        checkNode( store, node, nodeChecker );
    }

    @Override
    public final void processRelationship( RecordStore<RelationshipRecord> store, RelationshipRecord rel )
    {
        checkRelationship( store, rel, relationshipChecker );
    }

    @Override
    public final void processProperty( RecordStore<PropertyRecord> store, PropertyRecord property )
    {
        checkProperty( store, property, propertyChecker );
    }

    @Override
    public final void processString( RecordStore<DynamicRecord> store, DynamicRecord string, IdType idType )
    {
        // Map the id type of the dynamic string record to the record type to
        // report under and the dynamic store used to resolve its chain.
        RecordType type;
        DynamicStore dereference;
        switch ( idType )
        {
            case STRING_BLOCK:
                type = RecordType.STRING_PROPERTY;
                dereference = DynamicStore.STRING;
                break;
            case RELATIONSHIP_TYPE_TOKEN_NAME:
                type = RecordType.RELATIONSHIP_TYPE_NAME;
                dereference = DynamicStore.RELATIONSHIP_TYPE;
                break;
            case PROPERTY_KEY_TOKEN_NAME:
                type = RecordType.PROPERTY_KEY_NAME;
                dereference = DynamicStore.PROPERTY_KEY;
                break;
            case LABEL_TOKEN_NAME:
                type = RecordType.LABEL_NAME;
                dereference = DynamicStore.LABEL;
                break;
            default:
                throw new IllegalArgumentException( format( "The id type [%s] is not valid for String records.", idType ) );
        }
        checkDynamic( type, store, string, new DynamicRecordCheck( store, dereference ) );
    }

    @Override
    public final void processArray( RecordStore<DynamicRecord> store, DynamicRecord array )
    {
        checkDynamic( RecordType.ARRAY_PROPERTY, store, array, new DynamicRecordCheck( store, ARRAY ) );
    }

    @Override
    public final void processLabelArrayWithOwner( RecordStore<DynamicRecord> store, DynamicRecord array )
    {
        // Dynamic node-label arrays get both the generic dynamic check and the
        // orphan-chain-start check.
        checkDynamic( RecordType.NODE_DYNAMIC_LABEL, store, array, new DynamicRecordCheck( store, NODE_LABEL ) );
        checkDynamicLabel( RecordType.NODE_DYNAMIC_LABEL, store, array, new NodeDynamicLabelOrphanChainStartCheck() );
    }

    @Override
    public final void processRelationshipTypeToken( RecordStore<RelationshipTypeTokenRecord> store,
                                                    RelationshipTypeTokenRecord record )
    {
        checkRelationshipTypeToken( store, record, relationshipTypeTokenChecker );
    }

    @Override
    public final void processPropertyKeyToken( RecordStore<PropertyKeyTokenRecord> store,
                                               PropertyKeyTokenRecord record )
    {
        checkPropertyKeyToken( store, record, propertyKeyTokenChecker );
    }

    // NOTE(review): unlike the other process* overrides this one is not final —
    // confirm whether subclasses rely on overriding it.
    @Override
    public void processLabelToken( RecordStore<LabelTokenRecord> store, LabelTokenRecord record )
    {
        checkLabelToken( store, record, labelTokenChecker );
    }
}
| false
|
enterprise_consistency-check_src_main_java_org_neo4j_consistency_checking_AbstractStoreProcessor.java
|
4,253
|
{{
put( new InstanceId( 1 ), URI.create( "ha://1" ) );
put( new InstanceId( 2 ), URI.create( "ha://2" ) );
}}
| false
|
enterprise_cluster_src_test_java_org_neo4j_cluster_protocol_heartbeat_HeartbeatIAmAliveProcessorTest.java
|
4,254
|
/**
 * Test-only runtime exception used to simulate failures in com-layer tests.
 */
public class FailingException extends RuntimeException
{
    public FailingException( String message )
    {
        super( message );
    }

    /**
     * Backward-compatible addition: allows tests to attach an underlying cause.
     *
     * @param message failure description
     * @param cause the exception that triggered this failure
     */
    public FailingException( String message, Throwable cause )
    {
        super( message, cause );
    }
}
| false
|
enterprise_com_src_test_java_org_neo4j_com_FailingException.java
|
4,255
|
{
    // Anonymous InputStream adapter over this buffer's read* primitives.
    // Mark/reset is unsupported; close() is a no-op because the buffer's
    // lifecycle is managed by the owning channel code.
    @Override
    public int read( byte[] b ) throws IOException
    {
        // NOTE(review): always reports b.length as read — assumes the buffer
        // can supply the full array and never signals EOF; confirm callers
        // (e.g. ObjectInputStream) tolerate this.
        readBytes( b );
        return b.length;
    }

    @Override
    public int read( byte[] b, int off, int len ) throws IOException
    {
        readBytes( b, off, len );
        return len;
    }

    @Override
    public long skip( long n ) throws IOException
    {
        // NOTE(review): narrowing cast — a skip of more than Integer.MAX_VALUE
        // bytes would be truncated; presumably never happens here.
        skipBytes( (int)n );
        return n;
    }

    @Override
    public int available() throws IOException
    {
        return super.available();
    }

    @Override
    public void close() throws IOException
    {
    }

    @Override
    public synchronized void mark( int readlimit )
    {
        throw new UnsupportedOperationException();
    }

    @Override
    public synchronized void reset() throws IOException
    {
        throw new UnsupportedOperationException();
    }

    @Override
    public boolean markSupported()
    {
        return false;
    }

    @Override
    public int read() throws IOException
    {
        // NOTE(review): readByte() widens a signed byte, so values >= 0x80
        // come back negative — InputStream.read() is specified to return
        // 0..255 or -1. Verify no caller depends on the unsigned contract.
        return readByte();
    }
};
| false
|
enterprise_com_src_main_java_org_neo4j_com_DechunkingChannelBuffer.java
|
4,256
|
/**
 * ChannelBuffer implementation that reads a message delivered as a sequence of
 * chunks, transparently fetching the next chunk from the network reader when
 * the current one runs out. Read-only: all set*/write operations throw.
 */
public class DechunkingChannelBuffer implements ChannelBuffer
{
    private final BlockingReadHandler<ChannelBuffer> reader; // source of incoming chunks
    private ChannelBuffer buffer;          // current (possibly accumulated) chunk data
    private boolean more;                  // peer announced additional chunks after the current one
    private boolean hasMarkedReaderIndex;
    private final long timeoutMillis;      // max wait per chunk read
    private boolean failure;               // current message carries a serialized failure
    private final byte applicationProtocolVersion;
    private final byte internalProtocolVersion;

    DechunkingChannelBuffer( BlockingReadHandler<ChannelBuffer> reader, long timeoutMillis, byte internalProtocolVersion,
                             byte applicationProtocolVersion )
    {
        this.reader = reader;
        this.timeoutMillis = timeoutMillis;
        this.internalProtocolVersion = internalProtocolVersion;
        this.applicationProtocolVersion = applicationProtocolVersion;
        // Eagerly pull the first chunk so header flags (more/failure) are set.
        readNextChunk();
    }
/**
 * Blocks for up to {@code timeoutMillis} waiting for the next chunk from the
 * network reader.
 *
 * @return the next chunk, never {@code null}.
 * @throws ComException if the channel closed, an I/O error occurred, or this
 *         thread was interrupted while waiting.
 */
protected ChannelBuffer readNext()
{
    try
    {
        ChannelBuffer result = reader.read( timeoutMillis, TimeUnit.MILLISECONDS );
        if ( result == null )
        {
            throw new ComException( "Channel has been closed" );
        }
        return result;
    }
    catch ( IOException e )
    {
        throw new ComException( e );
    }
    catch ( InterruptedException e )
    {
        // Restore the interrupt flag so code further up the stack can still
        // observe that this thread was interrupted (it was swallowed before).
        Thread.currentThread().interrupt();
        throw new ComException( e );
    }
}
/**
 * Fetches the following chunk only when the current buffer cannot satisfy the
 * requested number of bytes and the peer announced that more chunks follow.
 */
private void readNextChunkIfNeeded( int bytesPlus )
{
    boolean insufficient = buffer.readableBytes() < bytesPlus;
    if ( insufficient && more )
    {
        readNextChunk();
    }
}
/**
 * Reads one chunk from the network, parses its two-byte header, and either
 * adopts it directly (single-chunk message) or appends its payload to the
 * accumulation buffer. Throws the deserialized failure if the header's
 * failure bit is set.
 */
private void readNextChunk()
{
    ChannelBuffer readBuffer = readNext();
    /* Header layout:
     * [    ,   x] 0: last chunk in message, 1: there a more chunks after this one
     * [    ,  x ] 0: success, 1: failure
     * [    ,xx  ] internal protocol version
     * [xxxx,xxxx] application protocol version */
    byte[] header = new byte[2];
    readBuffer.readBytes( header );
    more = (header[0] & 0x1) != 0;
    failure = (header[0] & 0x2) != 0;
    assertSameProtocolVersion( header, internalProtocolVersion, applicationProtocolVersion );

    if ( !more && buffer == null )
    {
        // Optimization: this is the first chunk and it'll be the only chunk
        // in this message.
        buffer = readBuffer;
    }
    else
    {
        // Accumulate: lazily create the dynamic buffer, drop already-consumed
        // bytes, then append this chunk's payload.
        buffer = buffer == null ? ChannelBuffers.dynamicBuffer() : buffer;
        discardReadBytes();
        buffer.writeBytes( readBuffer );
    }

    if ( failure )
    {
        readAndThrowFailureResponse();
    }
}
/**
 * Validates that the chunk header carries the expected internal and
 * application protocol versions, throwing IllegalProtocolVersionException on
 * mismatch.
 */
static void assertSameProtocolVersion( byte[] header, byte internalProtocolVersion, byte applicationProtocolVersion )
{
    /* [aaaa,aaaa][pppp,ppoc]
     * Only 6 bits for internal protocol version, yielding 64 values. It's ok to wrap around because
     * It's highly unlikely that instances that are so far apart in versions will communicate
     * with each other.
     */
    // NOTE(review): 0x7C masks bits 2..6 — five bits, max 31 — while the
    // comment above claims six bits (which would be mask 0xFC). Confirm
    // against the writing side (ChunkingChannelBuffer) which is intended.
    byte readInternalProtocolVersion = (byte) ((header[0] & 0x7C) >>> 2);
    if ( readInternalProtocolVersion != internalProtocolVersion )
    {
        throw new IllegalProtocolVersionException( internalProtocolVersion, readInternalProtocolVersion,
                "Unexpected internal protocol version " + readInternalProtocolVersion +
                        ", expected " + internalProtocolVersion + ". Header:" + numbersToBitString( header ) );
    }
    if ( header[1] != applicationProtocolVersion )
    {
        throw new IllegalProtocolVersionException( applicationProtocolVersion, header[1],
                "Unexpected application protocol version " + header[1] +
                        ", expected " + applicationProtocolVersion + ". Header:" + numbersToBitString( header ) );
    }
}
/**
 * Deserializes the Throwable that the peer sent in a failure message and
 * rethrows it (unwrapped for RuntimeException/Error, wrapped in ComException
 * otherwise). Never returns normally.
 */
private void readAndThrowFailureResponse()
{
    Throwable cause;
    try
    {
        ObjectInputStream input = new ObjectInputStream( asInputStream() );
        cause = (Throwable) input.readObject();
    }
    catch ( Throwable e )
    {
        // Note: this is due to a problem with the streaming of exceptions, the ChunkingChannelBuffer will almost
        // always sends exceptions back as two chunks, the first one empty and the second with the exception.
        // We hit this when we try to read the exception of the first one, and in reading it hit the second
        // chunk with the "real" exception. This should be revisited to 1) clear up the chunking and 2) handle
        // serialized exceptions spanning multiple chunks.
        if ( e instanceof RuntimeException ) throw (RuntimeException) e;
        if ( e instanceof Error ) throw (Error) e;
        throw new ComException( e );
    }
    if ( cause instanceof RuntimeException ) throw (RuntimeException) cause;
    if ( cause instanceof Error ) throw (Error) cause;
    throw new ComException( cause );
}
// --- simple delegates to the current chunk's buffer -----------------------

public ChannelBufferFactory factory()
{
    return buffer.factory();
}

/** @return true when the current message carried the failure bit. */
public boolean failure()
{
    return failure;
}

/**
 * Will return the capacity of the current chunk only
 */
public int capacity()
{
    return buffer.capacity();
}

public ByteOrder order()
{
    return buffer.order();
}

public boolean isDirect()
{
    return buffer.isDirect();
}

public int readerIndex()
{
    return buffer.readerIndex();
}

public void readerIndex( int readerIndex )
{
    buffer.readerIndex( readerIndex );
}

public int writerIndex()
{
    return buffer.writerIndex();
}

public void writerIndex( int writerIndex )
{
    buffer.writerIndex( writerIndex );
}

public void setIndex( int readerIndex, int writerIndex )
{
    buffer.setIndex( readerIndex, writerIndex );
}
/**
 * Will return amount of readable bytes in this chunk only
 */
public int readableBytes()
{
    return buffer.readableBytes();
}

// This buffer is read-only, so nothing is ever writable through it.
public int writableBytes()
{
    return 0;
}

/**
 * Can fetch the next chunk if needed
 */
public boolean readable()
{
    readNextChunkIfNeeded( 1 );
    return buffer.readable();
}

public boolean writable()
{
    return buffer.writable();
}

public void clear()
{
    buffer.clear();
}

public void markReaderIndex()
{
    buffer.markReaderIndex();
    hasMarkedReaderIndex = true;
}

public void resetReaderIndex()
{
    buffer.resetReaderIndex();
    hasMarkedReaderIndex = false;
}

public void markWriterIndex()
{
    buffer.markWriterIndex();
}

public void resetWriterIndex()
{
    buffer.resetWriterIndex();
}

// Drops consumed bytes from the accumulation buffer while preserving a
// previously marked reader position: the mark survives the discard, and the
// reader index is re-based by the number of bytes actually discarded.
public void discardReadBytes()
{
    int oldReaderIndex = buffer.readerIndex();
    if ( hasMarkedReaderIndex )
    {
        buffer.resetReaderIndex();
    }
    int bytesToDiscard = buffer.readerIndex();
    buffer.discardReadBytes();
    if ( hasMarkedReaderIndex )
    {
        buffer.readerIndex( oldReaderIndex-bytesToDiscard );
    }
}

public void ensureWritableBytes( int writableBytes )
{
    buffer.ensureWritableBytes( writableBytes );
}
// --- absolute primitive getters: each makes sure enough bytes are buffered
// (pulling in the next chunk when necessary) before delegating. ------------

public byte getByte( int index )
{
    readNextChunkIfNeeded( 1 );
    return buffer.getByte( index );
}

public short getUnsignedByte( int index )
{
    readNextChunkIfNeeded( 1 );
    return buffer.getUnsignedByte( index );
}

public short getShort( int index )
{
    readNextChunkIfNeeded( 2 );
    return buffer.getShort( index );
}

public int getUnsignedShort( int index )
{
    readNextChunkIfNeeded( 2 );
    return buffer.getUnsignedShort( index );
}

// NOTE(review): medium values are 3 bytes but 4 are requested here —
// presumably a conservative over-request; harmless but worth confirming.
public int getMedium( int index )
{
    readNextChunkIfNeeded( 4 );
    return buffer.getMedium( index );
}

public int getUnsignedMedium( int index )
{
    readNextChunkIfNeeded( 4 );
    return buffer.getUnsignedMedium( index );
}

public int getInt( int index )
{
    readNextChunkIfNeeded( 4 );
    return buffer.getInt( index );
}

public long getUnsignedInt( int index )
{
    readNextChunkIfNeeded( 4 );
    return buffer.getUnsignedInt( index );
}

public long getLong( int index )
{
    readNextChunkIfNeeded( 8 );
    return buffer.getLong( index );
}

public char getChar( int index )
{
    readNextChunkIfNeeded( 2 );
    return buffer.getChar( index );
}

// NOTE(review): a float is 4 bytes yet 8 are requested — over-request like
// getMedium above; confirm it is intentional.
public float getFloat( int index )
{
    readNextChunkIfNeeded( 8 );
    return buffer.getFloat( index );
}

public double getDouble( int index )
{
    readNextChunkIfNeeded( 8 );
    return buffer.getDouble( index );
}
// --- bulk getters: chunk pre-fetch is based on the destination size, which
// (per the existing TODOs) does not yet loop when the destination exceeds a
// single chunk. ------------------------------------------------------------

public void getBytes( int index, ChannelBuffer dst )
{
    // TODO We need a loop for this (if dst is bigger than chunk size)
    readNextChunkIfNeeded( dst.writableBytes() );
    buffer.getBytes( index, dst );
}

public void getBytes( int index, ChannelBuffer dst, int length )
{
    // TODO We need a loop for this (if dst is bigger than chunk size)
    readNextChunkIfNeeded( length );
    buffer.getBytes( index, dst, length );
}

public void getBytes( int index, ChannelBuffer dst, int dstIndex, int length )
{
    // TODO We need a loop for this (if dst is bigger than chunk size)
    readNextChunkIfNeeded( length );
    buffer.getBytes( index, dst, dstIndex, length );
}

public void getBytes( int index, byte[] dst )
{
    // TODO We need a loop for this (if dst is bigger than chunk size)
    readNextChunkIfNeeded( dst.length );
    buffer.getBytes( index, dst );
}

public void getBytes( int index, byte[] dst, int dstIndex, int length )
{
    // TODO We need a loop for this (if dst is bigger than chunk size)
    readNextChunkIfNeeded( length );
    buffer.getBytes( index, dst, dstIndex, length );
}

public void getBytes( int index, ByteBuffer dst )
{
    // TODO We need a loop for this (if dst is bigger than chunk size)
    readNextChunkIfNeeded( dst.limit() );
    buffer.getBytes( index, dst );
}

public void getBytes( int index, OutputStream out, int length ) throws IOException
{
    // TODO We need a loop for this (if dst is bigger than chunk size)
    readNextChunkIfNeeded( length );
    buffer.getBytes( index, out, length );
}

public int getBytes( int index, GatheringByteChannel out, int length ) throws IOException
{
    // TODO We need a loop for this (if dst is bigger than chunk size)
    readNextChunkIfNeeded( length );
    return buffer.getBytes( index, out, length );
}
private UnsupportedOperationException unsupportedOperation()
{
return new UnsupportedOperationException( "Not supported in a DechunkingChannelBuffer, it's used merely for reading" );
}
public void setByte( int index, int value )
{
throw unsupportedOperation();
}
public void setShort( int index, int value )
{
throw unsupportedOperation();
}
public void setMedium( int index, int value )
{
throw unsupportedOperation();
}
public void setInt( int index, int value )
{
throw unsupportedOperation();
}
public void setLong( int index, long value )
{
throw unsupportedOperation();
}
public void setChar( int index, int value )
{
throw unsupportedOperation();
}
public void setFloat( int index, float value )
{
throw unsupportedOperation();
}
public void setDouble( int index, double value )
{
throw unsupportedOperation();
}
public void setBytes( int index, ChannelBuffer src )
{
throw unsupportedOperation();
}
public void setBytes( int index, ChannelBuffer src, int length )
{
throw unsupportedOperation();
}
public void setBytes( int index, ChannelBuffer src, int srcIndex, int length )
{
throw unsupportedOperation();
}
public void setBytes( int index, byte[] src )
{
throw unsupportedOperation();
}
public void setBytes( int index, byte[] src, int srcIndex, int length )
{
throw unsupportedOperation();
}
public void setBytes( int index, ByteBuffer src )
{
throw unsupportedOperation();
}
public int setBytes( int index, InputStream in, int length ) throws IOException
{
throw unsupportedOperation();
}
public int setBytes( int index, ScatteringByteChannel in, int length ) throws IOException
{
throw unsupportedOperation();
}
public void setZero( int index, int length )
{
throw unsupportedOperation();
}
public byte readByte()
{
readNextChunkIfNeeded( 1 );
return buffer.readByte();
}
public short readUnsignedByte()
{
readNextChunkIfNeeded( 1 );
return buffer.readUnsignedByte();
}
public short readShort()
{
readNextChunkIfNeeded( 2 );
return buffer.readShort();
}
public int readUnsignedShort()
{
readNextChunkIfNeeded( 2 );
return buffer.readUnsignedShort();
}
/**
 * Reads a 3-byte medium from the current reader index, pulling in the next chunk first if needed.
 */
public int readMedium()
{
    // A Netty "medium" is 3 bytes wide; requesting 4 could fetch (or block on)
    // an extra chunk when the medium is the last 3 bytes of the stream.
    readNextChunkIfNeeded( 3 );
    return buffer.readMedium();
}

/**
 * Reads a 3-byte unsigned medium from the current reader index, pulling in the next chunk first if needed.
 */
public int readUnsignedMedium()
{
    // 3 bytes, same reasoning as readMedium().
    readNextChunkIfNeeded( 3 );
    return buffer.readUnsignedMedium();
}
// Sequential fixed-width reads: ensure enough bytes are buffered, then delegate.
public int readInt()
{
    readNextChunkIfNeeded( 4 );
    return buffer.readInt();
}

public long readUnsignedInt()
{
    readNextChunkIfNeeded( 4 );
    return buffer.readUnsignedInt();
}

public long readLong()
{
    readNextChunkIfNeeded( 8 );
    return buffer.readLong();
}

public char readChar()
{
    readNextChunkIfNeeded( 2 );
    return buffer.readChar();
}
/**
 * Reads a 4-byte float from the current reader index, pulling in the next chunk first if needed.
 */
public float readFloat()
{
    // A float is 4 bytes, not 8; over-requesting could fetch (or block on) an
    // unnecessary extra chunk at the end of the stream.
    readNextChunkIfNeeded( 4 );
    return buffer.readFloat();
}
public double readDouble()
{
    readNextChunkIfNeeded( 8 );
    return buffer.readDouble();
}

// Bulk sequential reads. As with the getBytes family, a single request larger
// than one chunk is not handled — only one next-chunk fetch happens.
public ChannelBuffer readBytes( int length )
{
    readNextChunkIfNeeded( length );
    return buffer.readBytes( length );
}

public ChannelBuffer readBytes( ChannelBufferIndexFinder indexFinder )
{
    throw unsupportedOperation();
}

public ChannelBuffer readSlice( int length )
{
    readNextChunkIfNeeded( length );
    return buffer.readSlice( length );
}

public ChannelBuffer readSlice( ChannelBufferIndexFinder indexFinder )
{
    throw unsupportedOperation();
}

public void readBytes( ChannelBuffer dst )
{
    readNextChunkIfNeeded( dst.writableBytes() );
    buffer.readBytes( dst );
}

public void readBytes( ChannelBuffer dst, int length )
{
    readNextChunkIfNeeded( length );
    buffer.readBytes( dst, length );
}

public void readBytes( ChannelBuffer dst, int dstIndex, int length )
{
    readNextChunkIfNeeded( length );
    buffer.readBytes( dst, dstIndex, length );
}

public void readBytes( byte[] dst )
{
    readNextChunkIfNeeded( dst.length );
    buffer.readBytes( dst );
}

public void readBytes( byte[] dst, int dstIndex, int length )
{
    readNextChunkIfNeeded( length );
    buffer.readBytes( dst, dstIndex, length );
}

public void readBytes( ByteBuffer dst )
{
    readNextChunkIfNeeded( dst.limit() );
    buffer.readBytes( dst );
}

public void readBytes( OutputStream out, int length ) throws IOException
{
    readNextChunkIfNeeded( length );
    buffer.readBytes( out, length );
}

public int readBytes( GatheringByteChannel out, int length ) throws IOException
{
    readNextChunkIfNeeded( length );
    return buffer.readBytes( out, length );
}

public void skipBytes( int length )
{
    readNextChunkIfNeeded( length );
    buffer.skipBytes( length );
}

public int skipBytes( ChannelBufferIndexFinder indexFinder )
{
    throw unsupportedOperation();
}

// Entire write side is unsupported: this buffer only dechunks incoming data.
public void writeByte( int value )
{
    throw unsupportedOperation();
}

public void writeShort( int value )
{
    throw unsupportedOperation();
}

public void writeMedium( int value )
{
    throw unsupportedOperation();
}

public void writeInt( int value )
{
    throw unsupportedOperation();
}

public void writeLong( long value )
{
    throw unsupportedOperation();
}

public void writeChar( int value )
{
    throw unsupportedOperation();
}

public void writeFloat( float value )
{
    throw unsupportedOperation();
}

public void writeDouble( double value )
{
    throw unsupportedOperation();
}

public void writeBytes( ChannelBuffer src )
{
    throw unsupportedOperation();
}

public void writeBytes( ChannelBuffer src, int length )
{
    throw unsupportedOperation();
}

public void writeBytes( ChannelBuffer src, int srcIndex, int length )
{
    throw unsupportedOperation();
}

public void writeBytes( byte[] src )
{
    throw unsupportedOperation();
}

public void writeBytes( byte[] src, int srcIndex, int length )
{
    throw unsupportedOperation();
}

public void writeBytes( ByteBuffer src )
{
    throw unsupportedOperation();
}

public int writeBytes( InputStream in, int length ) throws IOException
{
    throw unsupportedOperation();
}

public int writeBytes( ScatteringByteChannel in, int length ) throws IOException
{
    throw unsupportedOperation();
}

public void writeZero( int length )
{
    throw unsupportedOperation();
}

// Search/copy/view operations are unsupported as well.
public int indexOf( int fromIndex, int toIndex, byte value )
{
    throw unsupportedOperation();
}

public int indexOf( int fromIndex, int toIndex, ChannelBufferIndexFinder indexFinder )
{
    throw unsupportedOperation();
}

public int bytesBefore( byte value )
{
    throw unsupportedOperation();
}

public int bytesBefore( ChannelBufferIndexFinder indexFinder )
{
    throw unsupportedOperation();
}

public int bytesBefore( int length, byte value )
{
    throw unsupportedOperation();
}

public int bytesBefore( int length, ChannelBufferIndexFinder indexFinder )
{
    throw unsupportedOperation();
}

public int bytesBefore( int index, int length, byte value )
{
    throw unsupportedOperation();
}

public int bytesBefore( int index, int length, ChannelBufferIndexFinder indexFinder )
{
    throw unsupportedOperation();
}

public ChannelBuffer copy()
{
    throw unsupportedOperation();
}

public ChannelBuffer copy( int index, int length )
{
    throw unsupportedOperation();
}

public ChannelBuffer slice()
{
    throw unsupportedOperation();
}

public ChannelBuffer slice( int index, int length )
{
    throw unsupportedOperation();
}

public ChannelBuffer duplicate()
{
    throw unsupportedOperation();
}

public ByteBuffer toByteBuffer()
{
    throw unsupportedOperation();
}

public ByteBuffer toByteBuffer( int index, int length )
{
    throw unsupportedOperation();
}

public ByteBuffer[] toByteBuffers()
{
    throw unsupportedOperation();
}

public ByteBuffer[] toByteBuffers( int index, int length )
{
    throw unsupportedOperation();
}

public boolean hasArray()
{
    throw unsupportedOperation();
}

public byte[] array()
{
    throw unsupportedOperation();
}

public int arrayOffset()
{
    throw unsupportedOperation();
}

// String conversions and identity operations delegate to the current chunk.
// NOTE(review): equals/hashCode reflect the CURRENT underlying chunk only and
// equals is asymmetric w.r.t. plain ChannelBuffers — verify no caller relies
// on these for anything but debugging.
public String toString( Charset charset )
{
    return buffer.toString( charset );
}

public String toString( int index, int length, Charset charset )
{
    return buffer.toString( index, length, charset );
}

public String toString( String charsetName )
{
    return buffer.toString( charsetName );
}

public String toString( String charsetName, ChannelBufferIndexFinder terminatorFinder )
{
    return buffer.toString( charsetName, terminatorFinder );
}

public String toString( int index, int length, String charsetName )
{
    return buffer.toString( index, length, charsetName );
}

public String toString( int index, int length, String charsetName,
        ChannelBufferIndexFinder terminatorFinder )
{
    return buffer.toString( index, length, charsetName, terminatorFinder );
}

@Override
public int hashCode()
{
    return buffer.hashCode();
}

@Override
public boolean equals( Object obj )
{
    return buffer.equals( obj );
}

public int compareTo( ChannelBuffer buffer )
{
    return this.buffer.compareTo( buffer );
}

@Override
public String toString()
{
    return buffer.toString();
}
/**
 * Exposes this dechunking buffer as an {@link InputStream} view. The stream is
 * read-only, does not support mark/reset, and closing it is a no-op.
 */
private InputStream asInputStream()
{
    return new InputStream()
    {
        @Override
        public int read( byte[] b ) throws IOException
        {
            // readBytes fills the whole array (fetching chunks as needed),
            // so a full read can always be reported.
            readBytes( b );
            return b.length;
        }

        @Override
        public int read( byte[] b, int off, int len ) throws IOException
        {
            readBytes( b, off, len );
            return len;
        }

        @Override
        public long skip( long n ) throws IOException
        {
            skipBytes( (int)n );
            return n;
        }

        @Override
        public int available() throws IOException
        {
            return super.available();
        }

        @Override
        public void close() throws IOException
        {
        }

        @Override
        public synchronized void mark( int readlimit )
        {
            throw new UnsupportedOperationException();
        }

        @Override
        public synchronized void reset() throws IOException
        {
            throw new UnsupportedOperationException();
        }

        @Override
        public boolean markSupported()
        {
            return false;
        }

        @Override
        public int read() throws IOException
        {
            // InputStream.read() must return the byte as 0-255; the previous
            // bare "return readByte()" leaked negative values for bytes >= 0x80,
            // which callers would misinterpret as end-of-stream (-1) or garbage.
            // NOTE(review): at true end-of-stream readByte() presumably throws
            // rather than letting this return -1 — confirm against callers.
            return readByte() & 0xFF;
        }
    };
}
}
| false
|
enterprise_com_src_main_java_org_neo4j_com_DechunkingChannelBuffer.java
|
4,257
|
/**
 * Test helper channel that produces a fixed number of bytes, all with the
 * value {@code 5}, then signals end-of-stream.
 */
public class DataProducer implements ReadableByteChannel
{
    private int bytesLeftToProduce;
    private boolean closed;

    /**
     * @param size total number of bytes this channel will produce before EOF.
     */
    public DataProducer( int size )
    {
        this.bytesLeftToProduce = size;
    }

    @Override
    public boolean isOpen()
    {
        return !closed;
    }

    @Override
    public void close() throws IOException
    {
        if ( closed )
        {
            throw new IllegalStateException( "Already closed" );
        }
        closed = true;
    }

    /**
     * Fills {@code dst} with as many bytes as remain (capped by the buffer's
     * remaining capacity).
     *
     * @return the number of bytes written, or -1 when all bytes have been produced.
     * @throws ClosedChannelException if the channel has been closed, per the
     *         {@link ReadableByteChannel} contract (previously this silently kept producing).
     */
    @Override
    public int read( ByteBuffer dst ) throws IOException
    {
        if ( closed )
        {
            throw new ClosedChannelException();
        }
        int toFill = Math.min( dst.remaining(), bytesLeftToProduce );
        if ( toFill <= 0 )
        {
            return -1;
        }
        for ( int i = 0; i < toFill; i++ )
        {
            dst.put( (byte) 5 );
        }
        bytesLeftToProduce -= toFill;
        return toFill;
    }
}
| false
|
enterprise_com_src_test_java_org_neo4j_com_DataProducer.java
|
4,258
|
{
@Override
public void handle( Exception e )
{
}
};
| false
|
enterprise_com_src_main_java_org_neo4j_com_ConnectionLostHandler.java
|
4,259
|
/**
 * Generic unchecked exception signalling a failure in the communication
 * (com) layer. Mirrors the four standard {@link RuntimeException} constructors.
 */
public class ComException extends RuntimeException
{
    public ComException()
    {
    }

    public ComException( String message )
    {
        super( message );
    }

    public ComException( Throwable cause )
    {
        super( cause );
    }

    public ComException( String message, Throwable cause )
    {
        super( message, cause );
    }
}
| false
|
enterprise_com_src_main_java_org_neo4j_com_ComException.java
|
4,260
|
/**
 * A {@link MadeUpWriter} that drains the incoming channel, but deliberately
 * stops the owning client once a configured number of bytes has been read —
 * simulating a client crashing mid-transfer.
 */
public class ClientCrashingWriter implements MadeUpWriter
{
    private final MadeUpClient client;
    private final int crashAtSize;
    private int totalSize;

    public ClientCrashingWriter( MadeUpClient client, int crashAtSize )
    {
        this.client = client;
        this.crashAtSize = crashAtSize;
    }

    @Override
    public void write( ReadableByteChannel data )
    {
        ByteBuffer scratch = ByteBuffer.allocateDirect( 1000 );
        for ( ;; )
        {
            scratch.clear();
            int bytesRead;
            try
            {
                bytesRead = data.read( scratch );
            }
            catch ( IOException e )
            {
                throw new ComException( e );
            }
            if ( bytesRead == -1 )
            {
                break;
            }
            totalSize += bytesRead;
            if ( totalSize >= crashAtSize )
            {
                // Simulate the crash: stop the client while data is still flowing.
                client.stop();
            }
        }
    }

    /**
     * @return the total number of bytes consumed before end-of-stream (or crash).
     */
    public int getSizeRead()
    {
        return totalSize;
    }
}
| false
|
enterprise_com_src_test_java_org_neo4j_com_ClientCrashingWriter.java
|
4,261
|
{
@Override
public void release()
{
channelPool.release();
}
};
| false
|
enterprise_com_src_main_java_org_neo4j_com_Client.java
|
4,262
|
{
@Override
protected Triplet<Channel, ChannelBuffer, ByteBuffer> create()
{
ChannelFuture channelFuture = bootstrap.connect( address );
channelFuture.awaitUninterruptibly( 5, TimeUnit.SECONDS );
Triplet<Channel, ChannelBuffer, ByteBuffer> channel = null;
if ( channelFuture.isSuccess() )
{
channel = Triplet.of( channelFuture.getChannel(),
ChannelBuffers.dynamicBuffer(),
ByteBuffer.allocate( 1024 * 1024 ) );
msgLog.logMessage( "Opened a new channel to " + address, true );
return channel;
}
String msg = Client.this.getClass().getSimpleName() + " could not connect to " + address;
msgLog.logMessage( msg, true );
ComException exception = new ComException( msg );
// connectionLostHandler.handle( exception );
throw exception;
}
@Override
protected boolean isAlive(
Triplet<Channel, ChannelBuffer, ByteBuffer> resource )
{
return resource.first().isConnected();
}
@Override
protected void dispose(
Triplet<Channel, ChannelBuffer, ByteBuffer> resource )
{
Channel channel = resource.first();
if ( channel.isConnected() )
{
msgLog.debug( "Closing channel: " + channel + ". Channel pool size is now " + channelPool.currentSize() );
channel.close();
}
}
};
| false
|
enterprise_com_src_main_java_org_neo4j_com_Client.java
|
4,263
|
/**
 * Abstract client side of the com protocol. Maintains a pool of Netty channels
 * towards a single server address, serializes requests and deserializes
 * responses, and (optionally) verifies that client and server agree on the
 * store id. Subclasses supply the concrete request types and protocol versions.
 *
 * Lifecycle: {@link #start()} creates the executor/bootstrap/channel pool,
 * {@link #stop()} tears them down.
 */
public abstract class Client<T> extends LifecycleAdapter implements ChannelPipelineFactory
{
    // Max number of concurrent channels that may exist. Needs to be high because we
    // don't want to run into that limit, it will make some #acquire calls block and
    // gets disastrous if that thread is holding monitors that is needed to communicate
    // with the server in some way.
    public static final int DEFAULT_MAX_NUMBER_OF_CONCURRENT_CHANNELS_PER_CLIENT = 20;
    public static final int DEFAULT_READ_RESPONSE_TIMEOUT_SECONDS = 20;

    private ClientBootstrap bootstrap;
    private final SocketAddress address;
    private final StringLogger msgLog;
    private ExecutorService executor;
    // Each pooled resource is (channel, reusable output buffer, reusable input buffer).
    private ResourcePool<Triplet<Channel, ChannelBuffer, ByteBuffer>> channelPool;
    private final Protocol protocol;
    private final int frameLength;
    private final long readTimeout;
    private final int maxUnusedChannels;
    private final StoreId storeId;
    private ResourceReleaser resourcePoolReleaser;
    private final List<MismatchingVersionHandler> mismatchingVersionHandlers;
    private final RequestMonitor requestMonitor;
    // NOTE(review): this field is never assigned in the code visible here — the
    // constructor parameter of the same name is used directly. Looks dead; verify.
    private int chunkSize;

    public Client( String hostNameOrIp, int port, Logging logging, Monitors monitors,
                   StoreId storeId, int frameLength,
                   byte applicationProtocolVersion, long readTimeout,
                   int maxConcurrentChannels, int chunkSize )
    {
        // Fail fast on misconfiguration: a chunk must fit within one frame.
        assertChunkSizeIsWithinFrameSize( chunkSize, frameLength );
        this.msgLog = logging.getMessagesLog( getClass() );
        this.storeId = storeId;
        this.frameLength = frameLength;
        this.readTimeout = readTimeout;
        // ResourcePool no longer controls max concurrent channels. Use this value for the pool size
        this.maxUnusedChannels = maxConcurrentChannels;
        this.mismatchingVersionHandlers = new ArrayList<MismatchingVersionHandler>( 2 );
        this.address = new InetSocketAddress( hostNameOrIp, port );
        this.protocol = new Protocol( chunkSize, applicationProtocolVersion, getInternalProtocolVersion() );
        msgLog.info( getClass().getSimpleName() + " communication channel created towards " + hostNameOrIp + ":" +
                port );
        this.requestMonitor = monitors.newMonitor( RequestMonitor.class, getClass() );
    }

    @Override
    public void start()
    {
        executor = Executors.newCachedThreadPool( new NamedThreadFactory( getClass().getSimpleName() + "@" + address
        ) );
        bootstrap = new ClientBootstrap( new NioClientSocketChannelFactory( executor, executor ) );
        bootstrap.setPipelineFactory( this );
        // Pool that lazily opens channels to the server address on demand.
        channelPool = new ResourcePool<Triplet<Channel, ChannelBuffer, ByteBuffer>>( maxUnusedChannels,
                new ResourcePool.CheckStrategy.TimeoutCheckStrategy( ResourcePool.DEFAULT_CHECK_INTERVAL, SYSTEM_CLOCK ),
                new LoggingResourcePoolMonitor( msgLog ))
        {
            @Override
            protected Triplet<Channel, ChannelBuffer, ByteBuffer> create()
            {
                // Bounded wait for the connect; on success hand out the channel
                // together with fresh output/input buffers.
                ChannelFuture channelFuture = bootstrap.connect( address );
                channelFuture.awaitUninterruptibly( 5, TimeUnit.SECONDS );
                Triplet<Channel, ChannelBuffer, ByteBuffer> channel = null;
                if ( channelFuture.isSuccess() )
                {
                    channel = Triplet.of( channelFuture.getChannel(),
                            ChannelBuffers.dynamicBuffer(),
                            ByteBuffer.allocate( 1024 * 1024 ) );
                    msgLog.logMessage( "Opened a new channel to " + address, true );
                    return channel;
                }
                String msg = Client.this.getClass().getSimpleName() + " could not connect to " + address;
                msgLog.logMessage( msg, true );
                ComException exception = new ComException( msg );
                // connectionLostHandler.handle( exception );
                throw exception;
            }

            @Override
            protected boolean isAlive(
                    Triplet<Channel, ChannelBuffer, ByteBuffer> resource )
            {
                return resource.first().isConnected();
            }

            @Override
            protected void dispose(
                    Triplet<Channel, ChannelBuffer, ByteBuffer> resource )
            {
                Channel channel = resource.first();
                if ( channel.isConnected() )
                {
                    msgLog.debug( "Closing channel: " + channel + ". Channel pool size is now " + channelPool.currentSize() );
                    channel.close();
                }
            }
        };
        /*
         * This is here to couple the channel releasing to Response.close() itself and not
         * to TransactionStream.close() as it is implemented here. The reason is that a Response
         * that is returned without a TransactionStream will still hold the channel and should
         * release it eventually. Also, logically, closing the channel is not dependent on the
         * TransactionStream.
         */
        resourcePoolReleaser = new ResourceReleaser()
        {
            @Override
            public void release()
            {
                channelPool.release();
            }
        };
    }

    @Override
    public void stop()
    {
        channelPool.close( true );
        bootstrap.releaseExternalResources();
        executor.shutdownNow();
        mismatchingVersionHandlers.clear();
        msgLog.logMessage( toString() + " shutdown", true );
    }

    protected <R> Response<R> sendRequest( RequestType<T> type, RequestContext context,
                                           Serializer serializer, Deserializer<R> deserializer )
    {
        return sendRequest( type, context, serializer, deserializer, null );
    }

    /**
     * Sends one request and reads one response over a pooled channel.
     * On failure the channel is closed and the pool slot released; on success
     * the channel is released later via {@code Response.close()} (see the
     * resourcePoolReleaser comment in {@link #start()}).
     */
    protected <R> Response<R> sendRequest( RequestType<T> type, RequestContext context,
                                           Serializer serializer, Deserializer<R> deserializer,
                                           StoreId specificStoreId )
    {
        boolean success = true;
        Triplet<Channel, ChannelBuffer, ByteBuffer> channelContext = null;
        Throwable failure = null;
        try
        {
            // Send 'em over the wire
            channelContext = getChannel( type );
            Channel channel = channelContext.first();
            ChannelBuffer output = channelContext.second();
            ByteBuffer input = channelContext.third();
            Map<String, String> requestContext = new HashMap<String, String>();
            requestContext.put( "type", type.toString() );
            requestContext.put( "slaveContext", context.toString() );
            requestContext.put( "serverAddress", channel.getRemoteAddress().toString() );
            requestMonitor.beginRequest( requestContext );
            // Request
            protocol.serializeRequest( channel, output, type, context, serializer );
            // Response
            @SuppressWarnings("unchecked")
            Response<R> response = protocol.deserializeResponse(
                    (BlockingReadHandler<ChannelBuffer>) channel.getPipeline().get( "blockingHandler" ), input,
                    getReadTimeout( type, readTimeout ), deserializer, resourcePoolReleaser );
            if ( shouldCheckStoreId( type ) )
            {
                // specificStoreId is there as a workaround for then the graphDb isn't initialized yet
                if ( specificStoreId != null )
                {
                    assertCorrectStoreId( response.getStoreId(), specificStoreId );
                }
                else
                {
                    assertCorrectStoreId( response.getStoreId(), storeId );
                }
            }
            return response;
        }
        catch ( IllegalProtocolVersionException e )
        {
            // Protocol mismatch: notify registered handlers before rethrowing.
            failure = e;
            success = false;
            for ( MismatchingVersionHandler handler : mismatchingVersionHandlers )
            {
                handler.versionMismatched( e.getExpected(), e.getReceived() );
            }
            throw e;
        }
        catch ( Throwable e )
        {
            failure = e;
            success = false;
            if ( channelContext != null )
            {
                closeChannel( channelContext );
            }
            throw Exceptions.launderedException( ComException.class, e );
        }
        finally
        {
            /*
             * Otherwise the user must call response.close() to prevent resource leaks.
             */
            if ( !success )
            {
                releaseChannel();
            }
            requestMonitor.endRequest( failure );
        }
    }

    // Hook: subclasses may use per-request-type timeouts.
    protected long getReadTimeout( RequestType<T> type, long readTimeout )
    {
        return readTimeout;
    }

    // Hook: subclasses may skip the store-id handshake for some request types.
    protected boolean shouldCheckStoreId( RequestType<T> type )
    {
        return true;
    }

    protected StoreId getStoreId()
    {
        return storeId;
    }

    private void assertCorrectStoreId( StoreId storeId, StoreId myStoreId )
    {
        if ( !myStoreId.equals( storeId ) )
        {
            throw new MismatchingStoreIdException( myStoreId, storeId );
        }
    }

    private Triplet<Channel, ChannelBuffer, ByteBuffer> getChannel( RequestType<T> type ) throws Exception
    {
        // Calling acquire is dangerous since it may be a blocking call... and if this
        // thread holds a lock which others may want to be able to communicate with
        // the server things go stiff.
        Triplet<Channel, ChannelBuffer, ByteBuffer> result = channelPool.acquire();
        if ( result == null )
        {
            msgLog.error( "Unable to acquire new channel for " + type );
            throw new ComException( "Unable to acquire new channel for " + type );
        }
        return result;
    }

    private void releaseChannel()
    {
        channelPool.release();
    }

    private void closeChannel( Triplet<Channel, ChannelBuffer, ByteBuffer> channel )
    {
        channel.first().close().awaitUninterruptibly();
    }

    @Override
    public ChannelPipeline getPipeline() throws Exception
    {
        // Frame decoding plus a blocking reader that sendRequest() pulls responses from.
        ChannelPipeline pipeline = Channels.pipeline();
        addLengthFieldPipes( pipeline, frameLength );
        BlockingReadHandler<ChannelBuffer> reader = new BlockingReadHandler<ChannelBuffer>(
                new ArrayBlockingQueue<ChannelEvent>( 3, false ) );
        pipeline.addLast( "blockingHandler", reader );
        return pipeline;
    }

    public void addMismatchingVersionHandler( MismatchingVersionHandler toAdd )
    {
        mismatchingVersionHandlers.add( toAdd );
    }

    protected byte getInternalProtocolVersion()
    {
        return Server.INTERNAL_PROTOCOL_VERSION;
    }

    @Override
    public String toString()
    {
        return getClass().getSimpleName() + "[" + address + "]";
    }
}
| false
|
enterprise_com_src_main_java_org_neo4j_com_Client.java
|
4,264
|
public class ChunkingChannelBuffer implements ChannelBuffer, ChannelFutureListener
{
static final byte CONTINUATION_LAST = 0;
static final byte CONTINUATION_MORE = 1;
static final byte OUTCOME_SUCCESS = 0;
static final byte OUTCOME_FAILURE = 1;
private static final int MAX_WRITE_AHEAD_CHUNKS = 5;
private ChannelBuffer buffer;
private final Channel channel;
private final int capacity;
private int continuationPosition;
private final AtomicInteger writeAheadCounter = new AtomicInteger();
private volatile boolean failure;
private final byte applicationProtocolVersion;
private final byte internalProtocolVersion;
/**
 * @param buffer the initial backing buffer for the first chunk.
 * @param channel the Netty channel chunks are written to.
 * @param capacity chunk size: once the write index reaches this, the current
 *        chunk is flushed and a new buffer started.
 * @param internalProtocolVersion packed into the 2-byte chunk header.
 * @param applicationProtocolVersion packed into the 2-byte chunk header.
 */
public ChunkingChannelBuffer( ChannelBuffer buffer, Channel channel, int capacity,
                              byte internalProtocolVersion, byte applicationProtocolVersion )
{
    this.buffer = buffer;
    this.channel = channel;
    this.capacity = capacity;
    this.internalProtocolVersion = internalProtocolVersion;
    this.applicationProtocolVersion = applicationProtocolVersion;
    // Every chunk starts with a header; reserve space for it up front.
    addRoomForContinuationHeader();
}
// Writes a placeholder header at the start of the current chunk and remembers
// its position so it can be patched later (e.g. to CONTINUATION_MORE).
private void addRoomForContinuationHeader()
{
    continuationPosition = writerIndex();
    // byte 0: [pppp,ppoc] p: internal protocol version, o: outcome, c: continuation
    // byte 1: [aaaa,aaaa] a: application protocol version
    buffer.writeBytes( header( CONTINUATION_LAST ) );
}

// Builds the 2-byte chunk header; the outcome bit reflects the current failure flag.
private byte[] header( byte continuation )
{
    byte[] header = new byte[2];
    header[0] = (byte)((internalProtocolVersion << 2) | ((failure?OUTCOME_FAILURE:OUTCOME_SUCCESS) << 1) | continuation );
    header[1] = applicationProtocolVersion;
    return header;
}

// Rewrites the already-reserved header in place with the given continuation flag.
private void setContinuation( byte continuation )
{
    buffer.setBytes( continuationPosition, header( continuation ) );
}
// ---- Plain delegation to the current backing buffer (no chunking logic) ----
public ChannelBufferFactory factory()
{
    return buffer.factory();
}

public int capacity()
{
    return buffer.capacity();
}

public ByteOrder order()
{
    return buffer.order();
}

public boolean isDirect()
{
    return buffer.isDirect();
}

public int readerIndex()
{
    return buffer.readerIndex();
}

public void readerIndex( int readerIndex )
{
    buffer.readerIndex( readerIndex );
}

public int writerIndex()
{
    return buffer.writerIndex();
}

public void writerIndex( int writerIndex )
{
    buffer.writerIndex( writerIndex );
}

public void setIndex( int readerIndex, int writerIndex )
{
    buffer.setIndex( readerIndex, writerIndex );
}

public int readableBytes()
{
    return buffer.readableBytes();
}

public int writableBytes()
{
    return buffer.writableBytes();
}

public boolean readable()
{
    return buffer.readable();
}

public boolean writable()
{
    return buffer.writable();
}

// Resets the buffer, records the outcome flag for subsequent headers, and
// reserves a fresh header slot.
public void clear( boolean failure )
{
    buffer.clear();
    this.failure = failure;
    addRoomForContinuationHeader();
}

public void clear()
{
    clear( false );
}

public void markReaderIndex()
{
    buffer.markReaderIndex();
}

public void resetReaderIndex()
{
    buffer.resetReaderIndex();
}

public void markWriterIndex()
{
    buffer.markWriterIndex();
}

public void resetWriterIndex()
{
    buffer.resetWriterIndex();
}

public void discardReadBytes()
{
    buffer.discardReadBytes();
}

public void ensureWritableBytes( int writableBytes )
{
    buffer.ensureWritableBytes( writableBytes );
}

public byte getByte( int index )
{
    return buffer.getByte( index );
}

public short getUnsignedByte( int index )
{
    return buffer.getUnsignedByte( index );
}

public short getShort( int index )
{
    return buffer.getShort( index );
}

public int getUnsignedShort( int index )
{
    return buffer.getUnsignedShort( index );
}

public int getMedium( int index )
{
    return buffer.getMedium( index );
}

public int getUnsignedMedium( int index )
{
    return buffer.getUnsignedMedium( index );
}

public int getInt( int index )
{
    return buffer.getInt( index );
}

public long getUnsignedInt( int index )
{
    return buffer.getUnsignedInt( index );
}

public long getLong( int index )
{
    return buffer.getLong( index );
}

public char getChar( int index )
{
    return buffer.getChar( index );
}

public float getFloat( int index )
{
    return buffer.getFloat( index );
}

public double getDouble( int index )
{
    return buffer.getDouble( index );
}

public void getBytes( int index, ChannelBuffer dst )
{
    buffer.getBytes( index, dst );
}

public void getBytes( int index, ChannelBuffer dst, int length )
{
    buffer.getBytes( index, dst, length );
}

public void getBytes( int index, ChannelBuffer dst, int dstIndex, int length )
{
    buffer.getBytes( index, dst, dstIndex, length );
}

public void getBytes( int index, byte[] dst )
{
    buffer.getBytes( index, dst );
}

public void getBytes( int index, byte[] dst, int dstIndex, int length )
{
    buffer.getBytes( index, dst, dstIndex, length );
}

public void getBytes( int index, ByteBuffer dst )
{
    buffer.getBytes( index, dst );
}

public void getBytes( int index, OutputStream out, int length ) throws IOException
{
    buffer.getBytes( index, out, length );
}

public int getBytes( int index, GatheringByteChannel out, int length ) throws IOException
{
    return buffer.getBytes( index, out, length );
}

public void setByte( int index, int value )
{
    buffer.setByte( index, value );
}

public void setShort( int index, int value )
{
    buffer.setShort( index, value );
}

public void setMedium( int index, int value )
{
    buffer.setMedium( index, value );
}

public void setInt( int index, int value )
{
    buffer.setInt( index, value );
}

public void setLong( int index, long value )
{
    buffer.setLong( index, value );
}

public void setChar( int index, int value )
{
    buffer.setChar( index, value );
}

public void setFloat( int index, float value )
{
    buffer.setFloat( index, value );
}

public void setDouble( int index, double value )
{
    buffer.setDouble( index, value );
}

public void setBytes( int index, ChannelBuffer src )
{
    buffer.setBytes( index, src );
}

public void setBytes( int index, ChannelBuffer src, int length )
{
    buffer.setBytes( index, src, length );
}

public void setBytes( int index, ChannelBuffer src, int srcIndex, int length )
{
    buffer.setBytes( index, src, srcIndex, length );
}

public void setBytes( int index, byte[] src )
{
    buffer.setBytes( index, src );
}

public void setBytes( int index, byte[] src, int srcIndex, int length )
{
    buffer.setBytes( index, src, srcIndex, length );
}

public void setBytes( int index, ByteBuffer src )
{
    buffer.setBytes( index, src );
}

public int setBytes( int index, InputStream in, int length ) throws IOException
{
    return buffer.setBytes( index, in, length );
}

public int setBytes( int index, ScatteringByteChannel in, int length ) throws IOException
{
    return buffer.setBytes( index, in, length );
}

public void setZero( int index, int length )
{
    buffer.setZero( index, length );
}

public byte readByte()
{
    return buffer.readByte();
}

public short readUnsignedByte()
{
    return buffer.readUnsignedByte();
}

public short readShort()
{
    return buffer.readShort();
}

public int readUnsignedShort()
{
    return buffer.readUnsignedShort();
}

public int readMedium()
{
    return buffer.readMedium();
}

public int readUnsignedMedium()
{
    return buffer.readUnsignedMedium();
}

public int readInt()
{
    return buffer.readInt();
}

public long readUnsignedInt()
{
    return buffer.readUnsignedInt();
}

public long readLong()
{
    return buffer.readLong();
}

public char readChar()
{
    return buffer.readChar();
}

public float readFloat()
{
    return buffer.readFloat();
}

public double readDouble()
{
    return buffer.readDouble();
}

public ChannelBuffer readBytes( int length )
{
    return buffer.readBytes( length );
}

public ChannelBuffer readBytes( ChannelBufferIndexFinder indexFinder )
{
    return buffer.readBytes( indexFinder );
}

public ChannelBuffer readSlice( int length )
{
    return buffer.readSlice( length );
}

public ChannelBuffer readSlice( ChannelBufferIndexFinder indexFinder )
{
    return buffer.readSlice( indexFinder );
}

public void readBytes( ChannelBuffer dst )
{
    buffer.readBytes( dst );
}

public void readBytes( ChannelBuffer dst, int length )
{
    buffer.readBytes( dst, length );
}

public void readBytes( ChannelBuffer dst, int dstIndex, int length )
{
    buffer.readBytes( dst, dstIndex, length );
}

public void readBytes( byte[] dst )
{
    buffer.readBytes( dst );
}

public void readBytes( byte[] dst, int dstIndex, int length )
{
    buffer.readBytes( dst, dstIndex, length );
}

public void readBytes( ByteBuffer dst )
{
    buffer.readBytes( dst );
}

public void readBytes( OutputStream out, int length ) throws IOException
{
    buffer.readBytes( out, length );
}

public int readBytes( GatheringByteChannel out, int length ) throws IOException
{
    return buffer.readBytes( out, length );
}

public void skipBytes( int length )
{
    buffer.skipBytes( length );
}

public int skipBytes( ChannelBufferIndexFinder indexFinder )
{
    return buffer.skipBytes( indexFinder );
}
// If the pending write of bytesPlus bytes would exceed the chunk capacity,
// flush the current chunk (marking it CONTINUATION_MORE) and start a new one.
private void sendChunkIfNeeded( int bytesPlus )
{
    // Note: This is wasteful, it should pack as much data as possible into the current chunk before sending it off.
    // Refactor when there is time.
    if ( writerIndex()+bytesPlus >= capacity )
    {
        setContinuation( CONTINUATION_MORE );
        writeCurrentChunk();
        // TODO Reuse buffers?
        buffer = ChannelBuffers.dynamicBuffer();
        addRoomForContinuationHeader();
    }
}

// Flushes the current chunk asynchronously, throttled so the client doesn't
// fall too far behind; registers this object as the write-completion listener.
private void writeCurrentChunk()
{
    if ( !channel.isOpen() || !channel.isConnected() || !channel.isBound() )
        throw new ComException( "Channel has been closed, so no need to try to write to it anymore. Client closed it?" );
    waitForClientToCatchUpOnReadingChunks();
    ChannelFuture future = channel.write( buffer );
    future.addListener( this );
    writeAheadCounter.incrementAndGet();
}
/**
 * Blocks (polling every 200ms) until either the channel disconnects or the
 * number of in-flight chunks drops below {@code MAX_WRITE_AHEAD_CHUNKS}.
 * Throws if the channel was found closed after having waited.
 */
private void waitForClientToCatchUpOnReadingChunks()
{
    // Wait until channel gets disconnected or client catches up.
    // If channel has been disconnected we can exit and the next write
    // will produce a decent exception out.
    boolean waited = false;
    boolean interrupted = false;
    while ( channel.isConnected() && writeAheadCounter.get() >= MAX_WRITE_AHEAD_CHUNKS )
    {
        waited = true;
        try
        {
            Thread.sleep( 200 );
        }
        catch ( InterruptedException e )
        { // Keep waiting, but remember the interrupt instead of swallowing it.
          // (Re-interrupting inside the loop would make every sleep throw
          // immediately, turning this into a busy spin.)
            interrupted = true;
        }
    }
    if ( interrupted )
    {
        // Restore the interrupt status for callers now that the wait is over;
        // the previous Thread.interrupted() call cleared and discarded it.
        Thread.currentThread().interrupt();
    }
    if ( waited && (!channel.isConnected() || !channel.isOpen()) )
    {
        throw new ComException( "Channel has been closed" );
    }
}
@Override
public void operationComplete( ChannelFuture future ) throws Exception
{
if ( !future.isDone() )
{
throw new ComException( "This should not be possible because we waited for the future to be done" );
}
if ( !future.isSuccess() || future.isCancelled() )
{
future.getChannel().close();
}
writeAheadCounter.decrementAndGet();
}
    /**
     * Signals that the writer is finished: flushes the current chunk if any data
     * has been written to it since the last flush.
     */
    public void done()
    {
        if ( readable() /* Meaning that something has been written to it and can be read/sent */ )
        {
            writeCurrentChunk();
        }
    }
    /*
     * Chunk-aware write methods: each reserves an upper bound of bytes via
     * sendChunkIfNeeded() (which may flush the current chunk first) before
     * delegating to the underlying buffer. Some reservations over-estimate
     * (e.g. 4 bytes for a 3-byte medium, 8 for a 4-byte float, capacity instead
     * of readable bytes for a source buffer) — harmless, since over-reserving
     * can only flush a chunk slightly early.
     */
    public void writeByte( int value )
    {
        sendChunkIfNeeded( 1 );
        buffer.writeByte( value );
    }

    public void writeShort( int value )
    {
        sendChunkIfNeeded( 2 );
        buffer.writeShort( value );
    }

    public void writeMedium( int value )
    {
        sendChunkIfNeeded( 4 );
        buffer.writeMedium( value );
    }

    public void writeInt( int value )
    {
        sendChunkIfNeeded( 4 );
        buffer.writeInt( value );
    }

    public void writeLong( long value )
    {
        sendChunkIfNeeded( 8 );
        buffer.writeLong( value );
    }

    public void writeChar( int value )
    {
        sendChunkIfNeeded( 2 );
        buffer.writeChar( value );
    }

    public void writeFloat( float value )
    {
        sendChunkIfNeeded( 8 );
        buffer.writeFloat( value );
    }

    public void writeDouble( double value )
    {
        sendChunkIfNeeded( 8 );
        buffer.writeDouble( value );
    }

    public void writeBytes( ChannelBuffer src )
    {
        sendChunkIfNeeded( src.capacity() );
        buffer.writeBytes( src );
    }

    public void writeBytes( ChannelBuffer src, int length )
    {
        sendChunkIfNeeded( length );
        buffer.writeBytes( src, length );
    }

    public void writeBytes( ChannelBuffer src, int srcIndex, int length )
    {
        sendChunkIfNeeded( length );
        buffer.writeBytes( src, srcIndex, length );
    }

    public void writeBytes( byte[] src )
    {
        sendChunkIfNeeded( src.length );
        buffer.writeBytes( src );
    }

    public void writeBytes( byte[] src, int srcIndex, int length )
    {
        sendChunkIfNeeded( length );
        buffer.writeBytes( src, srcIndex, length );
    }

    public void writeBytes( ByteBuffer src )
    {
        sendChunkIfNeeded( src.limit() );
        buffer.writeBytes( src );
    }

    public int writeBytes( InputStream in, int length ) throws IOException
    {
        sendChunkIfNeeded( length );
        return buffer.writeBytes( in, length );
    }

    public int writeBytes( ScatteringByteChannel in, int length ) throws IOException
    {
        sendChunkIfNeeded( length );
        return buffer.writeBytes( in, length );
    }

    public void writeZero( int length )
    {
        sendChunkIfNeeded( length );
        buffer.writeZero( length );
    }
    /*
     * The remainder are straight delegations to the wrapped ChannelBuffer:
     * searching, copying/slicing/duplicating, ByteBuffer/array views, string
     * conversion and comparison. None of them interact with the chunking state.
     */
    public int indexOf( int fromIndex, int toIndex, byte value )
    {
        return buffer.indexOf( fromIndex, toIndex, value );
    }

    public int indexOf( int fromIndex, int toIndex, ChannelBufferIndexFinder indexFinder )
    {
        return buffer.indexOf( fromIndex, toIndex, indexFinder );
    }

    public int bytesBefore( byte value )
    {
        return buffer.bytesBefore( value );
    }

    public int bytesBefore( ChannelBufferIndexFinder indexFinder )
    {
        return buffer.bytesBefore( indexFinder );
    }

    public int bytesBefore( int length, byte value )
    {
        return buffer.bytesBefore( length, value );
    }

    public int bytesBefore( int length, ChannelBufferIndexFinder indexFinder )
    {
        return buffer.bytesBefore( length, indexFinder );
    }

    public int bytesBefore( int index, int length, byte value )
    {
        return buffer.bytesBefore( index, length, value );
    }

    public int bytesBefore( int index, int length, ChannelBufferIndexFinder indexFinder )
    {
        return buffer.bytesBefore( index, length, indexFinder );
    }

    public ChannelBuffer copy()
    {
        return buffer.copy();
    }

    public ChannelBuffer copy( int index, int length )
    {
        return buffer.copy( index, length );
    }

    public ChannelBuffer slice()
    {
        return buffer.slice();
    }

    public ChannelBuffer slice( int index, int length )
    {
        return buffer.slice( index, length );
    }

    public ChannelBuffer duplicate()
    {
        return buffer.duplicate();
    }

    public ByteBuffer toByteBuffer()
    {
        return buffer.toByteBuffer();
    }

    public ByteBuffer toByteBuffer( int index, int length )
    {
        return buffer.toByteBuffer( index, length );
    }

    public ByteBuffer[] toByteBuffers()
    {
        return buffer.toByteBuffers();
    }

    public ByteBuffer[] toByteBuffers( int index, int length )
    {
        return buffer.toByteBuffers( index, length );
    }

    public boolean hasArray()
    {
        return buffer.hasArray();
    }

    public byte[] array()
    {
        return buffer.array();
    }

    public int arrayOffset()
    {
        return buffer.arrayOffset();
    }

    public String toString( Charset charset )
    {
        return buffer.toString( charset );
    }

    public String toString( int index, int length, Charset charset )
    {
        return buffer.toString( index, length, charset );
    }

    public String toString( String charsetName )
    {
        return buffer.toString( charsetName );
    }

    public String toString( String charsetName, ChannelBufferIndexFinder terminatorFinder )
    {
        return buffer.toString( charsetName, terminatorFinder );
    }

    public String toString( int index, int length, String charsetName )
    {
        return buffer.toString( index, length, charsetName );
    }

    public String toString( int index, int length, String charsetName,
            ChannelBufferIndexFinder terminatorFinder )
    {
        return buffer.toString( index, length, charsetName, terminatorFinder );
    }

    public int compareTo( ChannelBuffer buffer )
    {
        return this.buffer.compareTo( buffer );
    }

    @Override
    public String toString()
    {
        return buffer.toString();
    }
}
| false
|
enterprise_com_src_main_java_org_neo4j_com_ChunkingChannelBuffer.java
|
4,265
|
/**
 * Reads a stream written in {@link BlockLogBuffer} block format: a sequence of
 * blocks whose first byte is either {@code FULL_BLOCK_AND_MORE} (a full block
 * with more to follow) or, for the final block, the number of data bytes it
 * carries. Presents the concatenated payload as a {@link ReadableByteChannel}.
 */
public class BlockLogReader implements ReadableByteChannel
{
    private final ChannelBuffer source;
    // Scratch space for one block's payload; byteBuffer is a view over byteArray.
    private final byte[] byteArray = new byte[BlockLogBuffer.MAX_SIZE];
    private final ByteBuffer byteBuffer = ByteBuffer.wrap( byteArray );
    // True while the most recent block header announced more blocks to come.
    private boolean moreBlocks;

    public BlockLogReader( ChannelBuffer source )
    {
        this.source = source;
        readNextBlock();
    }

    /**
     * Read a block from the channel. Read the first byte, determine size and if
     * more are coming, set state accordingly and store content. NOTE: After
     * this op the buffer is flipped, ready to read.
     */
    private void readNextBlock()
    {
        int blockSize = source.readUnsignedByte();
        byteBuffer.clear();
        moreBlocks = blockSize == BlockLogBuffer.FULL_BLOCK_AND_MORE;
        int limit = moreBlocks ? BlockLogBuffer.DATA_SIZE : blockSize;
        byteBuffer.limit( limit );
        source.readBytes( byteBuffer );
        byteBuffer.flip();
    }

    public boolean isOpen()
    {
        return true;
    }

    public void close() throws IOException
    {
        // This is to make sure that reader index in the ChannelBuffer is left
        // in the right place even if this reader wasn't completely read through.
        readToTheEnd();
    }

    public int read( ByteBuffer dst ) throws IOException
    {
        /*
         * Fill up dst with what comes from the channel, until dst is full.
         * readAsMuchAsPossible() is constantly called reading essentially
         * one chunk at a time until either it runs out of stuff coming
         * from the channel or the actual target buffer is filled.
         */
        // NOTE(review): uses dst.limit() rather than dst.remaining() as the byte
        // budget, which assumes dst arrives with position 0 — confirm callers.
        int bytesWanted = dst.limit();
        int bytesRead = 0;
        while ( bytesWanted > 0 )
        {
            int bytesReadThisTime = readAsMuchAsPossible( dst, bytesWanted );
            if ( bytesReadThisTime == 0 )
            {
                break;
            }
            bytesRead += bytesReadThisTime;
            bytesWanted -= bytesReadThisTime;
        }
        // -1 (end of stream) only when nothing was read and no blocks remain.
        return bytesRead == 0 && !moreBlocks ? -1 : bytesRead;
    }

    /**
     * Reads in at most {@code maxBytesWanted} in {@code dst} but never more
     * than a chunk.
     *
     * @param dst The buffer to write the reads bytes to
     * @param maxBytesWanted The maximum number of bytes to read.
     * @return The number of bytes actually read
     */
    private int readAsMuchAsPossible( ByteBuffer dst, int maxBytesWanted )
    {
        // Current block exhausted but more are coming: pull in the next one.
        if ( byteBuffer.remaining() == 0 && moreBlocks )
        {
            readNextBlock();
        }

        int bytesToRead = Math.min( maxBytesWanted, byteBuffer.remaining() );
        // Copy straight from the backing array and advance the view manually.
        dst.put( byteArray, byteBuffer.position(), bytesToRead );
        byteBuffer.position( byteBuffer.position()+bytesToRead );
        return bytesToRead;
    }

    /**
     * Reads everything that can be read from the channel. Stops when a chunk
     * starting with a non zero byte is met.
     */
    private void readToTheEnd()
    {
        while ( moreBlocks )
        {
            readNextBlock();
        }
    }
}
| false
|
enterprise_com_src_main_java_org_neo4j_com_BlockLogReader.java
|
4,266
|
/**
 * A {@link LogBuffer} that packages written data into blocks of at most
 * {@code MAX_SIZE} bytes (header included) and writes each block to the target
 * {@link ChannelBuffer}. Each block starts with one header byte:
 * {@code FULL_BLOCK_AND_MORE} (0) for a full block with more to follow, or —
 * for the final block emitted by {@link #done()} — the number of data bytes it
 * carries. The counterpart reader is {@code BlockLogReader}.
 */
public class BlockLogBuffer implements LogBuffer
{
    // First byte of every chunk that is not the last one
    static final byte FULL_BLOCK_AND_MORE = 0;
    static final int MAX_SIZE = 256; /* soft limit, incl. header */
    static final int DATA_SIZE = MAX_SIZE-1;

    private final ChannelBuffer target;
    private final ByteCounterMonitor monitor;
    // MAX_SIZE can be overcome by one primitive put(), the largest is 8 bytes
    private final byte[] byteArray = new byte[MAX_SIZE + 8/*largest atom*/];
    private final ByteBuffer byteBuffer = ByteBuffer.wrap( byteArray );

    public BlockLogBuffer( ChannelBuffer target, ByteCounterMonitor monitor )
    {
        this.target = target;
        this.monitor = monitor;
        clearInternalBuffer();
    }

    // Resets the scratch buffer and reserves the header byte, optimistically
    // assuming the block will fill up completely.
    private void clearInternalBuffer()
    {
        byteBuffer.clear();
        // reserve space for size - assume we are going to fill the buffer
        byteBuffer.put( FULL_BLOCK_AND_MORE );
    }

    /**
     * If the position of the byteBuffer is larger than MAX_SIZE then
     * MAX_SIZE bytes are flushed to the underlying channel. The remaining
     * bytes (1 up to and including 8 - see the byteArray field initializer)
     * are moved over at the beginning of the cleared buffer.
     *
     * @return the buffer
     */
    private LogBuffer checkFlush()
    {
        if ( byteBuffer.position() > MAX_SIZE )
        {
            target.writeBytes( byteArray, 0, MAX_SIZE );
            monitor.bytesWritten( MAX_SIZE );
            int pos = byteBuffer.position();
            clearInternalBuffer();
            byteBuffer.put( byteArray, MAX_SIZE, pos - MAX_SIZE );
        }
        return this;
    }

    public LogBuffer put( byte b ) throws IOException
    {
        byteBuffer.put( b );
        return checkFlush();
    }

    public LogBuffer putShort( short s ) throws IOException
    {
        byteBuffer.putShort( s );
        return checkFlush();
    }

    public LogBuffer putInt( int i ) throws IOException
    {
        byteBuffer.putInt( i );
        return checkFlush();
    }

    public LogBuffer putLong( long l ) throws IOException
    {
        byteBuffer.putLong( l );
        return checkFlush();
    }

    public LogBuffer putFloat( float f ) throws IOException
    {
        byteBuffer.putFloat( f );
        return checkFlush();
    }

    public LogBuffer putDouble( double d ) throws IOException
    {
        byteBuffer.putDouble( d );
        return checkFlush();
    }

    // Bulk writes chop the input to fit the remaining block space, flushing
    // between slices; unlike the primitive puts, they never use the 8-byte slack.
    public LogBuffer put( byte[] bytes ) throws IOException
    {
        for ( int pos = 0; pos < bytes.length; )
        {
            int toWrite = Math.min( byteBuffer.remaining(), bytes.length - pos );
            byteBuffer.put( bytes, pos, toWrite );
            checkFlush();
            pos += toWrite;
        }
        return this;
    }

    public LogBuffer put( char[] chars ) throws IOException
    {
        // Works in byte units (2 bytes per char), rounding each slice down to a
        // whole number of chars so no char straddles a flush boundary.
        for ( int bytePos = 0; bytePos < chars.length * 2; )
        {
            int bytesToWrite = Math.min( byteBuffer.remaining(), chars.length * 2 - bytePos );
            bytesToWrite -= ( bytesToWrite % 2 );
            for ( int i = 0; i < bytesToWrite / 2; i++ )
            {
                byteBuffer.putChar( chars[( bytePos / 2 ) + i] );
            }
            checkFlush();
            bytePos += bytesToWrite;
        }
        return this;
    }

    @Override
    public void writeOut() throws IOException
    {
        // Do nothing
    }

    public void force() throws IOException
    {
        // Do nothing
    }

    public long getFileChannelPosition() throws IOException
    {
        throw new UnsupportedOperationException( "BlockLogBuffer does not have a FileChannel" );
    }

    public StoreChannel getFileChannel()
    {
        throw new UnsupportedOperationException( "BlockLogBuffer does not have a FileChannel" );
    }

    /**
     * Signals the end of use for this buffer over this channel - first byte of
     * the chunk is set to the position of the buffer ( != 0, instead of
     * FULL_BLOCK_AND_MORE) and it is written to the channel.
     */
    public void done()
    {
        assert byteBuffer.position() > 1 : "buffer should contain more than the header";
        assert byteBuffer.position() <= MAX_SIZE : "buffer should not be over full";
        long howManyBytesToWrite = byteBuffer.position();
        byteBuffer.put( 0, (byte) ( byteBuffer.position() - 1 ) );
        byteBuffer.flip();
        target.writeBytes( byteBuffer );
        monitor.bytesWritten( howManyBytesToWrite );
        clearInternalBuffer();
    }

    /**
     * Drains {@code data} into this buffer until end of stream.
     *
     * @return the total number of bytes consumed from {@code data}
     */
    public int write( ReadableByteChannel data ) throws IOException
    {
        int result = 0;
        int bytesRead = 0;
        // NOTE(review): a channel that repeatedly returns 0 (non-blocking, no
        // data available) would spin here forever — presumably only blocking
        // channels are passed in; confirm against callers.
        while ( (bytesRead = data.read( byteBuffer )) >= 0 )
        {
            checkFlush();
            result += bytesRead;
        }
        return result;
    }
}
| false
|
enterprise_com_src_main_java_org_neo4j_com_BlockLogBuffer.java
|
4,267
|
public class Quorums
{
    /**
     * Determines if a number of available members qualify as a majority, given
     * the total number of members.
     *
     * @param availableMembers number of members currently reachable/available
     * @param totalMembers total number of members in the group
     * @return {@code true} if the available members form a strict majority
     */
    public static boolean isQuorum( long availableMembers, long totalMembers )
    {
        // A strict majority is "more than half". Long division already floors,
        // so floor(total/2) + 1 is simply total/2 + 1; the previous Math.floor
        // round-trip through double was a no-op at best and lossy for values
        // beyond 2^53.
        return availableMembers >= totalMembers / 2 + 1;
    }
}
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_util_Quorums.java
|
4,268
|
/**
 * A pending timeout: a deadline paired with the message to deliver when the
 * deadline passes.
 *
 * NOTE(review): this snippet references {@code timeoutStrategy}, which is not
 * declared here — it appears to be an extract of {@code Timeouts.Timeout},
 * which resolves that field from its enclosing class. Confirm the intended
 * enclosing scope before reusing this standalone.
 */
public class Timeout
{
    // Absolute deadline, in the same time base as checkTimeout's 'now'.
    private long timeout;
    private Message<? extends MessageType> timeoutMessage;

    public Timeout( long timeout, Message<? extends MessageType> timeoutMessage )
    {
        this.timeout = timeout;
        this.timeoutMessage = timeoutMessage;
    }

    public Message<? extends MessageType> getTimeoutMessage()
    {
        return timeoutMessage;
    }

    /**
     * Checks whether the deadline has passed; if so, notifies the strategy.
     *
     * @param now the current time in the same time base as the deadline
     * @return {@code true} if this timeout has expired
     */
    public boolean checkTimeout( long now )
    {
        if ( now >= timeout )
        {
            timeoutStrategy.timeoutTriggered( timeoutMessage );
            return true;
        }
        else
        {
            return false;
        }
    }

    /** Delivers the timeout message to the given receiver. */
    public void trigger( MessageProcessor receiver )
    {
        receiver.process( timeoutMessage );
    }

    @Override
    public String toString()
    {
        return timeout + ": " + timeoutMessage;
    }
}
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_timeout_Timeouts.java
|
4,269
|
/**
 * Tracks pending timeouts keyed by caller-supplied objects. Time advances only
 * through external {@link #tick(long)} calls; expired timeouts deliver their
 * message to the single registered {@link MessageProcessor}.
 */
public class Timeouts implements MessageSource
{
    // Latest time observed via tick(); all deadlines are relative to this.
    private long now = 0;
    private MessageProcessor receiver;
    private TimeoutStrategy timeoutStrategy;

    // Pending timeouts by caller-provided key.
    private Map<Object, Timeout> timeouts = new HashMap<Object, Timeout>();
    // Scratch list, reused each tick, of timeouts that just expired.
    private List<Map.Entry<Object, Timeout>> triggeredTimeouts = new ArrayList<Map.Entry<Object, Timeout>>();

    public Timeouts( TimeoutStrategy timeoutStrategy )
    {
        this.timeoutStrategy = timeoutStrategy;
    }

    @Override
    public void addMessageProcessor( MessageProcessor messageProcessor )
    {
        // Only one downstream processor is supported; fail loudly on a second.
        if(receiver != null)
        {
            throw new UnsupportedOperationException( "Timeouts does not yet support multiple message processors" );
        }
        receiver = messageProcessor;
    }

    /**
     * Add a new timeout to the list
     * If this is not cancelled it will trigger a message on the message processor
     *
     * @param key
     * @param timeoutMessage
     */
    public void setTimeout( Object key, Message<? extends MessageType> timeoutMessage )
    {
        // Deadline = current tick time + strategy-determined duration.
        long timeoutAt = now + timeoutStrategy.timeoutFor( timeoutMessage );
        timeouts.put( key, new Timeout( timeoutAt, timeoutMessage ) );
    }

    /**
     * Cancel a timeout corresponding to a particular key. Use the same key
     * that was used to set it up.
     *
     * @param key
     */
    public void cancelTimeout( Object key )
    {
        Timeout timeout = timeouts.remove( key );
        if ( timeout != null )
        {
            timeoutStrategy.timeoutCancelled( timeout.timeoutMessage );
        }
    }

    /**
     * Cancel all current timeouts. This is typically used when shutting down.
     */
    public void cancelAllTimeouts()
    {
        for ( Timeout timeout : timeouts.values() )
        {
            timeoutStrategy.timeoutCancelled( timeout.getTimeoutMessage() );
        }
        timeouts.clear();
    }

    public Map<Object, Timeout> getTimeouts()
    {
        return timeouts;
    }

    public Message<? extends MessageType> getTimeoutMessage( String timeoutName )
    {
        Timeout timeout = timeouts.get( timeoutName );
        if ( timeout != null )
        {
            return timeout.getTimeoutMessage();
        }
        else
        {
            return null;
        }
    }

    /**
     * Advances time to {@code time}, collects expired timeouts under the lock,
     * then delivers them outside it (see comment below for why).
     */
    public void tick( long time )
    {
        synchronized ( this )
        {
            // Time has passed
            now = time;

            timeoutStrategy.tick( now );

            // Check if any timeouts needs to be triggered
            triggeredTimeouts.clear();
            for ( Map.Entry<Object, Timeout> timeout : timeouts.entrySet() )
            {
                if ( timeout.getValue().checkTimeout( now ) )
                {
                    triggeredTimeouts.add( timeout );
                }
            }

            // Remove all timeouts that were triggered
            for ( Map.Entry<Object, Timeout> triggeredTimeout : triggeredTimeouts )
            {
                timeouts.remove( triggeredTimeout.getKey() );
            }
        }

        // Trigger timeouts
        // This needs to be done outside of the synchronized block as it will trigger a message
        // which will cause the statemachine to synchronize on Timeouts
        for ( Map.Entry<Object, Timeout> triggeredTimeout : triggeredTimeouts )
        {
            triggeredTimeout.getValue().trigger( receiver );
        }
    }

    /** A pending timeout: a deadline paired with the message to deliver on expiry. */
    public class Timeout
    {
        // Absolute deadline in the tick() time base.
        private long timeout;
        private Message<? extends MessageType> timeoutMessage;

        public Timeout( long timeout, Message<? extends MessageType> timeoutMessage )
        {
            this.timeout = timeout;
            this.timeoutMessage = timeoutMessage;
        }

        public Message<? extends MessageType> getTimeoutMessage()
        {
            return timeoutMessage;
        }

        /** @return {@code true} and notifies the strategy if the deadline has passed. */
        public boolean checkTimeout( long now )
        {
            if ( now >= timeout )
            {
                timeoutStrategy.timeoutTriggered( timeoutMessage );
                return true;
            }
            else
            {
                return false;
            }
        }

        /** Delivers the timeout message to the given receiver. */
        public void trigger( MessageProcessor receiver )
        {
            receiver.process( timeoutMessage );
        }

        @Override
        public String toString()
        {
            return timeout + ": " + timeoutMessage;
        }
    }
}
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_timeout_Timeouts.java
|
4,270
|
/**
 * A {@link TimeoutStrategy} with explicit per-message-type timeouts; any
 * message type not registered here falls back to the wrapped delegate.
 */
public class MessageTimeoutStrategy
    implements TimeoutStrategy
{
    // Explicitly registered timeouts, keyed by message type.
    Map<MessageType, Long> timeouts = new HashMap<MessageType, Long>();

    private TimeoutStrategy delegate;

    public MessageTimeoutStrategy( TimeoutStrategy delegate )
    {
        this.delegate = delegate;
    }

    /**
     * Registers a fixed timeout for the given message type.
     *
     * @return this strategy, for chaining
     */
    public MessageTimeoutStrategy timeout( MessageType messageType, long timeout )
    {
        timeouts.put( messageType, timeout );
        return this;
    }

    /**
     * Registers a timeout for {@code messageType} expressed as an offset from
     * the timeout already registered for {@code relativeTo}.
     *
     * @return this strategy, for chaining
     * @throws IllegalArgumentException if no timeout is registered for {@code relativeTo}
     */
    public MessageTimeoutStrategy relativeTimeout( MessageType messageType, MessageType relativeTo, long timeout )
    {
        Long base = timeouts.get( relativeTo );
        if ( base == null )
        {
            // Previously this auto-unboxed null and failed with an opaque
            // NullPointerException; fail fast with a descriptive message instead.
            throw new IllegalArgumentException( "No timeout registered for " + relativeTo +
                    " to base a relative timeout on" );
        }
        timeouts.put( messageType, base + timeout );
        return this;
    }

    @Override
    public long timeoutFor( Message message )
    {
        Long timeout = timeouts.get( message.getMessageType() );
        if ( timeout == null )
        {
            // No explicit timeout for this type — defer to the delegate.
            return delegate.timeoutFor( message );
        }
        else
        {
            return timeout;
        }
    }

    @Override
    public void timeoutTriggered( Message timeoutMessage )
    {
        delegate.timeoutTriggered( timeoutMessage );
    }

    @Override
    public void timeoutCancelled( Message timeoutMessage )
    {
        delegate.timeoutCancelled( timeoutMessage );
    }

    @Override
    public void tick( long now )
    {
        delegate.tick( now );
    }
}
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_timeout_MessageTimeoutStrategy.java
|
4,271
|
{
@Override
public boolean process( Message<? extends MessageType> message )
{
synchronized(LatencyCalculator.this)
{
Long sent = conversations.get( message.getHeader( Message.CONVERSATION_ID ) );
if (sent != null)
{
long received = now;
String from = message.getHeader( Message.FROM );
List<Long> hostLatencies = latencies.get( from );
if (hostLatencies == null)
{
hostLatencies = new ArrayList<Long>( );
latencies.put( from, hostLatencies );
}
long latency = received - sent;
if (latency < 0)
logger.warn( "Negative latency!" );
hostLatencies.add( latency );
if (hostLatencies.size() == latencyCount)
{
long latencySum = 0;
for( Long hostLatency : hostLatencies )
{
latencySum += hostLatency;
}
long latencyAvg = latencySum / latencyCount;
// logger.info( from+" roundtrip latency: "+latencyAvg );
hostLatencies.clear();
}
}
}
return true;
}
} );
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_timeout_LatencyCalculator.java
|
4,272
|
/**
 * Measures roundtrip latency by recording the tick-time of each outgoing
 * message under its conversation id, then matching incoming messages with the
 * same conversation id. Per-host latencies are accumulated and averaged every
 * {@code latencyCount} samples. Timeout-strategy calls are delegated untouched.
 */
public class LatencyCalculator
    implements MessageProcessor, TimeoutStrategy
{
    TimeoutStrategy delegate;
    // Conversation id -> tick-time the outgoing message was processed.
    Map<String, Long> conversations = new HashMap<String, Long>( );
    // Host -> latency samples collected so far (cleared every latencyCount samples).
    Map<String, List<Long>> latencies = new HashMap<String, List<Long>>( );
    Logger logger = LoggerFactory.getLogger(LatencyCalculator.class);

    // Latest time seen via tick(); used as both send and receive timestamp.
    long now;
    // Number of samples to accumulate per host before averaging.
    int latencyCount = 5;

    public LatencyCalculator(TimeoutStrategy delegate, MessageSource incoming)
    {
        this.delegate = delegate;

        // Incoming messages are matched against recorded outgoing conversations.
        incoming.addMessageProcessor( new MessageProcessor()
        {
            @Override
            public boolean process( Message<? extends MessageType> message )
            {
                synchronized(LatencyCalculator.this)
                {
                    Long sent = conversations.get( message.getHeader( Message.CONVERSATION_ID ) );
                    if (sent != null)
                    {
                        long received = now;

                        String from = message.getHeader( Message.FROM );
                        List<Long> hostLatencies = latencies.get( from );
                        if (hostLatencies == null)
                        {
                            hostLatencies = new ArrayList<Long>( );
                            latencies.put( from, hostLatencies );
                        }
                        long latency = received - sent;

                        // Should be impossible with a monotonically ticking clock;
                        // logged rather than thrown.
                        if (latency < 0)
                            logger.warn( "Negative latency!" );

                        hostLatencies.add( latency );

                        if (hostLatencies.size() == latencyCount)
                        {
                            long latencySum = 0;
                            for( Long hostLatency : hostLatencies )
                            {
                                latencySum += hostLatency;
                            }

                            long latencyAvg = latencySum / latencyCount;

//                            logger.info( from+" roundtrip latency: "+latencyAvg );

                            hostLatencies.clear();
                        }
                    }
                }
                return true;
            }
        } );
    }

    /**
     * Records the send time of outgoing messages. Internal messages and
     * self-addressed messages are skipped (no network roundtrip to measure).
     */
    @Override
    public boolean process( Message<? extends MessageType> message )
    {
        if ( !message.isInternal() && !message.getHeader( Message.TO ).equals( message.getHeader( Message.CREATED_BY ) ) )
        {
            conversations.put( message.getHeader( Message.CONVERSATION_ID ), now );
        }
        return true;
    }

    @Override
    public long timeoutFor( Message message )
    {
        return delegate.timeoutFor( message );
    }

    @Override
    public void timeoutTriggered( Message timeoutMessage )
    {
        delegate.timeoutTriggered( timeoutMessage );
    }

    @Override
    public void timeoutCancelled( Message timeoutMessage )
    {
        delegate.timeoutCancelled( timeoutMessage );
    }

    public synchronized void tick(long now)
    {
        this.now = now;
        delegate.tick( now );
    }
}
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_timeout_LatencyCalculator.java
|
4,273
|
/**
 * A {@link TimeoutStrategy} that assigns the same fixed timeout to every
 * message, ignoring triggered/cancelled notifications.
 */
public class FixedTimeoutStrategy
    implements TimeoutStrategy
{
    // Protected so subclasses can base derived timeouts on the fixed value.
    protected final long timeout;

    public FixedTimeoutStrategy(long timeout)
    {
        this.timeout = timeout;
    }

    @Override
    public long timeoutFor( Message message )
    {
        return timeout;
    }

    @Override
    public void timeoutTriggered(Message timeoutMessage)
    {
        // Intentionally a no-op: triggering does not adapt the fixed interval.
    }

    @Override
    public void timeoutCancelled(Message timeoutMessage)
    {
        // Intentionally a no-op: cancellation does not adapt the fixed interval.
    }

    @Override
    public void tick( long now )
    {
        // No time-dependent state to update.
    }
}
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_timeout_FixedTimeoutStrategy.java
|
4,274
|
/**
 * A {@link KnownDataByteChannel} that starts failing reads with a
 * {@link MadeUpException} once more than {@code sizeToFailAt} bytes have been
 * produced — used to exercise mid-stream failure handling in tests.
 */
public class FailingByteChannel extends KnownDataByteChannel
{
    private final String failWithMessage;
    private final int sizeToFailAt;

    public FailingByteChannel( int sizeToFailAt, String failWithMessage )
    {
        // Hold twice the failure threshold so the failure point is always
        // reached before the data runs out.
        super( sizeToFailAt*2 );
        this.sizeToFailAt = sizeToFailAt;
        this.failWithMessage = failWithMessage;
    }

    public int read( ByteBuffer dst ) throws IOException
    {
        // 'position' is advanced by the superclass as bytes are produced.
        if ( position > sizeToFailAt ) throw new MadeUpException( failWithMessage );
        return super.read( dst );
    }
}
| false
|
enterprise_com_src_test_java_org_neo4j_com_FailingByteChannel.java
|
4,275
|
/**
 * Thrown when the protocol version received from the other end of a
 * communication channel does not match the version this end expected.
 */
public class IllegalProtocolVersionException extends ComException
{
    // The protocol version this end expected.
    private final int expected;
    // The protocol version actually received from the other side.
    private final int received;

    public IllegalProtocolVersionException( int expected, int received )
    {
        super();
        this.expected = expected;
        this.received = received;
    }

    public IllegalProtocolVersionException( int expected, int received, String message )
    {
        super( message );
        this.expected = expected;
        this.received = received;
    }

    public IllegalProtocolVersionException( int expected, int received, Throwable cause )
    {
        super( cause );
        this.expected = expected;
        this.received = received;
    }

    public IllegalProtocolVersionException( int expected, int received, String message, Throwable cause )
    {
        super( message, cause );
        this.expected = expected;
        this.received = received;
    }

    /** @return the protocol version this end expected */
    public int getExpected()
    {
        return expected;
    }

    /** @return the protocol version actually received */
    public int getReceived()
    {
        return received;
    }
}
| false
|
enterprise_com_src_main_java_org_neo4j_com_IllegalProtocolVersionException.java
|
4,276
|
/**
 * Logs state-machine transitions at debug level, including relevant message
 * headers and payload. Heartbeat transitions are suppressed entirely, and an
 * immediately repeated identical message is throttled (only consecutive
 * duplicates are suppressed — alternating messages all get logged).
 */
public class StateTransitionLogger
    implements StateTransitionListener
{
    private final Logging logging;

    /** Throttle so don't flood occurences of the same message over and over */
    private String lastLogMessage = "";

    public StateTransitionLogger( Logging logging )
    {
        this.logging = logging;
    }

    @Override
    public void stateTransition( StateTransition transition )
    {
        StringLogger logger = logging.getMessagesLog( transition.getOldState().getClass() );

        if ( logger.isDebugEnabled() )
        {
            // Heartbeats are too chatty to log.
            if ( transition.getOldState() == HeartbeatState.heartbeat )
            {
                return;
            }

            // The bulk of the message
            StringBuilder line = new StringBuilder( transition.getOldState().getClass().getSuperclass().getSimpleName
                    () +
                    ": " + transition );

            // Who was this message from?
            if ( transition.getMessage().hasHeader( FROM ) )
            {
                line.append( " from:" + transition.getMessage().getHeader( FROM ) );
            }

            if ( transition.getMessage().hasHeader( INSTANCE ) )
            {
                line.append( " instance:" + transition.getMessage().getHeader( INSTANCE ) );
            }

            if ( transition.getMessage().hasHeader( CONVERSATION_ID ) )
            {
                line.append( " conversation-id:" + transition.getMessage().getHeader( CONVERSATION_ID ) );
            }

            Object payload = transition.getMessage().getPayload();
            if ( payload != null )
            {
                line.append( " payload:" + payload );
            }

            // Throttle: skip if identical to the immediately preceding log line.
            String msg = line.toString();
            if( msg.equals( lastLogMessage ) )
            {
                return;
            }

            // Log it
            logger.debug( line.toString() );
            lastLogMessage = msg;
        }
    }
}
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_statemachine_StateTransitionLogger.java
|
4,277
|
/**
 * A readable channel producing {@code size} bytes of deterministic data:
 * the byte at offset {@code i} has the value {@code i % 10}. Useful in tests
 * where both sides can independently predict the stream's contents.
 */
public class KnownDataByteChannel implements ReadableByteChannel
{
    // Next byte offset to produce; protected so subclasses (e.g. failure-
    // injecting test channels) can inspect progress.
    protected int position;
    private final int size;

    /**
     * @param size total number of bytes this channel will produce
     */
    public KnownDataByteChannel( int size )
    {
        this.size = size;
    }

    @Override
    public boolean isOpen()
    {
        return true;
    }

    @Override
    public void close() throws IOException
    {
        // Nothing to release.
    }

    /**
     * Fills {@code dst} with as much of the remaining known data as fits.
     *
     * @return the number of bytes written into {@code dst}, or -1 on end of data
     */
    @Override
    public int read( ByteBuffer dst ) throws IOException
    {
        // dst.remaining() is the idiomatic form of limit() - position().
        int toRead = Math.min( dst.remaining(), left() );
        if ( toRead == 0 )
        {
            return -1;
        }

        for ( int i = 0; i < toRead; i++ )
        {
            dst.put( (byte) ( ( position++ ) % 10 ) );
        }
        return toRead;
    }

    private int left()
    {
        return size - position;
    }
}
| false
|
enterprise_com_src_test_java_org_neo4j_com_KnownDataByteChannel.java
|
4,278
|
/**
 * Runs a {@link MadeUpServer} in a separate JVM via {@link SubProcess},
 * exposing lifecycle control through {@link ServerInterface}.
 */
public class MadeUpServerProcess extends SubProcess<ServerInterface, StartupData> implements ServerInterface
{
    private static final long serialVersionUID = 1L;

    public static final int PORT = 8888;

    // Assigned last in startup(); non-null doubles as the "server is up" signal
    // polled by awaitStarted(). Transient: never serialized across the process
    // boundary.
    private volatile transient MadeUpServer server;

    @Override
    protected void startup( StartupData data ) throws Throwable
    {
        MadeUpCommunicationInterface implementation = new MadeUpServerImplementation(
                new StoreId( data.creationTime, data.storeId, data.storeVersion ) );
        // NOTE(review): port is the literal 8888 rather than the PORT constant
        // declared above — same value today, but they can drift; consider using PORT.
        MadeUpServer localServer = new MadeUpServer( implementation, 8888, data.internalProtocolVersion,
                data.applicationProtocolVersion, TxChecksumVerifier.ALWAYS_MATCH, data.chunkSize );
        localServer.init();
        localServer.start();
        // The field being non null is an indication of startup, so assign last
        server = localServer;
    }

    /**
     * Polls until the child server has started, giving up after 20 seconds.
     */
    @Override
    public void awaitStarted()
    {
        try
        {
            long endTime = System.currentTimeMillis()+20*1000;
            while ( server == null && System.currentTimeMillis() < endTime )
            {
                Thread.sleep( 10 );
            }
            if ( server == null )
            {
                throw new RuntimeException( "Couldn't start server, wait timeout" );
            }
        }
        catch ( InterruptedException e )
        {
            throw new RuntimeException( e );
        }
    }

    /**
     * Stops the server, then kills the subprocess from a detached thread after a
     * short delay so this RMI-style call can return to the parent first.
     */
    @Override
    protected void shutdown( boolean normal )
    {
        if ( server != null )
        {
            try
            {
                server.stop();
                server.shutdown();
            }
            catch ( Throwable throwable )
            {
                throw new RuntimeException( throwable );
            }
        }
        new Thread()
        {
            @Override
            public void run()
            {
                try
                {
                    Thread.sleep( 100 );
                }
                catch ( InterruptedException e )
                {
                    Thread.interrupted();
                }
                shutdownProcess();
            }
        }.start();
    }

    // Bridges the anonymous thread above to the (protected) superclass shutdown.
    protected void shutdownProcess()
    {
        super.shutdown();
    }
}
| false
|
enterprise_com_src_test_java_org_neo4j_com_MadeUpServerProcess.java
|
4,279
|
/**
 * Server-side test implementation of {@link MadeUpCommunicationInterface}.
 * Every response carries {@code storeIdToRespondWith} and an empty transaction
 * stream; {@link #gotCalled()} lets tests verify the multiply path was reached.
 */
public class MadeUpServerImplementation implements MadeUpCommunicationInterface
{
    private final StoreId storeIdToRespondWith;
    private boolean gotCalled;

    public MadeUpServerImplementation( StoreId storeIdToRespondWith )
    {
        this.storeIdToRespondWith = storeIdToRespondWith;
    }

    @Override
    public Response<Integer> multiply( int value1, int value2 )
    {
        gotCalled = true;
        return new Response<Integer>( value1 * value2, storeIdToRespondWith,
                TransactionStream.EMPTY, ResourceReleaser.NO_OP );
    }

    @Override
    public Response<Void> fetchDataStream( MadeUpWriter writer, int dataSize )
    {
        // Reversed on the server side. This will send data back to the client.
        writer.write( new KnownDataByteChannel( dataSize ) );
        return emptyResponse();
    }

    private Response<Void> emptyResponse()
    {
        return new Response<Void>( null, storeIdToRespondWith,
                TransactionStream.EMPTY, ResourceReleaser.NO_OP );
    }

    @Override
    public Response<Void> sendDataStream( ReadableByteChannel data )
    {
        // TODO Verify as well?
        readFully( data );
        return emptyResponse();
    }

    // Drains the channel to the end, discarding the data.
    private void readFully( ReadableByteChannel data )
    {
        ByteBuffer buffer = ByteBuffer.allocate( 1000 );
        try
        {
            while ( true )
            {
                buffer.clear();
                if ( data.read( buffer ) == -1 )
                    break;
            }
        }
        catch ( IOException e )
        {
            throw new RuntimeException( e );
        }
    }

    @Override
    public Response<Integer> throwException( String messageInException )
    {
        throw new MadeUpException( messageInException, new Exception( "The cause of it" ) );
    }

    /** @return whether {@link #multiply(int, int)} has been invoked. */
    public boolean gotCalled()
    {
        return this.gotCalled;
    }
}
| false
|
enterprise_com_src_test_java_org_neo4j_com_MadeUpServerImplementation.java
|
4,280
|
{
@Override
public Response<Integer> call( MadeUpCommunicationInterface master,
RequestContext context, ChannelBuffer input, ChannelBuffer target )
{
throw new ThisShouldNotHappenError( "Jake", "Test should not reach this far, " +
"it should fail while reading the request context." );
}
}, Protocol.VOID_SERIALIZER );
| false
|
enterprise_com_src_test_java_org_neo4j_com_MadeUpServer.java
|
4,281
|
{
@Override
public Response<Integer> call( MadeUpCommunicationInterface master,
RequestContext context, ChannelBuffer input, ChannelBuffer target )
{
return master.throwException( readString( input ) );
}
}, Protocol.VOID_SERIALIZER ),
| false
|
enterprise_com_src_test_java_org_neo4j_com_MadeUpServer.java
|
4,282
|
{
@Override
public Response<Void> call( MadeUpCommunicationInterface master,
RequestContext context, ChannelBuffer input, ChannelBuffer target )
{
BlockLogReader reader = new BlockLogReader( input );
try
{
Response<Void> response = master.sendDataStream( reader );
return response;
}
finally
{
try
{
reader.close();
}
catch ( IOException e )
{
throw new RuntimeException( e );
}
}
}
}, Protocol.VOID_SERIALIZER ),
| false
|
enterprise_com_src_test_java_org_neo4j_com_MadeUpServer.java
|
4,283
|
{
@Override
public Response<Void> call( MadeUpCommunicationInterface master,
RequestContext context, ChannelBuffer input, ChannelBuffer target )
{
int dataSize = input.readInt();
return master.fetchDataStream( new ToChannelBufferWriter( target ), dataSize );
}
}, Protocol.VOID_SERIALIZER ),
| false
|
enterprise_com_src_test_java_org_neo4j_com_MadeUpServer.java
|
4,284
|
{
@Override
public Response<Integer> call( MadeUpCommunicationInterface master,
RequestContext context, ChannelBuffer input, ChannelBuffer target )
{
int value1 = input.readInt();
int value2 = input.readInt();
return master.multiply( value1, value2 );
}
}, Protocol.INTEGER_SERIALIZER ),
| false
|
enterprise_com_src_test_java_org_neo4j_com_MadeUpServer.java
|
4,285
|
{
@Override
public long getOldChannelThreshold()
{
return Client.DEFAULT_READ_RESPONSE_TIMEOUT_SECONDS * 1000;
}
@Override
public int getMaxConcurrentTransactions()
{
return DEFAULT_MAX_NUMBER_OF_CONCURRENT_TRANSACTIONS;
}
@Override
public int getChunkSize()
{
return chunkSize;
}
@Override
public HostnamePort getServerAddress()
{
return new HostnamePort( null, port );
}
}, new DevNullLoggingService(), FRAME_LENGTH, applicationProtocolVersion, txVerifier, SYSTEM_CLOCK,
| false
|
enterprise_com_src_test_java_org_neo4j_com_MadeUpServer.java
|
4,286
|
/**
 * Server test double for the com layer: serves the MadeUpCommunicationInterface over
 * the wire and records whether a response was written, or whether writing one failed,
 * so tests can assert on server-side outcomes.
 */
public class MadeUpServer extends Server<MadeUpCommunicationInterface, Void>
{
    // Set from network worker threads, read from test threads -> volatile for visibility.
    private volatile boolean responseWritten;
    private volatile boolean responseFailureEncountered;
    // Internal com-protocol version this server reports; tests vary it to provoke mismatches.
    private final byte internalProtocolVersion;
    public static final int FRAME_LENGTH = 1024 * 1024 * 1; // network frame size: 1 MiB

    /**
     * @param requestTarget the actual implementation requests are dispatched to.
     * @param port port to listen on (host is left null in the advertised address).
     * @param internalProtocolVersion internal com-protocol version to report.
     * @param applicationProtocolVersion application-level protocol version to report.
     * @param txVerifier checksum verifier applied to incoming transaction data.
     * @param chunkSize size of the chunks responses are streamed in.
     */
    public MadeUpServer( MadeUpCommunicationInterface requestTarget, final int port, byte internalProtocolVersion,
            byte applicationProtocolVersion, TxChecksumVerifier txVerifier, final int chunkSize )
    {
        super( requestTarget, new Server.Configuration()
        {
            @Override
            public long getOldChannelThreshold()
            {
                // Reuse the client's read-response timeout (seconds -> milliseconds).
                return Client.DEFAULT_READ_RESPONSE_TIMEOUT_SECONDS * 1000;
            }

            @Override
            public int getMaxConcurrentTransactions()
            {
                return DEFAULT_MAX_NUMBER_OF_CONCURRENT_TRANSACTIONS;
            }

            @Override
            public int getChunkSize()
            {
                return chunkSize;
            }

            @Override
            public HostnamePort getServerAddress()
            {
                return new HostnamePort( null, port );
            }
        }, new DevNullLoggingService(), FRAME_LENGTH, applicationProtocolVersion, txVerifier, SYSTEM_CLOCK,
                new Monitors());
        this.internalProtocolVersion = internalProtocolVersion;
    }

    @Override
    protected void responseWritten( RequestType<MadeUpCommunicationInterface> type, Channel channel,
            RequestContext context )
    {
        // Record the fact for test assertions; the actual write already happened upstream.
        responseWritten = true;
    }

    @Override
    protected void writeFailureResponse( Throwable exception, ChunkingChannelBuffer buffer )
    {
        // Remember the failure for assertions, then let the superclass report it to the client.
        responseFailureEncountered = true;
        super.writeFailureResponse( exception, buffer );
    }

    @Override
    protected byte getInternalProtocolVersion()
    {
        return internalProtocolVersion;
    }

    @Override
    protected RequestType<MadeUpCommunicationInterface> getRequestContext( byte id )
    {
        // Wire ids are the enum ordinals, see MadeUpRequestType#id().
        return MadeUpRequestType.values()[id];
    }

    @Override
    protected void finishOffChannel( Channel channel, RequestContext context )
    {
        // No per-channel state to clean up in this test server.
    }

    public boolean responseHasBeenWritten()
    {
        return responseWritten;
    }

    public boolean responseFailureEncountered()
    {
        return responseFailureEncountered;
    }

    /**
     * The request types this server understands. Each constant pairs a TargetCaller, which
     * decodes the request payload and invokes the target, with an ObjectSerializer for the
     * response value. The wire id of a type is its ordinal, so constant order is part of
     * the protocol.
     */
    static enum MadeUpRequestType implements RequestType<MadeUpCommunicationInterface>
    {
        MULTIPLY( new TargetCaller<MadeUpCommunicationInterface, Integer>()
        {
            @Override
            public Response<Integer> call( MadeUpCommunicationInterface master,
                    RequestContext context, ChannelBuffer input, ChannelBuffer target )
            {
                int value1 = input.readInt();
                int value2 = input.readInt();
                return master.multiply( value1, value2 );
            }
        }, Protocol.INTEGER_SERIALIZER ),
        FETCH_DATA_STREAM( new TargetCaller<MadeUpCommunicationInterface, Void>()
        {
            @Override
            public Response<Void> call( MadeUpCommunicationInterface master,
                    RequestContext context, ChannelBuffer input, ChannelBuffer target )
            {
                int dataSize = input.readInt();
                // Stream the requested amount of data straight into the response buffer.
                return master.fetchDataStream( new ToChannelBufferWriter( target ), dataSize );
            }
        }, Protocol.VOID_SERIALIZER ),
        SEND_DATA_STREAM( new TargetCaller<MadeUpCommunicationInterface, Void>()
        {
            @Override
            public Response<Void> call( MadeUpCommunicationInterface master,
                    RequestContext context, ChannelBuffer input, ChannelBuffer target )
            {
                BlockLogReader reader = new BlockLogReader( input );
                try
                {
                    Response<Void> response = master.sendDataStream( reader );
                    return response;
                }
                finally
                {
                    // Always close the reader; surface close failures as unchecked.
                    try
                    {
                        reader.close();
                    }
                    catch ( IOException e )
                    {
                        throw new RuntimeException( e );
                    }
                }
            }
        }, Protocol.VOID_SERIALIZER ),
        THROW_EXCEPTION( new TargetCaller<MadeUpCommunicationInterface, Integer>()
        {
            @Override
            public Response<Integer> call( MadeUpCommunicationInterface master,
                    RequestContext context, ChannelBuffer input, ChannelBuffer target )
            {
                return master.throwException( readString( input ) );
            }
        }, Protocol.VOID_SERIALIZER ),
        CAUSE_READ_CONTEXT_EXCEPTION( new TargetCaller<MadeUpCommunicationInterface, Integer>()
        {
            @Override
            public Response<Integer> call( MadeUpCommunicationInterface master,
                    RequestContext context, ChannelBuffer input, ChannelBuffer target )
            {
                // This type exists to make request-context deserialization fail first;
                // reaching the target caller at all is a test error.
                throw new ThisShouldNotHappenError( "Jake", "Test should not reach this far, " +
                        "it should fail while reading the request context." );
            }
        }, Protocol.VOID_SERIALIZER );

        private final TargetCaller masterCaller;
        private final ObjectSerializer serializer;

        MadeUpRequestType( TargetCaller masterCaller, ObjectSerializer serializer )
        {
            this.masterCaller = masterCaller;
            this.serializer = serializer;
        }

        @Override
        public TargetCaller getTargetCaller()
        {
            return this.masterCaller;
        }

        @Override
        public ObjectSerializer getObjectSerializer()
        {
            return this.serializer;
        }

        @Override
        public byte id()
        {
            // Wire id == ordinal; see the enum-level note about constant order.
            return (byte) ordinal();
        }
    }
}
| false
|
enterprise_com_src_test_java_org_neo4j_com_MadeUpServer.java
|
4,287
|
/**
 * Test-only unchecked exception used by the made-up client/server protocol tests
 * to simulate failures that propagate from the server back to the client.
 */
public class MadeUpException extends RuntimeException
{
    /**
     * @param message description of the simulated failure.
     */
    public MadeUpException( String message )
    {
        super( message );
    }

    /**
     * @param message description of the simulated failure.
     * @param cause underlying error that triggered this exception.
     */
    public MadeUpException( String message, Throwable cause )
    {
        super( message, cause );
    }
}
| false
|
enterprise_com_src_test_java_org_neo4j_com_MadeUpException.java
|
4,288
|
{
public Integer read( ChannelBuffer buffer, ByteBuffer temporaryBuffer )
throws IOException
{
return buffer.readInt();
}
} );
| false
|
enterprise_com_src_test_java_org_neo4j_com_MadeUpClient.java
|
4,289
|
{
@Override
public void write( ChannelBuffer buffer ) throws IOException
{
writeString( buffer, messageInException );
}
}, new Deserializer<Integer>()
| false
|
enterprise_com_src_test_java_org_neo4j_com_MadeUpClient.java
|
4,290
|
{
@Override
public void write( ChannelBuffer buffer ) throws IOException
{
BlockLogBuffer writer = new BlockLogBuffer( buffer, new Monitors().newMonitor( ByteCounterMonitor.class ) );
try
{
writer.write( data );
}
finally
{
writer.done();
}
}
}, Protocol.VOID_DESERIALIZER );
| false
|
enterprise_com_src_test_java_org_neo4j_com_MadeUpClient.java
|
4,291
|
{
@Override
public Void read( ChannelBuffer buffer, ByteBuffer temporaryBuffer )
throws IOException
{
writer.write( new BlockLogReader( buffer ) );
return null;
}
} );
| false
|
enterprise_com_src_test_java_org_neo4j_com_MadeUpClient.java
|
4,292
|
{
@Override
public void write( ChannelBuffer buffer ) throws IOException
{
buffer.writeInt( dataSize );
}
}, new Deserializer<Void>()
| false
|
enterprise_com_src_test_java_org_neo4j_com_MadeUpClient.java
|
4,293
|
{
@Override
public void write( ChannelBuffer buffer ) throws IOException
{
buffer.writeInt( value1 );
buffer.writeInt( value2 );
}
}, Protocol.INTEGER_DESERIALIZER );
| false
|
enterprise_com_src_test_java_org_neo4j_com_MadeUpClient.java
|
4,294
|
/**
 * Test client counterpart of MadeUpServer. Implements each MadeUpCommunicationInterface
 * operation by serializing its arguments onto the wire and sending the matching
 * MadeUpRequestType to the server, deserializing whatever comes back.
 */
public class MadeUpClient extends Client<MadeUpCommunicationInterface> implements MadeUpCommunicationInterface
{
    // Internal com-protocol version this client announces; tests vary it to provoke mismatches.
    private final byte internalProtocolVersion;

    /**
     * @param port server port to connect to on the local host.
     * @param storeIdToExpect store id the server must present during handshake.
     * @param internalProtocolVersion internal com-protocol version to announce.
     * @param applicationProtocolVersion application-level protocol version to announce.
     * @param chunkSize size of the chunks requests are streamed in.
     */
    public MadeUpClient( int port, StoreId storeIdToExpect,
            byte internalProtocolVersion, byte applicationProtocolVersion, int chunkSize )
    {
        super( localhost(), port, new DevNullLoggingService(), new Monitors(), storeIdToExpect, FRAME_LENGTH,
                applicationProtocolVersion, Client.DEFAULT_READ_RESPONSE_TIMEOUT_SECONDS * 1000,
                Client.DEFAULT_MAX_NUMBER_OF_CONCURRENT_CHANNELS_PER_CLIENT,
                chunkSize );
        this.internalProtocolVersion = internalProtocolVersion;
    }

    /** Resolves the local host address, wrapping the checked UnknownHostException as unchecked. */
    private static String localhost()
    {
        try
        {
            return InetAddress.getLocalHost().getHostAddress();
        }
        catch ( UnknownHostException e )
        {
            throw new RuntimeException( e );
        }
    }

    @Override
    protected byte getInternalProtocolVersion()
    {
        return internalProtocolVersion;
    }

    @Override
    public Response<Integer> multiply( final int value1, final int value2 )
    {
        return sendRequest( MadeUpRequestType.MULTIPLY, getRequestContext(), new Serializer()
        {
            @Override
            public void write( ChannelBuffer buffer ) throws IOException
            {
                buffer.writeInt( value1 );
                buffer.writeInt( value2 );
            }
        }, Protocol.INTEGER_DESERIALIZER );
    }

    /** Builds a minimal request context claiming one applied transaction on the default data source. */
    private RequestContext getRequestContext()
    {
        return new RequestContext( EMPTY.getEpoch(), EMPTY.machineId(), EMPTY.getEventIdentifier(),
                new RequestContext.Tx[] { lastAppliedTx( DEFAULT_DATA_SOURCE_NAME, 1 ) }, EMPTY.getMasterId(),
                EMPTY.getChecksum() );
    }

    @Override
    public Response<Void> fetchDataStream( final MadeUpWriter writer, final int dataSize )
    {
        return sendRequest( MadeUpRequestType.FETCH_DATA_STREAM, getRequestContext(), new Serializer()
        {
            @Override
            public void write( ChannelBuffer buffer ) throws IOException
            {
                buffer.writeInt( dataSize );
            }
        }, new Deserializer<Void>()
        {
            @Override
            public Void read( ChannelBuffer buffer, ByteBuffer temporaryBuffer )
                    throws IOException
            {
                // Hand the streamed blocks straight to the caller-supplied writer.
                writer.write( new BlockLogReader( buffer ) );
                return null;
            }
        } );
    }

    @Override
    public Response<Void> sendDataStream( final ReadableByteChannel data )
    {
        return sendRequest( MadeUpRequestType.SEND_DATA_STREAM, getRequestContext(), new Serializer()
        {
            @Override
            public void write( ChannelBuffer buffer ) throws IOException
            {
                BlockLogBuffer writer = new BlockLogBuffer( buffer, new Monitors().newMonitor( ByteCounterMonitor.class ) );
                try
                {
                    writer.write( data );
                }
                finally
                {
                    // Flush the final (possibly partial) block even if the write failed.
                    writer.done();
                }
            }
        }, Protocol.VOID_DESERIALIZER );
    }

    @Override
    public Response<Integer> throwException( final String messageInException )
    {
        return sendRequest( MadeUpRequestType.THROW_EXCEPTION, getRequestContext(), new Serializer()
        {
            @Override
            public void write( ChannelBuffer buffer ) throws IOException
            {
                writeString( buffer, messageInException );
            }
        }, new Deserializer<Integer>()
        {
            // @Override was missing here, inconsistent with every other anonymous Deserializer.
            @Override
            public Integer read( ChannelBuffer buffer, ByteBuffer temporaryBuffer )
                    throws IOException
            {
                return buffer.readInt();
            }
        } );
    }
}
| false
|
enterprise_com_src_test_java_org_neo4j_com_MadeUpClient.java
|
4,295
|
/**
 * Verifies that LoggingResourcePoolMonitor only emits a debug line when the
 * reported pool size actually changes, not on every callback.
 */
public class LoggingResourcePoolMonitorTest
{
    @Test
    public void testUpdatedCurrentPeakSizeLogsOnlyOnChange() throws Exception
    {
        StringLogger log = mock( StringLogger.class );
        LoggingResourcePoolMonitor poolMonitor = new LoggingResourcePoolMonitor( log );

        // The first reported value is always logged.
        poolMonitor.updatedCurrentPeakSize( 10 );
        verify( log, times( 1 ) ).debug( anyString() );

        // Reporting the same value again must stay silent.
        poolMonitor.updatedCurrentPeakSize( 10 );
        verify( log, times( 1 ) ).debug( anyString() );

        // A different value triggers exactly one more debug line.
        poolMonitor.updatedCurrentPeakSize( 11 );
        verify( log, times( 2 ) ).debug( anyString() );
    }

    @Test
    public void testUpdatedTargetSizeOnlyOnChange() throws Exception
    {
        StringLogger log = mock( StringLogger.class );
        LoggingResourcePoolMonitor poolMonitor = new LoggingResourcePoolMonitor( log );

        // The first reported value is always logged.
        poolMonitor.updatedTargetSize( 10 );
        verify( log, times( 1 ) ).debug( anyString() );

        // Reporting the same value again must stay silent.
        poolMonitor.updatedTargetSize( 10 );
        verify( log, times( 1 ) ).debug( anyString() );

        // A different value triggers exactly one more debug line.
        poolMonitor.updatedTargetSize( 11 );
        verify( log, times( 2 ) ).debug( anyString() );
    }
}
| false
|
enterprise_com_src_test_java_org_neo4j_com_LoggingResourcePoolMonitorTest.java
|
4,296
|
/**
 * ResourcePool monitor that mirrors pool events to the debug log, suppressing
 * repeated reports of an unchanged peak or target size.
 */
public class LoggingResourcePoolMonitor extends ResourcePool.Monitor.Adapter<Triplet<Channel, ChannelBuffer, ByteBuffer>>
{
    private final StringLogger msgLog;
    // Last values seen, so unchanged updates are not logged again; -1 means nothing seen yet.
    private int previousPeakSize = -1;
    private int previousTargetSize = -1;

    public LoggingResourcePoolMonitor( StringLogger msgLog )
    {
        this.msgLog = msgLog;
    }

    @Override
    public void created( Triplet<Channel, ChannelBuffer, ByteBuffer> resource )
    {
        // Every resource creation is logged; there is no dedup for this event.
        msgLog.debug( "ResourcePool create resource " + resource );
    }

    @Override
    public void updatedCurrentPeakSize( int currentPeakSize )
    {
        if ( currentPeakSize == previousPeakSize )
        {
            return; // unchanged -> stay quiet
        }
        msgLog.debug( "ResourcePool updated currentPeakSize to " + currentPeakSize );
        previousPeakSize = currentPeakSize;
    }

    @Override
    public void updatedTargetSize( int targetSize )
    {
        if ( targetSize == previousTargetSize )
        {
            return; // unchanged -> stay quiet
        }
        msgLog.debug( "ResourcePool updated targetSize to " + targetSize );
        previousTargetSize = targetSize;
    }
}
| false
|
enterprise_com_src_main_java_org_neo4j_com_LoggingResourcePoolMonitor.java
|
4,297
|
/**
 * Verifies that StateTransitionLogger suppresses an immediately repeated
 * transition but logs it again once a different transition has intervened.
 */
public class StateTransitionLoggerTest
{
    @Test
    public void shouldThrottle() throws Exception
    {
        // Given: a debug-enabled logger behind the transition logger under test.
        Logging loggingMock = mock( Logging.class );
        StringLogger stringLogger = mock( StringLogger.class );
        when( loggingMock.getMessagesLog( any( Class.class ) ) ).thenReturn( stringLogger );
        when( stringLogger.isDebugEnabled() ).thenReturn( true );
        StateTransitionLogger transitionLogger = new StateTransitionLogger( loggingMock );

        // When: the same transition twice in a row, then a different one, then the first again.
        transitionLogger.stateTransition( new StateTransition( entered, Message.internal( join ), joining ) );
        transitionLogger.stateTransition( new StateTransition( entered, Message.internal( join ), joining ) );
        transitionLogger.stateTransition( new StateTransition( joining, Message.internal( join ), entered ) );
        transitionLogger.stateTransition( new StateTransition( entered, Message.internal( join ), joining ) );

        // Then: the immediate repeat is swallowed, the reoccurrence after a break is logged.
        verify( stringLogger, times( 4 ) ).isDebugEnabled();
        verify( stringLogger, times( 2 ) ).debug( "ClusterState: entered-[join]->joining" );
        verify( stringLogger ).debug( "ClusterState: joining-[join]->entered" );
        verifyNoMoreInteractions( stringLogger );
    }
}
| false
|
enterprise_cluster_src_test_java_org_neo4j_cluster_statemachine_StateTransitionLoggerTest.java
|
4,298
|
/**
 * Immutable value object describing one state-machine transition: the state left,
 * the message that caused the transition, and the state entered.
 * Equality and hash code consider all three components.
 */
public class StateTransition
{
    // Only assigned in the constructor; final makes the immutability explicit.
    private final State<?,?> oldState;
    private final Message<? extends MessageType> message;
    private final State<?,?> newState;

    public StateTransition( State<?,?> oldState, Message<? extends MessageType> message, State<?,?> newState )
    {
        this.oldState = oldState;
        this.message = message;
        this.newState = newState;
    }

    public State<?,?> getOldState()
    {
        return oldState;
    }

    public Message<? extends MessageType> getMessage()
    {
        return message;
    }

    public State<?,?> getNewState()
    {
        return newState;
    }

    @Override
    public boolean equals( Object o )
    {
        if ( this == o )
        {
            return true;
        }
        if ( o == null || getClass() != o.getClass() )
        {
            return false;
        }
        StateTransition that = (StateTransition) o;
        // All three components must match; short-circuits on the first mismatch.
        return message.equals( that.message )
                && newState.equals( that.newState )
                && oldState.equals( that.oldState );
    }

    @Override
    public int hashCode()
    {
        int result = oldState.hashCode();
        result = 31 * result + message.hashCode();
        result = 31 * result + newState.hashCode();
        return result;
    }

    @Override
    public String toString()
    {
        // Include the payload only when it is a String, e.g. "a-[type:payload]->b".
        if ( message.getPayload() instanceof String )
        {
            return getOldState().toString() +
                    "-[" + getMessage().getMessageType() + ":" + getMessage().getPayload() + "]->" +
                    getNewState().toString();
        }
        return getOldState().toString() +
                "-[" + getMessage().getMessageType() + "]->" +
                getNewState().toString();
    }
}
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_statemachine_StateTransition.java
|
4,299
|
/**
 * Raised when consistency checking a transaction finds inconsistencies. The message
 * embeds the transaction's start and commit log entries (or placeholders when absent)
 * together with the summary statistics, which remain queryable via the accessors.
 */
public class ConsistencyCheckingError extends DataInconsistencyError
{
    private final ConsistencySummaryStatistics summary;

    public ConsistencyCheckingError( LogEntry.Start startEntry, LogEntry.Commit commitEntry,
            ConsistencySummaryStatistics summary )
    {
        super( String.format( "Inconsistencies in transaction:\n\t%s\n\t%s\n\t%s",
                describe( startEntry, "NO START ENTRY" ),
                describe( commitEntry, "NO COMMIT ENTRY" ),
                summary ) );
        this.summary = summary;
    }

    // Renders a log entry for the message, substituting a placeholder when it is missing.
    private static String describe( Object entry, String fallback )
    {
        return entry == null ? fallback : entry.toString();
    }

    public int getInconsistencyCountForRecordType( RecordType recordType )
    {
        return summary.getInconsistencyCountForRecordType( recordType );
    }

    public int getTotalInconsistencyCount()
    {
        return summary.getTotalInconsistencyCount();
    }
}
| false
|
enterprise_consistency-check_src_main_java_org_neo4j_consistency_ConsistencyCheckingError.java
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.