Unnamed: 0
int64 0
6.7k
| func
stringlengths 12
89.6k
| target
bool 2
classes | project
stringlengths 45
151
|
|---|---|---|---|
1,400
|
/**
 * Test helper thread: resumes a suspended transaction and commits it once the
 * main thread has parked itself (reached {@link Thread.State#WAITING}).
 * NOTE(review): run() and success() synchronize on the same monitor, so a call
 * to success() blocks until run() has completed — presumably intentional for
 * this test's handshake; confirm against the calling test.
 */
private static class CommitThread extends Thread
{
    private final TransactionManager tm;
    private final Transaction tx;
    // Set to true only after a successful commit; read via success().
    private boolean success = false;
    private final Thread main;

    CommitThread( TransactionManager tm, Transaction tx, Thread main )
    {
        this.tm = tm;
        this.tx = tx;
        this.main = main;
    }

    @Override
    public synchronized void run()
    {
        try
        {
            // Busy-wait in 1ms polls until the main thread is blocked WAITING.
            while ( main.getState() != Thread.State.WAITING ) Thread.sleep( 1 );
            // Adopt the suspended transaction in this thread, then commit it.
            tm.resume( tx );
            tm.getTransaction().commit();
            success = true;
        }
        catch ( Throwable t )
        {
            // Test helper: any failure leaves success == false and is printed.
            t.printStackTrace();
        }
    }

    synchronized boolean success()
    {
        return success;
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_core_TestTxSuspendResume.java
|
1,401
|
/**
 * Runs a unit of {@link Work} in a fresh transaction, suspending any
 * transaction currently bound to the calling thread and resuming it afterwards.
 */
public class Transactor
{
    /** A unit of work performed against a {@link Statement} inside the managed transaction. */
    public interface Work<RESULT, FAILURE extends KernelException>
    {
        RESULT perform( Statement statement ) throws FAILURE;
    }

    private final TransactionManager txManager;
    private final PersistenceManager persistenceManager;

    public Transactor( TransactionManager txManager, PersistenceManager persistenceManager )
    {
        this.txManager = txManager;
        this.persistenceManager = persistenceManager;
    }

    /**
     * Suspends the caller's transaction (if any), begins a new transaction,
     * performs {@code work} against it, commits on success or rolls back on
     * failure, and finally resumes the caller's transaction.
     *
     * @param work the work to perform in the new transaction
     * @return the result of {@link Work#perform(Statement)}
     * @throws FAILURE propagated from the work itself
     * @throws TransactionalException if the transaction machinery fails
     */
    public <RESULT, FAILURE extends KernelException> RESULT execute( Work<RESULT, FAILURE> work )
            throws FAILURE, TransactionalException
    {
        Transaction previousTransaction = suspendTransaction();
        try
        {
            beginTransaction();
            KernelTransaction tx = persistenceManager.currentKernelTransactionForWriting();
            boolean success = false;
            try
            {
                RESULT result;
                try ( Statement statement = tx.acquireStatement() )
                {
                    result = work.perform( statement );
                }
                // Only reached if perform() did not throw.
                success = true;
                return result;
            }
            finally
            {
                // Commit or roll back depending on whether the work succeeded.
                if ( success )
                {
                    txManager.commit();
                }
                else
                {
                    txManager.rollback();
                }
            }
        }
        catch ( HeuristicMixedException | RollbackException | HeuristicRollbackException | SystemException |
                TransactionalException failure )
        {
            previousTransaction = null; // the transaction manager threw an exception, don't resume previous.
            throw new TransactionFailureException(failure);
        }
        finally
        {
            // Restore the caller's transaction unless the manager itself failed.
            if ( previousTransaction != null )
            {
                resumeTransaction( previousTransaction );
            }
        }
    }

    // Begins a new transaction, wrapping JTA failures in a kernel exception.
    private void beginTransaction() throws BeginTransactionFailureException
    {
        try
        {
            txManager.begin();
        }
        catch ( NotSupportedException | SystemException e )
        {
            throw new BeginTransactionFailureException( e );
        }
    }

    // Suspends the transaction currently bound to this thread; may return null
    // if the thread has no transaction.
    private Transaction suspendTransaction() throws TransactionFailureException
    {
        Transaction existingTransaction;
        try
        {
            existingTransaction = txManager.suspend();
        }
        catch ( SystemException failure )
        {
            throw new TransactionFailureException( failure );
        }
        return existingTransaction;
    }

    // Re-binds a previously suspended transaction to this thread.
    private void resumeTransaction( Transaction existingTransaction ) throws TransactionFailureException
    {
        try
        {
            txManager.resume( existingTransaction );
        }
        catch ( InvalidTransactionException failure )
        { // thrown from resume()
            throw new ThisShouldNotHappenError( "Tobias Lindaaker",
                    "Transaction resumed in the same transaction manager as it was " +
                    "suspended from should not be invalid. The Neo4j code base does not " +
                    "throw InvalidTransactionException", failure );
        }
        catch ( SystemException failure )
        {
            throw new TransactionFailureException( failure );
        }
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_core_Transactor.java
|
1,402
|
/**
 * JTA {@link Synchronization} that bridges transaction completion callbacks to
 * the registered {@link TransactionEventHandlers}.
 */
public class TransactionEventsSyncHook implements Synchronization
{
    private final TransactionEventHandlers handlers;
    /**
     * This is null at construction time, then populated in beforeCompletion and
     * used in afterCompletion.
     */
    private List<TransactionEventHandlers.HandlerAndState> states;
    private TransactionData transactionData;
    private final AbstractTransactionManager tm;

    public TransactionEventsSyncHook( TransactionEventHandlers transactionEventHandlers, AbstractTransactionManager tm )
    {
        this.handlers = transactionEventHandlers;
        this.tm = tm;
    }

    public void beforeCompletion()
    {
        this.transactionData = tm.getTransactionState().getTransactionData();
        try
        {
            // Only notify handlers for transactions that are still active
            // (e.g. skip ones already marked rollback-only).
            if ( tm.getStatus() != Status.STATUS_ACTIVE )
            {
                return;
            }
        }
        catch ( SystemException e )
        {
            // NOTE(review): a SystemException here is only printed and then the
            // handlers ARE invoked below — confirm this fall-through is intended.
            e.printStackTrace();
        }
        states = new ArrayList<>();
        handlers.beforeCompletion(transactionData, states);
    }

    public void afterCompletion( int status )
    {
        // NOTE(review): states may still be null here if beforeCompletion
        // returned early — presumably afterCompletion tolerates that; verify.
        handlers.afterCompletion(transactionData, status, states);
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_core_TransactionEventsSyncHook.java
|
1,403
|
class TransactionDataImpl implements TransactionData
{
private final Collection<PropertyEntry<Node>> assignedNodeProperties = newCollection();
private final Collection<PropertyEntry<Relationship>> assignedRelationshipProperties =
newCollection();
private final Collection<Node> createdNodes = newCollection();
private final Collection<Relationship> createdRelationships = newCollection();
private final Collection<Node> deletedNodes = new HashSet<Node>();
private final Collection<Relationship> deletedRelationships = new HashSet<Relationship>();
private final Collection<PropertyEntry<Node>> removedNodeProperties = newCollection();
private final Collection<PropertyEntry<Relationship>> removedRelationshipProperties =
newCollection();
private <T> Collection<T> newCollection()
{
// TODO Tweak later, better collection impl or something?
return new ArrayList<T>();
}
public Iterable<PropertyEntry<Node>> assignedNodeProperties()
{
return this.assignedNodeProperties;
}
public Iterable<PropertyEntry<Relationship>> assignedRelationshipProperties()
{
return this.assignedRelationshipProperties;
}
public Iterable<Node> createdNodes()
{
return this.createdNodes;
}
public Iterable<Relationship> createdRelationships()
{
return this.createdRelationships;
}
public Iterable<Node> deletedNodes()
{
return this.deletedNodes;
}
public boolean isDeleted( Node node )
{
return this.deletedNodes.contains( node );
}
public Iterable<Relationship> deletedRelationships()
{
return this.deletedRelationships;
}
public boolean isDeleted( Relationship relationship )
{
return this.deletedRelationships.contains( relationship );
}
public Iterable<PropertyEntry<Node>> removedNodeProperties()
{
return this.removedNodeProperties;
}
public Iterable<PropertyEntry<Relationship>> removedRelationshipProperties()
{
return this.removedRelationshipProperties;
}
void assignedProperty( Node node, String key, Object value,
Object valueBeforeTransaction )
{
this.assignedNodeProperties.add( PropertyEntryImpl.assigned( node, key,
value, valueBeforeTransaction ) );
}
void assignedProperty( Relationship relationship, String key,
Object value, Object valueBeforeTransaction )
{
this.assignedRelationshipProperties.add( PropertyEntryImpl.assigned(
relationship, key, value, valueBeforeTransaction ) );
}
void removedProperty( Node node, String key,
Object valueBeforeTransaction )
{
this.removedNodeProperties.add( PropertyEntryImpl.removed( node, key,
valueBeforeTransaction ) );
}
void removedProperty( Relationship relationship, String key,
Object valueBeforeTransaction )
{
this.removedRelationshipProperties.add( PropertyEntryImpl.removed(
relationship, key, valueBeforeTransaction ) );
}
void created( Node node )
{
this.createdNodes.add( node );
}
void created( Relationship relationship )
{
this.createdRelationships.add( relationship );
}
void deleted( Node node )
{
this.deletedNodes.add( node );
}
void deleted( Relationship relationship )
{
this.deletedRelationships.add( relationship );
}
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_core_TransactionDataImpl.java
|
1,404
|
/**
 * Thrown when a token (e.g. a property key or label) cannot be resolved by
 * name or by id.
 */
public class TokenNotFoundException extends Exception
{
    /** @param message describes the token that could not be found */
    public TokenNotFoundException( String message )
    {
        super( message );
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_core_TokenNotFoundException.java
|
1,405
|
/**
 * Caches tokens (name/id pairs) with bidirectional lookup, creating new tokens
 * on demand through the supplied {@link TokenCreator}.
 * Both maps are copy-on-write, so reads are lock-free; token creation is
 * serialized via the synchronized {@link #createToken(String)}.
 */
public abstract class TokenHolder<TOKEN extends Token> extends LifecycleAdapter
{
    // Sentinel id returned when a token name is unknown.
    public static final int NO_ID = -1;
    private final Map<String,Integer> nameToId = new CopyOnWriteHashMap<String, Integer>();
    private final Map<Integer, TOKEN> idToToken = new CopyOnWriteHashMap<Integer, TOKEN>();
    private final AbstractTransactionManager transactionManager;
    protected final PersistenceManager persistenceManager;
    private final EntityIdGenerator idGenerator;
    private final TokenCreator tokenCreator;

    public TokenHolder( AbstractTransactionManager transactionManager,
            PersistenceManager persistenceManager, EntityIdGenerator idGenerator,
            TokenCreator tokenCreator )
    {
        this.transactionManager = transactionManager;
        this.persistenceManager = persistenceManager;
        this.idGenerator = idGenerator;
        this.tokenCreator = tokenCreator;
    }

    // Bulk-add: builds both maps up front, then publishes each with one
    // putAll, limiting copy-on-write churn to two copies total.
    void addTokens( Token... tokens )
    {
        Map<String, Integer> newNameToId = new HashMap<String, Integer>();
        Map<Integer, TOKEN> newIdToToken = new HashMap<Integer, TOKEN>();
        for ( Token token : tokens )
        {
            addToken( token.name(), token.id(), newNameToId, newIdToToken );
            notifyMeOfTokensAdded( token.name(), token.id() );
        }
        nameToId.putAll( newNameToId );
        idToToken.putAll( newIdToToken );
    }

    /**
     * Overload this if you want to know of tokens being added
     */
    protected void notifyMeOfTokensAdded( String name, int id )
    {
    }

    void addToken( String name, int id )
    {
        addToken( name, id, nameToId, idToToken );
        notifyMeOfTokensAdded( name, id );
    }

    // Inserts into the given maps (either the live maps or staging maps).
    void addToken( String name, int id, Map<String, Integer> nameToIdMap, Map<Integer, TOKEN> idToTokenMap )
    {
        TOKEN token = newToken( name, id );
        nameToIdMap.put( name, id );
        idToTokenMap.put( id, token );
    }

    // NOTE(review): throws NullPointerException if no token has this id —
    // presumably callers only remove known ids; confirm.
    void removeToken( int id )
    {
        TOKEN token = idToToken.remove( id );
        nameToId.remove( token.name() );
    }

    /** Returns the id for {@code name}, creating the token if it does not exist. */
    public int getOrCreateId( String name )
    {
        Integer id = nameToId.get( name );
        if ( id != null )
        {
            return id;
        }
        // Let's create it
        id = createToken( name );
        return id;
    }

    // Synchronized so only one thread creates a given token; the re-check
    // under the lock handles the race with a concurrent creator.
    private synchronized int createToken( String name )
    {
        Integer id = nameToId.get( name );
        if ( id != null )
        {
            return id;
        }
        id = tokenCreator.getOrCreate( transactionManager, idGenerator,
                persistenceManager, name );
        addToken( name, id );
        return id;
    }

    /** @throws TokenNotFoundException if no token has the given id */
    public TOKEN getTokenById( int id ) throws TokenNotFoundException
    {
        TOKEN result = getTokenByIdOrNull( id );
        if ( result == null )
        {
            throw new TokenNotFoundException( "Token for id " + id );
        }
        return result;
    }

    public TOKEN getTokenByIdOrNull( int id )
    {
        return idToToken.get( id );
    }

    public boolean hasTokenWithId( int id )
    {
        return idToToken.containsKey( id );
    }

    /** Returns the id, or {@link #NO_ID} if no token with this name exists. */
    public final int idOf( TOKEN token )
    {
        return getIdByName( token.name() );
    }

    /** Returns the id, or {@link #NO_ID} if no token with this name exists. */
    public int getIdByName( String name )
    {
        Integer id = nameToId.get( name );
        if ( id == null )
        {
            return NO_ID;
        }
        return id;
    }

    /** @throws TokenNotFoundException if no token has the given name */
    public TOKEN getTokenByName( String name ) throws TokenNotFoundException
    {
        Integer id = nameToId.get( name );
        if ( id == null )
            throw new TokenNotFoundException( name );
        return idToToken.get( id );
    }

    public TOKEN getTokenByNameOrNull( String name )
    {
        Integer id = nameToId.get( name );
        return id != null ? idToToken.get( id ) : null;
    }

    public Iterable<TOKEN> getAllTokens()
    {
        return idToToken.values();
    }

    @Override
    public void stop()
    {
        // Lifecycle stop: drop the cached tokens.
        nameToId.clear();
        idToToken.clear();
    }

    /** Factory method for the concrete token type. */
    protected abstract TOKEN newToken( String name, int id );
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_core_TokenHolder.java
|
1,406
|
public class Token
{
private final String name;
private final int id;
public Token( String name, int id )
{
this.name = name;
this.id = id;
}
public String name()
{
return name;
}
public int id()
{
return this.id;
}
@Override
public int hashCode()
{
return id;
}
@Override
public boolean equals( Object obj )
{
if ( !(obj instanceof Token) )
{
return false;
}
return id == ((Token) obj).id;
}
@Override
public String toString()
{
return getClass().getSimpleName() + "[name:" + name + ", id:" + id + "]";
}
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_core_Token.java
|
1,407
|
/** Unit test for {@link ThreadToStatementContextBridge}. */
public class ThreadToStatementContextBridgeTest
{
    @Test
    public void shouldThrowNotInTransactionExceptionWhenNotInTransaction() throws Exception
    {
        // Given: a persistence manager reporting no transaction on this thread
        PersistenceManager persistenceManager = mock( PersistenceManager.class );
        when( persistenceManager.currentKernelTransactionForReading() ).thenReturn( null );
        ThreadToStatementContextBridge bridge = new ThreadToStatementContextBridge( persistenceManager );
        // When: asking the bridge for a statement outside any transaction
        try
        {
            bridge.instance();
            fail( "Should throw" );
        }
        catch ( NotInTransactionException e )
        { // Good
        }
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_core_ThreadToStatementContextBridgeTest.java
|
1,408
|
/**
 * Provides the kernel {@link Statement} of the transaction bound to the
 * calling thread, guarding every access against database shutdown.
 */
public class ThreadToStatementContextBridge extends LifecycleAdapter implements Provider<Statement>
{
    private final PersistenceManager persistenceManager;
    private boolean isShutdown = false;

    public ThreadToStatementContextBridge( PersistenceManager persistenceManager )
    {
        this.persistenceManager = persistenceManager;
    }

    @Override
    public Statement instance()
    {
        return transaction().acquireStatement();
    }

    /** The transaction bound to this thread; throws if there is none. */
    private KernelTransaction transaction()
    {
        checkIfShutdown();
        KernelTransaction current = persistenceManager.currentKernelTransactionForReading();
        if ( current != null )
        {
            return current;
        }
        throw new NotInTransactionException();
    }

    @Override
    public void shutdown() throws Throwable
    {
        isShutdown = true;
    }

    private void checkIfShutdown()
    {
        if ( isShutdown )
        {
            throw new DatabaseShutdownException();
        }
    }

    /** Asserts the calling thread has an active transaction. */
    public void assertInTransaction()
    {
        checkIfShutdown();
        // Contract: Persistence manager throws NotInTransactionException if we are not in a transaction.
        persistenceManager.getCurrentTransaction();
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_core_ThreadToStatementContextBridge.java
|
1,409
|
/**
 * Test XA resource whose identity is determined solely by the supplied
 * identifier object.
 */
private static class DummyResource extends XaResourceHelpImpl
{
    private final Object identifier;

    DummyResource( Object identifier, XaResourceManager xaRm,
            byte[] branchId )
    {
        super( xaRm, branchId );
        this.identifier = identifier;
    }

    @Override
    public boolean isSameRM( XAResource xares )
    {
        // Same resource manager iff the other side is also a DummyResource
        // carrying an equal identifier.
        return xares instanceof DummyResource
                && identifier.equals( ((DummyResource) xares).identifier );
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_index_DummyIndexDataSource.java
|
1,410
|
/**
 * No-op {@link Index} stub: lookups yield empty results and every other
 * operation throws {@link UnsupportedOperationException}.
 * NOTE(review): declared as a non-static inner class, so each instance keeps a
 * hidden reference to its enclosing instance.
 */
private abstract class DummyIndex<T extends PropertyContainer> implements Index<T>
{
    private final String name;
    // Stored but not referenced anywhere in this class — presumably used by
    // subclasses or kept for future use; confirm before removing.
    private final InternalAbstractGraphDatabase db;

    public DummyIndex( String name, InternalAbstractGraphDatabase db )
    {
        this.name = name;
        this.db = db;
    }

    @Override
    public String getName()
    {
        return name;
    }

    @Override
    public IndexHits<T> get( String key, Object value )
    {
        // Always empty: this dummy index contains nothing.
        return new IteratorIndexHits<>( Collections.<T>emptyList() );
    }

    @Override
    public IndexHits<T> query( String key, Object queryOrQueryObject )
    {
        throw new UnsupportedOperationException();
    }

    @Override
    public IndexHits<T> query( Object queryOrQueryObject )
    {
        throw new UnsupportedOperationException();
    }

    @Override
    public boolean isWriteable()
    {
        return false;
    }

    @Override
    public GraphDatabaseService getGraphDatabase()
    {
        return null;
    }

    @Override
    public void add( T entity, String key, Object value )
    {
        throw new UnsupportedOperationException();
    }

    @Override
    public void remove( T entity, String key, Object value )
    {
        throw new UnsupportedOperationException();
    }

    @Override
    public void remove( T entity, String key )
    {
        throw new UnsupportedOperationException();
    }

    @Override
    public void remove( T entity )
    {
        throw new UnsupportedOperationException();
    }

    @Override
    public void delete()
    {
        throw new UnsupportedOperationException();
    }

    @Override
    public T putIfAbsent( T entity, String key, Object value )
    {
        throw new UnsupportedOperationException();
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_index_DummyIndexExtensionFactory.java
|
1,411
|
/**
 * Emulates a crash before the id-generator rebuild completes by skipping the
 * makeStoreOk() call that a successful rebuild would issue.
 */
@RunWith(JUnit4.class)
public static final class FailureBeforeRebuild extends IdGeneratorRebuildFailureEmulationTest
{
    @Override
    protected void emulateFailureOnRebuildOf( NeoStore neostore )
    {
        // emulate a failure during rebuild by not issuing this call:
        // neostore.makeStoreOk();
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_nioneo_store_IdGeneratorRebuildFailureEmulationTest.java
|
1,412
|
/**
 * Bookkeeping element for one "brick" of a store file: tracks hit statistics
 * (used to decide which bricks deserve a mapped window) and coordinates
 * locking between window users and refreshBricks().
 */
class BrickElement
{
    private final int index;
    // Access-frequency counter; bumped by setHit(), decayed by refresh().
    private int hitCount;
    private int hitCountSnapshot;
    private volatile LockableWindow window;
    final AtomicInteger lockCount = new AtomicInteger();

    BrickElement( int index )
    {
        this.index = index;
    }

    void setWindow( LockableWindow window )
    {
        this.window = window;
    }

    LockableWindow getWindow()
    {
        return window;
    }

    int index()
    {
        return index;
    }

    void setHit()
    {
        hitCount += 10;
        // Undo the bump on int overflow so the counter never goes negative.
        if ( hitCount < 0 )
        {
            hitCount -= 10;
        }
    }

    int getHit()
    {
        return hitCount;
    }

    void refresh()
    {
        // Decay the hit count; unmapped bricks decay faster. Note: int
        // compound assignment with a double divisor — result is truncated.
        if ( window == null )
        {
            hitCount /= 1.25;
        }
        else
        {
            hitCount /= 1.15;
        }
    }

    void snapshotHitCount()
    {
        hitCountSnapshot = hitCount;
    }

    int getHitCountSnapshot()
    {
        return hitCountSnapshot;
    }

    /**
     * Returns this brick's window marked as in use, or null if there is no
     * usable window — in which case the brick is left LOCKED and the caller is
     * expected to use a row and unlock afterwards.
     */
    LockableWindow getAndMarkWindow()
    {
        try
        {
            // Fast path: window present and successfully marked in use.
            LockableWindow candidate = window;
            if ( candidate != null && candidate.markAsInUse() )
            {
                return candidate;
            }
            /* We may have to allocate a row over this position, so we first need to increase the row count over
             * this brick to make sure that if a refreshBricks() runs at the same time it won't map a window
             * under this row. Locking has to happen before we get the window, otherwise we open up for a race
             * between checking for the window and a refreshBricks(). */
            lock();
            candidate = window;
            if ( candidate != null && candidate.markAsInUse() )
            {
                // This means the position is in a window and not in a row, so unlock.
                unLock();
            }
            /* If the if above does not execute, it happens because we are going to map a row over this. So the brick
             * must remain locked until we are done with the row. That means that from now on refreshBricks() calls
             * will block until the row we'll grab in the code after this method call is released. */
            return candidate;
        }
        finally
        {
            // Every access counts as a hit, whichever path was taken.
            setHit();
        }
    }

    synchronized void lock()
    {
        lockCount.incrementAndGet();
    }

    /**
     * Not synchronized on purpose. See {@link #allocateNewWindow(BrickElement)} for details.
     */
    void unLock()
    {
        int lockCountAfterDecrement = lockCount.decrementAndGet();
        assert lockCountAfterDecrement >= 0 : "Should not be able to have negative lock count " + lockCountAfterDecrement;
    }

    @Override
    public String toString()
    {
        // "o" = has a mapped window, "x" = does not.
        return "" + hitCount + (window == null ? "x" : "o");
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_store_BrickElement.java
|
1,413
|
/** Configuration settings recognized by this store. */
public static abstract class Configuration extends CommonAbstractStore.Configuration
{
    // Whether rebuildIdGenerator() may use the fast backwards scan.
    public static final Setting<Boolean> rebuild_idgenerators_fast = GraphDatabaseSettings.rebuild_idgenerators_fast;
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_store_AbstractStore.java
|
1,414
|
/**
 * Base class for stores whose records all have the same fixed size, providing
 * file-size verification and id-generator rebuild on top of
 * {@link CommonAbstractStore}.
 */
public abstract class AbstractStore extends CommonAbstractStore
{
    /** Configuration settings recognized by this store. */
    public static abstract class Configuration extends CommonAbstractStore.Configuration
    {
        public static final Setting<Boolean> rebuild_idgenerators_fast = GraphDatabaseSettings.rebuild_idgenerators_fast;
    }

    private final Config conf;

    /**
     * Returns the fixed size of each record in this store.
     *
     * @return The record size
     */
    public abstract int getRecordSize();

    @Override
    protected long figureOutHighestIdInUse()
    {
        try
        {
            // Fixed-size records: highest possible id is simply size/recordSize.
            return getFileChannel().size() / getRecordSize();
        }
        catch ( IOException e )
        {
            throw new RuntimeException( e );
        }
    }

    public AbstractStore( File fileName, Config conf, IdType idType,
            IdGeneratorFactory idGeneratorFactory, WindowPoolFactory windowPoolFactory,
            FileSystemAbstraction fileSystemAbstraction, StringLogger stringLogger )
    {
        super( fileName, conf, idType, idGeneratorFactory, windowPoolFactory, fileSystemAbstraction, stringLogger );
        this.conf = conf;
    }

    @Override
    protected int getEffectiveRecordSize()
    {
        return getRecordSize();
    }

    @Override
    protected void readAndVerifyBlockSize() throws IOException
    {
        // record size is fixed for non-dynamic stores, so nothing to do here
    }

    // Checks that the file length (minus the trailing version string) is a
    // whole number of records; if not, marks the store as needing recovery.
    @Override
    protected void verifyFileSizeAndTruncate() throws IOException
    {
        int expectedVersionLength = UTF8.encode( buildTypeDescriptorAndVersion( getTypeDescriptor() ) ).length;
        long fileSize = getFileChannel().size();
        if ( getRecordSize() != 0
                && (fileSize - expectedVersionLength) % getRecordSize() != 0 && !isReadOnly() )
        {
            setStoreNotOk( new IllegalStateException(
                    "Misaligned file size " + fileSize + " for " + this + ", expected version length:" +
                            expectedVersionLength ) );
        }
        if ( getStoreOk() && !isReadOnly() )
        {
            // Strip the trailing version string so only records remain.
            getFileChannel().truncate( fileSize - expectedVersionLength );
        }
    }

    // Scans from the end of the file towards the start, returning the index of
    // the last record that is in use (0 if none).
    private long findHighIdBackwards() throws IOException
    {
        // Duplicated method
        StoreChannel fileChannel = getFileChannel();
        int recordSize = getRecordSize();
        long fileSize = fileChannel.size();
        long highId = fileSize / recordSize;
        ByteBuffer byteBuffer = ByteBuffer.allocate( getRecordSize() );
        for ( long i = highId; i > 0; i-- )
        {
            fileChannel.position( i * recordSize );
            if ( fileChannel.read( byteBuffer ) > 0 )
            {
                byteBuffer.flip();
                boolean isInUse = isRecordInUse( byteBuffer );
                byteBuffer.clear();
                if ( isInUse )
                {
                    return i;
                }
            }
        }
        return 0;
    }

    // Checks the in-use bit of the record at the buffer's current position.
    protected boolean isRecordInUse( ByteBuffer buffer )
    {
        byte inUse = buffer.get();
        return (inUse & 0x1) == Record.IN_USE.byteValue();
    }

    /**
     * Rebuilds the {@link IdGenerator} by looping through all records and
     * checking if record in use or not.
     */
    @Override
    protected void rebuildIdGenerator()
    {
        if ( isReadOnly() && !isBackupSlave() )
        {
            throw new ReadOnlyDbException();
        }
        stringLogger.debug( "Rebuilding id generator for[" + getStorageFileName() + "] ..." );
        // Recreate the .id file from scratch.
        closeIdGenerator();
        if ( fileSystemAbstraction.fileExists( new File( getStorageFileName().getPath() + ".id" ) ) )
        {
            boolean success = fileSystemAbstraction.deleteFile( new File( getStorageFileName().getPath() + ".id" ) );
            assert success;
        }
        createIdGenerator( new File( getStorageFileName().getPath() + ".id" ) );
        openIdGenerator();
        StoreChannel fileChannel = getFileChannel();
        long highId = 1;
        long defraggedCount = 0;
        try
        {
            long fileSize = fileChannel.size();
            int recordSize = getRecordSize();
            // Fast mode only finds the high id; full mode also registers
            // free (defragmented) ids below it.
            boolean fullRebuild = true;
            if ( conf.get( Configuration.rebuild_idgenerators_fast ) )
            {
                fullRebuild = false;
                highId = findHighIdBackwards();
            }
            ByteBuffer byteBuffer = ByteBuffer.allocate( recordSize );
            // Duplicated code block
            LinkedList<Long> freeIdList = new LinkedList<Long>();
            if ( fullRebuild )
            {
                for ( long i = 0; i * recordSize < fileSize && recordSize > 0; i++ )
                {
                    fileChannel.position( i * recordSize );
                    byteBuffer.clear();
                    fileChannel.read( byteBuffer );
                    byteBuffer.flip();
                    if ( !isRecordInUse( byteBuffer ) )
                    {
                        // Tentatively free: only registered once a later
                        // in-use record proves it is a hole, not the tail.
                        freeIdList.add( i );
                    }
                    else
                    {
                        highId = i;
                        setHighId( highId + 1 );
                        while ( !freeIdList.isEmpty() )
                        {
                            freeId( freeIdList.removeFirst() );
                            defraggedCount++;
                        }
                    }
                }
            }
        }
        catch ( IOException e )
        {
            throw new UnderlyingStorageException(
                    "Unable to rebuild id generator " + getStorageFileName(), e );
        }
        setHighId( highId + 1 );
        stringLogger.logMessage( getStorageFileName() + " rebuild id generator, highId=" + getHighId() +
                " defragged count=" + defraggedCount, true );
        stringLogger.debug( "[" + getStorageFileName() + "] high id=" + getHighId()
                + " (defragged=" + defraggedCount + ")" );
        // Reopen so the generator persists the rebuilt state.
        closeIdGenerator();
        openIdGenerator();
    }

    public abstract List<WindowPoolStats> getAllWindowPoolStats();

    public void logAllWindowPoolStats( StringLogger.LineLogger logger )
    {
        logger.logLine( getWindowPoolStats().toString() );
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_store_AbstractStore.java
|
1,415
|
/**
 * Base class for schema rules, carrying the rule id, the label it applies to
 * and its {@link Kind}. Equality and hashing are based on label and kind only;
 * the id is deliberately excluded.
 */
public abstract class AbstractSchemaRule implements SchemaRule
{
    private final long id;
    private final int label;
    private final Kind kind;

    public AbstractSchemaRule( long id, int label, Kind kind )
    {
        this.id = id;
        this.label = label;
        this.kind = kind;
    }

    @Override
    public long getId()
    {
        return id;
    }

    @Override
    public final int getLabel()
    {
        return label;
    }

    @Override
    public final Kind getKind()
    {
        return kind;
    }

    @Override
    public int length()
    {
        // 4 bytes for the label id plus 1 byte for the kind id.
        return 4 + 1;
    }

    @Override
    public void serialize( ByteBuffer target )
    {
        target.putInt( label );
        target.put( kind.id() );
    }

    @Override
    public int hashCode()
    {
        // Expands to the classic 31 * (31 + kind.hashCode()) + label.
        return 31 * (31 + kind.hashCode()) + label;
    }

    @Override
    public boolean equals( Object obj )
    {
        if ( this == obj )
        {
            return true;
        }
        if ( obj == null || getClass() != obj.getClass() )
        {
            return false;
        }
        AbstractSchemaRule that = (AbstractSchemaRule) obj;
        return kind == that.kind && label == that.label;
    }

    @Override
    public String toString()
    {
        return getClass().getSimpleName() + "[id=" + id + ", label=" + label + ", kind=" + kind + innerToString() + "]";
    }

    /** Subclass-specific suffix appended inside {@link #toString()}. */
    protected abstract String innerToString();
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_store_AbstractSchemaRule.java
|
1,416
|
/**
 * Convenience base for stores of fixed-size records, adapting the
 * single-record accessors to the collection-based {@link RecordStore} API.
 */
public abstract class AbstractRecordStore<R extends AbstractBaseRecord> extends AbstractStore implements RecordStore<R>
{
    public AbstractRecordStore( File fileName, Config conf, IdType idType, IdGeneratorFactory idGeneratorFactory,
            WindowPoolFactory windowPoolFactory, FileSystemAbstraction fileSystemAbstraction,
            StringLogger stringLogger )
    {
        super( fileName, conf, idType, idGeneratorFactory, windowPoolFactory, fileSystemAbstraction, stringLogger );
    }

    @Override
    public Collection<R> getRecords( long id )
    {
        // Exactly one record per id in a plain record store.
        R record = getRecord( id );
        return singletonList( record );
    }

    @Override
    public Long getNextRecordReference( R record )
    {
        // Fixed-size records are not chained, so there is never a next record.
        return null;
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_store_AbstractRecordStore.java
|
1,417
|
/**
 * Base class for records addressed by an int id.
 */
public abstract class AbstractRecord extends AbstractBaseRecord
{
    private final int id;

    AbstractRecord( int id )
    {
        // A freshly constructed record defaults to "not in use".
        this( id, false );
    }

    AbstractRecord( int id, boolean inUse )
    {
        super( inUse );
        this.id = id;
    }

    public int getId()
    {
        return id;
    }

    @Override
    public long getLongId()
    {
        // Widened view of the same id.
        return id;
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_store_AbstractRecord.java
|
1,418
|
{
@Override
public void run()
{
while ( true )
{
// modify buffer's position "because we can" - this is used in several places,
// including Buffer.getOffsettedBuffer which in turn is also used in several places
window.getBuffer().setOffset( RANDOM.nextInt( window.getBuffer().getBuffer().limit() ) );
}
}
} ).start();
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_nioneo_store_AbstractPersistenceWindowTests.java
|
1,419
|
{
};
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_nioneo_store_AbstractPersistenceWindowTests.java
|
1,420
|
/**
 * Tests that concurrent fiddling with a window buffer's offset does not
 * interfere with flushing ({@code force()}) or subsequent reads.
 */
public class AbstractPersistenceWindowTests
{
    private static final Random RANDOM = new Random();
    private static final int RECORD_SIZE = 7;
    private static final int NUMBER_OF_RECORDS = 13;
    private AbstractPersistenceWindow window;

    @Before
    public void before() throws Exception
    {
        File directory = new File( "target/test-data" );
        directory.mkdirs();
        String filename = new File( directory, UUID.randomUUID().toString() ).getAbsolutePath();
        // NOTE(review): the RandomAccessFile is deliberately left open — the
        // window keeps using its channel for the duration of each test.
        RandomAccessFile file = new RandomAccessFile( filename, "rw" );
        StoreFileChannel channel = new StoreFileChannel( file.getChannel() );
        window = new AbstractPersistenceWindow( 0, RECORD_SIZE, RECORD_SIZE * NUMBER_OF_RECORDS,
                channel, ByteBuffer.allocate( RECORD_SIZE * NUMBER_OF_RECORDS ) )
        {
        };
    }

    @Test
    public void shouldNotLetChangesToOffsetInterfereWithFlushing() throws Exception
    {
        Thread offsetMangler = new Thread( new Runnable()
        {
            @Override
            public void run()
            {
                while ( true )
                {
                    // modify buffer's position "because we can" - this is used in several places,
                    // including Buffer.getOffsettedBuffer which in turn is also used in several places
                    window.getBuffer().setOffset( RANDOM.nextInt( window.getBuffer().getBuffer().limit() ) );
                }
            }
        } );
        // Fix: the mangler loops forever and is never stopped; mark it as a
        // daemon so it cannot keep the JVM/test-runner alive after this test.
        offsetMangler.setDaemon( true );
        offsetMangler.start();
        try
        {
            for ( int i = 1; i < 10000; i++ )
            {
                window.force();
            }
        }
        catch ( BufferOverflowException e )
        {
            fail( "Changing the state of the buffer's flags should not affect flushing" );
        }
    }

    @Test
    public void shouldNotLetFlushingInterfereWithReads() throws Exception
    {
        // Consume a handful of bytes from the window's buffer...
        window.getBuffer().get();
        window.getBuffer().get();
        window.getBuffer().get();
        window.getBuffer().get();
        window.getBuffer().get();
        window.getBuffer().get();
        window.getBuffer().get();
        window.getBuffer().get();
        window.getBuffer().get();
        // ad infimum, or at least up to RECORD_SIZE * NUMBER_OF_RECORDS
        // then a flush comes along...
        window.force();
        try
        {
            // ...and reading must be able to continue where it left off.
            window.getBuffer().get();
        }
        catch ( BufferUnderflowException e )
        {
            fail( "Flushing should not affect the state of the buffer's flags" );
        }
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_nioneo_store_AbstractPersistenceWindowTests.java
|
1,421
|
/**
 * A {@link LockableWindow} backed by an in-memory {@link Buffer} mapped over a
 * contiguous range of fixed-size records in the store file, handling reads of
 * the whole window and write-back of dirty contents.
 */
abstract class AbstractPersistenceWindow extends LockableWindow
{
    private final int recordSize;
    // First record id covered by this window.
    private final long position;
    private Buffer buffer = null;
    // Number of records this window spans.
    private final int windowSize;

    AbstractPersistenceWindow( long position, int recordSize, int totalSize,
            StoreChannel channel, ByteBuffer byteBuffer )
    {
        super( channel );
        assert position >= 0 : "Illegal position[" + position + "]";
        assert recordSize > 0 : "Illegal recordSize[" + recordSize + "]";
        assert channel != null : "Null file channel";
        assert totalSize >= recordSize;
        this.position = position;
        this.recordSize = recordSize;
        this.windowSize = totalSize / recordSize;
        this.buffer = new Buffer( this, byteBuffer );
    }

    @Override
    public Buffer getBuffer()
    {
        return buffer;
    }

    @Override
    public int getRecordSize()
    {
        return recordSize;
    }

    @Override
    public Buffer getOffsettedBuffer( long id )
    {
        // Position the buffer at the start of record `id` within this window.
        int offset = (int) (id - buffer.position()) * recordSize;
        buffer.setOffset( offset );
        return buffer;
    }

    @Override
    public long position()
    {
        return position;
    }

    // Fills the whole buffer from the underlying file (or leaves it zeroed if
    // the window lies entirely beyond the current end of file).
    void readFullWindow()
    {
        try
        {
            long fileSize = getFileChannel().size();
            long recordCount = fileSize / recordSize;
            // possible last element not written completely, therefore if
            // fileSize % recordSize can be non 0 and we check > instead of >=
            if ( position > recordCount )
            {
                // use new buffer since it will contain only zeros
                return;
            }
            ByteBuffer byteBuffer = buffer.getBuffer();
            byteBuffer.clear();
            getFileChannel().read( byteBuffer, position * recordSize );
            byteBuffer.clear();
        }
        catch ( IOException e )
        {
            throw new UnderlyingStorageException( "Unable to load position["
                    + position + "] @[" + position * recordSize + "]", e );
        }
    }

    // Writes the entire buffer back to the file at this window's position.
    private void writeContents()
    {
        // Duplicate so concurrent offset changes on the shared buffer cannot
        // disturb this flush (see AbstractPersistenceWindowTests).
        ByteBuffer byteBuffer = buffer.getBuffer().duplicate();
        byteBuffer.clear();
        try
        {
            int written = 0;
            while ( byteBuffer.hasRemaining() ) {
                int writtenThisTime = getFileChannel().write( byteBuffer, position * recordSize + written );
                if (writtenThisTime == 0)
                    throw new IOException( "Unable to write to disk, reported bytes written was 0" );
                written += writtenThisTime;
            }
        }
        catch ( IOException e )
        {
            throw new UnderlyingStorageException( "Unable to write record["
                    + position + "] @[" + position * recordSize + "]", e );
        }
    }

    @Override
    public int size()
    {
        return windowSize;
    }

    @Override
    public void force()
    {
        // Only flush if something actually changed.
        if ( isDirty() )
        {
            writeContents();
            setClean();
        }
    }

    @Override
    public boolean equals( Object o )
    {
        if ( !(o instanceof AbstractPersistenceWindow) )
        {
            return false;
        }
        // Identity is the window's starting record position.
        return position() == ((AbstractPersistenceWindow) o).position();
    }

    @Override
    public int hashCode()
    {
        return (int) this.position;
    }

    @Override
    public String toString()
    {
        return "PersistenceRow[" + position + "]";
    }

    @Override
    public synchronized void close()
    {
        // close called after flush all so no need to write out here
        buffer.close();
        closed = true;
    }

    // Copies the single record held by the given row into the right slot of
    // this window's buffer.
    @Override
    void acceptContents( PersistenceRow dpw )
    {
        ByteBuffer sourceBuffer = dpw.getBuffer().getBuffer();
        ByteBuffer targetBuffer = getBuffer().getBuffer();
        // The position of the row is the record to accept,
        // whereas the position of this window is the first record
        // in this window.
        targetBuffer.position( (int) ((dpw.position() - position()) * getRecordSize()) );
        sourceBuffer.clear();
        targetBuffer.put( sourceBuffer );
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_store_AbstractPersistenceWindow.java
|
1,422
|
/** Configuration settings recognized by this store. */
public static abstract class Configuration
        extends CommonAbstractStore.Configuration
{
    // Whether rebuildIdGenerator() may use the fast backwards scan.
    public static final Setting<Boolean> rebuild_idgenerators_fast = GraphDatabaseSettings.rebuild_idgenerators_fast;
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_store_AbstractDynamicStore.java
|
1,423
|
/**
 * A dynamic store persists variable-length data as chains of fixed-size
 * blocks. Each block consists of an 8 byte header (in-use flag, high next
 * pointer bits, payload length, low next pointer bits) followed by the
 * payload. Records that do not fit in one block are linked via next-block
 * pointers; the first block of a chain is marked as the "start record".
 */
public abstract class AbstractDynamicStore extends CommonAbstractStore implements Store, RecordStore<DynamicRecord>,
        DynamicBlockSize
{
    /** Configuration settings recognized by this store. */
    public static abstract class Configuration
        extends CommonAbstractStore.Configuration
    {
        // When true, rebuild the id generator by scanning backwards for the
        // highest in-use record instead of scanning the entire file.
        public static final Setting<Boolean> rebuild_idgenerators_fast = GraphDatabaseSettings.rebuild_idgenerators_fast;
    }

    /** Shared empty payload used for zero-length records. */
    public static final byte[] NO_DATA = new byte[0];

    private final Config conf;
    private int blockSize; // read from the store file header in readAndVerifyBlockSize()
    protected final DynamicRecordAllocator recordAllocator;

    public AbstractDynamicStore( File fileName, Config conf, IdType idType,
                                 IdGeneratorFactory idGeneratorFactory, WindowPoolFactory windowPoolFactory,
                                 FileSystemAbstraction fileSystemAbstraction, StringLogger stringLogger )
    {
        super( fileName, conf, idType, idGeneratorFactory, windowPoolFactory, fileSystemAbstraction, stringLogger );
        this.conf = conf;
        this.recordAllocator = new ExistingThenNewRecordAllocator( this, this );
    }

    @Override
    protected int getEffectiveRecordSize()
    {
        return getBlockSize();
    }

    @Override
    public int getRecordSize()
    {
        return getBlockSize();
    }

    @Override
    public int getRecordHeaderSize()
    {
        return BLOCK_HEADER_SIZE;
    }

    /**
     * Verifies that the file size (minus the trailing version descriptor) is a
     * whole multiple of the block size, then truncates the descriptor away so
     * only record data remains.
     */
    @Override
    protected void verifyFileSizeAndTruncate() throws IOException
    {
        int expectedVersionLength = UTF8.encode( buildTypeDescriptorAndVersion( getTypeDescriptor() ) ).length;
        long fileSize = getFileChannel().size();
        if ( (fileSize - expectedVersionLength) % blockSize != 0 && !isReadOnly() )
        {
            setStoreNotOk( new IllegalStateException( "Misaligned file size " + fileSize + " for " + this + ", expected version length " + expectedVersionLength ) );
        }
        if ( getStoreOk() && !isReadOnly() )
        {
            getFileChannel().truncate( fileSize - expectedVersionLength );
        }
    }

    /**
     * Reads the block size from the first 4 bytes of the store file.
     *
     * @throws InvalidRecordException if the stored block size is not positive
     */
    @Override
    protected void readAndVerifyBlockSize() throws IOException
    {
        ByteBuffer buffer = ByteBuffer.allocate( 4 );
        getFileChannel().position( 0 );
        getFileChannel().read( buffer );
        buffer.flip();
        blockSize = buffer.getInt();
        if ( blockSize <= 0 )
        {
            throw new InvalidRecordException( "Illegal block size: " +
                blockSize + " in " + getStorageFileName() );
        }
    }

    /**
     * Returns the byte size of each block for this dynamic store
     *
     * @return The block size of this store
     */
    @Override
    public int getBlockSize()
    {
        return blockSize;
    }

    /**
     * Calculate the size of a dynamic record given the size of the data block.
     *
     * @param dataSize the size of the data block in bytes.
     * @return the size of a dynamic record.
     */
    public static int getRecordSize( int dataSize )
    {
        return dataSize + BLOCK_HEADER_SIZE;
    }

    // (in_use+next high)(1 byte)+nr_of_bytes(3 bytes)+next_block(int)
    public static final int BLOCK_HEADER_SIZE = 1 + 3 + 4; // = 8

    /**
     * Writes the given record's header (and, for heavy records, payload) to
     * the store. Deleting a record (not in use) also frees its id unless we
     * are replaying the log in recovery mode.
     */
    @Override
    public void updateRecord( DynamicRecord record )
    {
        long blockId = record.getId();
        registerIdFromUpdateRecord( blockId );
        PersistenceWindow window = acquireWindow( blockId, OperationType.WRITE );
        try
        {
            Buffer buffer = window.getOffsettedBuffer( blockId );
            if ( record.inUse() )
            {
                long nextBlock = record.getNextBlock();
                // Bits 32-35 of the next pointer are stashed in the header byte.
                int highByteInFirstInteger = nextBlock == Record.NO_NEXT_BLOCK.intValue() ? 0
                    : (int) ( ( nextBlock & 0xF00000000L ) >> 8 );
                highByteInFirstInteger |= ( Record.IN_USE.byteValue() << 28 );
                highByteInFirstInteger |= (record.isStartRecord() ? 0 : 1) << 31;
                /*
                 * First 4b
                 * [x , ][ , ][ , ][ , ] 0: start record, 1: linked record
                 * [ x, ][ , ][ , ][ , ] inUse
                 * [ ,xxxx][ , ][ , ][ , ] high next block bits
                 * [ , ][xxxx,xxxx][xxxx,xxxx][xxxx,xxxx] nr of bytes in the data field in this record
                 *
                 */
                int firstInteger = record.getLength();
                assert firstInteger < ( 1 << 24 ) - 1; // length must fit in 3 bytes
                firstInteger |= highByteInFirstInteger;
                buffer.putInt( firstInteger ).putInt( (int) nextBlock );
                if ( !record.isLight() )
                {
                    buffer.put( record.getData() );
                }
                else
                {
                    // A light record (payload not loaded) may only be written back
                    // if it is not the newest record, i.e. its data already exists on disk.
                    assert getHighId() != record.getId() + 1;
                }
            }
            else
            {
                buffer.put( Record.NOT_IN_USE.byteValue() );
                if ( !isInRecoveryMode() )
                {
                    freeId( blockId );
                }
            }
        }
        finally
        {
            releaseWindow( window );
        }
    }

    @Override
    public void forceUpdateRecord( DynamicRecord record )
    {
        updateRecord( record );
    }

    // [next][type][data]
    protected Collection<DynamicRecord> allocateRecordsFromBytes( byte src[] )
    {
        return allocateRecordsFromBytes( src, Collections.<DynamicRecord>emptyList().iterator(),
            recordAllocator );
    }

    /**
     * Splits {@code src} into a chain of dynamic records, reusing records from
     * {@code recordsToUseFirst} before allocating new ones.
     *
     * @param src the payload to distribute over the chain; must not be null
     * @param recordsToUseFirst existing records to reuse before allocating
     * @param dynamicRecordAllocator source of (re)used records and data size
     * @return the linked records, in chain order
     */
    public static Collection<DynamicRecord> allocateRecordsFromBytes(
        byte src[], Iterator<DynamicRecord> recordsToUseFirst,
        DynamicRecordAllocator dynamicRecordAllocator )
    {
        assert src != null : "Null src argument";
        List<DynamicRecord> recordList = new LinkedList<>();
        DynamicRecord nextRecord = dynamicRecordAllocator.nextUsedRecordOrNew( recordsToUseFirst );
        int srcOffset = 0;
        int dataSize = dynamicRecordAllocator.dataSize();
        do
        {
            DynamicRecord record = nextRecord;
            record.setStartRecord( srcOffset == 0 );
            if ( src.length - srcOffset > dataSize )
            {
                // More data remains than fits in one block: fill this block
                // completely and link to the next one.
                byte data[] = new byte[dataSize];
                System.arraycopy( src, srcOffset, data, 0, dataSize );
                record.setData( data );
                nextRecord = dynamicRecordAllocator.nextUsedRecordOrNew( recordsToUseFirst );
                record.setNextBlock( nextRecord.getId() );
                srcOffset += dataSize;
            }
            else
            {
                // Last block of the chain: may be partially filled.
                byte data[] = new byte[src.length - srcOffset];
                System.arraycopy( src, srcOffset, data, 0, data.length );
                record.setData( data );
                nextRecord = null;
                record.setNextBlock( Record.NO_NEXT_BLOCK.intValue() );
            }
            recordList.add( record );
            assert !record.isLight();
            assert record.getData() != null;
        }
        while ( nextRecord != null );
        return recordList;
    }

    /**
     * Loads a record chain without reading the payload of each record
     * (headers only -- the records stay "light").
     */
    public Collection<DynamicRecord> getLightRecords( long startBlockId )
    {
        List<DynamicRecord> recordList = new LinkedList<>();
        long blockId = startBlockId;
        while ( blockId != Record.NO_NEXT_BLOCK.intValue() )
        {
            PersistenceWindow window = acquireWindow( blockId,
                OperationType.READ );
            try
            {
                DynamicRecord record = getRecord( blockId, window, RecordLoad.CHECK );
                recordList.add( record );
                blockId = record.getNextBlock();
            }
            finally
            {
                releaseWindow( window );
            }
        }
        return recordList;
    }

    /**
     * Loads the payload of a light record from disk. Does nothing for records
     * that already carry their payload.
     */
    public void ensureHeavy( DynamicRecord record )
    {
        if ( !record.isLight() )
            return;
        if ( record.getLength() == 0 ) // don't go through the trouble of acquiring the window if we would read nothing
        {
            record.setData( NO_DATA );
            // BUG FIX: previously fell through and acquired the window anyway,
            // contradicting the comment above and performing a pointless
            // zero-byte read. A zero-length record is already fully loaded.
            return;
        }
        long blockId = record.getId();
        PersistenceWindow window = acquireWindow( blockId, OperationType.READ );
        try
        {
            Buffer buf = window.getBuffer();
            // NOTE: skip of header in offset
            int offset = (int) (blockId-buf.position()) * getBlockSize() + BLOCK_HEADER_SIZE;
            buf.setOffset( offset );
            byte bytes[] = new byte[record.getLength()];
            buf.get( bytes );
            record.setData( bytes );
        }
        finally
        {
            releaseWindow( window );
        }
    }

    /**
     * @return whether the record whose header byte is next in {@code buffer}
     *         is flagged as in use (high nibble check -- see updateRecord).
     */
    protected boolean isRecordInUse( ByteBuffer buffer )
    {
        return ( ( buffer.get() & (byte) 0xF0 ) >> 4 ) == Record.IN_USE.byteValue();
    }

    /**
     * Reads one record at {@code blockId} through the given window.
     *
     * @param load how strict to be: NORMAL/CHECK throw on unused or corrupt
     *             records, FORCE reads whatever is there; CHECK also skips
     *             reading the payload
     */
    private DynamicRecord getRecord( long blockId, PersistenceWindow window, RecordLoad load )
    {
        DynamicRecord record = new DynamicRecord( blockId );
        Buffer buffer = window.getOffsettedBuffer( blockId );
        /*
         * First 4b
         * [x , ][ , ][ , ][ , ] 0: start record, 1: linked record
         * [ x, ][ , ][ , ][ , ] inUse
         * [ ,xxxx][ , ][ , ][ , ] high next block bits
         * [ , ][xxxx,xxxx][xxxx,xxxx][xxxx,xxxx] nr of bytes in the data field in this record
         *
         */
        long firstInteger = buffer.getUnsignedInt();
        boolean isStartRecord = (firstInteger & 0x80000000) == 0;
        long maskedInteger = firstInteger & ~0x80000000;
        int highNibbleInMaskedInteger = (int) ( ( maskedInteger ) >> 28 );
        boolean inUse = highNibbleInMaskedInteger == Record.IN_USE.intValue();
        if ( !inUse && load != RecordLoad.FORCE )
        {
            throw new InvalidRecordException( "DynamicRecord Not in use, blockId[" + blockId + "]" );
        }
        int dataSize = getBlockSize() - BLOCK_HEADER_SIZE;
        int nrOfBytes = (int) ( firstInteger & 0xFFFFFF );
        /*
         * Pointer to next block 4b (low bits of the pointer)
         */
        long nextBlock = buffer.getUnsignedInt();
        long nextModifier = ( firstInteger & 0xF000000L ) << 8;
        long longNextBlock = longFromIntAndMod( nextBlock, nextModifier );
        boolean readData = load != RecordLoad.CHECK;
        // Sanity: a non-last block must be exactly full, and no block may claim
        // more payload than fits. Note the && binds tighter than || on purpose.
        if ( longNextBlock != Record.NO_NEXT_BLOCK.intValue()
            && nrOfBytes < dataSize || nrOfBytes > dataSize )
        {
            readData = false;
            if ( load != RecordLoad.FORCE )
                throw new InvalidRecordException( "Next block set[" + nextBlock
                    + "] current block illegal size[" + nrOfBytes + "/" + dataSize + "]" );
        }
        record.setInUse( inUse );
        record.setStartRecord( isStartRecord );
        record.setLength( nrOfBytes );
        record.setNextBlock( longNextBlock );
        /*
         * Data 'nrOfBytes' bytes
         */
        if ( readData )
        {
            byte byteArrayElement[] = new byte[nrOfBytes];
            buffer.get( byteArrayElement );
            record.setData( byteArrayElement );
        }
        return record;
    }

    @Override
    public DynamicRecord getRecord( long id )
    {
        PersistenceWindow window = acquireWindow( id,
            OperationType.READ );
        try
        {
            return getRecord( id, window, RecordLoad.NORMAL );
        }
        finally
        {
            releaseWindow( window );
        }
    }

    /**
     * Like {@link #getRecord(long)} but never throws for unused or corrupt
     * records; returns an empty record if the window cannot even be acquired.
     */
    @Override
    public DynamicRecord forceGetRecord( long id )
    {
        PersistenceWindow window;
        try
        {
            window = acquireWindow( id, OperationType.READ );
        }
        catch ( InvalidRecordException e )
        {
            return new DynamicRecord( id );
        }
        try
        {
            return getRecord( id, window, RecordLoad.FORCE );
        }
        finally
        {
            releaseWindow( window );
        }
    }

    @Override
    public DynamicRecord forceGetRaw( DynamicRecord record )
    {
        return record;
    }

    @Override
    public DynamicRecord forceGetRaw( long id )
    {
        return forceGetRecord( id );
    }

    public Collection<DynamicRecord> getRecords( long startBlockId )
    {
        return getRecords( startBlockId, RecordLoad.NORMAL );
    }

    /**
     * Loads the full chain starting at {@code startBlockId}. Under FORCE
     * loading, the chain is cut short at the first unused record.
     */
    public Collection<DynamicRecord> getRecords( long startBlockId, RecordLoad loadFlag )
    {
        List<DynamicRecord> recordList = new LinkedList<>();
        long blockId = startBlockId;
        while ( blockId != Record.NO_NEXT_BLOCK.intValue() )
        {
            PersistenceWindow window = acquireWindow( blockId, OperationType.READ );
            try
            {
                DynamicRecord record = getRecord( blockId, window, loadFlag );
                if ( ! record.inUse() )
                {
                    return recordList;
                }
                recordList.add( record );
                blockId = record.getNextBlock();
            }
            finally
            {
                releaseWindow( window );
            }
        }
        return recordList;
    }

    /**
     * @return the id of the next record in the chain, or null for the last one.
     */
    @Override
    public Long getNextRecordReference( DynamicRecord record )
    {
        long nextId = record.getNextBlock();
        return Record.NO_NEXT_BLOCK.is( nextId ) ? null : nextId;
    }

    /**
     * @return a {@link ByteBuffer#slice() sliced} {@link ByteBuffer} wrapping {@code target} or,
     * if necessary a new larger {@code byte[]} and containing exactly all concatenated data read from records
     */
    public static ByteBuffer concatData( Collection<DynamicRecord> records, byte[] target )
    {
        int totalLength = 0;
        for ( DynamicRecord record : records )
            totalLength += record.getLength();
        if ( target.length < totalLength )
            target = new byte[totalLength];
        ByteBuffer buffer = ByteBuffer.wrap( target, 0, totalLength );
        for ( DynamicRecord record : records )
            buffer.put( record.getData() );
        buffer.position( 0 );
        return buffer;
    }

    /**
     * Scans the store file backwards for the highest record still in use.
     *
     * @return the id of the highest in-use record, or 0 if none is in use
     */
    private long findHighIdBackwards() throws IOException
    {
        StoreChannel fileChannel = getFileChannel();
        int recordSize = getBlockSize();
        long fileSize = fileChannel.size();
        long highId = fileSize / recordSize;
        ByteBuffer byteBuffer = ByteBuffer.allocate( 1 );
        for ( long i = highId; i > 0; i-- )
        {
            fileChannel.position( i * recordSize );
            if ( fileChannel.read( byteBuffer ) > 0 )
            {
                byteBuffer.flip();
                boolean isInUse = isRecordInUse( byteBuffer );
                byteBuffer.clear();
                if ( isInUse )
                {
                    return i;
                }
            }
        }
        return 0;
    }

    /**
     * Rebuilds the internal id generator keeping track of what blocks are free
     * or taken.
     */
    @Override
    protected void rebuildIdGenerator()
    {
        if ( getBlockSize() <= 0 )
        {
            throw new InvalidRecordException( "Illegal blockSize: " +
                getBlockSize() );
        }
        stringLogger.debug( "Rebuilding id generator for[" + getStorageFileName() + "] ..." );
        closeIdGenerator();
        // Start over from a fresh id file.
        if ( fileSystemAbstraction.fileExists( new File( getStorageFileName().getPath() + ".id" )) )
        {
            boolean success = fileSystemAbstraction.deleteFile( new File( getStorageFileName().getPath() + ".id" ));
            assert success;
        }
        createIdGenerator( new File( getStorageFileName().getPath() + ".id" ));
        openIdGenerator();
        setHighId( 1 ); // reserved first block containing blockSize
        StoreChannel fileChannel = getFileChannel();
        long highId = 0;
        long defraggedCount = 0;
        try
        {
            long fileSize = fileChannel.size();
            boolean fullRebuild = true;
            if ( conf.get( Configuration.rebuild_idgenerators_fast ) )
            {
                // Fast path: only find the highest in-use id; gaps below it
                // are not registered as reusable (they stay unusable).
                fullRebuild = false;
                highId = findHighIdBackwards();
            }
            ByteBuffer byteBuffer = ByteBuffer.wrap( new byte[1] );
            LinkedList<Long> freeIdList = new LinkedList<>();
            if ( fullRebuild )
            {
                // Scan every block; ids of unused blocks below the highest
                // in-use block are registered as free (defragged).
                for ( long i = 1; i * getBlockSize() < fileSize; i++ )
                {
                    fileChannel.position( i * getBlockSize() );
                    byteBuffer.clear();
                    fileChannel.read( byteBuffer );
                    byteBuffer.flip();
                    if ( !isRecordInUse( byteBuffer ) )
                    {
                        freeIdList.add( i );
                    }
                    else
                    {
                        highId = i;
                        setHighId( highId + 1 );
                        while ( !freeIdList.isEmpty() )
                        {
                            freeId( freeIdList.removeFirst() );
                            defraggedCount++;
                        }
                    }
                }
            }
        }
        catch ( IOException e )
        {
            throw new UnderlyingStorageException(
                "Unable to rebuild id generator " + getStorageFileName(), e );
        }
        setHighId( highId + 1 );
        stringLogger.debug( "[" + getStorageFileName() + "] high id=" + getHighId()
            + " (defragged=" + defraggedCount + ")" );
        stringLogger.logMessage( getStorageFileName() + " rebuild id generator, highId=" + getHighId() +
            " defragged count=" + defraggedCount, true );
        closeIdGenerator();
        openIdGenerator();
    }

    @Override
    protected long figureOutHighestIdInUse()
    {
        try
        {
            return getFileChannel().size()/getBlockSize();
        }
        catch ( IOException e )
        {
            throw new RuntimeException( e );
        }
    }

    @Override
    public String toString()
    {
        return super.toString() + "[fileName:" + storageFileName.getName() +
            ", blockSize:" + (getRecordSize() - getRecordHeaderSize()) + "]";
    }

    /**
     * Makes all records heavy, then concatenates their payloads -- see
     * {@link #readFullByteArrayFromHeavyRecords(Iterable, PropertyType)}.
     */
    public Pair<byte[]/*header in the first record*/,byte[]/*all other bytes*/> readFullByteArray(
        Iterable<DynamicRecord> records, PropertyType propertyType )
    {
        for ( DynamicRecord record : records )
        {
            ensureHeavy( record );
        }
        return readFullByteArrayFromHeavyRecords( records, propertyType );
    }

    /**
     * Splits the concatenated payload of a heavy record chain into the
     * type-specific header (stored in the first record) and the remaining
     * data bytes.
     */
    public static Pair<byte[]/*header in the first record*/,byte[]/*all other bytes*/> readFullByteArrayFromHeavyRecords(
        Iterable<DynamicRecord> records, PropertyType propertyType )
    {
        byte[] header = null;
        List<byte[]> byteList = new LinkedList<>();
        int totalSize = 0, i = 0;
        for ( DynamicRecord record : records )
        {
            int offset = 0;
            if ( i++ == 0 )
            { // This is the first one, read out the header separately
                header = propertyType.readDynamicRecordHeader( record.getData() );
                offset = header.length;
            }
            byteList.add( record.getData() );
            totalSize += (record.getData().length-offset);
        }
        byte[] bArray = new byte[totalSize];
        assert header != null : "header should be non-null since records should not be empty";
        int sourceOffset = header.length;
        int offset = 0;
        for ( byte[] currentArray : byteList )
        {
            System.arraycopy( currentArray, sourceOffset, bArray, offset,
                currentArray.length-sourceOffset );
            offset += (currentArray.length-sourceOffset);
            sourceOffset = 0; // only the first array carries the header
        }
        return Pair.of( header, bArray );
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_store_AbstractDynamicStore.java
|
1,424
|
/**
 * Base type for all store records. Tracks the two pieces of state every
 * record shares: whether it is in use, and whether it was created in the
 * current transaction. Identity (equals/hashCode) is based solely on the id.
 */
public abstract class AbstractBaseRecord implements CloneableInPublic
{
    private boolean inUse;
    private boolean created = false;

    AbstractBaseRecord( boolean inUse )
    {
        // limit subclasses to this package only
        this.inUse = inUse;
    }

    /** @return this record's id as a long. */
    public abstract long getLongId();

    public final boolean inUse()
    {
        return inUse;
    }

    public void setInUse( boolean inUse )
    {
        this.inUse = inUse;
    }

    /** Marks this record as created in the current transaction. */
    public final void setCreated()
    {
        this.created = true;
    }

    public final boolean isCreated()
    {
        return created;
    }

    @Override
    public int hashCode()
    {
        // Same folding as Long.hashCode, offset by the conventional prime.
        long id = getLongId();
        return 31 + (int) (id ^ (id >>> 32));
    }

    @Override
    public boolean equals( Object obj )
    {
        if ( this == obj )
        {
            return true;
        }
        if ( obj == null || getClass() != obj.getClass() )
        {
            return false;
        }
        return getLongId() == ((AbstractBaseRecord) obj).getLongId();
    }

    @Override
    public AbstractBaseRecord clone()
    {
        // Subclasses that support cloning override this.
        throw new UnsupportedOperationException();
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_store_AbstractBaseRecord.java
|
1,425
|
/**
 * A record identified by an immutable 64-bit id.
 */
public abstract class Abstract64BitRecord extends AbstractBaseRecord
{
    private final long id;

    protected Abstract64BitRecord( long id )
    {
        super( false );
        this.id = id;
    }

    Abstract64BitRecord( long id, boolean inUse )
    {
        super( inUse );
        this.id = id;
    }

    @Override
    public long getLongId()
    {
        return id;
    }

    public long getId()
    {
        return id;
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_store_Abstract64BitRecord.java
|
1,426
|
/**
 * Reported when a task thread failed to finish within its timeout. Carries
 * the stuck thread's stack trace instead of the construction site's, which is
 * far more useful for diagnosing the hang.
 */
private static class ThreadStillRunningException extends Exception
{
    ThreadStillRunningException( TaskThread thread )
    {
        super( describe( thread ) );
        // Show where the target thread is stuck, not where we built this.
        setStackTrace( thread.getStackTrace() );
    }

    private static String describe( TaskThread thread )
    {
        return '"' + thread.getName() + "\"; state=" + thread.getState() + "; blockedOn=" + thread.blocker();
    }

    @Override
    public synchronized Throwable fillInStackTrace()
    {
        // Suppress the default capture -- the relevant stack is set above.
        return this;
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_locking_ThreadRepository.java
|
1,427
|
/**
 * A worker thread that performs a fixed sequence of tasks, recording the
 * first exception instead of propagating it, so the test harness can collect
 * failures from all threads afterwards.
 */
private static class TaskThread extends Thread implements ThreadInfo
{
    private final Task[] tasks;
    private Exception failure;

    TaskThread( String name, Task[] tasks )
    {
        super( name );
        this.tasks = tasks;
    }

    /**
     * Waits up to the given timeout for this thread to finish, then adds any
     * problems (still running, or a task exception) to {@code failures}.
     */
    void complete( List<Throwable> failures, long timeout, TimeUnit unit ) throws InterruptedException
    {
        join( unit.toMillis( timeout ) );
        if ( isAlive() )
        {
            failures.add( new ThreadStillRunningException( this ) );
        }
        if ( failure != null )
        {
            failures.add( failure );
        }
    }

    @Override
    public void run()
    {
        try
        {
            for ( int i = 0; i < tasks.length; i++ )
            {
                tasks[i].perform();
            }
        }
        catch ( Exception e )
        {
            // Remember the first failure; complete() will surface it.
            failure = e;
        }
    }

    @Override
    public Object blocker()
    {
        return getBlocker( this );
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_locking_ThreadRepository.java
|
1,428
|
/**
 * A task that counts down a latch when performed, letting other threads
 * {@link #await()} the signal.
 */
public static class Signal implements Task
{
    private final CountDownLatch latch;

    private Signal( CountDownLatch latch )
    {
        this.latch = latch;
    }

    @Override
    public void perform() throws Exception
    {
        latch.countDown();
    }

    /** @return a task that blocks until this signal has fired. */
    public Await await()
    {
        return new Await( latch );
    }

    /** Blocks the calling thread until this signal has fired. */
    public void awaitNow() throws InterruptedException
    {
        latch.await();
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_locking_ThreadRepository.java
|
1,429
|
/**
 * Tracks the threads created for one test method so they can all be joined
 * (and their failures collected) when the test completes. Thread names are
 * derived from the test method name plus a running sequence number.
 */
private class Repository
{
    private final Description description;
    private final List<TaskThread> threads = new ArrayList<>();
    private int i;

    Repository( Description description )
    {
        this.description = description;
    }

    /** Creates, registers and starts a thread running the given tasks. */
    synchronized TaskThread createThread( String name, Task[] tasks )
    {
        TaskThread thread = new TaskThread( nextName( name ), tasks );
        threads.add( thread );
        thread.start();
        return thread;
    }

    private String nextName( String name )
    {
        String generated = description.getMethodName() + "-" + (++i);
        return name == null ? generated : generated + ":" + name;
    }

    /** Joins every registered thread, accumulating any failures. */
    void completeAll( List<Throwable> failures )
    {
        for ( TaskThread thread : threads )
        {
            try
            {
                thread.complete( failures, timeout, unit );
            }
            catch ( InterruptedException interrupted )
            {
                failures.add( interrupted );
            }
        }
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_locking_ThreadRepository.java
|
1,430
|
{
    @Override
    public void perform() throws Exception
    {
        // Record the event name; the backing list is concurrency-safe.
        collected.add( event );
    }
};
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_locking_ThreadRepository.java
|
1,431
|
/**
 * Collects named events emitted by task threads so a test can assert the
 * order in which they occurred.
 */
public class Events
{
    // CopyOnWriteArrayList makes concurrent appends from task threads safe
    // without external locking.
    private final List<String> collected;
    private Events()
    {
        collected = new CopyOnWriteArrayList<>();
    }
    /** Returns a task that records {@code event} when performed. */
    public Task trigger( final String event )
    {
        return new Task()
        {
            @Override
            public void perform() throws Exception
            {
                collected.add( event );
            }
        };
    }
    /**
     * Completes all threads, then asserts that exactly the given events were
     * recorded, in the given order.
     */
    public void assertInOrder( String... events ) throws Exception
    {
        try
        {
            completeThreads();
        }
        catch ( Error | Exception ok )
        {
            // Errors and Exceptions may be rethrown as-is...
            throw ok;
        }
        catch ( Throwable throwable )
        {
            // ...any other Throwable must be wrapped to satisfy the signature.
            throw new Exception( "Unexpected Throwable", throwable );
        }
        // Sizing the array by the expectation makes a count mismatch show up
        // in the array comparison below.
        String[] actual = collected.toArray( new String[events.length] );
        assertArrayEquals( events, actual );
    }
    /** @return a point-in-time copy of the events recorded so far. */
    public List<String> snapshot()
    {
        return new ArrayList<>( collected );
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_locking_ThreadRepository.java
|
1,432
|
/**
 * Convenience base class for {@link StoreChannel} implementations: every
 * operation throws {@link UnsupportedOperationException}, so subclasses
 * (typically test doubles) only need to override the methods they actually
 * use.
 */
public class AbstractStoreChannel implements StoreChannel
{
    @Override
    public FileLock tryLock() throws IOException
    {
        throw new UnsupportedOperationException();
    }
    @Override
    public int write( ByteBuffer src, long position ) throws IOException
    {
        throw new UnsupportedOperationException();
    }
    @Override
    public MappedByteBuffer map( FileChannel.MapMode mode, long position, long size ) throws IOException
    {
        throw new UnsupportedOperationException();
    }
    @Override
    public int read( ByteBuffer dst, long position ) throws IOException
    {
        throw new UnsupportedOperationException();
    }
    @Override
    public void force( boolean metaData ) throws IOException
    {
        throw new UnsupportedOperationException();
    }
    @Override
    public long write( ByteBuffer[] srcs, int offset, int length ) throws IOException
    {
        throw new UnsupportedOperationException();
    }
    @Override
    public long write( ByteBuffer[] srcs ) throws IOException
    {
        throw new UnsupportedOperationException();
    }
    @Override
    public long read( ByteBuffer[] dsts, int offset, int length ) throws IOException
    {
        throw new UnsupportedOperationException();
    }
    @Override
    public long read( ByteBuffer[] dsts ) throws IOException
    {
        throw new UnsupportedOperationException();
    }
    @Override
    public int read( ByteBuffer dst ) throws IOException
    {
        throw new UnsupportedOperationException();
    }
    @Override
    public int write( ByteBuffer src ) throws IOException
    {
        throw new UnsupportedOperationException();
    }
    @Override
    public long position() throws IOException
    {
        throw new UnsupportedOperationException();
    }
    @Override
    public StoreChannel position( long newPosition ) throws IOException
    {
        throw new UnsupportedOperationException();
    }
    @Override
    public long size() throws IOException
    {
        throw new UnsupportedOperationException();
    }
    @Override
    public StoreChannel truncate( long size ) throws IOException
    {
        throw new UnsupportedOperationException();
    }
    @Override
    public boolean isOpen()
    {
        throw new UnsupportedOperationException();
    }
    @Override
    public void close() throws IOException
    {
        throw new UnsupportedOperationException();
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_store_AbstractStoreChannel.java
|
1,433
|
{
    @Override
    public BrickElement create( int index )
    {
        // Factory simply wraps the index in a new brick element.
        return new BrickElement( index );
    }
};
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_store_BrickElementFactory.java
|
1,434
|
/**
 * A no-op {@link Node} index used for testing the index provider plumbing.
 */
private class DummyNodeIndex extends DummyIndex<Node>
{
    public DummyNodeIndex( String name, InternalAbstractGraphDatabase db )
    {
        super( name, db );
    }
    @Override
    public Class<Node> getEntityType()
    {
        return Node.class;
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_index_DummyIndexExtensionFactory.java
|
1,435
|
public class Buffer
{
private final ByteBuffer buf;
private final PersistenceWindow persistenceWindow;
public Buffer( PersistenceWindow persistenceWindow, ByteBuffer buf )
{
this.persistenceWindow = persistenceWindow;
if ( buf == null )
{
throw new IllegalArgumentException( "null buf" );
}
this.buf = buf;
}
// void setByteBuffer( ByteBuffer byteBuffer )
// {
// this.buf = byteBuffer;
// }
/**
* Returns the position of the persistence window tied to this buffer.
*
* @return The persistence window's position
*/
public long position()
{
return persistenceWindow.position();
}
/**
* Returns the underlying byte buffer.
*
* @return The byte buffer wrapped by this buffer
*/
public ByteBuffer getBuffer()
{
return buf;
}
public void reset()
{
buf.clear();
}
/**
* Sets the offset from persistence window position in the underlying byte
* buffer.
*
* @param offset
* The new offset to set
* @return This buffer
*/
public Buffer setOffset( int offset )
{
try
{
buf.position( offset );
}
catch ( java.lang.IllegalArgumentException e )
{
// logger.severe( "Illegal buffer position: Pos=" + position()
// + " off=" + offset + " capacity=" + buf.capacity() );
throw new IllegalArgumentException( "Illegal offset " + offset +
" for window position:" + position() + ", buffer:" + buf, e );
}
return this;
}
/**
* Returns the offset of this buffer.
*
* @return The offset
*/
public int getOffset()
{
return buf.position();
}
/**
* Puts a <CODE>byte</CODE> into the underlying buffer.
*
* @param b
* The <CODE>byte</CODE> that will be written
* @return This buffer
*/
public Buffer put( byte b )
{
buf.put( b );
return this;
}
/**
* Puts a <CODE>int</CODE> into the underlying buffer.
*
* @param i
* The <CODE>int</CODE> that will be written
* @return This buffer
*/
public Buffer putInt( int i )
{
buf.putInt( i );
return this;
}
/**
* Puts a <CODE>long</CODE> into the underlying buffer.
*
* @param l
* The <CODE>long</CODE> that will be written
* @return This buffer
*/
public Buffer putLong( long l )
{
buf.putLong( l );
return this;
}
/**
* Reads and returns a <CODE>byte</CODE> from the underlying buffer.
*
* @return The <CODE>byte</CODE> value at the current position/offset
*/
public byte get()
{
return buf.get();
}
/**
* Reads and returns a <CODE>int</CODE> from the underlying buffer.
*
* @return The <CODE>int</CODE> value at the current position/offset
*/
public int getInt()
{
return buf.getInt();
}
public long getUnsignedInt()
{
return buf.getInt()&0xFFFFFFFFL;
}
/**
* Reads and returns a <CODE>long</CODE> from the underlying buffer.
*
* @return The <CODE>long</CODE> value at the current position/offset
*/
public long getLong()
{
return buf.getLong();
}
/**
* Puts a <CODE>byte array</CODE> into the underlying buffer.
*
* @param src
* The <CODE>byte array</CODE> that will be written
* @return This buffer
*/
public Buffer put( byte src[] )
{
buf.put( src );
return this;
}
public Buffer put( char src[] )
{
int oldPos = buf.position();
buf.asCharBuffer().put( src );
buf.position( oldPos + src.length * 2 );
return this;
}
/**
* Puts a <CODE>byte array</CODE> into the underlying buffer starting from
* <CODE>offset</CODE> in the array and writing <CODE>length</CODE>
* values.
*
* @param src
* The <CODE>byte array</CODE> to write values from
* @param offset
* The offset in the <CODE>byte array</CODE>
* @param length
* The number of bytes to write
* @return This buffer
*/
public Buffer put( byte src[], int offset, int length )
{
buf.put( src, offset, length );
return this;
}
/**
* Reads <CODE>byte array length</CODE> bytes into the
* <CODE>byte array</CODE> from the underlying buffer.
*
* @param dst
* The byte array to read values into
* @return This buffer
*/
public Buffer get( byte dst[] )
{
buf.get( dst );
return this;
}
public Buffer get( char dst[] )
{
buf.asCharBuffer().get( dst );
return this;
}
public void close()
{
buf.limit( 0 );
}
@Override
public String toString()
{
return "Buffer[[" + buf.position() + "," + buf.capacity() + "]," +
persistenceWindow + "]";
}
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_store_Buffer.java
|
1,436
|
/**
 * An in-memory test database wired to the surrounding test's ephemeral file
 * system, using the real (non-ephemeral) id generator so id-rebuild behavior
 * can be exercised.
 */
@SuppressWarnings("deprecation")
private class Database extends ImpermanentGraphDatabase
{
    @Override
    protected FileSystemAbstraction createFileSystemAbstraction()
    {
        // Share the test's ephemeral file system across database instances.
        return fs;
    }
    @Override
    protected IdGeneratorFactory createIdGeneratorFactory()
    {
        return new DefaultIdGeneratorFactory();
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_nioneo_store_IdGeneratorRebuildFailureEmulationTest.java
|
1,437
|
@RunWith(Suite.class)
@SuiteClasses({IdGeneratorRebuildFailureEmulationTest.FailureBeforeRebuild.class,
IdGeneratorRebuildFailureEmulationTest.FailureDuringRebuild.class})
public class IdGeneratorRebuildFailureEmulationTest
{
@RunWith(JUnit4.class)
public static final class FailureBeforeRebuild extends IdGeneratorRebuildFailureEmulationTest
{
@Override
protected void emulateFailureOnRebuildOf( NeoStore neostore )
{
// emulate a failure during rebuild by not issuing this call:
// neostore.makeStoreOk();
}
}
@RunWith(SubProcessTestRunner.class)
@ForeignBreakpoints(@ForeignBreakpoints.BreakpointDef(
type = "org.neo4j.kernel.impl.nioneo.store.IdGeneratorImpl", method = "setHighId"))
public static final class FailureDuringRebuild extends IdGeneratorRebuildFailureEmulationTest
{
@Override
protected void emulateFailureOnRebuildOf( NeoStore neostore )
{
// emulate a failure (Id capacity exceeded) during rebuild by breakpoints in this method:
neostore.makeStoreOk();
fail( "makeStoreOk should have thrown UnderlyingStorageException" );
}
@BreakpointHandler("performTest")
public static void bootstrapTest( @BreakpointHandler("setHighId") BreakPoint setHighId )
{
setHighId.enable();
}
@SuppressWarnings("boxing")
@BreakpointHandler("setHighId")
public static void on_setHighId( DebugInterface di, BreakPoint setHighId )
{
if ( setHighId.invocationCount() > 1
|| RelationshipTypeTokenStore.class.getName().equals( di.thread().getStackTrace()
[2].getClassName() ) )
{
setHighId.disable();
// emulate a failure in recovery by changing the id parameter to setHighId(id) to an invalid value,
// causing an exception to be thrown.
di.setLocalVariable( "id", -1 );
}
}
}
@BreakpointTrigger
private void performTest() throws Exception
{
String file = prefix + File.separator + Thread.currentThread().getStackTrace()[2].getMethodName().replace(
'_', '.' );
// emulate the need for rebuilding id generators by deleting it
fs.deleteFile( new File( file + ".id") );
NeoStore neostore = null;
try
{
neostore = factory.newNeoStore( new File( prefix + File.separator + "neostore") );
// emulate a failure during rebuild:
emulateFailureOnRebuildOf( neostore );
}
catch ( UnderlyingStorageException expected )
{
assertEquals( "Id capacity exceeded", expected.getMessage() );
}
finally
{
// we want close to not misbehave
// (and for example truncate the file based on the wrong highId)
if ( neostore != null )
{
neostore.close();
}
}
}
void emulateFailureOnRebuildOf( NeoStore neostore )
{
fail( "emulateFailureOnRebuildOf(NeoStore) must be overridden" );
}
private FileSystem fs;
private StoreFactory factory;
private String prefix;
@Before
public void initialize()
{
fs = new FileSystem();
InternalAbstractGraphDatabase graphdb = new Database();
prefix = graphdb.getStoreDir();
createInitialData( graphdb );
graphdb.shutdown();
Map<String, String> config = new HashMap<String, String>();
config.put( GraphDatabaseSettings.rebuild_idgenerators_fast.name(), Settings.FALSE );
config.put( GraphDatabaseSettings.store_dir.name(), prefix );
factory = new StoreFactory( new Config( config, GraphDatabaseSettings.class ),
new DefaultIdGeneratorFactory(), new DefaultWindowPoolFactory(), fs, StringLogger.DEV_NULL, null );
}
@After
public void verifyAndDispose() throws Exception
{
try
{
InternalAbstractGraphDatabase graphdb = new Database();
verifyData( graphdb );
graphdb.shutdown();
}
finally
{
if ( fs != null )
{
fs.disposeAndAssertNoOpenFiles();
}
fs = null;
}
}
private void verifyData( GraphDatabaseService graphdb )
{
Transaction tx = graphdb.beginTx();
try
{
int nodecount = 0;
for ( Node node : GlobalGraphOperations.at( graphdb ).getAllNodes() )
{
int propcount = readProperties( node );
int relcount = 0;
for ( Relationship rel : node.getRelationships() )
{
assertEquals( "all relationships should have 3 properties.", 3, readProperties( rel ) );
relcount++;
}
assertEquals( "all created nodes should have 3 properties.", 3, propcount );
assertEquals( "all created nodes should have 2 relationships.", 2, relcount );
nodecount++;
}
assertEquals( "The database should have 2 nodes.", 2, nodecount );
}
finally
{
tx.finish();
}
}
private void createInitialData( GraphDatabaseService graphdb )
{
Transaction tx = graphdb.beginTx();
try
{
Node first = properties( graphdb.createNode() );
Node other = properties( graphdb.createNode() );
properties( first.createRelationshipTo( other, DynamicRelationshipType.withName( "KNOWS" ) ) );
properties( other.createRelationshipTo( first, DynamicRelationshipType.withName( "DISTRUSTS" ) ) );
tx.success();
}
finally
{
tx.finish();
}
}
private <E extends PropertyContainer> E properties( E entity )
{
entity.setProperty( "short thing", "short" );
entity.setProperty( "long thing",
"this is quite a long string, don't you think, it sure is long enough at least" );
entity.setProperty( "string array", new String[]{"these are a few", "cool strings",
"for your viewing pleasure"} );
return entity;
}
/**
 * Reads every property of the entity (forcing each value to actually be
 * loaded from the store) and returns the number of property keys found.
 */
private int readProperties( PropertyContainer entity )
{
    int keys = 0;
    for ( String key : entity.getPropertyKeys() )
    {
        // touch the value so it is read from the store, not just the key
        entity.getProperty( key );
        keys++;
    }
    return keys;
}
/**
 * Ephemeral file system that, on disposal, asserts no files are still open
 * before really shutting down. {@link #shutdown()} itself is a no-op so the
 * in-memory "disk" survives database restarts within a single test.
 */
private static class FileSystem extends EphemeralFileSystemAbstraction
{
    /** Verifies every file has been closed, then performs the real shutdown. */
    void disposeAndAssertNoOpenFiles() throws Exception
    {
        assertNoOpenFiles();
        super.shutdown();
    }

    @Override
    public void shutdown()
    {
        // no-op: the database lifecycle calls shutdown() on restart, but this
        // test needs the file contents to survive until disposal.
        // (It's pretty odd that EphemeralFileSystemAbstraction implements
        // Lifecycle by default.)
    }
}
@SuppressWarnings("deprecation")
private class Database extends ImpermanentGraphDatabase
{
@Override
protected FileSystemAbstraction createFileSystemAbstraction()
{
return fs;
}
@Override
protected IdGeneratorFactory createIdGeneratorFactory()
{
return new DefaultIdGeneratorFactory();
}
}
@EnabledBreakpoints("performTest")
@Test
public void neostore() throws Exception
{
performTest();
}
@EnabledBreakpoints("performTest")
@Test
public void neostore_nodestore_db() throws Exception
{
performTest();
}
@EnabledBreakpoints("performTest")
@Test
public void neostore_propertystore_db_arrays() throws Exception
{
performTest();
}
@EnabledBreakpoints("performTest")
@Test
public void neostore_propertystore_db() throws Exception
{
performTest();
}
@EnabledBreakpoints("performTest")
@Test
public void neostore_propertystore_db_index() throws Exception
{
performTest();
}
@EnabledBreakpoints("performTest")
@Test
public void neostore_propertystore_db_index_keys() throws Exception
{
performTest();
}
@EnabledBreakpoints("performTest")
@Test
public void neostore_propertystore_db_strings() throws Exception
{
performTest();
}
@EnabledBreakpoints("performTest")
@Test
public void neostore_relationshipstore_db() throws Exception
{
performTest();
}
@EnabledBreakpoints("performTest")
@Test
public void neostore_relationshiptypestore_db() throws Exception
{
performTest();
}
@EnabledBreakpoints("performTest")
@Test
public void neostore_relationshiptypestore_db_names() throws Exception
{
performTest();
}
private IdGeneratorRebuildFailureEmulationTest()
{
if ( IdGeneratorRebuildFailureEmulationTest.class == getClass() )
{
throw new UnsupportedOperationException( "This class is effectively abstract" );
}
}
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_nioneo_store_IdGeneratorRebuildFailureEmulationTest.java
|
1,438
|
/**
 * File backed id generator. The backing file consists of a 9 byte header
 * (a sticky byte plus the highest id as a long) followed by zero or more
 * 8 byte longs, each a previously freed ("defragged") id available for reuse.
 * A sticky byte that is still set on open means the generator wasn't closed
 * properly and must be rebuilt from the owning store file.
 */
public class IdGeneratorImpl implements IdGenerator
{
// sticky(byte), nextFreeId(long)
private static final int HEADER_SIZE = 9;
// if sticky the id generator wasn't closed properly so it has to be
// rebuilt (go through the node, relationship, property, rel type etc files)
private static final byte CLEAN_GENERATOR = (byte) 0;
private static final byte STICKY_GENERATOR = (byte) 1;
// reserved value: 0xFFFFFFFF is used by the stores to mean "no next
// block/record", so it must never be handed out as an id
public static final long INTEGER_MINUS_ONE = 0xFFFFFFFFL; // 4294967295L;
// number of defragged ids to grab from file in batch (also used for write)
private int grabSize = -1;
private final AtomicLong highId = new AtomicLong( -1 );
// total bytes read from file, used in writeIdBatch() and close()
private long readPosition;
// marks how much this session is allowed to read from previously released id batches.
private long maxReadPosition = HEADER_SIZE;
// used to calculate number of ids actually in use
private long defraggedIdCount = -1;
private final File fileName;
private final FileSystemAbstraction fs;
// null once the generator is closed; used as the "open" sentinel
private StoreChannel fileChannel = null;
// defragged ids read from file (freed in a previous session).
private final LinkedList<Long> idsReadFromFile = new LinkedList<>();
// ids freed in this session that havn't been flushed to disk yet
private final LinkedList<Long> releasedIdList = new LinkedList<>();
// highest id this generator is ever allowed to return
private final long max;
// when true, ids freed in this session may be handed out again in the same session
private final boolean aggressiveReuse;
/**
 * Opens the id generator represented by <CODE>fileName</CODE>. The
 * <CODE>grabSize</CODE> means how many defragged ids we should keep in
 * memory and is also the size (x4) of the two buffers used for reading and
 * writing to the id generator file. The highest returned id will be read
 * from file and if <CODE>grabSize</CODE> number of ids exist they will be
 * read into memory (if less exist all defragged ids will be in memory).
 * <p>
 * If this id generator hasn't been closed properly since the previous
 * session (sticky) an <CODE>IOException</CODE> will be thrown. When this
 * happens one has to rebuild the id generator from the (node/rel/prop)
 * store file.
 *
 * @param fileName
 * The file name (and path if needed) for the id generator to be
 * opened
 * @param grabSize
 * The number of defragged ids to keep in memory
 * @param max is the highest possible id to be returned by this id generator from
 * {@link #nextId()}.
 * @param aggressiveReuse will reuse ids during the same session, not requiring
 * a restart to be able reuse ids freed with {@link #freeId(long)}.
 * @param highId the highest id in use.
 * @throws UnderlyingStorageException
 * If no such file exist or if the id generator is sticky
 */
public IdGeneratorImpl( FileSystemAbstraction fs, File fileName, int grabSize, long max, boolean aggressiveReuse,
long highId )
{
this.fs = fs;
this.aggressiveReuse = aggressiveReuse;
if ( grabSize < 1 )
{
throw new IllegalArgumentException( "Illegal grabSize: " + grabSize );
}
this.max = max;
this.fileName = fileName;
this.grabSize = grabSize;
initGenerator();
// keep whichever is higher: the id read from file or the caller supplied one
this.highId.set( max( this.highId.get(), highId ) );
}
/**
 * Returns the next "free" id. If a defragged id exist it will be returned
 * else the next free id that hasn't been used yet is returned. If no id
 * exist the capacity is exceeded (all values <= max are taken) and a
 * {@link UnderlyingStorageException} will be thrown.
 *
 * @return The next free id
 * @throws UnderlyingStorageException
 * If the capacity is exceeded
 * @throws IllegalStateException if this id generator has been closed
 */
@Override
public synchronized long nextId()
{
assertStillOpen();
long nextDefragId = nextIdFromDefragList();
if ( nextDefragId != -1 ) return nextDefragId;
long id = highId.get();
if ( id == INTEGER_MINUS_ONE )
{
// Skip the integer -1 (0xFFFFFFFF) because it represents
// special values, f.ex. the end of a relationships/property chain.
id = highId.incrementAndGet();
}
assertIdWithinCapacity( id );
highId.incrementAndGet();
return id;
}
// Throws if the id is outside [0, max].
private void assertIdWithinCapacity( long id )
{
if ( id > max || id < 0 )
{
throw new UnderlyingStorageException( "Id capacity exceeded" );
}
}
// True while there are id batches on disk this session is allowed to read.
private boolean canReadMoreIdBatches()
{
return readPosition < maxReadPosition;
}
// Returns a reusable id: preferring ids freed this session (only when
// aggressiveReuse is on), then ids persisted by earlier sessions; -1 if none.
private long nextIdFromDefragList()
{
if ( aggressiveReuse )
{
Long id = releasedIdList.poll();
if ( id != null )
{
defraggedIdCount--;
return id;
}
}
if ( !idsReadFromFile.isEmpty() || canReadMoreIdBatches() )
{
if ( idsReadFromFile.isEmpty() )
{
readIdBatch();
}
long id = idsReadFromFile.removeFirst();
defraggedIdCount--;
return id;
}
return -1;
}
private void assertStillOpen()
{
if ( fileChannel == null )
{
throw new IllegalStateException( "Closed id generator " + fileName );
}
}
/**
 * Returns a batch of {@code size} ids: any available defragged ids first,
 * topped up with a contiguous range of fresh ids starting at the high id.
 */
@Override
public synchronized IdRange nextIdBatch( int size )
{
assertStillOpen();
// Get from defrag list
int count = 0;
long[] defragIds = new long[size];
while ( count < size )
{
long id = nextIdFromDefragList();
if ( id == -1 )
{
break;
}
defragIds[count++] = id;
}
// Shrink the array to actual size
long[] tmpArray = defragIds;
defragIds = new long[count];
System.arraycopy( tmpArray, 0, defragIds, 0, count );
int sizeLeftForRange = size - count;
long start = highId.get();
setHighId( start + sizeLeftForRange );
return new IdRange( defragIds, start, sizeLeftForRange );
}
/**
 * Sets the next free "high" id. This method should be called when an id
 * generator has been rebuilt. {@code id} must not be higher than {@code max}.
 *
 * @param id
 * The next free id
 */
@Override
public void setHighId( long id )
{
assertIdWithinCapacity( id );
highId.set( id );
}
/**
 * Returns the next "high" id that will be returned if no defragged ids
 * exist.
 *
 * @return The next free "high" id
 */
@Override
public long getHighId()
{
return highId.get();
}
/**
 * Frees the <CODE>id</CODE> making it a defragged id that will be
 * returned by next id before any new id (that hasn't been used yet) is
 * returned.
 * <p>
 * This method will throw an <CODE>IOException</CODE> if id is negative or
 * if id is greater than the highest returned id. However as stated in the
 * class documentation above the id isn't validated to see if it really is
 * free.
 *
 * @param id
 * The id to be made available again
 */
@Override
public synchronized void freeId( long id )
{
if ( id == INTEGER_MINUS_ONE )
{
// the reserved value is silently dropped; it is never a real id
return;
}
if ( fileChannel == null )
{
throw new IllegalStateException( "Generator closed " + fileName );
}
if ( id < 0 || id >= highId.get() )
{
throw new IllegalArgumentException( "Illegal id[" + id + "]" );
}
releasedIdList.add( id );
defraggedIdCount++;
// flush to disk once a full batch of freed ids has accumulated
if ( releasedIdList.size() >= grabSize )
{
writeIdBatch( ByteBuffer.allocate( grabSize*8 ) );
}
}
/**
 * Closes the id generator flushing defragged ids in memory to file. The
 * file will be truncated to the minimal size required to hold all defragged
 * ids and it will be marked as clean (not sticky).
 * <p>
 * An invoke to the <CODE>nextId</CODE> or <CODE>freeId</CODE> after
 * this method has been invoked will result in an <CODE>IOException</CODE>
 * since the highest returned id has been set to a negative value.
 */
@Override
public synchronized void close()
{
if ( highId.get() == -1 )
{
// already closed (or never successfully initialized)
return;
}
// write out lists
ByteBuffer writeBuffer = ByteBuffer.allocate( grabSize*8 );
if ( !releasedIdList.isEmpty() )
{
writeIdBatch( writeBuffer );
}
if ( !idsReadFromFile.isEmpty() )
{
while ( !idsReadFromFile.isEmpty() )
{
releasedIdList.add( idsReadFromFile.removeFirst() );
}
writeIdBatch( writeBuffer );
}
try
{
ByteBuffer buffer = ByteBuffer.allocate( HEADER_SIZE );
writeHeader( buffer );
defragReusableIdsInFile( writeBuffer );
fileChannel.force( false );
markAsCleanlyClosed( buffer );
// flush and close
fileChannel.force( false );
fileChannel.close();
fileChannel = null;
// make this generator unusable
highId.set( -1 );
}
catch ( IOException e )
{
throw new UnderlyingStorageException(
"Unable to close id generator " + fileName, e );
}
}
// Overwrites the sticky byte with CLEAN_GENERATOR at the start of the file.
private void markAsCleanlyClosed( ByteBuffer buffer ) throws IOException
{
// remove sticky
buffer.clear();
buffer.put( CLEAN_GENERATOR );
buffer.limit( 1 );
buffer.flip();
fileChannel.position( 0 );
fileChannel.write( buffer );
}
// Compacts the file by shifting unread id batches down over the consumed
// ones, then truncating the tail.
private void defragReusableIdsInFile( ByteBuffer writeBuffer ) throws IOException
{
if ( readPosition > HEADER_SIZE )
{
long writePosition = HEADER_SIZE;
long position = Math.min( readPosition, maxReadPosition );
int bytesRead;
do
{
writeBuffer.clear();
fileChannel.position( position );
bytesRead = fileChannel.read( writeBuffer );
position += bytesRead;
writeBuffer.flip();
fileChannel.position( writePosition );
writePosition += fileChannel.write( writeBuffer );
}
while ( bytesRead > 0 );
// truncate
fileChannel.truncate( writePosition );
}
}
// Writes the sticky byte + current high id at the start of the file.
private void writeHeader( ByteBuffer buffer ) throws IOException
{
fileChannel.position( 0 );
buffer.put( STICKY_GENERATOR ).putLong( highId.get() );
buffer.flip();
fileChannel.write( buffer );
}
public static void createGenerator( FileSystemAbstraction fs, File fileName )
{
createGenerator( fs, fileName, 0 );
}
/**
 * Creates a new id generator.
 *
 * @param fileName
 * The name of the id generator
 */
public static void createGenerator( FileSystemAbstraction fs, File fileName, long highId )
{
// sanity checks
if ( fs == null )
{
throw new IllegalArgumentException( "Null filesystem" );
}
if ( fileName == null )
{
throw new IllegalArgumentException( "Null filename" );
}
if ( fs.fileExists( fileName ) )
{
throw new IllegalStateException( "Can't create IdGeneratorFile["
+ fileName + "], file already exists" );
}
try
{
StoreChannel channel = fs.create( fileName );
// write the header
ByteBuffer buffer = ByteBuffer.allocate( HEADER_SIZE );
buffer.put( CLEAN_GENERATOR ).putLong( highId ).flip();
channel.write( buffer );
channel.force( false );
channel.close();
}
catch ( IOException e )
{
throw new UnderlyingStorageException(
"Unable to create id generator" + fileName, e );
}
}
// initialize the id generator and performs a simple validation
private synchronized void initGenerator()
{
try
{
fileChannel = fs.open( fileName, "rw" );
ByteBuffer buffer = ByteBuffer.allocate( HEADER_SIZE );
readHeader( buffer );
// mark sticky immediately so a crash from here on forces a rebuild
markAsSticky( buffer );
fileChannel.position( HEADER_SIZE );
maxReadPosition = fileChannel.size();
// NOTE(review): the (int) cast applies to the byte difference before the
// division — id files larger than 2GB would overflow here; presumably
// never the case in practice, but worth confirming.
defraggedIdCount = (int) (maxReadPosition - HEADER_SIZE) / 8;
readIdBatch();
}
catch ( IOException e )
{
throw new UnderlyingStorageException(
"Unable to init id generator " + fileName, e );
}
}
// Sets the sticky byte at the start of the file.
private void markAsSticky( ByteBuffer buffer ) throws IOException
{
buffer.clear();
buffer.put( STICKY_GENERATOR ).limit( 1 ).flip();
fileChannel.position( 0 );
fileChannel.write( buffer );
}
// Reads and validates the 9 byte header; rejects sticky (uncleanly closed) files.
private void readHeader( ByteBuffer buffer ) throws IOException
{
readPosition = fileChannel.read( buffer );
if ( readPosition != HEADER_SIZE )
{
fileChannel.close();
throw new InvalidIdGeneratorException(
"Unable to read header, bytes read: " + readPosition );
}
buffer.flip();
byte storageStatus = buffer.get();
if ( storageStatus != CLEAN_GENERATOR )
{
fileChannel.close();
throw new InvalidIdGeneratorException( "Sticky generator[ " +
fileName + "] delete this id file and build a new one" );
}
this.highId.set( buffer.getLong() );
}
// Reads the next batch of defragged ids from file into idsReadFromFile.
private void readIdBatch()
{
if ( !canReadMoreIdBatches() )
return;
try
{
int howMuchToRead = (int) Math.min( grabSize*8, maxReadPosition-readPosition );
ByteBuffer readBuffer = ByteBuffer.allocate( howMuchToRead );
fileChannel.position( readPosition );
int bytesRead = fileChannel.read( readBuffer );
assert fileChannel.position() <= maxReadPosition;
readPosition += bytesRead;
readBuffer.flip();
assert (bytesRead % 8) == 0;
int idsRead = bytesRead / 8;
// NOTE(review): defraggedIdCount is decremented here for every id moved
// into memory and again in nextIdFromDefragList() when the id is handed
// out — confirm this double accounting is what getNumberOfIdsInUse()
// expects.
defraggedIdCount -= idsRead;
for ( int i = 0; i < idsRead; i++ )
{
long id = readBuffer.getLong();
if ( id != INTEGER_MINUS_ONE )
{
idsReadFromFile.add( id );
}
}
}
catch ( IOException e )
{
throw new UnderlyingStorageException(
"Failed reading defragged id batch", e );
}
}
// writes a batch of defragged ids to file
private void writeIdBatch( ByteBuffer writeBuffer )
{
// position at end
try
{
fileChannel.position( fileChannel.size() );
writeBuffer.clear();
while ( !releasedIdList.isEmpty() )
{
long id = releasedIdList.removeFirst();
if ( id == INTEGER_MINUS_ONE )
{
continue;
}
writeBuffer.putLong( id );
if ( writeBuffer.position() == writeBuffer.capacity() )
{
writeBuffer.flip();
while ( writeBuffer.hasRemaining() )
{
fileChannel.write( writeBuffer );
}
writeBuffer.clear();
}
}
writeBuffer.flip();
while ( writeBuffer.hasRemaining() )
{
fileChannel.write( writeBuffer );
}
// position for next readIdBatch
fileChannel.position( readPosition );
// only an aggressively reusing generator may read back ids written this session
if ( aggressiveReuse )
maxReadPosition = fileChannel.size();
}
catch ( IOException e )
{
throw new UnderlyingStorageException(
"Unable to write defragged id " + " batch", e );
}
}
/**
 * Utility method that will dump all defragged id's and the "high id" to
 * console. Do not call while running store using this id generator since it
 * could corrupt the id generator (not thread safe). This method will close
 * the id generator after being invoked.
 */
public synchronized void dumpFreeIds()
{
while ( canReadMoreIdBatches() )
{
readIdBatch();
}
for ( Long id : idsReadFromFile )
{
System.out.print( " " + id );
}
System.out.println( "\nNext free id: " + highId );
close();
}
@Override
public synchronized long getNumberOfIdsInUse()
{
return highId.get() - defraggedIdCount;
}
@Override
public long getDefragCount()
{
return defraggedIdCount;
}
// Drops all reusable ids, both in memory and on disk, and resets the count
// to its pre-init sentinel (-1).
public void clearFreeIds()
{
releasedIdList.clear();
idsReadFromFile.clear();
defraggedIdCount = -1;
try
{
truncateFile( fileChannel, HEADER_SIZE );
}
catch ( IOException e )
{
throw new RuntimeException( e );
}
}
@Override
public void delete()
{
if ( highId.get() != -1 )
{
throw new RuntimeException( "Must be closed to delete" );
}
if ( !fs.deleteFile( fileName ) )
{
throw new UnderlyingStorageException( "Unable to delete id generator " + fileName );
}
}
@Override
public String toString()
{
return "IdGeneratorImpl " + hashCode() + " [highId=" + highId + ", defragged=" + defraggedIdCount + ", fileName="
+ fileName + ", max=" + max + ", aggressive=" + aggressiveReuse + "]";
}
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_store_IdGeneratorImpl.java
|
1,439
|
/**
 * Lock implemented with a dedicated "lock" file plus a
 * {@link java.nio.channels.FileLock} on its channel. Release unlocks the
 * channel, closes it and deletes the lock file; the nested finally blocks
 * ensure each later step runs even if an earlier one throws.
 */
private static class WindowsFileLock extends FileLock
{
private final File lockFile;
private final FileChannel fileChannel;
private final java.nio.channels.FileLock fileChannelLock;
public WindowsFileLock( File lockFile, FileChannel fileChannel, java.nio.channels.FileLock lock )
{
this.lockFile = lockFile;
this.fileChannel = fileChannel;
this.fileChannelLock = lock;
}
@Override
public void release() throws IOException
{
try
{
fileChannelLock.release();
}
finally
{
try
{
fileChannel.close();
}
finally
{
if ( !lockFile.delete() )
{
throw new IOException( "Couldn't delete lock file " + lockFile.getAbsolutePath() );
}
}
}
}
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_store_FileLock.java
|
1,440
|
/**
 * No-op lock, handed out where a real per-file lock is unnecessary (on
 * Windows the single top-level store lock covers the individual stores).
 */
private static class PlaceboFileLock extends FileLock
{
@Override
public void release() throws IOException
{
// nothing was locked, so nothing to release
}
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_store_FileLock.java
|
1,441
|
/**
 * Composite lock holding both the regular store-file lock and the extra
 * lock-file based lock on the store directory.
 */
private static class DoubleFileLock extends FileLock
{
    private final FileLock regular;
    private final FileLock extra;

    DoubleFileLock( FileLock regular, FileLock extra )
    {
        this.regular = regular;
        this.extra = extra;
    }

    /**
     * Releases both underlying locks. The extra lock is released even if
     * releasing the regular lock throws, so a failure cannot leak the
     * directory lock.
     */
    @Override
    public void release() throws IOException
    {
        try
        {
            regular.release();
        }
        finally
        {
            extra.release();
        }
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_store_FileLock.java
|
1,442
|
{
@Override
public void release() throws IOException
{
lock.release();
}
};
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_store_FileLock.java
|
1,443
|
/**
 * OS specific file locking for store files. On Windows a dedicated "lock"
 * file is used instead of locking the store channels themselves; on other
 * platforms the store channel is locked directly, with an additional
 * lock-file based lock on the store directory for the neostore file.
 */
public abstract class FileLock
{
// Wraps a java.nio lock on the given channel, failing if already held.
private static FileLock wrapFileChannelLock( StoreChannel channel ) throws IOException
{
final java.nio.channels.FileLock lock = channel.tryLock();
if ( lock == null )
{
throw new IOException( "Unable to lock " + channel );
}
return new FileLock()
{
@Override
public void release() throws IOException
{
lock.release();
}
};
}
public static FileLock getOsSpecificFileLock( File fileName, StoreChannel channel )
throws IOException
{
if ( Settings.osIsWindows() )
{
/*
* We need to grab only one lock for the whole store. Even though every store will try to grab one
* we will honor only the top level, dedicated store lock. This has the benefit that older versions of
* Neo4j that do not have a dedicated locker still lock on the parent file of neostore so this will still
* block when new instances are started on top of in use older stores and vice versa.
*/
if ( fileName.getName().equals( StoreLocker.STORE_LOCK_FILENAME ) )
{
return getLockFileBasedFileLock( fileName.getParentFile() );
}
// For the rest just return placebo locks
return new PlaceboFileLock();
}
else if ( fileName.getName().equals( NeoStore.DEFAULT_NAME ) )
{
// Lock the file
FileLock regular = wrapFileChannelLock( channel );
// Lock the parent as well
boolean success = false;
try
{
FileLock extra = getLockFileBasedFileLock( fileName.getParentFile() );
success = true;
return new DoubleFileLock( regular, extra );
}
finally
{
if ( !success )
{ // The parent lock failed, so unlock the regular too
regular.release();
}
}
}
else
{
return wrapFileChannelLock( channel );
}
}
// Locks via a dedicated "lock" file in the given directory. Despite
// returning a WindowsFileLock, this scheme is also used on non-Windows
// platforms as the "extra" lock on the neostore parent directory.
private static FileLock getLockFileBasedFileLock( File storeDir ) throws IOException
{
File lockFile = new File( storeDir, "lock" );
if ( !lockFile.exists() )
{
if ( !lockFile.createNewFile() )
{
throw new IOException( "Couldn't create lock file " + lockFile.getAbsolutePath() );
}
}
FileChannel fileChannel = new RandomAccessFile( lockFile, "rw" ).getChannel();
java.nio.channels.FileLock fileChannelLock = null;
try
{
fileChannelLock = fileChannel.tryLock();
}
catch ( OverlappingFileLockException e )
{
// OK, let fileChannelLock continue to be null and we'll deal with it below
}
if ( fileChannelLock == null )
{
fileChannel.close();
throw new IOException( "Couldn't lock lock file " + lockFile.getAbsolutePath() +
" because another process already holds the lock." );
}
return new WindowsFileLock( lockFile, fileChannel, fileChannelLock );
}
public abstract void release() throws IOException;
/**
 * No-op lock, handed out where a real per-file lock is unnecessary.
 */
private static class PlaceboFileLock extends FileLock
{
@Override
public void release() throws IOException
{
}
}
/**
 * Composite of the regular store-file lock and the extra directory lock.
 */
private static class DoubleFileLock extends FileLock
{
private final FileLock regular;
private final FileLock extra;
DoubleFileLock( FileLock regular, FileLock extra )
{
this.regular = regular;
this.extra = extra;
}
@Override
public void release() throws IOException
{
// NOTE(review): if regular.release() throws, extra is never released —
// consider releasing it in a finally block.
regular.release();
extra.release();
}
}
/**
 * Lock backed by a dedicated "lock" file; release unlocks, closes the
 * channel and deletes the file, each step running even if a prior one throws.
 */
private static class WindowsFileLock extends FileLock
{
private final File lockFile;
private final FileChannel fileChannel;
private final java.nio.channels.FileLock fileChannelLock;
public WindowsFileLock( File lockFile, FileChannel fileChannel, java.nio.channels.FileLock lock )
{
this.lockFile = lockFile;
this.fileChannel = fileChannel;
this.fileChannelLock = lock;
}
@Override
public void release() throws IOException
{
try
{
fileChannelLock.release();
}
finally
{
try
{
fileChannel.close();
}
finally
{
if ( !lockFile.delete() )
{
throw new IOException( "Couldn't delete lock file " + lockFile.getAbsolutePath() );
}
}
}
}
}
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_store_FileLock.java
|
1,444
|
public class ExistingThenNewRecordAllocatorTest
{
    @Test
    public void shouldUseExistingRecordsThenAllocateNewOnes() throws Exception
    {
        // given: an id sequence that will hand out 3 and then 4
        IdSequence idSequence = mock( IdSequence.class );
        when( idSequence.nextId() ).thenReturn( 3L ).thenReturn( 4L );
        ExistingThenNewRecordAllocator allocator = new ExistingThenNewRecordAllocator(
                mock( DynamicBlockSize.class ), idSequence );
        Iterator<DynamicRecord> existing = asList( new DynamicRecord( 1 ), new DynamicRecord( 2 ) ).iterator();

        // when: four records are requested but only two existing ones are supplied
        DynamicRecord first = allocator.nextUsedRecordOrNew( existing );
        DynamicRecord second = allocator.nextUsedRecordOrNew( existing );
        DynamicRecord third = allocator.nextUsedRecordOrNew( existing );
        DynamicRecord fourth = allocator.nextUsedRecordOrNew( existing );

        // then: the existing ids come first, followed by ids from the sequence
        assertEquals( 1, first.getId() );
        assertEquals( 2, second.getId() );
        assertEquals( 3, third.getId() );
        assertEquals( 4, fourth.getId() );
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_nioneo_store_ExistingThenNewRecordAllocatorTest.java
|
1,445
|
/**
 * {@link DynamicRecordAllocator} that hands out records from a supplied
 * iterator of existing records first and, once that is exhausted, allocates
 * new records with ids from an {@link IdSequence}.
 */
class ExistingThenNewRecordAllocator implements DynamicRecordAllocator
{
    private final DynamicBlockSize blockSize;
    private final IdSequence idSequence;

    ExistingThenNewRecordAllocator( DynamicBlockSize blockSize, IdSequence idSequence )
    {
        this.blockSize = blockSize;
        this.idSequence = idSequence;
    }

    /**
     * Returns the next record from {@code recordsToUseFirst} if any remain
     * (marking it created if it wasn't previously in use), otherwise a brand
     * new record with the next id from the sequence. The returned record is
     * always marked as in use.
     */
    @Override
    public DynamicRecord nextUsedRecordOrNew( Iterator<DynamicRecord> recordsToUseFirst )
    {
        DynamicRecord record;
        if ( recordsToUseFirst.hasNext() )
        {
            record = recordsToUseFirst.next();
            if ( !record.inUse() )
            {
                record.setCreated();
            }
        }
        else
        {
            record = new DynamicRecord( idSequence.nextId() );
            record.setCreated();
        }
        record.setInUse( true );
        return record;
    }

    /**
     * Payload bytes available per block: the configured block size minus the
     * per-block header.
     */
    @Override
    public int dataSize()
    {
        return blockSize.getBlockSize() - AbstractDynamicStore.BLOCK_HEADER_SIZE;
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_store_ExistingThenNewRecordAllocator.java
|
1,446
|
/**
 * Dynamic store holding string property values that do not fit inline in a
 * property record.
 */
public class DynamicStringStore extends AbstractDynamicStore
{
// store version, each store ends with this string (byte encoded)
public static final String TYPE_DESCRIPTOR = "StringPropertyStore";
public static final String VERSION = buildTypeDescriptorAndVersion( TYPE_DESCRIPTOR );
public DynamicStringStore( File fileName, Config configuration, IdType idType,
IdGeneratorFactory idGeneratorFactory, WindowPoolFactory windowPoolFactory,
FileSystemAbstraction fileSystemAbstraction, StringLogger stringLogger)
{
super( fileName, configuration, idType, idGeneratorFactory, windowPoolFactory,
fileSystemAbstraction, stringLogger);
}
// Visitor dispatch: routes the record to the processor's string handler.
@Override
public <FAILURE extends Exception> void accept( RecordStore.Processor<FAILURE> processor, DynamicRecord record ) throws FAILURE
{
processor.processString( this, record, idType );
}
@Override
public String getTypeDescriptor()
{
return TYPE_DESCRIPTOR;
}
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_store_DynamicStringStore.java
|
1,447
|
/**
 * Record in a dynamic store: one block in a chain of blocks that together
 * hold a (potentially large) value such as a long string or array.
 * Note that {@link #equals(Object)} and {@link #hashCode()} are based on the
 * record id only, not on the payload.
 */
public class DynamicRecord extends Abstract64BitRecord
{
// limits for how much payload toString() renders verbatim
private static final int MAX_BYTES_IN_TO_STRING = 8, MAX_CHARS_IN_TO_STRING = 16;
// payload bytes; null means the record is "light" (data not loaded)
private byte[] data = null;
private int length;
private long nextBlock = Record.NO_NEXT_BLOCK.intValue();
private int type;
private boolean startRecord = true;
// Factory for a record with only id and in-use flag set.
public static DynamicRecord dynamicRecord( long id, boolean inUse )
{
DynamicRecord record = new DynamicRecord( id );
record.setInUse( inUse );
return record;
}
// Factory for a fully populated record.
public static DynamicRecord dynamicRecord( long id, boolean inUse, boolean isStartRecord, long nextBlock, int type,
byte [] data )
{
DynamicRecord record = new DynamicRecord( id );
record.setInUse( inUse );
record.setStartRecord( isStartRecord );
record.setNextBlock( nextBlock );
record.setType( type );
record.setData( data );
return record;
}
public DynamicRecord( long id )
{
super( id );
}
public void setStartRecord( boolean startRecord )
{
this.startRecord = startRecord;
}
/** @return true if this record is the first block of its chain. */
public boolean isStartRecord()
{
return startRecord;
}
public int getType()
{
return type;
}
public void setType( int type )
{
this.type = type;
}
/** @return true if the payload has not been loaded ({@code data == null}). */
public boolean isLight()
{
return data == null;
}
public void setLength( int length )
{
this.length = length;
}
@Override
public void setInUse( boolean inUse )
{
super.setInUse( inUse );
// a record taken out of use drops its payload
if ( !inUse )
{
data = null;
}
}
public void setInUse( boolean inUse, int type )
{
this.type = type;
this.setInUse( inUse );
}
// Sets the payload; length always tracks the data array's size.
public void setData( byte[] data )
{
this.length = data.length;
this.data = data;
}
public int getLength()
{
return length;
}
public byte[] getData()
{
return data;
}
public long getNextBlock()
{
return nextBlock;
}
public void setNextBlock( long nextBlock )
{
this.nextBlock = nextBlock;
}
@Override
public String toString()
{
StringBuilder buf = new StringBuilder();
buf.append( "DynamicRecord[" )
.append( getId() )
.append( ",used=" ).append(inUse() ).append( "," )
.append( "light=" ).append( isLight() )
.append("(" ).append( length ).append( "),type=" );
PropertyType type = PropertyType.getPropertyType( this.type << 24, true );
if ( type == null ) buf.append( this.type ); else buf.append( type.name() );
buf.append( ",data=" );
if ( data != null )
{
// short string payloads are decoded and printed verbatim
if ( type == PropertyType.STRING && data.length <= MAX_CHARS_IN_TO_STRING )
{
buf.append( '"' );
buf.append( PropertyStore.decodeString( data ) );
buf.append( "\"," );
}
else
{
buf.append( "byte[" );
if ( data.length <= MAX_BYTES_IN_TO_STRING )
{
for ( int i = 0; i < data.length; i++ )
{
if (i != 0) buf.append( ',' );
buf.append( data[i] );
}
}
else
{
buf.append( "size=" ).append( data.length );
}
buf.append( "]," );
}
}
else
{
buf.append( "null," );
}
buf.append( "start=" ).append( startRecord );
buf.append( ",next=" ).append( nextBlock ).append( "]" );
return buf.toString();
}
// Deep copy via manual field copy (does not use Object.clone()); the data
// array, when present, is cloned as well.
@Override
public DynamicRecord clone()
{
DynamicRecord result = new DynamicRecord( getLongId() );
if ( data != null )
result.data = data.clone();
result.setInUse( inUse() );
result.length = length;
result.nextBlock = nextBlock;
result.type = type;
result.startRecord = startRecord;
return result;
}
// Equality by record id only — payload and flags are ignored.
@Override
public boolean equals( Object obj )
{
if ( !( obj instanceof DynamicRecord ) )
return false;
return ((DynamicRecord) obj).getId() == getId();
}
@Override
public int hashCode()
{
long id = getId();
return (int) (( id >>> 32 ) ^ id );
}
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_store_DynamicRecord.java
|
1,448
|
public class DynamicArrayStore extends AbstractDynamicStore
{
static final int NUMBER_HEADER_SIZE = 3;
static final int STRING_HEADER_SIZE = 5;
// store version, each store ends with this string (byte encoded)
public static final String TYPE_DESCRIPTOR = "ArrayPropertyStore";
public static final String VERSION = buildTypeDescriptorAndVersion( TYPE_DESCRIPTOR );
public DynamicArrayStore(File fileName, Config configuration, IdType idType,
IdGeneratorFactory idGeneratorFactory, WindowPoolFactory windowPoolFactory,
FileSystemAbstraction fileSystemAbstraction, StringLogger stringLogger)
{
super( fileName, configuration, idType, idGeneratorFactory, windowPoolFactory,
fileSystemAbstraction, stringLogger);
}
@Override
public <FAILURE extends Exception> void accept( RecordStore.Processor<FAILURE> processor, DynamicRecord record ) throws FAILURE
{
processor.processArray( this, record );
}
@Override
public String getTypeDescriptor()
{
return TYPE_DESCRIPTOR;
}
public static Collection<DynamicRecord> allocateFromNumbers( Object array, Iterator<DynamicRecord> recordsToUseFirst,
DynamicRecordAllocator recordAllocator )
{
Class<?> componentType = array.getClass().getComponentType();
boolean isPrimitiveByteArray = componentType.equals( Byte.TYPE );
boolean isByteArray = componentType.equals( Byte.class ) || isPrimitiveByteArray;
ShortArray type = ShortArray.typeOf( array );
if ( type == null ) throw new IllegalArgumentException( array + " not a valid array type." );
int arrayLength = Array.getLength( array );
int requiredBits = isByteArray ? Byte.SIZE : type.calculateRequiredBitsForArray( array, arrayLength);
int totalBits = requiredBits*arrayLength;
int numberOfBytes = (totalBits-1)/8+1;
int bitsUsedInLastByte = totalBits%8;
bitsUsedInLastByte = bitsUsedInLastByte == 0 ? 8 : bitsUsedInLastByte;
numberOfBytes += NUMBER_HEADER_SIZE; // type + rest + requiredBits header. TODO no need to use full bytes
byte[] bytes;
if ( isByteArray )
{
bytes = new byte[NUMBER_HEADER_SIZE+ arrayLength];
bytes[0] = (byte) type.intValue();
bytes[1] = (byte) bitsUsedInLastByte;
bytes[2] = (byte) requiredBits;
if ( isPrimitiveByteArray ) arraycopy( array, 0, bytes, NUMBER_HEADER_SIZE, arrayLength );
else
{
Byte[] source = (Byte[]) array;
for ( int i = 0; i < source.length; i++ ) bytes[NUMBER_HEADER_SIZE+i] = source[i];
}
}
else
{
Bits bits = Bits.bits( numberOfBytes );
bits.put( (byte)type.intValue() );
bits.put( (byte)bitsUsedInLastByte );
bits.put( (byte)requiredBits );
type.writeAll(array, arrayLength,requiredBits,bits);
bytes = bits.asBytes();
}
return allocateRecordsFromBytes( bytes, recordsToUseFirst, recordAllocator );
}
private static Collection<DynamicRecord> allocateFromString( String[] array, Iterator<DynamicRecord> recordsToUseFirst,
DynamicRecordAllocator recordAllocator )
{
List<byte[]> stringsAsBytes = new ArrayList<>();
int totalBytesRequired = STRING_HEADER_SIZE; // 1b type + 4b array length
for ( String string : array )
{
byte[] bytes = PropertyStore.encodeString( string );
stringsAsBytes.add( bytes );
totalBytesRequired += 4/*byte[].length*/ + bytes.length;
}
ByteBuffer buf = ByteBuffer.allocate( totalBytesRequired );
buf.put( PropertyType.STRING.byteValue() );
buf.putInt( array.length );
for ( byte[] stringAsBytes : stringsAsBytes )
{
buf.putInt( stringAsBytes.length );
buf.put( stringAsBytes );
}
return allocateRecordsFromBytes( buf.array(), recordsToUseFirst, recordAllocator );
}
public Collection<DynamicRecord> allocateRecords( Object array )
{
return allocateRecords( array, Collections.<DynamicRecord>emptyList().iterator() );
}
public Collection<DynamicRecord> allocateRecords( Object array, Iterator<DynamicRecord> recordsToUseFirst )
{
if ( !array.getClass().isArray() )
{
throw new IllegalArgumentException( array + " not an array" );
}
Class<?> type = array.getClass().getComponentType();
if ( type.equals( String.class ) )
{
return allocateFromString( (String[]) array, recordsToUseFirst, recordAllocator );
}
else
{
return allocateFromNumbers( array, recordsToUseFirst, recordAllocator );
}
}
/**
 * Decodes a dynamic-record payload back into the Java array it was stored from.
 *
 * @param data pair of (header bytes, payload bytes) as produced by the allocate methods:
 *             the header carries the type id plus type-specific metadata, the payload the raw data.
 * @return a {@code String[]} for string arrays, otherwise an array created by the
 *         matching {@code ShortArray} type (or the raw byte payload for full-width byte arrays).
 */
public static Object getRightArray( Pair<byte[],byte[]> data )
{
    byte[] header = data.first();
    byte[] bArray = data.other();
    // First header byte identifies the stored property type (written by the allocate methods).
    byte typeId = header[0];
    if ( typeId == PropertyType.STRING.intValue() )
    {
        ByteBuffer headerBuffer = ByteBuffer.wrap( header, 1/*skip the type*/, header.length-1 );
        int arrayLength = headerBuffer.getInt();
        String[] result = new String[arrayLength];
        ByteBuffer dataBuffer = ByteBuffer.wrap( bArray );
        // Each element was stored as a 4-byte length followed by its encoded bytes.
        for ( int i = 0; i < arrayLength; i++ )
        {
            int byteLength = dataBuffer.getInt();
            byte[] stringByteArray = new byte[byteLength];
            dataBuffer.get( stringByteArray );
            result[i] = PropertyStore.decodeString( stringByteArray );
        }
        return result;
    }
    else
    {
        ShortArray type = ShortArray.typeOf( typeId );
        // Number header bytes: [1] = bits used in the last payload byte, [2] = bits per element.
        int bitsUsedInLastByte = header[1];
        int requiredBits = header[2];
        if ( requiredBits == 0 )
            return type.createEmptyArray();
        Object result;
        if ( type == ShortArray.BYTE && requiredBits == Byte.SIZE )
        { // Optimization for byte arrays (probably large ones)
            result = bArray;
        }
        else
        { // Fallback to the generic approach, which is a slower
            Bits bits = Bits.bitsFromBytes( bArray );
            // Element count = total payload bits minus the unused tail of the last byte,
            // divided by the per-element bit width.
            int length = (bArray.length*8-(8-bitsUsedInLastByte))/requiredBits;
            result = type.createArray(length, bits, requiredBits);
        }
        return result;
    }
}
/**
 * Reads the full byte payload from the given dynamic records and decodes it into
 * the array value it represents.
 */
public Object getArrayFor( Iterable<DynamicRecord> records )
{
    Pair<byte[], byte[]> headerAndData = readFullByteArray( records, PropertyType.ARRAY );
    return getRightArray( headerAndData );
}
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_store_DynamicArrayStore.java
|
1,449
|
/**
 * A persistence window backed by an off-heap (direct) {@link ByteBuffer}
 * instead of a memory-mapped file region.
 */
class DirectPersistenceWindow extends AbstractPersistenceWindow
{
    DirectPersistenceWindow( long position, int recordSize, int totalSize,
        StoreChannel channel )
    {
        // allocateDirect keeps the window's buffer outside the Java heap.
        super( position, recordSize, totalSize, channel,
            ByteBuffer.allocateDirect( totalSize ) );
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_store_DirectPersistenceWindow.java
|
1,450
|
/**
 * A {@link RecordStore} that forwards every call verbatim to a delegate store.
 * Subclass this to intercept or augment individual operations without having to
 * re-implement the whole interface.
 * <p>
 * Fix: several delegating methods were missing {@code @Override} while their
 * siblings carried it; annotations added for consistency and compile-time safety.
 *
 * @param <R> the record type handled by the delegate store.
 */
public class DelegatingRecordStore<R extends AbstractBaseRecord> implements RecordStore<R>
{
    private final RecordStore<R> delegate;

    public DelegatingRecordStore( RecordStore<R> delegate )
    {
        this.delegate = delegate;
    }

    @Override
    public String toString()
    {
        return delegate.toString();
    }

    @Override
    public File getStorageFileName()
    {
        return delegate.getStorageFileName();
    }

    @Override
    public WindowPoolStats getWindowPoolStats()
    {
        return delegate.getWindowPoolStats();
    }

    @Override
    public long getHighId()
    {
        return delegate.getHighId();
    }

    @Override
    public long getHighestPossibleIdInUse()
    {
        return delegate.getHighestPossibleIdInUse();
    }

    @Override
    public long nextId()
    {
        return delegate.nextId();
    }

    @Override
    public R getRecord( long id )
    {
        return delegate.getRecord( id );
    }

    @Override
    public Long getNextRecordReference( R record )
    {
        return delegate.getNextRecordReference( record );
    }

    @Override
    public Collection<R> getRecords( long id )
    {
        return delegate.getRecords( id );
    }

    @Override
    public void updateRecord( R record )
    {
        delegate.updateRecord( record );
    }

    @Override
    public R forceGetRecord( long id )
    {
        return delegate.forceGetRecord( id );
    }

    @Override
    public R forceGetRaw( R record )
    {
        return delegate.forceGetRaw( record );
    }

    @Override
    public R forceGetRaw( long id )
    {
        return delegate.forceGetRaw( id );
    }

    @Override
    public void forceUpdateRecord( R record )
    {
        delegate.forceUpdateRecord( record );
    }

    @Override
    public <FAILURE extends Exception> void accept( Processor<FAILURE> processor, R record ) throws FAILURE
    {
        delegate.accept( processor, record );
    }

    @Override
    public int getRecordSize()
    {
        return delegate.getRecordSize();
    }

    @Override
    public int getRecordHeaderSize()
    {
        return delegate.getRecordHeaderSize();
    }

    @Override
    public void close()
    {
        delegate.close();
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_store_DelegatingRecordStore.java
|
1,451
|
/**
 * Default {@link WindowPoolFactory}: builds a {@link PersistenceWindowPool} configured
 * from the per-store memory-mapping settings.
 */
public class DefaultWindowPoolFactory implements WindowPoolFactory
{
    @Override
    public WindowPool create( File storageFileName, int recordSize, StoreChannel fileChannel, Config configuration,
                              StringLogger log )
    {
        return new PersistenceWindowPool( storageFileName, recordSize, fileChannel,
                calculateMappedMemory( configuration, storageFileName ),
                configuration.get( CommonAbstractStore.Configuration.use_memory_mapped_buffers ),
                // A pure read-only pool is only used when we are not a backup slave.
                isReadOnly( configuration ) && !isBackupSlave( configuration ),
                new ConcurrentHashMap<Long, PersistenceRow>(), BrickElementFactory.DEFAULT, log );
    }

    private boolean isBackupSlave( Config configuration )
    {
        return configuration.get( CommonAbstractStore.Configuration.backup_slave );
    }

    private boolean isReadOnly( Config configuration )
    {
        return configuration.get( CommonAbstractStore.Configuration.read_only );
    }

    /**
     * Returns the number of bytes assigned for memory-mapped windows for the given store
     * file, or {@code 0} when nothing is configured for it.
     *
     * @param config configuration to look the setting up in.
     * @param storageFileName name of the store file on disk.
     * @return configured mapped-memory size in bytes, defaulting to zero.
     */
    private long calculateMappedMemory( Config config, File storageFileName )
    {
        Long configured = config.get( memoryMappingSetting( storageFileName.getName() ) );
        return configured == null ? 0L : configured;
    }

    /** The per-store-file setting controlling how much memory may be used for mapped windows. */
    public static Setting<Long> memoryMappingSetting( String fileName )
    {
        return setting( fileName + ".mapped_memory", Settings.BYTES, Settings.NO_DEFAULT );
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_store_DefaultWindowPoolFactory.java
|
1,452
|
/**
 * Thrown when stored data is detected to be inconsistent. An {@link Error} rather than
 * an exception, since callers are not expected to recover from it.
 */
public class DataInconsistencyError extends Error
{
    public DataInconsistencyError( String message )
    {
        super( message );
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_store_DataInconsistencyError.java
|
1,453
|
/**
 * Settings relevant to all stores; aliases into the central configuration classes
 * so store code only needs to reference this one namespace.
 */
public static abstract class Configuration
{
    public static final Setting<File> store_dir = InternalAbstractGraphDatabase.Configuration.store_dir;
    public static final Setting<File> neo_store = InternalAbstractGraphDatabase.Configuration.neo_store;
    public static final Setting<Boolean> read_only = GraphDatabaseSettings.read_only;
    public static final Setting<Boolean> backup_slave = GraphDatabaseSettings.backup_slave;
    public static final Setting<Boolean> use_memory_mapped_buffers = GraphDatabaseSettings.use_memory_mapped_buffers;
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_store_CommonAbstractStore.java
|
1,454
|
/**
 * Base class for all record stores. Owns the store file channel and file lock, the
 * {@link IdGenerator} for the store, and the {@link WindowPool} used to access records,
 * and implements the common open/validate/close lifecycle. Subclasses supply the
 * record-size, type-descriptor and id-rebuild specifics.
 */
public abstract class CommonAbstractStore implements IdSequence
{
    /** Settings relevant to all stores, aliased from the central configuration classes. */
    public static abstract class Configuration
    {
        public static final Setting<File> store_dir = InternalAbstractGraphDatabase.Configuration.store_dir;
        public static final Setting<File> neo_store = InternalAbstractGraphDatabase.Configuration.neo_store;
        public static final Setting<Boolean> read_only = GraphDatabaseSettings.read_only;
        public static final Setting<Boolean> backup_slave = GraphDatabaseSettings.backup_slave;
        public static final Setting<Boolean> use_memory_mapped_buffers = GraphDatabaseSettings.use_memory_mapped_buffers;
    }
    // Version string appended to every store file's type descriptor.
    public static final String ALL_STORES_VERSION = "v0.A.1";
    // NOTE(review): "Uknown" is misspelled, but this literal may be compared or persisted
    // verbatim elsewhere - do not change it without checking all readers.
    public static final String UNKNOWN_VERSION = "Uknown";
    protected Config configuration;
    private final IdGeneratorFactory idGeneratorFactory;
    private final WindowPoolFactory windowPoolFactory;
    protected FileSystemAbstraction fileSystemAbstraction;
    protected final File storageFileName;
    protected final IdType idType;
    protected StringLogger stringLogger;
    private IdGenerator idGenerator = null;
    private StoreChannel fileChannel = null;
    private WindowPool windowPool;
    // Set to false when an unclean shutdown is detected; makeStoreOk() resets it.
    private boolean storeOk = true;
    private Throwable causeOfStoreNotOk;
    private FileLock fileLock;
    private boolean readOnly = false;
    private boolean backupSlave = false;
    // Highest record id seen via registerIdFromUpdateRecord() during recovery, -1 if none.
    private long highestUpdateRecordId = -1;
    /**
     * Opens and validates the store contained in <CODE>fileName</CODE>
     * loading any configuration defined in <CODE>config</CODE>. After
     * validation the <CODE>initStorage</CODE> method is called.
     * <p>
     * If the store had a clean shutdown it will be marked as <CODE>ok</CODE>
     * and the {@link #getStoreOk()} method will return true.
     * If a problem was found when opening the store the {@link #makeStoreOk()}
     * must be invoked.
     *
     * throws IOException if the unable to open the storage or if the
     * <CODE>initStorage</CODE> method fails
     *
     * @param idType The Id used to index into this store
     */
    public CommonAbstractStore( File fileName, Config configuration, IdType idType,
                                IdGeneratorFactory idGeneratorFactory, WindowPoolFactory windowPoolFactory,
                                FileSystemAbstraction fileSystemAbstraction, StringLogger stringLogger )
    {
        this.storageFileName = fileName;
        this.configuration = configuration;
        this.idGeneratorFactory = idGeneratorFactory;
        this.windowPoolFactory = windowPoolFactory;
        this.fileSystemAbstraction = fileSystemAbstraction;
        this.idType = idType;
        this.stringLogger = stringLogger;
        try
        {
            checkStorage();
            checkVersion(); // Overriden in NeoStore
            loadStorage();
        }
        catch ( Exception e )
        {
            // Don't leave the file locked/open if construction fails part-way through.
            releaseFileLockAndCloseFileChannel();
            throw launderedException( e );
        }
    }
    public String getTypeAndVersionDescriptor()
    {
        return buildTypeDescriptorAndVersion( getTypeDescriptor() );
    }
    public static String buildTypeDescriptorAndVersion( String typeDescriptor )
    {
        return typeDescriptor + " " + ALL_STORES_VERSION;
    }
    // Combines a 32-bit base with high-order modifier bits into a long reference,
    // treating the special "integer minus one" encoding (with no modifier) as -1.
    protected long longFromIntAndMod( long base, long modifier )
    {
        return modifier == 0 && base == IdGeneratorImpl.INTEGER_MINUS_ONE ? -1 : base | modifier;
    }
    /**
     * Returns the type and version that identifies this store.
     *
     * @return This store's implementation type and version identifier
     */
    public abstract String getTypeDescriptor();
    // Opens the store file (read-only if configured) and takes the file lock
    // that guards against concurrent kernels using the same store.
    protected void checkStorage()
    {
        readOnly = configuration.get( Configuration.read_only );
        backupSlave = configuration.get( Configuration.backup_slave );
        if ( !fileSystemAbstraction.fileExists( storageFileName ) )
        {
            throw new StoreNotFoundException( "No such store[" + storageFileName + "] in " + fileSystemAbstraction );
        }
        try
        {
            this.fileChannel = fileSystemAbstraction.open( storageFileName, readOnly ? "r" : "rw" );
        }
        catch ( IOException e )
        {
            throw new UnderlyingStorageException( "Unable to open file " + storageFileName, e );
        }
        try
        {
            // Backup slaves lock the store even though they open it read-only.
            if ( !readOnly || backupSlave )
            {
                this.fileLock = fileSystemAbstraction.tryLock( storageFileName, fileChannel );
            }
        }
        catch ( IOException e )
        {
            throw new UnderlyingStorageException( "Unable to lock store[" + storageFileName + "]", e );
        }
        catch ( OverlappingFileLockException e )
        {
            throw new IllegalStateException( "Unable to lock store [" + storageFileName +
                    "], this is usually caused by another Neo4j kernel already running in " +
                    "this JVM for this particular store" );
        }
    }
    // Verifies the trailing type/version descriptor of the store file.
    protected void checkVersion()
    {
        try
        {
            verifyCorrectTypeDescriptorAndVersion();
        }
        catch ( IOException e )
        {
            throw new UnderlyingStorageException( "Unable to check version " + getStorageFileName(), e );
        }
    }
    /**
     * Should do first validation on store validating stuff like version and id
     * generator. This method is called by constructors.
     */
    protected void loadStorage()
    {
        try
        {
            readAndVerifyBlockSize();
            verifyFileSizeAndTruncate();
        }
        catch ( IOException e )
        {
            throw new UnderlyingStorageException( "Unable to load storage " + getStorageFileName(), e );
        }
        loadIdGenerator();
        this.windowPool = windowPoolFactory.create( getStorageFileName(), getEffectiveRecordSize(),
                getFileChannel(), configuration, stringLogger );
    }
    protected abstract int getEffectiveRecordSize();
    protected abstract void verifyFileSizeAndTruncate() throws IOException;
    protected abstract void readAndVerifyBlockSize() throws IOException;
    // Opens a writable or read-only id generator as appropriate; an invalid id file
    // marks the store "not ok" (recoverable via makeStoreOk()) instead of failing hard.
    private void loadIdGenerator()
    {
        try
        {
            if ( !isReadOnly() || isBackupSlave() )
            {
                openIdGenerator();
            }
            else
            {
                openReadOnlyIdGenerator( getEffectiveRecordSize() );
            }
        }
        catch ( InvalidIdGeneratorException e )
        {
            setStoreNotOk( e );
        }
        finally
        {
            if ( !getStoreOk() )
            {
                if ( stringLogger != null )
                {
                    stringLogger.logMessage( getStorageFileName() + " non clean shutdown detected", true );
                }
            }
        }
    }
    // Reads the descriptor written at the end of the file on clean shutdown and compares
    // it against what this store implementation expects. Mismatches either signal a store
    // from another version (upgrade path) or an unclean shutdown (store marked not ok).
    protected void verifyCorrectTypeDescriptorAndVersion() throws IOException
    {
        String expectedTypeDescriptorAndVersion = getTypeAndVersionDescriptor();
        int length = UTF8.encode( expectedTypeDescriptorAndVersion ).length;
        byte bytes[] = new byte[length];
        ByteBuffer buffer = ByteBuffer.wrap( bytes );
        long fileSize = getFileChannel().size();
        if ( fileSize >= length )
        {
            getFileChannel().position( fileSize - length );
        }
        else if ( !isReadOnly() )
        {
            setStoreNotOk( new IllegalStateException(
                    "Invalid file size " + fileSize + " for " + this + ". Expected " + length + " or bigger" ) );
            return;
        }
        getFileChannel().read( buffer );
        String foundTypeDescriptorAndVersion = UTF8.decode( bytes );
        if ( !expectedTypeDescriptorAndVersion.equals( foundTypeDescriptorAndVersion ) && !isReadOnly() )
        {
            // Same store type but different version -> candidate for upgrade.
            if ( foundTypeDescriptorAndVersion.startsWith( getTypeDescriptor() ) )
            {
                throw new NotCurrentStoreVersionException( ALL_STORES_VERSION, foundTypeDescriptorAndVersion, "",
                        false );
            }
            else
            {
                setStoreNotOk( new IllegalStateException(
                        "Unexpected version " + foundTypeDescriptorAndVersion + ", expected " +
                                expectedTypeDescriptorAndVersion ) );
            }
        }
    }
    /** Should rebuild the id generator from scratch. */
    protected abstract void rebuildIdGenerator();
    /**
     * This method should close/release all resources that the implementation of
     * this store has allocated and is called just before the <CODE>close()</CODE>
     * method returns. Override this method to clean up stuff the constructor.
     * <p>
     * This default implementation does nothing.
     */
    protected void closeStorage()
    {
    }
    boolean isReadOnly()
    {
        return readOnly;
    }
    boolean isBackupSlave()
    {
        return backupSlave;
    }
    /**
     * Marks this store as "not ok".
     */
    protected void setStoreNotOk( Throwable cause )
    {
        // A read-only (non backup-slave) store cannot be repaired, so refuse to continue.
        if ( readOnly && !isBackupSlave() )
        {
            throw new UnderlyingStorageException(
                    "Cannot start up on non clean store as read only" );
        }
        storeOk = false;
        causeOfStoreNotOk = cause;
    }
    /**
     * If store is "not ok" <CODE>false</CODE> is returned.
     *
     * @return True if this store is ok
     */
    protected boolean getStoreOk()
    {
        return storeOk;
    }
    /**
     * Returns the next id for this store's {@link IdGenerator}.
     *
     * @return The next free id
     */
    @Override public long nextId()
    {
        return idGenerator.nextId();
    }
    /**
     * Frees an id for this store's {@link IdGenerator}.
     *
     * @param id The id to free
     */
    public void freeId( long id )
    {
        idGenerator.freeId( id );
    }
    /**
     * Return the highest id in use.
     *
     * @return The highest id in use.
     */
    public long getHighId()
    {
        long genHighId = idGenerator != null ? idGenerator.getHighId() : -1;
        // During recovery, record updates may have pushed the high id past the generator's view.
        long updateHighId = highestUpdateRecordId;
        if ( updateHighId > genHighId )
        {
            return updateHighId;
        }
        return genHighId;
    }
    /**
     * Sets the highest id in use (use this when rebuilding id generator).
     *
     * @param highId The high id to set.
     */
    public void setHighId( long highId )
    {
        if ( idGenerator != null )
        {
            idGenerator.setHighId( highId );
        }
    }
    /**
     * If store is not ok a call to this method will rebuild the {@link
     * IdGenerator} used by this store and if successful mark it as
     * <CODE>ok</CODE>.
     */
    public void makeStoreOk()
    {
        if ( !storeOk )
        {
            if ( readOnly && !backupSlave )
            {
                throw new ReadOnlyDbException();
            }
            rebuildIdGenerator();
            storeOk = true;
            causeOfStoreNotOk = null;
        }
    }
    // Unconditional variant of makeStoreOk()'s rebuild, used when the id files must be
    // regenerated regardless of the store-ok flag.
    public void rebuildIdGenerators()
    {
        if ( readOnly && !backupSlave )
        {
            throw new ReadOnlyDbException();
        }
        rebuildIdGenerator();
    }
    /**
     * @return the store directory from config.
     */
    protected File getStoreDir()
    {
        return configuration.get( Configuration.store_dir );
    }
    /**
     * Acquires a {@link PersistenceWindow} for <CODE>position</CODE> and
     * operation <CODE>type</CODE>. Window must be released after operation
     * has been performed via {@link #releaseWindow(PersistenceWindow)}.
     *
     * @param position The record position
     * @param type The operation type
     * @return a persistence window encapsulating the record
     */
    protected PersistenceWindow acquireWindow( long position, OperationType type )
    {
        // Outside recovery we refuse access past the known high id or on a broken store.
        if ( !isInRecoveryMode() && (position > getHighId() || !storeOk) )
        {
            throw new InvalidRecordException(
                    "Position[" + position + "] requested for high id[" + getHighId() + "], store is ok[" + storeOk +
                            "] recovery[" + isInRecoveryMode() + "]", causeOfStoreNotOk );
        }
        return windowPool.acquire( position, type );
    }
    /**
     * Releases the window and writes the data (async) if the
     * <CODE>window</CODE> was a {@link PersistenceRow}.
     *
     * @param window The window to be released
     */
    protected void releaseWindow( PersistenceWindow window )
    {
        windowPool.release( window );
    }
    public void flushAll()
    {
        windowPool.flushAll();
    }
    // True while recovery is being applied; relaxes high-id/store-ok checks above.
    private boolean isRecovered = false;
    public boolean isInRecoveryMode()
    {
        return isRecovered;
    }
    protected void setRecovered()
    {
        isRecovered = true;
    }
    protected void unsetRecovered()
    {
        isRecovered = false;
    }
    /**
     * Returns the name of this store.
     *
     * @return The name of this store
     */
    public File getStorageFileName()
    {
        return storageFileName;
    }
    /** Opens the {@link IdGenerator} used by this store. */
    protected void openIdGenerator()
    {
        idGenerator = openIdGenerator( new File( storageFileName.getPath() + ".id" ), idType.getGrabSize() );
        /* MP: 2011-11-23
         * There may have been some migration done in the startup process, so if there have been some
         * high id registered during, then update id generators. updateHighId does nothing if
         * not registerIdFromUpdateRecord have been called.
         */
        updateHighId();
    }
    protected IdGenerator openIdGenerator( File fileName, int grabSize )
    {
        return idGeneratorFactory
                .open( fileSystemAbstraction, fileName, grabSize, getIdType(), figureOutHighestIdInUse() );
    }
    protected abstract long figureOutHighestIdInUse();
    protected void createIdGenerator( File fileName )
    {
        idGeneratorFactory.create( fileSystemAbstraction, fileName, 0 );
    }
    // Read-only stores derive the high id from the file size rather than the .id file.
    protected void openReadOnlyIdGenerator( int recordSize )
    {
        try
        {
            idGenerator = new ReadOnlyIdGenerator( storageFileName + ".id",
                    fileChannel.size() / recordSize );
        }
        catch ( IOException e )
        {
            throw new UnderlyingStorageException( e );
        }
    }
    /** Closed the {@link IdGenerator} used by this store */
    protected void closeIdGenerator()
    {
        if ( idGenerator != null )
        {
            idGenerator.close();
        }
    }
    /**
     * Closes this store. This will cause all buffers and channels to be closed.
     * Requesting an operation from after this method has been invoked is
     * illegal and an exception will be thrown.
     * <p>
     * This method will start by invoking the {@link #closeStorage} method
     * giving the implementing store way to do anything that it needs to do
     * before the fileChannel is closed.
     */
    public void close()
    {
        if ( fileChannel == null )
        {
            return;
        }
        closeStorage();
        if ( windowPool != null )
        {
            windowPool.close();
            windowPool = null;
        }
        // Broken or read-only stores are closed without writing the clean-shutdown trailer.
        if ( (isReadOnly() && !isBackupSlave()) || idGenerator == null || !storeOk )
        {
            releaseFileLockAndCloseFileChannel();
            return;
        }
        long highId = idGenerator.getHighId();
        int recordSize = -1;
        if ( this instanceof AbstractDynamicStore )
        {
            recordSize = ((AbstractDynamicStore) this).getBlockSize();
        }
        else if ( this instanceof AbstractStore )
        {
            recordSize = ((AbstractStore) this).getRecordSize();
        }
        idGenerator.close();
        boolean success = false;
        IOException storedIoe = null;
        // hack for WINBLOWS
        // Windows may keep mapped buffers alive briefly; retry the trailer write a few
        // times, nudging the GC to unmap between attempts.
        if ( !readOnly || backupSlave )
        {
            for ( int i = 0; i < 10; i++ )
            {
                try
                {
                    // Write the type+version trailer right after the last record, then truncate.
                    fileChannel.position( highId * recordSize );
                    ByteBuffer buffer = ByteBuffer.wrap(
                            UTF8.encode( getTypeAndVersionDescriptor() ) );
                    fileChannel.write( buffer );
                    stringLogger.debug( "Closing " + storageFileName + ", truncating at " + fileChannel.position() +
                            " vs file size " + fileChannel.size() );
                    fileChannel.truncate( fileChannel.position() );
                    fileChannel.force( false );
                    releaseFileLockAndCloseFileChannel();
                    success = true;
                    break;
                }
                catch ( IOException e )
                {
                    storedIoe = e;
                    System.gc();
                }
            }
        }
        else
        {
            releaseFileLockAndCloseFileChannel();
            success = true;
        }
        if ( !success )
        {
            throw new UnderlyingStorageException( "Unable to close store "
                    + getStorageFileName(), storedIoe );
        }
    }
    // Best-effort release of the file lock and channel; failures are logged, not thrown.
    protected void releaseFileLockAndCloseFileChannel()
    {
        try
        {
            if ( fileLock != null )
            {
                fileLock.release();
            }
            if ( fileChannel != null )
            {
                fileChannel.close();
            }
        }
        catch ( IOException e )
        {
            stringLogger.warn( "Could not close [" + storageFileName + "]", e );
        }
        fileChannel = null;
    }
    /**
     * Returns a <CODE>StoreChannel</CODE> to this storage's file. If
     * <CODE>close()</CODE> method has been invoked <CODE>null</CODE> will be
     * returned.
     *
     * @return A file channel to this storage
     */
    protected final StoreChannel getFileChannel()
    {
        return fileChannel;
    }
    /** @return The highest possible id in use, -1 if no id in use. */
    public long getHighestPossibleIdInUse()
    {
        if ( idGenerator != null )
        {
            return idGenerator.getHighId() - 1;
        }
        else
        { // If we ask for this before we've recovered we can only make a best-effort guess
            // about the highest possible id in use.
            return figureOutHighestIdInUse();
        }
    }
    /** @return The total number of ids in use. */
    public long getNumberOfIdsInUse()
    {
        return idGenerator.getNumberOfIdsInUse();
    }
    public WindowPoolStats getWindowPoolStats()
    {
        return windowPool.getStats();
    }
    public IdType getIdType()
    {
        return idType;
    }
    // During recovery, records how far record updates have pushed the high id.
    protected void registerIdFromUpdateRecord( long id )
    {
        if ( isInRecoveryMode() )
        {
            highestUpdateRecordId = Math.max( highestUpdateRecordId, id + 1 );
        }
    }
    // Transfers any recovery-registered high id into the id generator, then resets it.
    protected void updateHighId()
    {
        long highId = highestUpdateRecordId;
        highestUpdateRecordId = -1;
        if ( highId > getHighId() )
        {
            setHighId( highId );
        }
    }
    public void logVersions( StringLogger.LineLogger logger )
    {
        logger.logLine( "  " + getTypeAndVersionDescriptor() );
    }
    public void logIdUsage( StringLogger.LineLogger lineLogger )
    {
        lineLogger.logLine( String.format( "  %s: used=%s high=%s",
                getTypeDescriptor(), getNumberOfIdsInUse(), getHighestPossibleIdInUse() ) );
    }
    @Override
    public String toString()
    {
        return getClass().getSimpleName();
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_store_CommonAbstractStore.java
|
1,455
|
/**
 * Task that blocks until its latch has been fully counted down, either by paired
 * {@code Signal} tasks sharing the same latch or by explicit {@link #release()} calls.
 */
public static class Await implements Task
{
    private final CountDownLatch latch;
    private Await( CountDownLatch latch )
    {
        this.latch = latch;
    }
    /** Creates a signal task that counts down the same latch this task waits on. */
    public Signal signal()
    {
        return new Signal( latch );
    }
    /** Counts the latch down once, typically from the controlling (test) thread. */
    public void release()
    {
        latch.countDown();
    }
    @Override
    public void perform() throws Exception
    {
        latch.await();
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_locking_ThreadRepository.java
|
1,456
|
{
    @Override
    public void evaluate() throws Throwable
    {
        // Fresh thread repository per test run; failures from the test body and from
        // spawned threads are collected together and reported as one.
        repository = new Repository( description );
        List<Throwable> failures = new ArrayList<>();
        try
        {
            base.evaluate();
        }
        catch ( Throwable failure )
        {
            failures.add( failure );
        }
        finally
        {
            // Always join/collect the spawned threads, even when the test itself failed.
            completeThreads( failures );
        }
        MultipleFailureException.assertEmpty( failures );
    }
};
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_locking_ThreadRepository.java
|
1,457
|
/**
 * JUnit {@link TestRule} for tests that spawn worker threads running small {@link Task}s.
 * Tracks every created thread and, after the test body finishes, joins them all (with a
 * timeout) and reports their failures together with the test's own.
 */
public class ThreadRepository implements TestRule
{
    /** A unit of work executed on a repository-managed thread. */
    public interface Task
    {
        void perform() throws Exception;
    }
    /** Read-only view of a managed thread, for inspecting its state from the test. */
    public interface ThreadInfo
    {
        StackTraceElement[] getStackTrace();
        Object blocker();
        Thread.State getState();
    }
    private Repository repository;
    // Per-thread join timeout applied when the rule completes.
    private final long timeout;
    private final TimeUnit unit;
    public ThreadRepository( long timeout, TimeUnit unit )
    {
        this.timeout = timeout;
        this.unit = unit;
    }
    /** Starts a new managed thread that performs the given tasks in order. */
    public ThreadInfo execute( Task... tasks )
    {
        return repository.createThread( null, tasks );
    }
    /** Starts a new managed thread with the given name suffix. */
    public ThreadInfo execute( String name, Task... tasks )
    {
        return repository.createThread( name, tasks );
    }
    /** Creates a signal/await pair over a fresh single-count latch. */
    public Signal signal()
    {
        return new Signal( new CountDownLatch( 1 ) );
    }
    public Await await()
    {
        return await( 1 );
    }
    /** Creates an await task that blocks until {@code events} signals have fired. */
    public Await await( int events )
    {
        return new Await( new CountDownLatch( events ) );
    }
    public Events events()
    {
        return new Events();
    }
    /** Task that counts its latch down when performed; pair with {@link Await}. */
    public static class Signal implements Task
    {
        private final CountDownLatch latch;
        private Signal( CountDownLatch latch )
        {
            this.latch = latch;
        }
        public Await await()
        {
            return new Await( latch );
        }
        /** Blocks the calling (test) thread until the latch is counted down. */
        public void awaitNow() throws InterruptedException
        {
            latch.await();
        }
        @Override
        public void perform() throws Exception
        {
            latch.countDown();
        }
    }
    /** Task that blocks until its latch reaches zero; pair with {@link Signal}. */
    public static class Await implements Task
    {
        private final CountDownLatch latch;
        private Await( CountDownLatch latch )
        {
            this.latch = latch;
        }
        public Signal signal()
        {
            return new Signal( latch );
        }
        public void release()
        {
            latch.countDown();
        }
        @Override
        public void perform() throws Exception
        {
            latch.await();
        }
    }
    /** Collects string events from multiple threads for order assertions. */
    public class Events
    {
        // Copy-on-write so concurrent trigger() calls from worker threads are safe.
        private final List<String> collected;
        private Events()
        {
            collected = new CopyOnWriteArrayList<>();
        }
        /** Returns a task that records {@code event} when performed. */
        public Task trigger( final String event )
        {
            return new Task()
            {
                @Override
                public void perform() throws Exception
                {
                    collected.add( event );
                }
            };
        }
        /** Joins all threads, then asserts the collected events match exactly, in order. */
        public void assertInOrder( String... events ) throws Exception
        {
            try
            {
                completeThreads();
            }
            catch ( Error | Exception ok )
            {
                // Errors and exceptions can be rethrown as-is; anything else is wrapped below.
                throw ok;
            }
            catch ( Throwable throwable )
            {
                throw new Exception( "Unexpected Throwable", throwable );
            }
            // Array is sized by the expectation; a shorter collection leaves trailing nulls,
            // a longer one yields a bigger array - either way assertArrayEquals fails.
            String[] actual = collected.toArray( new String[events.length] );
            assertArrayEquals( events, actual );
        }
        public List<String> snapshot()
        {
            return new ArrayList<>( collected );
        }
    }
    @Override
    public Statement apply( final Statement base, final Description description )
    {
        return new Statement()
        {
            @Override
            public void evaluate() throws Throwable
            {
                repository = new Repository( description );
                List<Throwable> failures = new ArrayList<>();
                try
                {
                    base.evaluate();
                }
                catch ( Throwable failure )
                {
                    failures.add( failure );
                }
                finally
                {
                    // Always join/collect spawned threads, even when the test body failed.
                    completeThreads( failures );
                }
                MultipleFailureException.assertEmpty( failures );
            }
        };
    }
    private void completeThreads() throws Throwable
    {
        List<Throwable> failures = new ArrayList<>();
        completeThreads( failures );
        MultipleFailureException.assertEmpty( failures );
    }
    private void completeThreads( List<Throwable> failures )
    {
        if ( repository != null )
        {
            repository.completeAll( failures );
        }
        repository = null;
    }
    /** Bookkeeping for the threads created during one test run. */
    private class Repository
    {
        private final Description description;
        private int i;
        private final List<TaskThread> threads = new ArrayList<>();
        Repository( Description description )
        {
            this.description = description;
        }
        synchronized TaskThread createThread( String name, Task[] tasks )
        {
            TaskThread thread = new TaskThread( nextName( name ), tasks );
            threads.add( thread );
            thread.start();
            return thread;
        }
        // Thread names: "<testMethod>-<n>" plus an optional ":<name>" suffix.
        private String nextName( String name )
        {
            return description.getMethodName() + "-" + (++i) + (name == null ? "" : (":" + name));
        }
        void completeAll( List<Throwable> failures )
        {
            for ( TaskThread thread : threads )
            {
                try
                {
                    thread.complete( failures, timeout, unit );
                }
                catch ( InterruptedException interrupted )
                {
                    failures.add( interrupted );
                }
            }
        }
    }
    /** Worker thread that runs its tasks sequentially and remembers the first failure. */
    private static class TaskThread extends Thread implements ThreadInfo
    {
        private final Task[] tasks;
        private Exception failure;
        TaskThread( String name, Task[] tasks )
        {
            super( name );
            this.tasks = tasks;
        }
        // Joins the thread (bounded); reports a still-running thread and any task failure.
        void complete( List<Throwable> failures, long timeout, TimeUnit unit ) throws InterruptedException
        {
            join( unit.toMillis( timeout ) );
            if ( isAlive() )
            {
                failures.add( new ThreadStillRunningException( this ) );
            }
            if ( failure != null )
            {
                failures.add( failure );
            }
        }
        @Override
        public void run()
        {
            try
            {
                for ( Task task : tasks )
                {
                    task.perform();
                }
            }
            catch ( Exception e )
            {
                // Remembered here and surfaced by complete() on the test thread.
                failure = e;
            }
        }
        @Override
        public Object blocker()
        {
            return getBlocker( this );
        }
    }
    /** Reported when a managed thread is still alive after the join timeout. */
    private static class ThreadStillRunningException extends Exception
    {
        ThreadStillRunningException( TaskThread thread )
        {
            super( '"' + thread.getName() + "\"; state=" + thread.getState() + "; blockedOn=" + thread.blocker() );
            // Adopt the stuck thread's stack trace so the report points at where it hangs.
            setStackTrace( thread.getStackTrace() );
        }
        @Override
        public synchronized Throwable fillInStackTrace()
        {
            // Our own construction-site stack is irrelevant; keep the adopted one.
            return this;
        }
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_locking_ThreadRepository.java
|
1,458
|
{
    @Override
    public void perform() throws Exception
    {
        // Task that releases the captured lock when executed on a worker thread.
        lock.release();
    }
};
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_locking_ReentrantLockServiceTest.java
|
1,459
|
/**
 * The {@code Lock} handed out to clients: remembers which entity was locked and the
 * implementation-specific handle, and releases at most once.
 */
private class LockReference extends Lock
{
    private final LockedEntity key;
    private HANDLE handle;

    LockReference( LockedEntity key, HANDLE handle )
    {
        this.key = key;
        this.handle = handle;
    }

    @Override
    public String toString()
    {
        StringBuilder description = new StringBuilder( key.getClass().getSimpleName() ).append( '[' );
        key.toString( description );
        if ( handle == null )
        {
            description.append( "; RELEASED" );
        }
        else
        {
            description.append( "; HELD_BY=" ).append( handle );
        }
        return description.append( ']' ).toString();
    }

    @Override
    public void release()
    {
        // Idempotent: only the first call reaches the lock service.
        if ( handle != null )
        {
            try
            {
                AbstractLockService.this.release( key, handle );
            }
            finally
            {
                handle = null;
            }
        }
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_locking_AbstractLockService.java
|
1,460
|
/**
 * Skeleton {@code LockService}: maps public lock requests onto key objects and delegates
 * acquisition/release to the subclass via an implementation-specific HANDLE.
 *
 * @param <HANDLE> token returned by {@link #acquire} and needed by {@link #release}.
 */
abstract class AbstractLockService<HANDLE> implements LockService
{
    @Override
    public Lock acquireNodeLock( long nodeId, LockType type )
    {
        return lock( new LockedNode( nodeId ) );
    }
    // Acquires immediately and wraps key+handle in a releasable Lock.
    private Lock lock( LockedEntity key )
    {
        return new LockReference( key, acquire( key ) );
    }
    /** Acquires the lock for {@code key}, returning the handle needed to release it. */
    protected abstract HANDLE acquire( LockedEntity key );
    /** Releases the lock previously acquired for {@code key} with {@code handle}. */
    protected abstract void release( LockedEntity key, HANDLE handle );
    /**
     * Identity of a locked entity; implementations must be proper value objects
     * (equals/hashCode) since they serve as lock-table keys.
     */
    protected static abstract class LockedEntity
    {
        private LockedEntity()
        {
            // all instances defined in this class
        }
        @Override
        public final String toString()
        {
            StringBuilder repr = new StringBuilder( getClass().getSimpleName() ).append( '[' );
            toString( repr );
            return repr.append( ']' ).toString();
        }
        // Subclasses append their identifying fields to the description.
        abstract void toString( StringBuilder repr );
        @Override
        public abstract int hashCode();
        @Override
        public abstract boolean equals( Object obj );
    }
    /** The Lock handed to clients; releases at most once (handle nulled afterwards). */
    private class LockReference extends Lock
    {
        private final LockedEntity key;
        private HANDLE handle;
        LockReference( LockedEntity key, HANDLE handle )
        {
            this.key = key;
            this.handle = handle;
        }
        @Override
        public String toString()
        {
            StringBuilder repr = new StringBuilder( key.getClass().getSimpleName() ).append( '[' );
            key.toString( repr );
            if ( handle != null )
            {
                repr.append( "; HELD_BY=" ).append( handle );
            }
            else
            {
                repr.append( "; RELEASED" );
            }
            return repr.append( ']' ).toString();
        }
        @Override
        public void release()
        {
            if ( handle == null )
            {
                return;
            }
            try
            {
                AbstractLockService.this.release( key, handle );
            }
            finally
            {
                // Null out even if release threw, so we never double-release.
                handle = null;
            }
        }
    }
    /** Lock key identifying a node by id. */
    static final class LockedNode extends LockedEntity
    {
        private final long nodeId;
        LockedNode( long nodeId )
        {
            this.nodeId = nodeId;
        }
        @Override
        void toString( StringBuilder repr )
        {
            repr.append( "id=" ).append( nodeId );
        }
        @Override
        public int hashCode()
        {
            // Standard long hash: xor of the two 32-bit halves.
            return (int) (nodeId ^ (nodeId >>> 32));
        }
        @Override
        public boolean equals( Object obj )
        {
            if ( obj instanceof LockedNode )
            {
                LockedNode that = (LockedNode) obj;
                return this.nodeId == that.nodeId;
            }
            return false;
        }
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_locking_AbstractLockService.java
|
1,461
|
/**
 * Tests for {@code IndexProviderStore}: persistence of version/last-committed-tx across
 * sessions, and the upgrade/downgrade rules around the stored index version.
 */
public class TestIndexProviderStore
{
    private File file;
    private FileSystemAbstraction fileSystem;
    @Before
    public void createStore()
    {
        file = new File( "target/test-data/index-provider-store" );
        fileSystem = new DefaultFileSystemAbstraction();
        // Create the parent directories, then remove the leaf so the store file itself
        // does not exist when each test starts.
        file.mkdirs();
        file.delete();
    }
    @Test
    public void lastCommitedTxGetsStoredBetweenSessions() throws Exception
    {
        IndexProviderStore store = new IndexProviderStore( file, fileSystem, 0, false );
        store.setVersion( 5 );
        store.setLastCommittedTx( 12 );
        store.close();
        // Reopen and verify both values survived the restart.
        store = new IndexProviderStore( file, fileSystem, 0, false );
        assertEquals( 5, store.getVersion() );
        assertEquals( 12, store.getLastCommittedTx() );
        store.close();
    }
    @Test
    public void shouldFailUpgradeIfNotAllowed()
    {
        // Create at 3.1, then reopen at the same version without upgrade permission: fine.
        IndexProviderStore store = new IndexProviderStore( file, fileSystem, versionStringToLong( "3.1" ), true );
        store.close();
        store = new IndexProviderStore( file, fileSystem, versionStringToLong( "3.1" ), false );
        store.close();
        try
        {
            // Opening at a newer version without allowing upgrade must be rejected.
            new IndexProviderStore( file, fileSystem, versionStringToLong( "3.5" ), false );
            fail( "Shouldn't be able to upgrade there" );
        }
        catch ( UpgradeNotAllowedByConfigurationException e )
        { // Good
        }
        // With upgrade allowed the store moves to the newer version.
        store = new IndexProviderStore( file, fileSystem, versionStringToLong( "3.5" ), true );
        assertEquals( "3.5", NeoStore.versionLongToString( store.getIndexVersion() ) );
        store.close();
    }
    @Test( expected = NotCurrentStoreVersionException.class )
    public void shouldFailToGoBackToOlderVersion() throws Exception
    {
        String newerVersion = "3.5";
        String olderVersion = "3.1";
        try
        {
            IndexProviderStore store = new IndexProviderStore( file, fileSystem, versionStringToLong( newerVersion ), true );
            store.close();
            // Downgrading must fail; the message should mention both versions.
            store = new IndexProviderStore( file, fileSystem, versionStringToLong( olderVersion ), false );
        }
        catch ( NotCurrentStoreVersionException e )
        {
            assertTrue( e.getMessage().contains( newerVersion ) );
            assertTrue( e.getMessage().contains( olderVersion ) );
            throw e;
        }
    }
    @Test( expected = NotCurrentStoreVersionException.class )
    public void shouldFailToGoBackToOlderVersionEvenIfAllowUpgrade() throws Exception
    {
        String newerVersion = "3.5";
        String olderVersion = "3.1";
        try
        {
            IndexProviderStore store = new IndexProviderStore( file, fileSystem, versionStringToLong( newerVersion ), true );
            store.close();
            // Even with allowUpgrade=true a downgrade is never permitted.
            store = new IndexProviderStore( file, fileSystem, versionStringToLong( olderVersion ), true );
        }
        catch ( NotCurrentStoreVersionException e )
        {
            assertTrue( e.getMessage().contains( newerVersion ) );
            assertTrue( e.getMessage().contains( olderVersion ) );
            throw e;
        }
    }
    @Test
    public void upgradeForMissingVersionRecord() throws Exception
    {
        // This was before 1.6.M02
        IndexProviderStore store = new IndexProviderStore( file, fileSystem, 0, false );
        store.close();
        // Truncate to 4 records of 8 bytes, simulating a store without the version record.
        FileUtils.truncateFile( file, 4*8 );
        try
        {
            store = new IndexProviderStore( file, fileSystem, 0, false );
            fail( "Should have thrown upgrade exception" );
        }
        catch ( UpgradeNotAllowedByConfigurationException e )
        { // Good
        }
        store = new IndexProviderStore( file, fileSystem, 0, true );
        store.close();
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_index_TestIndexProviderStore.java
|
1,462
|
public class TestIndexImplOnNeo
{
    @Rule
    public EphemeralFileSystemRule fs = new EphemeralFileSystemRule();

    private GraphDatabaseService db;

    /** Boots an impermanent database named "mydb" on the ephemeral file system. */
    @Before
    public void createDb() throws Exception
    {
        db = new TestGraphDatabaseFactory().setFileSystem( fs.get() ).newImpermanentDatabase( "mydb" );
    }

    /** Simulates a restart: shut the database down, then boot a fresh instance. */
    private void restartDb() throws Exception
    {
        shutdownDb();
        createDb();
    }

    @After
    public void shutdownDb() throws Exception
    {
        db.shutdown();
    }

    @Test
    public void createIndexWithProviderThatUsesNeoAsDataSource() throws Exception
    {
        String indexName = "inneo";
        assertFalse( indexExists( indexName ) );

        Map<String, String> config = stringMap( PROVIDER, "test-dummy-neo-index",
                "config1", "A value", "another config", "Another value" );

        Index<Node> index;
        Transaction tx = db.beginTx();
        try
        {
            index = db.index().forNodes( indexName, config );
            tx.success();
        }
        finally
        {
            tx.finish();
        }

        // The index, its configuration and its (empty) contents are visible right away...
        assertTrue( indexExists( indexName ) );
        assertEquals( config, db.index().getConfiguration( index ) );
        assertEquals( 0, count( (Iterable<Node>) index.get( "key", "something else" ) ) );

        // ...and the definition survives a database restart.
        restartDb();
        assertTrue( indexExists( indexName ) );
        assertEquals( config, db.index().getConfiguration( index ) );
    }

    /** Checks for the node index inside its own transaction. */
    private boolean indexExists( String indexName )
    {
        Transaction tx = db.beginTx();
        try
        {
            return db.index().existsForNodes( indexName );
        }
        finally
        {
            tx.finish();
        }
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_index_TestIndexImplOnNeo.java
|
1,463
|
/**
 * Tests serialization of the index {@link XaCommand}s: a representative list of
 * commands is written to a file, read back and compared for equality, and then
 * every possible mid-command truncation of the file is verified to yield
 * exactly the commands that were completely written before the cut.
 */
public class TestIndexCommand
{
    // Fixture constants used to build a representative set of index commands.
    private static final String INDEX_NAME_1 = "persons";
    private static final String INDEX_NAME_2 = "rels";
    private static final long NODE_ID_1 = 10;
    private static final long NODE_ID_2 = 11;
    private static final long NODE_ID_3 = 12;
    private static final long REL_ID_1 = 101;
    private static final String KEY_1 = "name";
    private static final String KEY_2 = "title";
    private static final String KEY_3 = "type";
    private static final String STRING_VALUE_1 = "Mattias";
    private static final String STRING_VALUE_2 = "Blabla";
    private static final int INT_VALUE = 345;
    private static final Map<String, String> SOME_CONFIG = stringMap( "type", "exact", "provider", "lucene" );

    @Rule
    public TargetDirectory.TestDirectory directory = TargetDirectory.forTest( TestIndexCommand.class ).testDirectory();

    @Test
    public void testWriteReadTruncate() throws Exception
    {
        List<XaCommand> commands = createSomeCommands();
        // writeCommandsToFile also records the file offset where each command starts,
        // which lets the truncation loop below place a cut inside every single command.
        Pair<File, List<Long>> writtenCommands = writeCommandsToFile( commands );
        List<XaCommand> readCommands = readCommandsFromFile( writtenCommands.first() );
        // Assert that the read commands are equal to the written commands
        Iterator<XaCommand> commandIterator = commands.iterator();
        for ( XaCommand readCommand : readCommands )
        {
            assertEquals( commandIterator.next(), readCommand );
        }
        // Assert that even truncated files
        // (where commands are cut off in the middle) can be read
        for ( int i = 0; i < commands.size(); i++ )
        {
            long startPosition = writtenCommands.other().get( i );
            // Command i ends where command i+1 starts, or at EOF for the last command.
            long nextStartPosition = i+1 < commands.size() ?
                    writtenCommands.other().get( i+1 ) : writtenCommands.first().length();
            for ( long p = startPosition; p < nextStartPosition; p++ )
            {
                // Cutting anywhere inside command i must drop it (and everything after),
                // while the i commands preceding it remain fully readable.
                File copy = copyAndTruncateFile( writtenCommands.first(), p );
                List<XaCommand> readTruncatedCommands = readCommandsFromFile( copy );
                assertEquals( i, readTruncatedCommands.size() );
                FileUtils.deleteFile( copy );
            }
        }
        writtenCommands.first().delete();
    }

    /** Copies {@code file} into the test directory and truncates the copy to {@code fileSize} bytes. */
    private File copyAndTruncateFile( File file, long fileSize ) throws IOException
    {
        File copy = createTempFile( "index", "copy", directory.directory() );
        copyFile( file, copy );
        RandomAccessFile raFile = new RandomAccessFile( copy, "rw" );
        try
        {
            raFile.getChannel().truncate( fileSize );
        }
        finally
        {
            raFile.close();
        }
        return copy;
    }

    /**
     * Builds one command of each interesting kind (define, create, add x2,
     * add-relationship, remove, delete). The shared {@link IndexDefineCommand}
     * comes first since it assigns the name/key ids the other commands use.
     */
    private List<XaCommand> createSomeCommands()
    {
        List<XaCommand> commands = new ArrayList<XaCommand>();
        IndexDefineCommand definitions = new IndexDefineCommand();
        commands.add( definitions );
        commands.add( definitions.create( INDEX_NAME_1, Node.class, SOME_CONFIG ) );
        commands.add( definitions.add( INDEX_NAME_1, Node.class, NODE_ID_1, KEY_1, STRING_VALUE_1 ) );
        commands.add( definitions.add( INDEX_NAME_1, Node.class, NODE_ID_1, KEY_2, STRING_VALUE_2 ) );
        commands.add( definitions.addRelationship( INDEX_NAME_2, Relationship.class, REL_ID_1, KEY_3, INT_VALUE, NODE_ID_2, NODE_ID_3 ) );
        commands.add( definitions.remove( INDEX_NAME_1, Node.class, NODE_ID_2, KEY_1, STRING_VALUE_1 ) );
        commands.add( definitions.delete( INDEX_NAME_2, Relationship.class ) );
        return commands;
    }

    /**
     * Reads commands from {@code file} until {@code readCommand} signals the end
     * (returns null) — which also happens on a truncated trailing command.
     */
    private List<XaCommand> readCommandsFromFile( File file ) throws IOException
    {
        FileInputStream in = null;
        ReadableByteChannel reader = null;
        List<XaCommand> commands;
        try
        {
            in = new FileInputStream( file );
            reader = in.getChannel();
            ByteBuffer buffer = ByteBuffer.allocate( 10000 );
            commands = new ArrayList<XaCommand>();
            while ( true )
            {
                XaCommand command = readCommand( reader, buffer );
                if ( command == null )
                {
                    break;
                }
                commands.add( command );
            }
        }
        finally
        {
            if ( in != null )
                in.close();
            if ( reader != null )
                reader.close();
        }
        return commands;
    }

    /**
     * Writes the commands to a temp file and returns the file together with the
     * log-buffer position at which each command started.
     */
    private Pair<File, List<Long>> writeCommandsToFile( List<XaCommand> commands ) throws IOException
    {
        File file = createTempFile( "index", "command" );
        RandomAccessFile randomAccessFile = new RandomAccessFile( file, "rw" );
        List<Long> startPositions;
        try
        {
            StoreFileChannel fileChannel = new StoreFileChannel( randomAccessFile.getChannel() );
            LogBuffer writeBuffer = new DirectMappedLogBuffer( fileChannel, new Monitors().newMonitor( ByteCounterMonitor.class ) );
            startPositions = new ArrayList<>();
            for ( XaCommand command : commands )
            {
                // Record the offset before each command so truncation points can be derived.
                startPositions.add( writeBuffer.getFileChannelPosition() );
                command.writeToFile( writeBuffer );
            }
            writeBuffer.force();
            fileChannel.close();
        }
        finally
        {
            randomAccessFile.close();
        }
        return Pair.of( file, startPositions );
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_index_TestIndexCommand.java
|
1,464
|
/**
 * {@link IndexConnectionBroker} for a read-only database: any attempt to
 * acquire (or create) a write-capable resource connection throws
 * {@link ReadOnlyDbException}, and read-only acquisition always yields
 * {@code null} since no connection is ever registered.
 */
public class ReadOnlyIndexConnectionBroker<T extends XaConnection> extends IndexConnectionBroker<T>
{
    public ReadOnlyIndexConnectionBroker( TransactionManager transactionManager )
    {
        super( transactionManager );
    }

    /** @throws ReadOnlyDbException always — writes are not allowed in a read-only database. */
    @Override
    public T acquireResourceConnection()
    {
        throw new ReadOnlyDbException();
    }

    /** Always returns {@code null}: no write connection can exist to hand back. */
    @Override
    public T acquireReadOnlyResourceConnection()
    {
        return null;
    }

    /** Never reached via the overrides above; throws if invoked directly. */
    @Override
    protected T newConnection()
    {
        throw new ReadOnlyDbException();
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_index_ReadOnlyIndexConnectionBroker.java
|
1,465
|
/**
 * Persistent store of per-index configuration (name -> config map), kept
 * separately for node indexes and relationship indexes in {@code index.db}
 * under the graph database store directory. Writes go through a
 * tmp-file/rename dance so a crash mid-write leaves either the old or the
 * new file readable.
 */
public class IndexStore extends LifecycleAdapter
{
    public static final String INDEX_DB_FILE_NAME = "index.db";
    // Magic header identifying a versioned index.db file; absent in the legacy format.
    private static final byte[] MAGICK = new byte[] { 'n', 'e', 'o', '4', 'j', '-', 'i', 'n', 'd', 'e', 'x' };
    private static final int VERSION = 1;
    private final File file;
    // Backup of the previous file contents, kept only during a write cycle.
    private final File oldFile;
    private final Map<String, Map<String, String>> nodeConfig = new ConcurrentHashMap<String, Map<String,String>>();
    private final Map<String, Map<String, String>> relConfig = new ConcurrentHashMap<String, Map<String,String>>();
    // Scratch buffer reused across reads/writes; grown on demand by buffer().
    // NOTE(review): not thread-safe by itself — presumably guarded by the
    // synchronized mutators and single-threaded start(); verify before adding callers.
    private ByteBuffer dontUseBuffer = ByteBuffer.allocate( 100 );
    private final FileSystemAbstraction fileSystem;

    public IndexStore( File graphDbStoreDir, FileSystemAbstraction fileSystem )
    {
        this.fileSystem = fileSystem;
        this.file = new File( graphDbStoreDir, INDEX_DB_FILE_NAME );
        this.oldFile = new File( file.getParentFile(), file.getName() + ".old" );
    }

    /** Returns the shared scratch buffer, reallocating (doubled) if {@code size} doesn't fit. */
    private ByteBuffer buffer( int size )
    {
        if ( dontUseBuffer.capacity() < size )
        {
            dontUseBuffer = ByteBuffer.allocate( size*2 );
        }
        return dontUseBuffer;
    }

    /**
     * Loads index configuration from disk, preferring the current file and
     * falling back to the {@code .old} backup if a previous write was interrupted.
     * Handles both the versioned format and the legacy (pre-MAGICK) format.
     */
    private void read()
    {
        File fileToReadFrom = fileSystem.fileExists( file ) ? file : oldFile;
        if ( !fileSystem.fileExists( fileToReadFrom ) )
        {
            return;
        }
        StoreChannel channel = null;
        try
        {
            channel = fileSystem.open( fileToReadFrom, "r" );
            Integer version = tryToReadVersion( channel );
            if ( version == null )
            {
                // No MAGICK header: legacy file. Reopen to rewind, then read
                // entries until EOF into the node map (the legacy format did not
                // separate node and relationship indexes, so mirror into both).
                close( channel );
                channel = fileSystem.open( fileToReadFrom, "r" );
                // Legacy format, TODO
                readMap( channel, nodeConfig, version );
                relConfig.putAll( nodeConfig );
            }
            else if ( version < VERSION )
            {
                // ...add version upgrade code here
                throw new UnsupportedOperationException( "" + version );
            }
            else
            {
                // Current format: node map then relationship map, each preceded by its size.
                readMap( channel, nodeConfig, readNextInt( channel ) );
                readMap( channel, relConfig, readNextInt( channel ) );
            }
        }
        catch ( IOException e )
        {
            throw new RuntimeException( e );
        }
        finally
        {
            close( channel );
        }
    }

    @Override
    public void start()
    {
        read();
    }

    /**
     * Reads index entries into {@code map}. With a non-null {@code sizeOrTillEof}
     * exactly that many entries are expected; with null, entries are read until
     * the channel is exhausted (legacy format).
     */
    private Map<String, Map<String, String>> readMap( StoreChannel channel,
            Map<String, Map<String, String>> map, Integer sizeOrTillEof ) throws IOException
    {
        for ( int i = 0; sizeOrTillEof == null || i < sizeOrTillEof; i++ )
        {
            String indexName = readNextString( channel );
            if ( indexName == null )
            {
                break;
            }
            Integer propertyCount = readNextInt( channel );
            if ( propertyCount == null )
            {
                break;
            }
            Map<String, String> properties = new HashMap<String, String>();
            for ( int p = 0; p < propertyCount; p++ )
            {
                String key = readNextString( channel );
                if ( key == null )
                {
                    break;
                }
                String value = readNextString( channel );
                if ( value == null )
                {
                    break;
                }
                properties.put( key, value );
            }
            map.put( indexName, properties );
        }
        return Collections.unmodifiableMap( map );
    }

    /**
     * Returns the file format version if the MAGICK header is present,
     * or null for a legacy (headerless) file.
     */
    private Integer tryToReadVersion( ReadableByteChannel channel ) throws IOException
    {
        byte[] array = IoPrimitiveUtils.readBytes( channel, new byte[MAGICK.length] );
        if ( !Arrays.equals( MAGICK, array ) )
        {
            return null;
        }
        // NOTE(review): array is necessarily non-null here (Arrays.equals against
        // MAGICK would have been false otherwise), so this guard is redundant.
        return array != null ? readNextInt( channel ) : null;
    }

    /** Best-effort close; failures are only printed since they don't affect in-memory state. */
    private void close( StoreChannel channel )
    {
        if ( channel != null )
        {
            try
            {
                channel.close();
            }
            catch ( IOException e )
            {
                e.printStackTrace();
            }
        }
    }

    private Integer readNextInt( ReadableByteChannel channel ) throws IOException
    {
        return IoPrimitiveUtils.readInt( channel, buffer( 4 ) );
    }

    private String readNextString( ReadableByteChannel channel ) throws IOException
    {
        return IoPrimitiveUtils.readLengthAndString( channel, buffer( 100 ) );
    }

    /** @return whether configuration exists for the named index of the given entity class. */
    public boolean has( Class<? extends PropertyContainer> cls, String indexName )
    {
        return map( cls ).containsKey( indexName );
    }

    /** @return the stored config for the named index, or null if none exists. */
    public Map<String, String> get( Class<? extends PropertyContainer> cls, String indexName )
    {
        return map( cls ).get( indexName );
    }

    /** @return all index names stored for the given entity class. */
    public String[] getNames( Class<? extends PropertyContainer> cls )
    {
        Map<String, Map<String, String>> indexMap = map( cls );
        return indexMap.keySet().toArray( new String[indexMap.size()] );
    }

    /** Selects the node or relationship config map based on the entity class. */
    private Map<String, Map<String, String>> map( Class<? extends PropertyContainer> cls )
    {
        if ( cls.equals( Node.class ) )
        {
            return nodeConfig;
        }
        else if ( cls.equals( Relationship.class ) )
        {
            return relConfig;
        }
        throw new IllegalArgumentException( cls.toString() );
    }

    // Synchronized since only one thread are allowed to write at any given time
    /** Removes the named index config and persists the change immediately. */
    public synchronized void remove( Class<? extends PropertyContainer> cls, String indexName )
    {
        if ( map( cls ).remove( indexName ) == null )
        {
            throw new RuntimeException( "Index config for '" + indexName + "' not found" );
        }
        write();
    }

    // Synchronized since only one thread are allowed to write at any given time
    /** Sets (or replaces) the named index config and persists the change immediately. */
    public synchronized void set( Class<? extends PropertyContainer> cls,
            String name, Map<String, String> config )
    {
        map( cls ).put( name, Collections.unmodifiableMap( config ) );
        write();
    }

    // Synchronized since only one thread are allowed to write at any given time
    /**
     * Stores the config only if the index isn't already configured.
     * @return true if the config was stored, false if it already existed.
     */
    public synchronized boolean setIfNecessary( Class<? extends PropertyContainer> cls,
            String name, Map<String, String> config )
    {
        Map<String, Map<String, String>> map = map( cls );
        if ( map.containsKey( name ) )
        {
            return false;
        }
        map.put( name, Collections.unmodifiableMap( config ) );
        write();
        return true;
    }

    /**
     * Persists the in-memory maps crash-safely: write a complete .tmp file,
     * move the current file aside as .old, rename .tmp into place, then drop
     * the .old backup. At any point in time either the old or the new content
     * is fully present on disk.
     */
    private void write()
    {
        // Write to a .tmp file
        File tmpFile = new File( this.file.getParentFile(), this.file.getName() + ".tmp" );
        write( tmpFile );
        // Make sure the .old file doesn't exist, then rename the current one to .old
        fileSystem.deleteFile( oldFile );
        try
        {
            if ( fileSystem.fileExists( file ) && !fileSystem.renameFile( file, oldFile ) )
            {
                throw new RuntimeException( "Couldn't rename " + file + " -> " + oldFile );
            }
        }
        catch ( IOException e )
        {
            throw new RuntimeException( "Couldn't rename " + file + " -> " + oldFile );
        }
        // Rename the .tmp file to the current name
        try
        {
            if ( !fileSystem.renameFile( tmpFile, this.file ) )
            {
                throw new RuntimeException( "Couldn't rename " + tmpFile + " -> " + file );
            }
        }
        catch ( IOException e )
        {
            throw new RuntimeException( "Couldn't rename " + tmpFile + " -> " + file );
        }
        fileSystem.deleteFile( oldFile );
    }

    /** Serializes header + both maps into {@code file} and forces it to disk. */
    private void write( File file )
    {
        StoreChannel channel = null;
        try
        {
            channel = fileSystem.open( file, "rw" );
            channel.write( ByteBuffer.wrap( MAGICK ) );
            IoPrimitiveUtils.writeInt( channel, buffer( 4 ), VERSION );
            writeMap( channel, nodeConfig );
            writeMap( channel, relConfig );
            channel.force( false );
        }
        catch ( IOException e )
        {
            throw new RuntimeException( e );
        }
        finally
        {
            close( channel );
        }
    }

    /** Writes a map as: entry count, then per index its name, property count and key/value pairs. */
    private void writeMap( StoreChannel channel, Map<String, Map<String, String>> map ) throws IOException
    {
        IoPrimitiveUtils.writeInt( channel, buffer( 4 ), map.size() );
        for ( Map.Entry<String, Map<String, String>> entry : map.entrySet() )
        {
            writeString( channel, entry.getKey() );
            writeInt( channel, entry.getValue().size() );
            for ( Map.Entry<String, String> propertyEntry : entry.getValue().entrySet() )
            {
                writeString( channel, propertyEntry.getKey() );
                writeString( channel, propertyEntry.getValue() );
            }
        }
    }

    private void writeInt( StoreChannel channel, int value ) throws IOException
    {
        IoPrimitiveUtils.writeInt( channel, buffer( 4 ), value );
    }

    private void writeString( StoreChannel channel, String value ) throws IOException
    {
        IoPrimitiveUtils.writeLengthAndString( channel, buffer( 200 ), value );
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_index_IndexStore.java
|
1,466
|
/**
 * Small fixed-layout metadata store for an index provider. The file holds five
 * 8-byte records: creation time, random identifier, version, last committed tx
 * id and index (format) version. Handles creation of a missing file as well as
 * version upgrade/downgrade checks at open time.
 */
public class IndexProviderStore
{
    private static final int RECORD_SIZE = 8;
    private static final int RECORD_COUNT = 5;
    private final long creationTime;
    private final long randomIdentifier;
    private long version;
    // The (possibly upgraded) index format version this store now runs with.
    private final long indexVersion;
    private final StoreChannel fileChannel;
    private final ByteBuffer buf = ByteBuffer.allocate( RECORD_SIZE*RECORD_COUNT );
    private long lastCommittedTx;
    private final File file;
    private final Random random;

    /**
     * Opens (creating if absent) the store file and validates the stored index
     * version against {@code expectedVersion}.
     *
     * @param expectedVersion index version the caller expects to run with.
     * @param allowUpgrade whether a store with an older/missing version may be
     *        upgraded to {@code expectedVersion}; going back to an older version
     *        is never allowed.
     * @throws NotCurrentStoreVersionException on attempted downgrade.
     * @throws UpgradeNotAllowedByConfigurationException if an upgrade is needed
     *         but {@code allowUpgrade} is false.
     */
    public IndexProviderStore( File file, FileSystemAbstraction fileSystem, long expectedVersion, boolean allowUpgrade )
    {
        this.file = file;
        this.random = new Random( System.currentTimeMillis() );
        StoreChannel channel = null;
        boolean success = false;
        try
        {
            // Create it if it doesn't exist
            if ( !fileSystem.fileExists( file ) )
                create( file, fileSystem, expectedVersion );
            // Read all the records in the file
            channel = fileSystem.open( file, "rw" );
            Long[] records = readRecordsWithNullDefaults( channel, RECORD_COUNT, allowUpgrade );
            creationTime = records[0].longValue();
            randomIdentifier = records[1].longValue();
            version = records[2].longValue();
            lastCommittedTx = records[3].longValue();
            Long readIndexVersion = records[4];
            fileChannel = channel;
            // Compare version and throw exception if there's a mismatch, also considering "allow upgrade"
            boolean versionDiffers = compareExpectedVersionWithStoreVersion( expectedVersion, allowUpgrade, readIndexVersion );
            // Here we know that either the version matches or we just upgraded to the expected version
            indexVersion = expectedVersion;
            if ( versionDiffers )
                // We have upgraded the version, let's write it
                writeOut();
            success = true;
        }
        catch ( IOException e )
        {
            throw new RuntimeException( e );
        }
        finally
        {
            // Only close the channel on failure; on success it lives on as fileChannel.
            if ( !success && channel != null )
            {
                try
                {
                    channel.close();
                }
                catch ( IOException e )
                { // What to do?
                }
            }
        }
    }

    /**
     * @return true if the stored version differed from {@code expectedVersion}
     *         (meaning an upgrade just happened and should be persisted).
     */
    private boolean compareExpectedVersionWithStoreVersion( long expectedVersion,
            boolean allowUpgrade, Long readIndexVersion )
    {
        boolean versionDiffers = readIndexVersion == null || readIndexVersion.longValue() != expectedVersion;
        if ( versionDiffers )
        {
            // We can throw a more explicit exception if we see that we're trying to run
            // with an older version than the store is.
            if ( readIndexVersion != null && expectedVersion < readIndexVersion.longValue() )
            {
                String expected = versionLongToString( expectedVersion );
                String readVersion = versionLongToString( readIndexVersion.longValue() );
                throw new NotCurrentStoreVersionException( expected, readVersion,
                        "Your index has been upgraded to " + readVersion +
                        " and cannot run with an older version " + expected, false );
            }
            else if ( !allowUpgrade )
            {
                // We try to run with a newer version than the store is but isn't allowed to upgrade.
                throw new UpgradeNotAllowedByConfigurationException();
            }
        }
        return versionDiffers;
    }

    /**
     * Reads up to {@code count} records; records missing from a short (legacy)
     * file are left as null, which is only acceptable when upgrade is allowed.
     */
    private Long[] readRecordsWithNullDefaults( StoreChannel fileChannel, int count, boolean allowUpgrade ) throws IOException
    {
        buf.clear();
        int bytesRead = fileChannel.read( buf );
        int wholeRecordsRead = bytesRead/RECORD_SIZE;
        if ( wholeRecordsRead < RECORD_COUNT && !allowUpgrade )
            throw new UpgradeNotAllowedByConfigurationException( "Index version (managed by " + file + ") has changed " +
                    "and cannot be upgraded unless " + GraphDatabaseSettings.allow_store_upgrade.name() +
                    "=true is supplied in the configuration" );
        buf.flip();
        Long[] result = new Long[count];
        for ( int i = 0; i < wholeRecordsRead; i++ )
            result[i] = buf.getLong();
        return result;
    }

    /** Creates a fresh store file with initial values (version 0, last committed tx 1). */
    private void create( File file, FileSystemAbstraction fileSystem, long indexVersion ) throws IOException
    {
        if ( fileSystem.fileExists( file ) )
            throw new IllegalArgumentException( file + " already exist" );
        StoreChannel fileChannel = null;
        try
        {
            fileChannel = fileSystem.open( file, "rw" );
            write( fileChannel, System.currentTimeMillis(), random.nextLong(),
                    0, 1, indexVersion );
        }
        finally
        {
            if (fileChannel != null)
                fileChannel.close();
        }
    }

    /** Writes all five records at offset 0 and verifies the full length was written. */
    private void write( StoreChannel channel, long time, long identifier, long version, long lastCommittedTxId,
            long indexVersion ) throws IOException
    {
        buf.clear();
        buf.putLong( time ).putLong( identifier ).putLong( version ).putLong( lastCommittedTxId ).putLong( indexVersion );
        buf.flip();
        channel.position( 0 );
        int written = channel.write( buf );
        int expectedLength = RECORD_COUNT*RECORD_SIZE;
        if ( written != expectedLength )
            throw new RuntimeException( "Expected to write " + expectedLength + " bytes, but wrote " + written );
    }

    public File getFile()
    {
        return file;
    }

    public long getCreationTime()
    {
        return creationTime;
    }

    public long getRandomNumber()
    {
        return randomIdentifier;
    }

    public long getVersion()
    {
        return version;
    }

    public long getIndexVersion()
    {
        return indexVersion;
    }

    /** Bumps the version, persists immediately, and returns the value before the bump. */
    public synchronized long incrementVersion()
    {
        long current = getVersion();
        version++;
        writeOut();
        return current;
    }

    /** Sets the version and persists immediately. */
    public synchronized void setVersion( long version )
    {
        this.version = version;
        writeOut();
    }

    /**
     * Records the last committed tx id in memory only; it is persisted on the
     * next {@link #flush()}, {@link #close()} or version change (no writeOut here).
     */
    public synchronized void setLastCommittedTx( long txId )
    {
        this.lastCommittedTx = txId;
    }

    public long getLastCommittedTx()
    {
        return this.lastCommittedTx;
    }

    /** Forces the current in-memory state to disk. */
    public synchronized void flush()
    {
        writeOut();
    }

    private void writeOut()
    {
        try
        {
            write( fileChannel, creationTime, randomIdentifier, version, lastCommittedTx, indexVersion );
        }
        catch ( IOException e )
        {
            throw new RuntimeException( e );
        }
    }

    /** Persists the current state and closes the channel; a no-op if already closed. */
    public void close()
    {
        if ( !fileChannel.isOpen() )
            return;
        writeOut();
        try
        {
            fileChannel.close();
        }
        catch ( IOException e )
        {
            throw new RuntimeException( e );
        }
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_index_IndexProviderStore.java
|
1,467
|
/**
 * An {@link XaCommand} that defines the mapping between index names / keys and
 * the compact byte ids used by the other {@link IndexCommand}s in the same
 * transaction stream. Ids are handed out lazily (starting at 1) as commands
 * are created through the factory methods, and the full name&lt;-&gt;id maps are
 * serialized by {@link #writeToFile(LogBuffer)} so a reader can resolve ids
 * back to names.
 */
public class IndexDefineCommand extends XaCommand
{
    // Next id to hand out for index names / keys; ids start at 1 (incrementAndGet).
    private final AtomicInteger nextIndexNameId = new AtomicInteger();
    private final AtomicInteger nextKeyId = new AtomicInteger();
    private final Map<String, Byte> indexNameIdRange;
    private final Map<String, Byte> keyIdRange;
    // Reverse lookups so readers can resolve ids back to names/keys.
    private final Map<Byte, String> idToIndexName;
    private final Map<Byte, String> idToKey;

    /** Creates an empty definition command; ids are assigned on demand. */
    public IndexDefineCommand()
    {
        indexNameIdRange = new HashMap<>();
        keyIdRange = new HashMap<>();
        idToIndexName = new HashMap<>();
        idToKey = new HashMap<>();
    }

    /** Creates a definition command from already-known mappings (used when reading back). */
    public IndexDefineCommand( Map<String, Byte> indexNames, Map<String, Byte> keys )
    {
        this.indexNameIdRange = indexNames;
        this.keyIdRange = keys;
        idToIndexName = reverse( indexNames );
        idToKey = reverse( keys );
    }

    /** Resolves an id through a reverse map, failing loudly on an unknown id. */
    private static String getFromMap( Map<Byte, String> map, byte id )
    {
        String result = map.get( id );
        if ( result == null )
        {
            throw new IllegalArgumentException( "" + id );
        }
        return result;
    }

    /** Factory for an index "create" command, registering the index name if needed. */
    public IndexCommand create( String indexName, Class<?> entityType, Map<String, String> config )
    {
        return new IndexCommand.CreateCommand( indexNameId( indexName ),
                entityTypeId( entityType ), config );
    }

    /** Factory for an "add entity" command. */
    public IndexCommand add( String indexName, Class<?> entityType, long entityId, String key,
            Object value )
    {
        return new IndexCommand.AddCommand( indexNameId( indexName ), entityTypeId( entityType ),
                entityId, keyId( key ), value );
    }

    /** Factory for an "add relationship" command, which also carries start/end node ids. */
    public IndexCommand addRelationship( String indexName, Class<?> entityType, long entityId, String key,
            Object value, long startNode, long endNode )
    {
        return new IndexCommand.AddRelationshipCommand( indexNameId( indexName ),
                entityTypeId( entityType ), entityId, keyId( key ), value, startNode, endNode );
    }

    /** Factory for a "remove" command; a null key maps to key id 0. */
    public IndexCommand remove( String indexName, Class<?> entityType, long entityId,
            String key, Object value )
    {
        return new IndexCommand.RemoveCommand( indexNameId( indexName ), entityTypeId( entityType ),
                entityId, key != null ? keyId( key ) : 0, value );
    }

    /** Factory for an index "delete" command. */
    public IndexCommand delete( String indexName, Class<?> entityType )
    {
        return new IndexCommand.DeleteCommand( indexNameId( indexName ), entityTypeId( entityType ) );
    }

    /** @throws IllegalArgumentException if the id is unknown. */
    public String getIndexName( byte id )
    {
        return getFromMap( idToIndexName, id );
    }

    /** @throws IllegalArgumentException if the id is unknown. */
    public String getKey( byte id )
    {
        return getFromMap( idToKey, id );
    }

    /** Maps an entity class to its wire id; anything non-Relationship counts as a node. */
    public static byte entityTypeId( Class<?> entityType )
    {
        return entityType.equals( Relationship.class ) ? IndexCommand.RELATIONSHIP : IndexCommand.NODE;
    }

    /** Maps a wire id back to the entity class. */
    public static Class<? extends PropertyContainer> entityType( byte id )
    {
        switch ( id )
        {
        case IndexCommand.NODE: return Node.class;
        case IndexCommand.RELATIONSHIP: return Relationship.class;
        default: throw new IllegalArgumentException( "" + id );
        }
    }

    private byte indexNameId( String indexName )
    {
        return id( indexName, indexNameIdRange, nextIndexNameId, idToIndexName );
    }

    private byte keyId( String key )
    {
        return id( key, keyIdRange, nextKeyId, idToKey );
    }

    /** Returns the existing id for {@code key}, or allocates the next one and records it in both maps. */
    private byte id( String key, Map<String, Byte> idRange, AtomicInteger nextId,
            Map<Byte, String> reverse )
    {
        Byte id = idRange.get( key );
        if ( id == null )
        {
            id = Byte.valueOf( (byte) nextId.incrementAndGet() );
            idRange.put( key, id );
            reverse.put( id, key );
        }
        return id;
    }

    @Override
    public void execute()
    {
        // Definitions carry no state change of their own; nothing to execute.
    }

    @Override
    public void writeToFile( LogBuffer buffer ) throws IOException
    {
        // Header: command type in the top 3 bits, followed by two reserved zero bytes.
        buffer.put( (byte)(IndexCommand.DEFINE_COMMAND << 5) );
        buffer.put( (byte)0 );
        buffer.put( (byte)0 );
        writeMap( indexNameIdRange, buffer );
        writeMap( keyIdRange, buffer );
    }

    /** Reads a serialized name->id map; returns null if the channel ends mid-map (truncated log). */
    static Map<String, Byte> readMap( ReadableByteChannel channel, ByteBuffer buffer )
            throws IOException
    {
        Byte size = readByte( channel, buffer );
        if ( size == null ) return null;
        Map<String, Byte> result = new HashMap<>();
        for ( int i = 0; i < size; i++ )
        {
            String key = read2bLengthAndString( channel, buffer );
            Byte id = readByte( channel, buffer );
            if ( key == null || id == null ) return null;
            result.put( key, id );
        }
        return result;
    }

    /** Writes a name->id map as: size byte, then (2b-length string, id byte) per entry. */
    private static void writeMap( Map<String, Byte> map, LogBuffer buffer ) throws IOException
    {
        buffer.put( (byte)map.size() );
        for ( Map.Entry<String, Byte> entry : map.entrySet() )
        {
            write2bLengthAndString( buffer, entry.getKey() );
            buffer.put( entry.getValue() );
        }
    }

    @Override
    public int hashCode()
    {
        // equals() compares only the two name->id maps, so hashCode() must be
        // derived from exactly those fields to honor the equals/hashCode
        // contract. (Previously AtomicInteger identity hashes were mixed in,
        // making equal commands hash differently.)
        int result = indexNameIdRange != null ? indexNameIdRange.hashCode() : 0;
        result = 31 * result + (keyIdRange != null ? keyIdRange.hashCode() : 0);
        return result;
    }

    @Override
    public boolean equals( Object obj )
    {
        if ( this == obj )
        {
            return true;
        }
        // instanceof also rejects null; previously a foreign object or null
        // caused ClassCastException/NullPointerException here.
        if ( !(obj instanceof IndexDefineCommand) )
        {
            return false;
        }
        IndexDefineCommand other = (IndexDefineCommand) obj;
        return indexNameIdRange.equals( other.indexNameIdRange ) &&
                keyIdRange.equals( other.keyIdRange );
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_index_IndexDefineCommand.java
|
1,468
|
private class TxCommitHook implements Synchronization
{
private final Transaction tx;
TxCommitHook( Transaction tx )
{
this.tx = tx;
}
public void afterCompletion( int param )
{
releaseResourceConnectionsForTransaction( tx );
}
public void beforeCompletion()
{
delistResourcesForTransaction();
}
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_index_IndexConnectionBroker.java
|
1,469
|
/**
 * Hands out one index {@link XaConnection} per JTA transaction: the first
 * acquisition in a transaction creates a connection, enlists its XA resource
 * and registers a synchronization hook that delists and destroys the
 * connection when the transaction completes. Subsequent acquisitions in the
 * same transaction reuse the cached connection.
 */
public abstract class IndexConnectionBroker<T extends XaConnection>
{
    // Per-transaction connection cache.
    // NOTE(review): the ArrayMap constructor flags presumably select a
    // synchronized/thread-safe mode — verify against ArrayMap before relying on it.
    private final ArrayMap<Transaction, T> txConnectionMap =
            new ArrayMap<Transaction, T>( (byte)5, true, true );
    private final TransactionManager transactionManager;

    protected IndexConnectionBroker( TransactionManager transactionManager )
    {
        this.transactionManager = transactionManager;
    }

    /**
     * Returns the connection bound to the current transaction, creating,
     * enlisting and caching one if this is the first acquisition.
     *
     * @throws NotInTransactionException if no transaction is active.
     */
    public T acquireResourceConnection()
    {
        Transaction tx = this.getCurrentTransaction();
        if ( tx == null )
        {
            throw new NotInTransactionException();
        }
        T con = txConnectionMap.get( tx );
        if ( con == null )
        {
            try
            {
                con = newConnection();
                if ( !con.enlistResource( tx ) )
                {
                    throw new RuntimeException( "Unable to enlist '"
                            + con.getXaResource() + "' in "
                            + tx );
                }
                // The hook delists the resource before completion and destroys
                // the connection after completion.
                tx.registerSynchronization( new TxCommitHook( tx ) );
                txConnectionMap.put( tx, con );
            }
            catch ( javax.transaction.RollbackException re )
            {
                String msg = "The transaction is marked for rollback only.";
                throw new RuntimeException( msg, re );
            }
            catch ( SystemException se )
            {
                String msg = "TM encountered an unexpected error condition.";
                throw new RuntimeException( msg, se );
            }
        }
        return con;
    }

    /** Creates a new, not-yet-enlisted connection; supplied by subclasses. */
    protected abstract T newConnection();

    /**
     * Returns the connection already bound to the current transaction, or null
     * if there is no transaction or no connection was acquired in it.
     * Never creates a connection.
     */
    public T acquireReadOnlyResourceConnection()
    {
        Transaction tx = this.getCurrentTransaction();
        return tx != null ? txConnectionMap.get( tx ) : null;
    }

    /** Removes and destroys the connection cached for {@code tx}, if any. */
    void releaseResourceConnectionsForTransaction( Transaction tx )
            throws NotInTransactionException
    {
        T con = txConnectionMap.remove( tx );
        if ( con != null )
        {
            con.destroy();
        }
    }

    /** Delists the current transaction's resource (TMSUCCESS) ahead of completion. */
    void delistResourcesForTransaction() throws NotInTransactionException
    {
        Transaction tx = this.getCurrentTransaction();
        if ( tx == null )
        {
            throw new NotInTransactionException();
        }
        T con = txConnectionMap.get( tx );
        if ( con != null )
        {
            try
            {
                con.delistResource(tx, XAResource.TMSUCCESS);
            }
            catch ( IllegalStateException | SystemException e )
            {
                throw new RuntimeException(
                        "Unable to delist lucene resource from tx", e );
            }
        }
    }

    /** Fetches the calling thread's transaction from the transaction manager. */
    private Transaction getCurrentTransaction()
            throws NotInTransactionException
    {
        try
        {
            return transactionManager.getTransaction();
        }
        catch ( SystemException se )
        {
            throw new NotInTransactionException(
                    "Error fetching transaction for current thread", se );
        }
    }

    /** Synchronization hook: delist before completion, release/destroy after. */
    private class TxCommitHook implements Synchronization
    {
        private final Transaction tx;

        TxCommitHook( Transaction tx )
        {
            this.tx = tx;
        }

        public void afterCompletion( int param )
        {
            releaseResourceConnectionsForTransaction( tx );
        }

        public void beforeCompletion()
        {
            delistResourcesForTransaction();
        }
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_index_IndexConnectionBroker.java
|
1,470
|
/**
 * Command removing an entity (optionally scoped to a key/value) from an index.
 * Pure data holder — all behavior lives in the {@link IndexCommand} base class.
 */
public static class RemoveCommand extends IndexCommand
{
    RemoveCommand( byte indexNameId, byte entityType, long entityId, byte keyId, Object value )
    {
        super( REMOVE_COMMAND, indexNameId, entityType, entityId, keyId, value );
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_index_IndexCommand.java
|
1,471
|
/**
 * Command deleting an entire index. Carries no entity/key/value payload
 * (zeros/null), so serialization is just the common header.
 */
public static class DeleteCommand extends IndexCommand
{
    DeleteCommand( byte indexNameId, byte entityType )
    {
        super( DELETE_COMMAND, indexNameId, entityType, 0L, (byte)0, null );
    }

    @Override
    public void writeToFile( LogBuffer buffer ) throws IOException
    {
        // Header only — a delete has no body.
        writeHeader( buffer );
    }

    @Override
    public boolean isConsideredNormalWriteCommand()
    {
        return false;
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_index_IndexCommand.java
|
1,472
|
/**
 * Command creating an index with a given configuration map. Serialized as the
 * common header followed by the config entry count and its key/value strings.
 */
public static class CreateCommand extends IndexCommand
{
    private final Map<String, String> config;

    CreateCommand( byte indexNameId, byte entityType, Map<String, String> config )
    {
        super( CREATE_COMMAND, indexNameId, entityType, 0L, (byte)0, null );
        this.config = config;
    }

    public Map<String, String> getConfig()
    {
        return config;
    }

    @Override
    public void writeToFile( LogBuffer buffer ) throws IOException
    {
        writeHeader( buffer );
        // Config map: 2-byte entry count, then length-prefixed key/value strings.
        buffer.putShort( (short)config.size() );
        for ( Map.Entry<String, String> entry : config.entrySet() )
        {
            write2bLengthAndString( buffer, entry.getKey() );
            write2bLengthAndString( buffer, entry.getValue() );
        }
    }

    @Override
    public boolean isConsideredNormalWriteCommand()
    {
        return false;
    }

    @Override
    public int hashCode()
    {
        return config != null ? config.hashCode() : 0;
    }

    @Override
    public boolean equals( Object obj )
    {
        // NOTE(review): the cast assumes super.equals rejects non-CreateCommand
        // objects — the base class implementation is not visible here; confirm
        // it performs a type check before relying on this.
        return super.equals( obj ) && config.equals( ((CreateCommand)obj).config );
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_index_IndexCommand.java
|
1,473
|
/**
 * Command adding a relationship to an index. Extends the common add payload
 * with the relationship's start and end node ids, each serialized as 4 or 8
 * bytes depending on magnitude (see the needs-long flags).
 */
public static class AddRelationshipCommand extends IndexCommand
{
    private final long startNode;
    private final long endNode;

    AddRelationshipCommand( byte indexNameId, byte entityType, long entityId, byte keyId,
            Object value, long startNode, long endNode )
    {
        super( ADD_RELATIONSHIP_COMMAND, indexNameId, entityType, entityId, keyId, value );
        this.startNode = startNode;
        this.endNode = endNode;
    }

    public long getStartNode()
    {
        return startNode;
    }

    public long getEndNode()
    {
        return endNode;
    }

    // Header flag: whether startNode needs 8 bytes instead of 4.
    @Override
    protected byte startNodeNeedsLong()
    {
        return needsLong( startNode );
    }

    // Header flag: whether endNode needs 8 bytes instead of 4.
    @Override
    protected byte endNodeNeedsLong()
    {
        return needsLong( endNode );
    }

    @Override
    public void writeToFile( LogBuffer buffer ) throws IOException
    {
        // Common add payload first, then the two node ids.
        super.writeToFile( buffer );
        putIntOrLong( buffer, startNode );
        putIntOrLong( buffer, endNode );
    }

    @Override
    public int hashCode()
    {
        // Hashes only the subclass fields; still consistent with equals(),
        // since equal commands necessarily agree on startNode/endNode.
        int result = (int) (startNode ^ (startNode >>> 32));
        result = 31 * result + (int) (endNode ^ (endNode >>> 32));
        return result;
    }

    @Override
    public boolean equals( Object obj )
    {
        if ( !super.equals( obj ) )
        {
            return false;
        }
        // NOTE(review): assumes super.equals already rejected non-matching
        // types — the base implementation is not visible here; confirm.
        AddRelationshipCommand other = (AddRelationshipCommand) obj;
        return startNode == other.startNode && endNode == other.endNode;
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_index_IndexCommand.java
|
1,474
|
/**
 * Command adding a node entity to an index under a key/value. Pure data
 * holder — all behavior lives in the {@link IndexCommand} base class.
 */
public static class AddCommand extends IndexCommand
{
    AddCommand( byte indexNameId, byte entityType, long entityId, byte keyId, Object value )
    {
        super( ADD_COMMAND, indexNameId, entityType, entityId, keyId, value );
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_index_IndexCommand.java
|
1,475
|
/**
 * Serializable commands for the legacy index XA data source. A command captures one
 * index mutation (add/remove an entry, create/delete an index) using pre-interned
 * index-name and key ids (see IndexDefineCommand).
 *
 * Wire format: a 3-byte header written by {@link #writeHeader(LogBuffer)} followed by a
 * command-specific payload; {@link #readCommand(ReadableByteChannel, ByteBuffer)} is the
 * matching deserializer. The header bit layout is documented in
 * {@link #writeToFile(LogBuffer)}.
 */
public abstract class IndexCommand extends XaCommand
{
    // Command type discriminators, stored in the top 3 bits of the first header byte.
    static final byte DEFINE_COMMAND = (byte) 0;
    static final byte ADD_COMMAND = (byte) 1;
    static final byte ADD_RELATIONSHIP_COMMAND = (byte) 2;
    static final byte REMOVE_COMMAND = (byte) 3;
    static final byte DELETE_COMMAND = (byte) 4;
    static final byte CREATE_COMMAND = (byte) 5;

    // Entity type discriminators (1 bit in the header).
    public static final byte NODE = (byte) 0;
    public static final byte RELATIONSHIP = (byte) 1;

    // Value type discriminators (3 bits in the header).
    private static final byte VALUE_TYPE_NULL = (byte) 0;
    private static final byte VALUE_TYPE_SHORT = (byte) 1;
    private static final byte VALUE_TYPE_INT = (byte) 2;
    private static final byte VALUE_TYPE_LONG = (byte) 3;
    private static final byte VALUE_TYPE_FLOAT = (byte) 4;
    private static final byte VALUE_TYPE_DOUBLE = (byte) 5;
    private static final byte VALUE_TYPE_STRING = (byte) 6;

    private final byte commandType;
    private final byte indexNameId;
    private final byte entityType;
    private final long entityId;
    private final byte keyId;
    private final byte valueType;
    private final Object value;

    IndexCommand( byte commandType, byte indexNameId, byte entityType, long entityId, byte keyId, Object value )
    {
        this.commandType = commandType;
        this.indexNameId = indexNameId;
        this.entityType = entityType;
        this.entityId = entityId;
        this.keyId = keyId;
        this.value = value;
        this.valueType = valueTypeOf( value );
    }

    public byte getIndexNameId()
    {
        return indexNameId;
    }

    public byte getEntityType()
    {
        return entityType;
    }

    public long getEntityId()
    {
        return entityId;
    }

    public byte getKeyId()
    {
        return keyId;
    }

    public Object getValue()
    {
        return value;
    }

    @Override
    public void execute()
    {
        // Intentionally a no-op: index commands are applied by the index data source,
        // not through this method.
    }

    @Override
    public void writeToFile( LogBuffer buffer ) throws IOException
    {
        /* c: commandType
         * e: entityType
         * n: indexNameId
         * k: keyId
         * i: entityId
         * v: value type
         * u: value
         * x: 0=entityId needs 4b, 1=entityId needs 8b
         * y: 0=startNode needs 4b, 1=startNode needs 8b
         * z: 0=endNode needs 4b, 1=endNode needs 8b
         *
         * [cccv,vvex][yznn,nnnn][kkkk,kkkk]
         * [iiii,iiii] x 4 or 8
         * (either string value)
         * [llll,llll][llll,llll][llll,llll][string chars...]
         * (numeric value)
         * [uuuu,uuuu] x 2-8 (depending on value type)
         */
        writeHeader( buffer );
        putIntOrLong( buffer, entityId );
        // Value
        switch ( valueType )
        {
        case VALUE_TYPE_STRING: write3bLengthAndString( buffer, value.toString() ); break;
        case VALUE_TYPE_SHORT: buffer.putShort( ((Number) value).shortValue() ); break;
        case VALUE_TYPE_INT: buffer.putInt( ((Number) value).intValue() ); break;
        case VALUE_TYPE_LONG: buffer.putLong( ((Number) value).longValue() ); break;
        case VALUE_TYPE_FLOAT: buffer.putFloat( ((Number) value).floatValue() ); break;
        case VALUE_TYPE_DOUBLE: buffer.putDouble( ((Number) value).doubleValue() ); break;
        case VALUE_TYPE_NULL: break;
        default: throw new RuntimeException( "Unknown value type " + valueType );
        }
    }

    /**
     * Writes the 3-byte header. Bit layout (see {@link #writeToFile(LogBuffer)}):
     * byte 0 = command type (3b), value type (3b), entity type (1b), entityId-needs-long (1b);
     * byte 1 = startNode-needs-long (bit 7), endNode-needs-long (bit 6), indexNameId (low 6b);
     * byte 2 = keyId.
     */
    protected void writeHeader( LogBuffer buffer ) throws IOException
    {
        buffer.put( (byte)((commandType<<5) | (valueType<<2) | (entityType<<1) | (needsLong( entityId ))) );
        buffer.put( (byte)((startNodeNeedsLong()<<7) | (endNodeNeedsLong()<<6) | (indexNameId)) );
        buffer.put( keyId );
    }

    /** Writes {@code id} as 8 bytes if it exceeds Integer.MAX_VALUE, otherwise 4 bytes. */
    protected static void putIntOrLong( LogBuffer buffer, long id ) throws IOException
    {
        if ( needsLong( id ) == 1 )
        {
            buffer.putLong( id );
        }
        else
        {
            buffer.putInt( (int)id );
        }
    }

    /** @return 1 if {@code value} cannot be stored in 4 bytes, else 0. */
    protected static byte needsLong( long value )
    {
        return value > Integer.MAX_VALUE ? (byte)1 : (byte)0;
    }

    // Overridden by AddRelationshipCommand; all other commands have no node ids.
    protected byte startNodeNeedsLong()
    {
        return 0;
    }

    protected byte endNodeNeedsLong()
    {
        return 0;
    }

    /**
     * Maps a value object to its wire-format type tag. Any Number that is not a
     * Float/Double/Long/Short is serialized as an int; any non-Number, non-null value
     * is serialized via its toString().
     */
    private static byte valueTypeOf( Object value )
    {
        byte valueType = 0;
        if ( value == null )
        {
            valueType = VALUE_TYPE_NULL;
        }
        else if ( value instanceof Number )
        {
            if ( value instanceof Float )
            {
                valueType = VALUE_TYPE_FLOAT;
            }
            else if ( value instanceof Double )
            {
                valueType = VALUE_TYPE_DOUBLE;
            }
            else if ( value instanceof Long )
            {
                valueType = VALUE_TYPE_LONG;
            }
            else if ( value instanceof Short )
            {
                valueType = VALUE_TYPE_SHORT;
            }
            else
            {
                valueType = VALUE_TYPE_INT;
            }
        }
        else
        {
            valueType = VALUE_TYPE_STRING;
        }
        return valueType;
    }

    /** True for per-entry commands; overridden to false for index-level commands. */
    public boolean isConsideredNormalWriteCommand()
    {
        return true;
    }

    /** Adds an entity to an index under a key/value pair. */
    public static class AddCommand extends IndexCommand
    {
        AddCommand( byte indexNameId, byte entityType, long entityId, byte keyId, Object value )
        {
            super( ADD_COMMAND, indexNameId, entityType, entityId, keyId, value );
        }
    }

    /** Adds a relationship to an index, carrying its start and end node ids as well. */
    public static class AddRelationshipCommand extends IndexCommand
    {
        private final long startNode;
        private final long endNode;

        AddRelationshipCommand( byte indexNameId, byte entityType, long entityId, byte keyId,
                Object value, long startNode, long endNode )
        {
            super( ADD_RELATIONSHIP_COMMAND, indexNameId, entityType, entityId, keyId, value );
            this.startNode = startNode;
            this.endNode = endNode;
        }

        public long getStartNode()
        {
            return startNode;
        }

        public long getEndNode()
        {
            return endNode;
        }

        @Override
        protected byte startNodeNeedsLong()
        {
            return needsLong( startNode );
        }

        @Override
        protected byte endNodeNeedsLong()
        {
            return needsLong( endNode );
        }

        @Override
        public void writeToFile( LogBuffer buffer ) throws IOException
        {
            super.writeToFile( buffer );
            putIntOrLong( buffer, startNode );
            putIntOrLong( buffer, endNode );
        }

        @Override
        public int hashCode()
        {
            // Folds in the base fields so equal commands always hash equal.
            int result = super.hashCode();
            result = 31 * result + (int) (startNode ^ (startNode >>> 32));
            result = 31 * result + (int) (endNode ^ (endNode >>> 32));
            return result;
        }

        @Override
        public boolean equals( Object obj )
        {
            if ( !super.equals( obj ) )
            {
                return false;
            }
            AddRelationshipCommand other = (AddRelationshipCommand) obj;
            return startNode == other.startNode && endNode == other.endNode;
        }
    }

    /** Removes an entity from an index for a key/value pair. */
    public static class RemoveCommand extends IndexCommand
    {
        RemoveCommand( byte indexNameId, byte entityType, long entityId, byte keyId, Object value )
        {
            super( REMOVE_COMMAND, indexNameId, entityType, entityId, keyId, value );
        }
    }

    /** Deletes an entire index; carries no entity/key/value payload. */
    public static class DeleteCommand extends IndexCommand
    {
        DeleteCommand( byte indexNameId, byte entityType )
        {
            super( DELETE_COMMAND, indexNameId, entityType, 0L, (byte)0, null );
        }

        @Override
        public void writeToFile( LogBuffer buffer ) throws IOException
        {
            // Header only; there is no payload for a delete.
            writeHeader( buffer );
        }

        @Override
        public boolean isConsideredNormalWriteCommand()
        {
            return false;
        }
    }

    /** Creates an index with the given provider configuration. */
    public static class CreateCommand extends IndexCommand
    {
        private final Map<String, String> config;

        CreateCommand( byte indexNameId, byte entityType, Map<String, String> config )
        {
            super( CREATE_COMMAND, indexNameId, entityType, 0L, (byte)0, null );
            this.config = config;
        }

        public Map<String, String> getConfig()
        {
            return config;
        }

        @Override
        public void writeToFile( LogBuffer buffer ) throws IOException
        {
            // Header, then the config map as a 2-byte count of 2b-length-prefixed strings.
            writeHeader( buffer );
            buffer.putShort( (short)config.size() );
            for ( Map.Entry<String, String> entry : config.entrySet() )
            {
                write2bLengthAndString( buffer, entry.getKey() );
                write2bLengthAndString( buffer, entry.getValue() );
            }
        }

        @Override
        public boolean isConsideredNormalWriteCommand()
        {
            return false;
        }

        @Override
        public int hashCode()
        {
            // Folds in the base fields so equal commands always hash equal.
            return 31 * super.hashCode() + (config != null ? config.hashCode() : 0);
        }

        @Override
        public boolean equals( Object obj )
        {
            // super.equals guarantees obj is a CreateCommand here, since it compares commandType.
            return super.equals( obj ) && config.equals( ((CreateCommand)obj).config );
        }
    }

    /** Reads back a value written by {@link #writeToFile(LogBuffer)} for the given type tag. */
    private static Object readValue( byte valueType, ReadableByteChannel channel, ByteBuffer buffer )
            throws IOException
    {
        switch ( valueType )
        {
        case VALUE_TYPE_NULL: return null;
        case VALUE_TYPE_SHORT: return readShort( channel, buffer );
        case VALUE_TYPE_INT: return readInt( channel, buffer );
        case VALUE_TYPE_LONG: return readLong( channel, buffer );
        case VALUE_TYPE_FLOAT: return readFloat( channel, buffer );
        case VALUE_TYPE_DOUBLE: return readDouble( channel, buffer );
        case VALUE_TYPE_STRING: return read3bLengthAndString( channel, buffer );
        default: throw new RuntimeException( "Unknown value type " + valueType );
        }
    }

    /**
     * Deserializes one command from the channel, or returns null on a truncated entry.
     * Must mirror the bit layout produced by {@link #writeHeader(LogBuffer)}.
     */
    public static XaCommand readCommand( ReadableByteChannel channel, ByteBuffer buffer ) throws IOException
    {
        byte[] headerBytes = readBytes( channel, new byte[3] );
        if ( headerBytes == null ) return null;
        byte commandType = (byte)((headerBytes[0] & 0xE0) >> 5);
        byte valueType = (byte)((headerBytes[0] & 0x1C) >> 2);
        byte entityType = (byte)((headerBytes[0] & 0x2) >> 1);
        boolean entityIdNeedsLong = (headerBytes[0] & 0x1) > 0;
        byte indexNameId = (byte)(headerBytes[1] & 0x3F);
        byte keyId = headerBytes[2];
        switch ( commandType )
        {
        case DEFINE_COMMAND:
            Map<String, Byte> indexNames = IndexDefineCommand.readMap( channel, buffer );
            Map<String, Byte> keys = IndexDefineCommand.readMap( channel, buffer );
            if ( indexNames == null || keys == null ) return null;
            return new IndexDefineCommand( indexNames, keys );
        case CREATE_COMMAND:
            Map<String, String> config = read2bMap( channel, buffer );
            if ( config == null ) return null;
            return new CreateCommand( indexNameId, entityType, config );
        case DELETE_COMMAND:
            return new DeleteCommand( indexNameId, entityType );
        case ADD_COMMAND: case REMOVE_COMMAND: case ADD_RELATIONSHIP_COMMAND:
            Number entityId = entityIdNeedsLong ? (Number)readLong( channel, buffer ) : (Number)readInt( channel, buffer );
            if ( entityId == null ) return null;
            Object value = readValue( valueType, channel, buffer );
            if ( valueType != VALUE_TYPE_NULL && value == null ) return null;
            if ( commandType == ADD_COMMAND )
            {
                return new AddCommand( indexNameId, entityType, entityId.longValue(), keyId, value );
            }
            else if ( commandType == REMOVE_COMMAND )
            {
                return new RemoveCommand( indexNameId, entityType, entityId.longValue(), keyId, value );
            }
            else
            {
                // BUGFIX: writeHeader stores the startNode flag in bit 7 (<<7), so the
                // read mask must be 0x80 — the previous 0x8 read an indexNameId bit instead,
                // corrupting relationship commands whose start node id needed 8 bytes.
                boolean startNodeNeedsLong = (headerBytes[1] & 0x80) > 0;
                boolean endNodeNeedsLong = (headerBytes[1] & 0x40) > 0;
                Number startNode = startNodeNeedsLong ? (Number)readLong( channel, buffer ) : (Number)readInt( channel, buffer );
                Number endNode = endNodeNeedsLong ? (Number)readLong( channel, buffer ) : (Number)readInt( channel, buffer );
                if ( startNode == null || endNode == null ) return null;
                return new AddRelationshipCommand( indexNameId, entityType, entityId.longValue(),
                        keyId, value, startNode.longValue(), endNode.longValue() );
            }
        default: throw new RuntimeException( "Unknown command type " + commandType );
        }
    }

    @Override
    public int hashCode()
    {
        // Consistent with equals(): same fields, same order.
        int result = commandType;
        result = 31 * result + indexNameId;
        result = 31 * result + entityType;
        result = 31 * result + (int) (entityId ^ (entityId >>> 32));
        result = 31 * result + keyId;
        result = 31 * result + (value != null ? value.hashCode() : 0);
        return result;
    }

    @Override
    public boolean equals( Object obj )
    {
        if ( this == obj )
        {
            return true;
        }
        if ( !(obj instanceof IndexCommand) )
        {
            // Also covers obj == null; previously this cast threw ClassCastException/NPE.
            return false;
        }
        IndexCommand other = (IndexCommand) obj;
        boolean equals = commandType == other.commandType &&
                entityType == other.entityType &&
                indexNameId == other.indexNameId &&
                keyId == other.keyId &&
                valueType == other.valueType;
        if ( !equals )
        {
            return false;
        }
        return value == null ? other.value == null : value.equals( other.value );
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_index_IndexCommand.java
|
1,476
|
/**
 * IndexHits backed by an in-memory list. Iteration is single-pass over the list's
 * iterator; scores are not supported (currentScore() is NaN) and close() is a no-op.
 */
private static class IteratorIndexHits<T> implements IndexHits<T>
{
    private final List<T> hits;
    private final Iterator<T> cursor;

    IteratorIndexHits( List<T> hits )
    {
        this.hits = hits;
        this.cursor = hits.iterator();
    }

    @Override
    public boolean hasNext()
    {
        return cursor.hasNext();
    }

    @Override
    public T next()
    {
        return cursor.next();
    }

    @Override
    public void remove()
    {
        cursor.remove();
    }

    @Override
    public IndexHits<T> iterator()
    {
        // This object is both the iterable and its (single-use) iterator.
        return this;
    }

    @Override
    public int size()
    {
        return hits.size();
    }

    @Override
    public void close()
    {
        // Nothing to release for a list-backed result.
    }

    @Override
    public T getSingle()
    {
        return IteratorUtil.singleOrNull( (Iterator<T>) this );
    }

    @Override
    public float currentScore()
    {
        // Scores are meaningless for a plain list result.
        return Float.NaN;
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_index_DummyIndexExtensionFactory.java
|
1,477
|
/**
 * Test stub of RelationshipIndex: inherits the generic DummyIndex behaviour and
 * leaves all relationship-specific lookup methods unsupported.
 */
private class DummyRelationshipIndex extends DummyIndex<Relationship> implements RelationshipIndex
{
    public DummyRelationshipIndex( String name, InternalAbstractGraphDatabase db )
    {
        super( name, db );
    }

    @Override
    public Class<Relationship> getEntityType()
    {
        return Relationship.class;
    }

    // The three query/get variants below are intentionally not implemented —
    // tests using this dummy are not expected to perform relationship lookups.
    @Override
    public IndexHits<Relationship> get( String key, Object valueOrNull, Node startNodeOrNull,
            Node endNodeOrNull )
    {
        throw new UnsupportedOperationException();
    }

    @Override
    public IndexHits<Relationship> query( String key, Object queryOrQueryObjectOrNull,
            Node startNodeOrNull, Node endNodeOrNull )
    {
        throw new UnsupportedOperationException();
    }

    @Override
    public IndexHits<Relationship> query( Object queryOrQueryObjectOrNull,
            Node startNodeOrNull, Node endNodeOrNull )
    {
        throw new UnsupportedOperationException();
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_index_DummyIndexExtensionFactory.java
|
1,478
|
/**
 * Base type for entities that a lock service can lock. The private constructor
 * restricts subclasses to those nested in this class, and subclasses are forced to
 * supply value-based equality/hashing so instances can act as map keys.
 */
protected static abstract class LockedEntity
{
    private LockedEntity()
    {
        // all instances defined in this class
    }

    @Override
    public final String toString()
    {
        StringBuilder result = new StringBuilder( getClass().getSimpleName() );
        result.append( '[' );
        toString( result );
        result.append( ']' );
        return result.toString();
    }

    /** Appends this entity's identifying state to the given builder. */
    abstract void toString( StringBuilder repr );

    @Override
    public abstract int hashCode();

    @Override
    public abstract boolean equals( Object obj );
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_locking_AbstractLockService.java
|
1,479
|
/**
 * LockedEntity keyed by a node id; equality and hashing are based solely on the id.
 */
static final class LockedNode extends LockedEntity
{
    private final long nodeId;

    LockedNode( long nodeId )
    {
        this.nodeId = nodeId;
    }

    @Override
    void toString( StringBuilder repr )
    {
        repr.append( "id=" ).append( nodeId );
    }

    @Override
    public int hashCode()
    {
        // Standard folding of a 64-bit id into 32 bits.
        return (int) (nodeId ^ (nodeId >>> 32));
    }

    @Override
    public boolean equals( Object obj )
    {
        if ( !(obj instanceof LockedNode) )
        {
            return false;
        }
        return nodeId == ((LockedNode) obj).nodeId;
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_locking_AbstractLockService.java
|
1,480
|
public final class IndexEntryLock
{
private final int labelId;
private final int propertyKeyId;
private final String propertyValue;
public IndexEntryLock( int labelId, int propertyKeyId, String propertyValue )
{
this.labelId = labelId;
this.propertyKeyId = propertyKeyId;
this.propertyValue = propertyValue;
}
public int labelId()
{
return labelId;
}
public int propertyKeyId()
{
return propertyKeyId;
}
public String propertyValue()
{
return propertyValue;
}
@Override
public String toString()
{
return format( "IndexEntryLock{labelId=%d, propertyKeyId=%d, propertyValue=%s}",
labelId, propertyKeyId, propertyValue );
}
@Override
public boolean equals( Object obj )
{
if ( this == obj )
{
return true;
}
if ( obj instanceof IndexEntryLock )
{
IndexEntryLock that = (IndexEntryLock) obj;
return labelId == that.labelId && propertyKeyId == that.propertyKeyId &&
propertyValue.equals( that.propertyValue );
}
return false;
}
@Override
public int hashCode()
{
int result = labelId;
result = 31 * result + propertyKeyId;
result = 31 * result + propertyValue.hashCode();
return result;
}
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_locking_IndexEntryLock.java
|
1,481
|
// Benchmark mode: threads contend on a single lock and repeatedly hand it over
// to one another, sweeping the thread count from minThreads to maxThreads.
HANDOVER
{
    @Override
    void execute( Implementation impl )
    {
        // Workload parameters are tunable via system properties.
        int minThreads = Integer.getInteger( "minThreads", 1 );
        int maxThreads = Integer.getInteger( "maxThreads", cores() * 2 );
        int iterations = Integer.getInteger( "iterations", 100 );
        int lockCount = Integer.getInteger( "lockCount", 100_000 );
        for ( int threads = minThreads; threads <= maxThreads; threads++ )
        {
            System.out.printf( "=== %s / %s - %s threads ===%n", this, impl, threads );
            executeHandover( impl, threads, iterations, lockCount );
        }
    }
};
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_locking_LockServiceMicroBenchmark.java
|
1,482
|
// Test task that acquires the write lock on one node and remembers the Lock handle;
// the paired 'release' task releases that handle later (possibly as a separate step
// in the same thread's script).
private static class LockNode implements ThreadRepository.Task
{
    private final LockService locks;
    private final long nodeId;
    // Set by perform(); read by the release task below.
    private Lock lock;

    LockNode( LockService locks, long nodeId )
    {
        this.locks = locks;
        this.nodeId = nodeId;
    }

    // Companion task releasing the lock acquired by perform().
    private final ThreadRepository.Task release = new ThreadRepository.Task()
    {
        @Override
        public void perform() throws Exception
        {
            lock.release();
        }
    };

    @Override
    public void perform() throws Exception
    {
        this.lock = locks.acquireNodeLock( nodeId, LockService.LockType.WRITE_LOCK );
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_locking_ReentrantLockServiceTest.java
|
1,483
|
/**
 * Tests for ReentrantLockService: queue ordering of waiting owners, re-entrance,
 * blocking on a held lock, cleanup of released locks, and the toString() of handles.
 * Multi-threaded scenarios are scripted through ThreadRepository tasks/signals.
 */
public class ReentrantLockServiceTest
{
    // All scripted threads must finish within this bound.
    @Rule
    public final ThreadRepository threads = new ThreadRepository( 5, TimeUnit.SECONDS );

    @Test
    public void shouldFormLinkedListOfWaitingLockOwners() throws Exception
    {
        // given
        ReentrantLockService.OwnerQueueElement<Integer> queue = new ReentrantLockService.OwnerQueueElement<>( 0 );
        ReentrantLockService.OwnerQueueElement<Integer> element1 = new ReentrantLockService.OwnerQueueElement<>( 1 );
        ReentrantLockService.OwnerQueueElement<Integer> element2 = new ReentrantLockService.OwnerQueueElement<>( 2 );
        ReentrantLockService.OwnerQueueElement<Integer> element3 = new ReentrantLockService.OwnerQueueElement<>( 3 );
        ReentrantLockService.OwnerQueueElement<Integer> element4 = new ReentrantLockService.OwnerQueueElement<>( 4 );
        // when
        queue.enqueue( element1 );
        // then
        assertEquals( 1, queue.dequeue().intValue() );
        // when
        queue.enqueue( element2 );
        queue.enqueue( element3 );
        queue.enqueue( element4 );
        // then
        assertEquals( 2, queue.dequeue().intValue() );
        assertEquals( 3, queue.dequeue().intValue() );
        assertEquals( 4, queue.dequeue().intValue() );
        assertEquals( "should get the current element when dequeuing the current head", 4, queue.dequeue().intValue() );
        assertEquals( "should get null when dequeuing from a dead list", null, queue.dequeue() );
        assertEquals( "should get null continuously when dequeuing from a dead list", null, queue.dequeue() );
    }

    @Test
    public void shouldAllowReEntrance() throws Exception
    {
        // given
        LockService locks = new ReentrantLockService();
        ThreadRepository.Events events = threads.events();
        LockNode lock1once = new LockNode( locks, 1 );
        LockNode lock1again = new LockNode( locks, 1 );
        LockNode lock1inOtherThread = new LockNode( locks, 1 );
        ThreadRepository.Signal lockedOnce = threads.signal();
        ThreadRepository.Signal ready = threads.signal();
        // when: thread 1 takes the lock twice before releasing; thread 2 waits for it.
        threads.execute( lock1once, ready.await(), lockedOnce, lock1again,
                events.trigger( "Double Locked" ),
                lock1once.release, lock1again.release );
        threads.execute( ready, lockedOnce.await(), lock1inOtherThread,
                events.trigger( "Other Thread" ),
                lock1inOtherThread.release );
        // then
        events.assertInOrder( "Double Locked", "Other Thread" );
    }

    @Test
    public void shouldBlockOnLockedLock() throws Exception
    {
        // given
        LockService locks = new ReentrantLockService();
        LockNode lockSameNode = new LockNode( locks, 17 );
        ThreadRepository.Events events = threads.events();
        ThreadRepository.Signal ready = threads.signal();
        // when: the other thread must park while this thread holds node 17.
        try ( Lock ignored = locks.acquireNodeLock( 17, LockService.LockType.WRITE_LOCK ) )
        {
            ThreadRepository.ThreadInfo thread =
                    threads.execute( ready, lockSameNode, events.trigger( "locked" ), lockSameNode.release );
            ready.awaitNow();
            // then
            assertTrue( awaitParked( thread, 5, TimeUnit.SECONDS ) );
            assertTrue( events.snapshot().isEmpty() );
        }
        events.assertInOrder( "locked" );
    }

    @Test
    public void shouldNotLeaveResidualLockStateAfterAllLocksHaveBeenReleased() throws Exception
    {
        // given
        ReentrantLockService locks = new ReentrantLockService();
        // when
        locks.acquireNodeLock( 42, LockService.LockType.WRITE_LOCK ).release();
        // then: the lock map must be empty again.
        assertEquals( 0, locks.lockCount() );
    }

    @Test
    public void shouldPresentLockStateInStringRepresentationOfLock() throws Exception
    {
        // given
        LockService locks = new ReentrantLockService();
        Lock first, second;
        // when
        try ( Lock lock = first = locks.acquireNodeLock( 666, LockService.LockType.WRITE_LOCK ) )
        {
            // then
            assertEquals( "LockedNode[id=666; HELD_BY=1*" + Thread.currentThread() + "]", lock.toString() );
            // when: re-entrant acquisition bumps the hold count shown in toString().
            try ( Lock inner = second = locks.acquireNodeLock( 666, LockService.LockType.WRITE_LOCK ) )
            {
                assertEquals( "LockedNode[id=666; HELD_BY=2*" + Thread.currentThread() + "]", lock.toString() );
                assertEquals( lock.toString(), inner.toString() );
            }
            // then
            assertEquals( "LockedNode[id=666; HELD_BY=1*" + Thread.currentThread() + "]", lock.toString() );
            assertEquals( "LockedNode[id=666; RELEASED]", second.toString() );
        }
        // then
        assertEquals( "LockedNode[id=666; RELEASED]", first.toString() );
        assertEquals( "LockedNode[id=666; RELEASED]", second.toString() );
    }

    // Acquires a node write lock and keeps the handle so the 'release' task can free it.
    private static class LockNode implements ThreadRepository.Task
    {
        private final LockService locks;
        private final long nodeId;
        private Lock lock;

        LockNode( LockService locks, long nodeId )
        {
            this.locks = locks;
            this.nodeId = nodeId;
        }

        private final ThreadRepository.Task release = new ThreadRepository.Task()
        {
            @Override
            public void perform() throws Exception
            {
                lock.release();
            }
        };

        @Override
        public void perform() throws Exception
        {
            this.lock = locks.acquireNodeLock( nodeId, LockService.LockType.WRITE_LOCK );
        }
    }

    // Busy-polls the thread's stack until it is parked in Unsafe.park (i.e. blocked on
    // the lock) or the timeout elapses.
    private static boolean awaitParked( ThreadRepository.ThreadInfo thread, long timeout, TimeUnit unit )
    {
        boolean parked = false;
        for ( long end = System.currentTimeMillis() + unit.toMillis( timeout ); System.currentTimeMillis() < end; )
        {
            StackTraceElement frame = thread.getStackTrace()[0];
            if ( "park".equals( frame.getMethodName() ) && "sun.misc.Unsafe".equals( frame.getClassName() ) )
            {
                // Matches both WAITING and TIMED_WAITING.
                if ( thread.getState().name().endsWith( "WAITING" ) )
                {
                    parked = true;
                    break;
                }
            }
        }
        return parked;
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_locking_ReentrantLockServiceTest.java
|
1,484
|
// Queue node for lock owners waiting on a single lock. The instance that is stored in
// the lock map doubles as the queue's anchor; waiters are linked through head/tail.
// An element whose 'owner' is null is "dead": removed from the map and unusable.
static final class OwnerQueueElement<OWNER>
{
    volatile OWNER owner;
    int count = 1; // does not need to be volatile, only updated by the owning thread.

    OwnerQueueElement( OWNER owner )
    {
        this.owner = owner;
    }

    /**
     * In the first element, head will point to the next waiting element, and tail is where we enqueue new elements.
     * In the waiting elements, head will point to the first element, and tail to the next element.
     */
    private OwnerQueueElement<OWNER> head = this, tail = this;

    /**
     * Return true if the item was enqueued, or false if this LockOwner is dead.
     * A dead LockOwner is no longer reachable from the map, and so no longer participates in the lock.
     */
    synchronized boolean enqueue( OwnerQueueElement<OWNER> last )
    {
        if ( owner == null )
        {
            return false; // don't enqueue into a dead queue
        }
        last.head = this;
        last.tail = this;
        tail.tail = last;
        this.tail = last;
        if ( head == this )
        {
            head = last;
        }
        return true;
    }

    // Pops the next waiting owner and hands this anchor element over to it by copying
    // its 'owner' reference; the popped element is marked dead so late enqueues fail.
    synchronized OWNER dequeue()
    {
        OwnerQueueElement<OWNER> first = this.head;
        (this.head = first.tail).head = this;
        first.tail = this;
        if ( this.head == this )
        {
            this.tail = this; // don't leave junk references around!
        }
        try
        {
            return (this.owner = first.owner);
        }
        finally
        {
            first.owner = null; // mark 'first' as dead.
        }
    }

    @Override
    public String toString()
    {
        // e.g. "2*Thread[main,...]" — hold count and current owner.
        return String.format( "%s*%s", count, owner );
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_locking_ReentrantLockService.java
|
1,485
|
/**
 * Re-entrant, per-entity lock service. Each locked entity maps to an
 * OwnerQueueElement holding the owning thread; waiters enqueue themselves and park
 * until the lock is handed over. Fairness and hand-over are implemented by the
 * queue element itself (see OwnerQueueElement).
 */
public final class ReentrantLockService extends AbstractLockService<ReentrantLockService.OwnerQueueElement<Thread>>
{
    private final ConcurrentMap<LockedEntity, OwnerQueueElement<Thread>> locks = new ConcurrentHashMap<>();
    // Upper bound for each park, so threads re-check the lock state periodically.
    private final long maxParkNanos;

    // Number of currently held locks; exposed for tests.
    int lockCount()
    {
        return locks.size();
    }

    public ReentrantLockService()
    {
        this( 1, TimeUnit.MILLISECONDS );
    }

    public ReentrantLockService( long maxParkTime, TimeUnit unit )
    {
        this.maxParkNanos = unit.toNanos( maxParkTime );
    }

    @Override
    protected OwnerQueueElement<Thread> acquire( LockedEntity key )
    {
        OwnerQueueElement<Thread> suggestion = new OwnerQueueElement<>( currentThread() );
        for(;;)
        {
            OwnerQueueElement<Thread> owner = locks.putIfAbsent( key, suggestion );
            if ( owner == null )
            { // Our suggestion was accepted, we got the lock
                return suggestion;
            }

            Thread other = owner.owner;
            if ( other == currentThread() )
            { // the lock has been handed to us (or we are re-entering), claim it!
                owner.count++;
                return owner;
            }

            // Make sure that we only add to the queue once, and if that addition fails (because the queue is dead
            // - i.e. has been removed from the map), retry form the top of the loop immediately.
            if ( suggestion.head == suggestion ) // true if enqueue() has not been invoked (i.e. first time around)
            { // otherwise it has already been enqueued, and we are in a spurious (or timed) wake up
                if ( !owner.enqueue( suggestion ) )
                {
                    continue; // the lock has already been released, the queue is dead, retry!
                }
            }

            parkNanos( key, maxParkNanos );
        }
    }

    @Override
    @SuppressWarnings("SynchronizationOnLocalVariableOrMethodParameter")
    protected void release( LockedEntity key, OwnerQueueElement<Thread> ownerQueueElement )
    {
        // Only the outermost release (count reaches 0) actually hands the lock on.
        if ( 0 == --ownerQueueElement.count )
        {
            Thread nextThread;
            synchronized ( ownerQueueElement )
            {
                nextThread = ownerQueueElement.dequeue();
                if ( nextThread == currentThread() )
                { // no more threads in the queue, remove this list
                    locks.remove( key, ownerQueueElement ); // done under synchronization to honour definition of 'dead'
                    nextThread = null; // to make unpark() a no-op.
                }
            }
            unpark( nextThread );
        }
    }

    /**
     * Element in a queue of owners. Contains two fields {@link #head} and {@link #tail} which form the queue.
     *
     * Example queue with 3 members:
     *
     * <pre>
     * locks -> [H]--+ <+
     *          [T]  |  |
     *          ^|   V  |
     *          ||  [H]-+
     *          ||  [T] ^
     *          ||   |  |
     *          ||   V  |
     *          |+->[H]-+
     *          +---[T]
     * </pre>
     * @param <OWNER> Type of the object that owns (or wishes to own) the lock.
     * In practice this is always {@link Thread}, only a parameter for testing purposes.
     */
    static final class OwnerQueueElement<OWNER>
    {
        volatile OWNER owner;
        int count = 1; // does not need to be volatile, only updated by the owning thread.

        OwnerQueueElement( OWNER owner )
        {
            this.owner = owner;
        }

        /**
         * In the first element, head will point to the next waiting element, and tail is where we enqueue new elements.
         * In the waiting elements, head will point to the first element, and tail to the next element.
         */
        private OwnerQueueElement<OWNER> head = this, tail = this;

        /**
         * Return true if the item was enqueued, or false if this LockOwner is dead.
         * A dead LockOwner is no longer reachable from the map, and so no longer participates in the lock.
         */
        synchronized boolean enqueue( OwnerQueueElement<OWNER> last )
        {
            if ( owner == null )
            {
                return false; // don't enqueue into a dead queue
            }
            last.head = this;
            last.tail = this;
            tail.tail = last;
            this.tail = last;
            if ( head == this )
            {
                head = last;
            }
            return true;
        }

        // Hands the lock to the next waiter by copying its owner into this anchor
        // element and marking the popped element dead.
        synchronized OWNER dequeue()
        {
            OwnerQueueElement<OWNER> first = this.head;
            (this.head = first.tail).head = this;
            first.tail = this;
            if ( this.head == this )
            {
                this.tail = this; // don't leave junk references around!
            }
            try
            {
                return (this.owner = first.owner);
            }
            finally
            {
                first.owner = null; // mark 'first' as dead.
            }
        }

        @Override
        public String toString()
        {
            return String.format( "%s*%s", count, owner );
        }
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_locking_ReentrantLockService.java
|
1,486
|
static abstract class MeasuringThread extends Thread
{
final int iterations;
long minTime = Long.MAX_VALUE, maxTime, totalTime;
MeasuringThread( int iterations )
{
this.iterations = iterations;
}
@Override
public final void run()
{
init();
for ( int i = 0; i < iterations; i++ )
{
execute();
}
}
void init()
{
}
void update( long time )
{
minTime = min( minTime, time );
maxTime = max( maxTime, time );
totalTime += time;
}
protected abstract void execute();
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_locking_LockServiceMicroBenchmark.java
|
1,487
|
/**
 * Benchmark thread that repeatedly acquires (and immediately releases) the write lock
 * on one node, timing only the acquisition. When {@code reentry} is set, the lock is
 * pre-acquired once in {@link #init()} — and deliberately never released — so every
 * measured acquisition is re-entrant.
 */
static class LockingThread extends MeasuringThread
{
    private final LockService locks;
    private final long nodeId;
    private final boolean reentry;

    LockingThread( long nodeId, LockService locks, int lockCount, boolean reentry )
    {
        super( lockCount );
        this.locks = locks;
        this.nodeId = nodeId;
        this.reentry = reentry;
    }

    @Override
    void init()
    {
        if ( reentry )
        {
            // Held for the whole run on purpose; makes each execute() a re-entrant acquire.
            locks.acquireNodeLock( nodeId, LockService.LockType.WRITE_LOCK );
        }
    }

    @Override
    protected void execute()
    {
        long start = nanoTime();
        Lock lock = locks.acquireNodeLock( nodeId, LockService.LockType.WRITE_LOCK );
        update( nanoTime() - start );
        lock.release();
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_locking_LockServiceMicroBenchmark.java
|
1,488
|
// Implementation under benchmark: the park/unpark-based ReentrantLockService.
REENTRANT_LOCK_SERVICE
{
    @Override
    LockService create()
    {
        return new ReentrantLockService();
    }
};
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_locking_LockServiceMicroBenchmark.java
|
1,489
|
// Implementation under benchmark: the transaction lock manager adapted to the
// LockService interface (see AdaptedLockManager).
LOCK_MANAGER
{
    @Override
    LockService create()
    {
        return new AdaptedLockManager();
    }
},
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_locking_LockServiceMicroBenchmark.java
|
1,490
|
// Benchmark mode: each thread pre-holds its lock, so every measured acquisition is
// a re-entrant acquire (the 'true' flag to executeUncontended).
REENTRY
{
    @Override
    void execute( Implementation impl )
    {
        // Workload parameters are tunable via system properties.
        int minThreads = Integer.getInteger( "minThreads", 1 );
        int maxThreads = Integer.getInteger( "maxThreads", cores() * 2 );
        int iterations = Integer.getInteger( "iterations", 100 );
        int lockCount = Integer.getInteger( "lockCount", 100_000 );
        for ( int threads = minThreads; threads <= maxThreads; threads++ )
        {
            System.out.printf( "=== %s / %s - %s threads ===%n", this, impl, threads );
            executeUncontended( impl, threads, iterations, lockCount, true );
        }
    }
},
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_locking_LockServiceMicroBenchmark.java
|
1,491
|
/**
 * Handle for a held lock. Implements {@link AutoCloseable} purely so locks can be
 * released by try-with-resources; {@link #close()} just delegates to
 * {@link #release()}.
 */
public abstract class Lock implements AutoCloseable
{
    /** Releases this lock. */
    public abstract void release();

    @Override
    public final void close()
    {
        release();
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_locking_Lock.java
|
1,492
|
// Benchmark mode: every thread locks its own node, so acquisitions never contend
// (the 'false' flag to executeUncontended disables re-entry).
UNCONTENDED
{
    @Override
    void execute( Implementation impl )
    {
        // Workload parameters are tunable via system properties.
        int minThreads = Integer.getInteger( "minThreads", 1 );
        int maxThreads = Integer.getInteger( "maxThreads", cores() * 2 );
        int iterations = Integer.getInteger( "iterations", 100 );
        int lockCount = Integer.getInteger( "lockCount", 100_000 );
        for ( int threads = minThreads; threads <= maxThreads; threads++ )
        {
            System.out.printf( "=== %s / %s - %s threads ===%n", this, impl, threads );
            executeUncontended( impl, threads, iterations, lockCount, false );
        }
    }
},
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_locking_LockServiceMicroBenchmark.java
|
1,493
|
// Lock handle returned by the adapted lock manager: releasing it frees the write lock
// using the calling thread's owner token, so release() must run on the same thread
// that acquired the lock.
private class WriteRelease extends Lock
{
    private final AbstractLockService.LockedNode resource;

    WriteRelease( AbstractLockService.LockedNode resource )
    {
        this.resource = resource;
    }

    @Override
    public void release()
    {
        releaseWriteLock( resource, threadMark.get() );
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_locking_LockServiceMicroBenchmark.java
|
1,494
|
// Dummy Transaction used only as a per-thread identity token for lock ownership in the
// benchmark; none of the Transaction operations are ever expected to be invoked.
static class ThreadMark implements Transaction
{
    @Override
    public void commit()
            throws HeuristicMixedException, HeuristicRollbackException, RollbackException, SecurityException,
            SystemException
    {
        throw new UnsupportedOperationException( "not implemented" );
    }

    @Override
    public boolean delistResource( XAResource xaRes, int flag ) throws IllegalStateException, SystemException
    {
        throw new UnsupportedOperationException( "not implemented" );
    }

    @Override
    public boolean enlistResource( XAResource xaRes )
            throws IllegalStateException, RollbackException, SystemException
    {
        throw new UnsupportedOperationException( "not implemented" );
    }

    @Override
    public int getStatus() throws SystemException
    {
        throw new UnsupportedOperationException( "not implemented" );
    }

    @Override
    public void registerSynchronization( Synchronization synch )
            throws IllegalStateException, RollbackException, SystemException
    {
        throw new UnsupportedOperationException( "not implemented" );
    }

    @Override
    public void rollback() throws IllegalStateException, SystemException
    {
        throw new UnsupportedOperationException( "not implemented" );
    }

    @Override
    public void setRollbackOnly() throws IllegalStateException, SystemException
    {
        throw new UnsupportedOperationException( "not implemented" );
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_locking_LockServiceMicroBenchmark.java
|
1,495
|
// Per-thread dummy Transaction used as the lock-owner token when adapting the
// transaction lock manager to the LockService interface.
private final ThreadLocal<Transaction> threadMark = new ThreadLocal<Transaction>(){
    @Override
    protected Transaction initialValue()
    {
        return new ThreadMark();
    }
};
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_locking_LockServiceMicroBenchmark.java
|
1,496
|
/**
 * Benchmark adapter exposing the transaction lock manager (LockManagerImpl) through
 * the simpler LockService interface. Lock ownership is keyed on a per-thread dummy
 * Transaction (ThreadMark), so acquire and release must happen on the same thread.
 */
private static class AdaptedLockManager extends LockManagerImpl implements LockService
{
    // One owner token per thread; see ThreadMark below.
    private final ThreadLocal<Transaction> threadMark = new ThreadLocal<Transaction>(){
        @Override
        protected Transaction initialValue()
        {
            return new ThreadMark();
        }
    };

    AdaptedLockManager()
    {
        super( new RagManager() );
    }

    @Override
    public Lock acquireNodeLock( long nodeId, LockType type )
    {
        // NOTE(review): 'type' is ignored — a write lock is always taken.
        AbstractLockService.LockedNode resource = new AbstractLockService.LockedNode( nodeId );
        getWriteLock( resource, threadMark.get() );
        return new WriteRelease( resource );
    }

    // Handle that releases the write lock with the calling thread's owner token.
    private class WriteRelease extends Lock
    {
        private final AbstractLockService.LockedNode resource;

        WriteRelease( AbstractLockService.LockedNode resource )
        {
            this.resource = resource;
        }

        @Override
        public void release()
        {
            releaseWriteLock( resource, threadMark.get() );
        }
    }

    // Dummy Transaction used only as an ownership token; no operation is supported.
    static class ThreadMark implements Transaction
    {
        @Override
        public void commit()
                throws HeuristicMixedException, HeuristicRollbackException, RollbackException, SecurityException,
                SystemException
        {
            throw new UnsupportedOperationException( "not implemented" );
        }

        @Override
        public boolean delistResource( XAResource xaRes, int flag ) throws IllegalStateException, SystemException
        {
            throw new UnsupportedOperationException( "not implemented" );
        }

        @Override
        public boolean enlistResource( XAResource xaRes )
                throws IllegalStateException, RollbackException, SystemException
        {
            throw new UnsupportedOperationException( "not implemented" );
        }

        @Override
        public int getStatus() throws SystemException
        {
            throw new UnsupportedOperationException( "not implemented" );
        }

        @Override
        public void registerSynchronization( Synchronization synch )
                throws IllegalStateException, RollbackException, SystemException
        {
            throw new UnsupportedOperationException( "not implemented" );
        }

        @Override
        public void rollback() throws IllegalStateException, SystemException
        {
            throw new UnsupportedOperationException( "not implemented" );
        }

        @Override
        public void setRollbackOnly() throws IllegalStateException, SystemException
        {
            throw new UnsupportedOperationException( "not implemented" );
        }
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_locking_LockServiceMicroBenchmark.java
|
1,497
|
/**
 * Micro-benchmark comparing {@link LockService} implementations.
 *
 * Scenario and implementation are chosen via the {@code Benchmark} and {@code Implementation}
 * system properties; tuning knobs are {@code minThreads}, {@code maxThreads},
 * {@code iterations} and {@code lockCount}.
 *
 * Fixes relative to the previous version: the interrupt flag is restored when
 * {@code join()} is interrupted, the enum lookup no longer uses a caught
 * NullPointerException for control flow, and the per-scenario property-reading
 * boilerplate is hoisted into a single shared method.
 */
public class LockServiceMicroBenchmark
{
    public static void main( String... args )
    {
        get( Benchmark.class ).execute( get( Implementation.class ) );
    }

    /** The benchmark scenario to run, chosen through the {@code Benchmark} system property. */
    enum Benchmark
    {
        UNCONTENDED
        {
            @Override
            void run( Implementation impl, int threads, int iterations, int lockCount )
            {
                executeUncontended( impl, threads, iterations, lockCount, false );
            }
        },
        REENTRY
        {
            @Override
            void run( Implementation impl, int threads, int iterations, int lockCount )
            {
                // Same as UNCONTENDED, but each thread pre-acquires its lock so that
                // every measured acquisition is a re-entrant one.
                executeUncontended( impl, threads, iterations, lockCount, true );
            }
        },
        HANDOVER
        {
            @Override
            void run( Implementation impl, int threads, int iterations, int lockCount )
            {
                executeHandover( impl, threads, iterations, lockCount );
            }
        };

        /**
         * Reads the shared tuning parameters from system properties and runs this
         * scenario once for every thread count in [minThreads, maxThreads].
         */
        void execute( Implementation impl )
        {
            int minThreads = Integer.getInteger( "minThreads", 1 );
            int maxThreads = Integer.getInteger( "maxThreads", cores() * 2 );
            int iterations = Integer.getInteger( "iterations", 100 );
            int lockCount = Integer.getInteger( "lockCount", 100_000 );
            for ( int threads = minThreads; threads <= maxThreads; threads++ )
            {
                System.out.printf( "=== %s / %s - %s threads ===%n", this, impl, threads );
                run( impl, threads, iterations, lockCount );
            }
        }

        /** Scenario-specific body, invoked once per thread count. */
        abstract void run( Implementation impl, int threads, int iterations, int lockCount );
    }

    /** The lock implementation under test, chosen through the {@code Implementation} system property. */
    enum Implementation
    {
        LOCK_MANAGER
        {
            @Override
            LockService create()
            {
                return new AdaptedLockManager();
            }
        },
        REENTRANT_LOCK_SERVICE
        {
            @Override
            LockService create()
            {
                return new ReentrantLockService();
            }
        };

        abstract LockService create();
    }

    /** Worker that repeatedly acquires and releases the write lock on a single node. */
    static class LockingThread extends MeasuringThread
    {
        private final LockService locks;
        private final long nodeId;
        private final boolean reentry;

        LockingThread( long nodeId, LockService locks, int lockCount, boolean reentry )
        {
            super( lockCount );
            this.locks = locks;
            this.nodeId = nodeId;
            this.reentry = reentry;
        }

        @Override
        void init()
        {
            if ( reentry )
            {
                // Intentionally never released: holding this lock for the lifetime of
                // the thread makes every measured acquisition below a re-entrant one.
                locks.acquireNodeLock( nodeId, LockService.LockType.WRITE_LOCK );
            }
        }

        @Override
        protected void execute()
        {
            // Only the acquisition is timed; the release happens outside the measurement.
            long time = nanoTime();
            Lock lock = locks.acquireNodeLock( nodeId, LockService.LockType.WRITE_LOCK );
            update( nanoTime() - time );
            lock.release();
        }
    }

    static void executeUncontended( Implementation impl, int threadCount, int iterations, int lockCount,
                                    boolean reentry )
    {
        for ( int i = 0; i < iterations; i++ )
        {
            LockService locks = impl.create();
            MeasuringThread[] threads = new MeasuringThread[threadCount];
            for ( int nodeId = 0; nodeId < threadCount; nodeId++ )
            {
                // Each thread locks its own node id, so there is never any contention.
                threads[nodeId] = new LockingThread( nodeId, locks, lockCount, reentry );
            }
            execute( threads );
        }
    }

    /** Starts all threads, waits for them to finish, then prints aggregated timings. */
    private static void execute( MeasuringThread[] threads )
    {
        for ( MeasuringThread thread : threads )
        {
            thread.start();
        }
        for ( MeasuringThread thread : threads )
        {
            try
            {
                thread.join();
            }
            catch ( InterruptedException e )
            {
                // Restore the interrupt flag before bailing out, so callers can
                // still observe that this thread was interrupted.
                Thread.currentThread().interrupt();
                throw new RuntimeException( e );
            }
        }
        long minTime = Long.MAX_VALUE, maxTime = 0, totalTime = 0;
        double count = 0.0;
        for ( MeasuringThread thread : threads )
        {
            minTime = min( minTime, thread.minTime );
            maxTime = max( maxTime, thread.maxTime );
            totalTime += thread.totalTime;
            count += thread.iterations;
        }
        System.out.printf( "min=%dns; max=%.3fus; total=%.3fms; avg=%.3fns%n",
                minTime, maxTime / 1_000.0, totalTime / 1_000_000.0, totalTime / count );
    }

    static void executeHandover( Implementation impl, int threadCount, int iterations, int lockCount )
    {
        throw new UnsupportedOperationException( "not implemented" );
    }

    /**
     * Resolves an enum constant from the system property named after the enum's simple name.
     *
     * @throws IllegalArgumentException if the property is unset or names no constant
     */
    static <E extends Enum<E>> E get( Class<E> type )
    {
        String name = System.getProperty( type.getSimpleName() );
        if ( name == null )
        {
            throw new IllegalArgumentException( type.getSimpleName() + " not specified." );
        }
        try
        {
            return Enum.valueOf( type, name );
        }
        catch ( IllegalArgumentException e )
        {
            throw new IllegalArgumentException(
                    "No such " + type.getSimpleName() + ": " + name, e );
        }
    }

    private static int cores()
    {
        return Runtime.getRuntime().availableProcessors();
    }

    /** Thread base class that tracks min/max/total time of the measured operation. */
    static abstract class MeasuringThread extends Thread
    {
        final int iterations;
        long minTime = Long.MAX_VALUE, maxTime, totalTime;

        MeasuringThread( int iterations )
        {
            this.iterations = iterations;
        }

        @Override
        public final void run()
        {
            init();
            for ( int i = 0; i < iterations; i++ )
            {
                execute();
            }
        }

        /** Hook for one-time per-thread setup; default is a no-op. */
        void init()
        {
        }

        /** Folds one sample (in nanoseconds) into the running statistics. */
        void update( long time )
        {
            minTime = min( minTime, time );
            maxTime = max( maxTime, time );
            totalTime += time;
        }

        /** Executes and measures one iteration of the benchmarked operation. */
        protected abstract void execute();
    }

    /**
     * Adapts the legacy {@link LockManagerImpl} to the {@link LockService} interface so
     * both implementations can be driven through the same benchmark API.
     */
    private static class AdaptedLockManager extends LockManagerImpl implements LockService
    {
        // One lock-owner token per thread: LockManagerImpl identifies holders by the
        // Transaction object passed to getWriteLock/releaseWriteLock.
        private final ThreadLocal<Transaction> threadMark = new ThreadLocal<Transaction>(){
            @Override
            protected Transaction initialValue()
            {
                return new ThreadMark();
            }
        };

        AdaptedLockManager()
        {
            super( new RagManager() );
        }

        // NOTE(review): the LockType argument is ignored — a write lock is always taken.
        @Override
        public Lock acquireNodeLock( long nodeId, LockType type )
        {
            AbstractLockService.LockedNode resource = new AbstractLockService.LockedNode( nodeId );
            getWriteLock( resource, threadMark.get() );
            return new WriteRelease( resource );
        }

        /** Releases the write lock taken in {@link #acquireNodeLock(long, LockType)}. */
        private class WriteRelease extends Lock
        {
            private final AbstractLockService.LockedNode resource;

            WriteRelease( AbstractLockService.LockedNode resource )
            {
                this.resource = resource;
            }

            @Override
            public void release()
            {
                releaseWriteLock( resource, threadMark.get() );
            }
        }

        /**
         * Minimal Transaction used purely as a lock-owner identity token; every JTA
         * operation is intentionally unsupported.
         */
        static class ThreadMark implements Transaction
        {
            @Override
            public void commit()
                    throws HeuristicMixedException, HeuristicRollbackException, RollbackException, SecurityException,
                    SystemException
            {
                throw new UnsupportedOperationException( "not implemented" );
            }

            @Override
            public boolean delistResource( XAResource xaRes, int flag ) throws IllegalStateException, SystemException
            {
                throw new UnsupportedOperationException( "not implemented" );
            }

            @Override
            public boolean enlistResource( XAResource xaRes )
                    throws IllegalStateException, RollbackException, SystemException
            {
                throw new UnsupportedOperationException( "not implemented" );
            }

            @Override
            public int getStatus() throws SystemException
            {
                throw new UnsupportedOperationException( "not implemented" );
            }

            @Override
            public void registerSynchronization( Synchronization synch )
                    throws IllegalStateException, RollbackException, SystemException
            {
                throw new UnsupportedOperationException( "not implemented" );
            }

            @Override
            public void rollback() throws IllegalStateException, SystemException
            {
                throw new UnsupportedOperationException( "not implemented" );
            }

            @Override
            public void setRollbackOnly() throws IllegalStateException, SystemException
            {
                throw new UnsupportedOperationException( "not implemented" );
            }
        }
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_locking_LockServiceMicroBenchmark.java
|
1,498
|
public class LockGroupTest
{
    @Test
    public void shouldReleaseAllLocksWhenExitingTheLockGroupRegion() throws Exception
    {
        // given: three mocked locks tracked by a single group
        Lock[] locks = new Lock[3];
        for ( int i = 0; i < locks.length; i++ )
        {
            locks[i] = mock( Lock.class );
        }

        // when: the group goes out of the try-with-resources region
        try ( LockGroup group = new LockGroup() )
        {
            for ( Lock lock : locks )
            {
                group.add( lock );
            }
        }

        // then: every registered lock was released exactly once
        for ( Lock lock : locks )
        {
            verify( lock, times( 1 ) ).release();
        }
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_locking_LockGroupTest.java
|
1,499
|
/**
 * Collects {@link Lock}s acquired during an operation and releases all of them when the
 * group is closed, making it convenient to use in a try-with-resources block.
 */
public class LockGroup implements AutoCloseable
{
    private final List<Lock> locks = new ArrayList<>();

    /**
     * Registers a lock to be released when this group is closed.
     *
     * @param lock the lock to release on {@link #close()}
     */
    public final void add( Lock lock )
    {
        locks.add( lock );
    }

    /**
     * Releases every registered lock. Previously a throwing {@code release()} would leak
     * all subsequent locks; now the remaining locks are still released, and the first
     * failure is rethrown with any later failures attached as suppressed exceptions.
     */
    @Override
    public void close()
    {
        RuntimeException failure = null;
        for ( Lock lock : locks )
        {
            try
            {
                lock.release();
            }
            catch ( RuntimeException e )
            {
                if ( failure == null )
                {
                    failure = e;
                }
                else
                {
                    failure.addSuppressed( e );
                }
            }
        }
        locks.clear(); // make a second close() a harmless no-op
        if ( failure != null )
        {
            throw failure;
        }
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_locking_LockGroup.java
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.