Unnamed: 0
int64 0
6.7k
| func
stringlengths 12
89.6k
| target
bool 2
classes | project
stringlengths 45
151
|
|---|---|---|---|
200
|
/**
 * Collects individual log entries into whole transactions and returns them
 * strictly in ascending txId order, starting at startTxId. Transactions whose
 * commit record is encountered ahead of order are parked in futureQueue until
 * every lower txId has been handed out first.
 */
private static class KnownTxIdCollector implements LogEntryCollector
{
// In-progress transactions, keyed by the entry identifier shared by all entries of one tx.
private final Map<Integer,List<LogEntry>> transactions = new HashMap<Integer,List<LogEntry>>();
// First txId (inclusive) this collector may return.
private final long startTxId;
// Identifier of the most recently completed (returned) transaction.
private int identifier;
// Fully collected transactions with txId ahead of nextExpectedTxId, keyed by txId.
private final Map<Long, List<LogEntry>> futureQueue = new HashMap<Long, List<LogEntry>>();
// The txId the next returned transaction must carry.
private long nextExpectedTxId;
// Start entry of the most recently returned transaction.
private LogEntry.Start lastStartEntry;
KnownTxIdCollector( long startTxId )
{
this.startTxId = startTxId;
this.nextExpectedTxId = startTxId;
}
@Override
public int getIdentifier()
{
return identifier;
}
@Override
public boolean hasInFutureQueue()
{
// True when the next expected tx has already been fully collected.
return futureQueue.containsKey( nextExpectedTxId );
}
@Override
public LogEntry.Start getLastStartEntry()
{
return lastStartEntry;
}
/**
 * Feeds one entry into the collector. Returns the commit entry of a completed
 * transaction when this call completes the tx with txId == nextExpectedTxId
 * (writing the whole tx to target when target is non-null); otherwise null.
 */
@Override
public LogEntry collect( LogEntry entry, LogBuffer target ) throws IOException
{
// Drain the future queue first: a previously out-of-order tx may now be due.
// 'entry' is ignored on this path — callers check hasInFutureQueue() before
// reading the next entry, so no entry is lost (see collectNextFromCurrentSource).
if ( futureQueue.containsKey( nextExpectedTxId ) )
{
long txId = nextExpectedTxId++;
List<LogEntry> list = futureQueue.remove( txId );
lastStartEntry = (LogEntry.Start)list.get( 0 );
writeToBuffer( list, target );
return commitEntryOf( txId, list );
}
if ( entry instanceof LogEntry.Start )
{
List<LogEntry> list = new LinkedList<LogEntry>();
list.add( entry );
transactions.put( entry.getIdentifier(), list );
}
else if ( entry instanceof LogEntry.Commit )
{
long commitTxId = ((LogEntry.Commit) entry).getTxId();
// Older than the range we were asked for: not interesting.
if ( commitTxId < startTxId ) return null;
identifier = entry.getIdentifier();
List<LogEntry> entries = transactions.get( identifier );
// The start record was before our read position; skip this tx.
if ( entries == null ) return null;
entries.add( entry );
if ( nextExpectedTxId != startTxId && commitTxId < nextExpectedTxId )
{ // Have returned some previous tx
// If we encounter an already extracted tx in the middle of the stream
// then just ignore it. This can happen when we do log rotation,
// where records are copied over from the active log to the new.
return null;
}
if ( commitTxId != nextExpectedTxId )
{ // There seems to be a hole in the tx stream, or out-of-ordering
futureQueue.put( commitTxId, entries );
return null;
}
writeToBuffer( entries, target );
nextExpectedTxId = commitTxId+1;
lastStartEntry = (LogEntry.Start)entries.get( 0 );
return entry;
}
else if ( entry instanceof LogEntry.Command || entry instanceof LogEntry.Prepare )
{
List<LogEntry> list = transactions.get( entry.getIdentifier() );
// Since we can start reading at any position in the log it might be the case
// that we come across a record which corresponding start record resides
// before the position we started reading from. If that should be the case
// then skip it since it isn't an important record for us here.
if ( list != null )
{
list.add( entry );
}
}
else if ( entry instanceof LogEntry.Done )
{
transactions.remove( entry.getIdentifier() );
}
else
{
throw new RuntimeException( "Unknown entry: " + entry );
}
return null;
}
// Returns the commit entry of an already collected transaction, or throws if absent.
private LogEntry commitEntryOf( long txId, List<LogEntry> list ) throws IOException
{
for ( LogEntry entry : list )
{
if ( entry instanceof LogEntry.Commit ) return entry;
}
throw new NoSuchTransactionException( txId, "No commit entry in " + list );
}
// Writes all entries to target; a null target means "extract only, copy nothing".
private void writeToBuffer( List<LogEntry> entries, LogBuffer target ) throws IOException
{
if ( target != null )
{
for ( LogEntry entry : entries )
{
LogIoUtils.writeLogEntry( entry, target );
}
}
}
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_transaction_xaframework_LogExtractor.java
|
201
|
{
@Override
public XaCommand readCommand( ReadableByteChannel byteChannel,
ByteBuffer buffer ) throws IOException
{
return Command.readCommand( null, null, byteChannel, buffer );
}
};
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_transaction_xaframework_LogExtractor.java
|
202
|
{
private final Map<Long, File> activeLogFiles = getActiveLogs( storeDir );
private final long highestLogVersion = max( getHighestHistoryLogVersion( fileSystem, storeDir,
LOGICAL_LOG_DEFAULT_NAME ), maxKey( activeLogFiles ) );
@Override
public ReadableByteChannel getLogicalLogOrMyselfCommitted( long version, long position )
throws IOException
{
File name = getFileName( version );
if ( !fileSystem.fileExists( name ) )
{
name = activeLogFiles.get( version );
if ( name == null ) throw new NoSuchLogVersionException( version );
}
StoreChannel channel = fileSystem.open( name, "r" );
channel.position( position );
return new BufferedFileChannel( channel, monitor );
}
private long maxKey( Map<Long, File> activeLogFiles )
{
long max = 0;
for ( Long key : activeLogFiles.keySet() ) max = max( max, key );
return max;
}
private Map<Long, File> getActiveLogs( File storeDir ) throws IOException
{
Map<Long, File> result = new HashMap<Long, File>();
for ( String postfix : ACTIVE_POSTFIXES )
{
File candidateFile = new File( storeDir, LOGICAL_LOG_DEFAULT_NAME + postfix );
if ( !fileSystem.fileExists( candidateFile ) )
continue;
long[] header = LogIoUtils.readLogHeader( fileSystem, candidateFile );
result.put( header[0], candidateFile );
}
return result;
}
@Override
public File getFileName( long version )
{
return new File( storeDir, LOGICAL_LOG_DEFAULT_NAME + ".v" + version );
}
@Override
public long getHighestLogVersion()
{
return highestLogVersion;
}
@Override
public Long getFirstCommittedTxId( long version )
{
throw new UnsupportedOperationException();
}
@Override
public Long getFirstStartRecordTimestamp( long version )
{
throw new UnsupportedOperationException();
}
@Override
public long getLastCommittedTxId()
{
throw new UnsupportedOperationException();
}
@Override
public String toString()
{
return getClass().getSimpleName() + "[" + storeDir + "]";
}
};
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_transaction_xaframework_LogExtractor.java
|
203
|
public class LogExtractor
{
// Possible suffixes of the currently-active logical log files.
private static final String[] ACTIVE_POSTFIXES = { ".1", ".2" };
/**
 * If tx range is smaller than this threshold ask the position cache for the
 * start position farthest back. Otherwise jump to right log and scan.
 */
private static final int CACHE_FIND_THRESHOLD = 10000;
// Scratch buffer reused for reading log headers and entries.
private final ByteBuffer localBuffer = newLogReaderBuffer();
// Channel over the log currently being read; null after close.
private ReadableByteChannel source;
private final LogEntryCollector collector;
// Version of the log that 'source' currently reads from.
private long version;
// Commit entries of the most recent and the previous extracted tx.
private LogEntry.Commit lastCommitEntry;
private LogEntry.Commit previousCommitEntry;
// First txId (inclusive) this extractor was asked for.
private final long startTxId;
// txId the next successful extractNext() call must yield.
private long nextExpectedTxId;
// Number of transactions extracted so far (used in error messages only).
private int counter;
private final LogPositionCache cache;
private final LogLoader logLoader;
private final XaCommandFactory commandFactory;
/**
 * In-memory caches that let the extractor avoid re-scanning log files:
 * a txId -> start-position cache plus a log-version -> header-tx cache.
 */
public static class LogPositionCache
{
    private final LruCache<Long, TxPosition> txStartPositionCache =
            new LruCache<Long, TxPosition>( "Tx start position cache", 10000 );
    private final LruCache<Long, Long> logHeaderCache =
            new LruCache<Long, Long>( "Log header cache", 1000 );

    public void clear()
    {
        txStartPositionCache.clear();
        logHeaderCache.clear();
    }

    public TxPosition positionOf( long txId )
    {
        return txStartPositionCache.get( txId );
    }

    public void putHeader( long logVersion, long previousLogLastCommittedTx )
    {
        logHeaderCache.put( logVersion, previousLogLastCommittedTx );
    }

    public Long getHeader( long logVersion )
    {
        return logHeaderCache.get( logVersion );
    }

    public void putStartPosition( long txId, TxPosition position )
    {
        txStartPositionCache.put( txId, position );
    }

    public TxPosition getStartPosition( long txId )
    {
        return txStartPositionCache.get( txId );
    }

    /**
     * Builds a TxPosition from a start entry, caches it under txId and returns it.
     * Refuses entries whose position was never filled in (still -1).
     */
    public synchronized TxPosition cacheStartPosition( long txId, LogEntry.Start startEntry, long logVersion )
    {
        long startPosition = startEntry.getStartPosition();
        if ( startPosition == -1 )
        {
            throw new RuntimeException( "StartEntry.position is " + startPosition );
        }
        TxPosition cached = new TxPosition( logVersion, startEntry.getMasterId(), startEntry.getIdentifier(),
                startPosition, startEntry.getChecksum() );
        putStartPosition( txId, cached );
        return cached;
    }
}
/**
 * Abstraction over where logical log content comes from (historical log files
 * and/or the active log), so the extractor itself stays storage-agnostic.
 */
public interface LogLoader
{
/**
 * @return a channel over the log with {@code version}, positioned at {@code position}.
 */
ReadableByteChannel getLogicalLogOrMyselfCommitted( long version, long position ) throws IOException;
/**
 * @return the highest log version this loader knows about.
 */
long getHighestLogVersion();
/**
 * @return the file name used for the log with {@code version}.
 */
File getFileName( long version );
/**
 * @param version the log version to get first committed tx for.
 * @return the first committed transaction id for the log with {@code version}.
 * If that log doesn't exist {@code null} is returned.
 */
Long getFirstCommittedTxId( long version );
/**
 * @return the last committed transaction id known to this loader.
 */
long getLastCommittedTxId();
/**
 * @param version the log version to get first tx timestamp for.
 * @return the timestamp for the start record for the first encountered transaction
 * in the log {@code version}.
 */
Long getFirstStartRecordTimestamp( long version ) throws IOException;
}
/**
 * Allocates the scratch buffer used when reading log entries; capacity
 * derives from the Xid size limits (global id plus up to 10 branch
 * qualifiers) plus 9 bytes of record header.
 */
public static ByteBuffer newLogReaderBuffer()
{
    int capacity = 9 + Xid.MAXGTRIDSIZE + Xid.MAXBQUALSIZE * 10;
    return ByteBuffer.allocate( capacity );
}
/**
 * Creates an extractor positioned at the start record of {@code startTxId}.
 * {@code endTxIdHint} only guides initial positioning: for small ranges the
 * start-position cache is consulted; otherwise the correct log is located by
 * scanning log headers backwards and reading linearly from its beginning.
 */
public LogExtractor( LogPositionCache cache, LogLoader logLoader,
XaCommandFactory commandFactory, long startTxId, long endTxIdHint ) throws IOException
{
this.cache = cache;
this.logLoader = logLoader;
this.commandFactory = commandFactory;
this.startTxId = startTxId;
this.nextExpectedTxId = startTxId;
long diff = endTxIdHint-startTxId + 1/*since they are inclusive*/;
if ( diff < CACHE_FIND_THRESHOLD )
{ // Find it from cache, we must check with all the requested transactions
// because the first committed transaction doesn't necessarily have its
// start record before the others.
TxPosition earliestPosition = getEarliestStartPosition( startTxId, endTxIdHint );
if ( earliestPosition != null )
{
this.version = earliestPosition.version;
this.source = logLoader.getLogicalLogOrMyselfCommitted( version, earliestPosition.position );
}
}
// Cache miss (or range too large): fall back to header scanning.
if ( source == null )
{ // Find the start position by jumping to the right log and scan linearly.
// for consecutive transaction there's no scan needed, only the first one.
this.version = findLogContainingTxId( startTxId )[0];
this.source = logLoader.getLogicalLogOrMyselfCommitted( version, 0 );
// To get to the right position to start reading entries from
readAndAssertLogHeader( localBuffer, source, version );
}
this.collector = new KnownTxIdCollector( startTxId );
}
/**
 * Finds the earliest cached start position among txIds [startTxId..endTxIdHint].
 * Returns null as soon as any txId in the range is missing from the cache,
 * since a partial answer could position us past a needed start record.
 */
private TxPosition getEarliestStartPosition( long startTxId, long endTxIdHint )
{
    TxPosition earliest = null;
    long txId = startTxId;
    while ( txId <= endTxIdHint )
    {
        TxPosition candidate = cache.positionOf( txId );
        if ( candidate == null )
        {
            return null;
        }
        if ( earliest == null || candidate.earlierThan( earliest ) )
        {
            earliest = candidate;
        }
        txId++;
    }
    return earliest;
}
/**
 * Extracts the next transaction in the stream, optionally copying its
 * entries (plus a synthesized Done record) to {@code target}.
 *
 * @return the txId for the extracted tx. Or -1 if end-of-stream was reached.
 * @throws RuntimeException if there was something unexpected with the stream.
 */
public long extractNext( LogBuffer target ) throws IOException
{
try
{
while ( this.version <= logLoader.getHighestLogVersion() )
{
long result = collectNextFromCurrentSource( target );
if ( result != -1 )
{
// TODO Should be assertions?
// Skip a tx we already returned; duplicates can appear around log rotation
// (records copied from the active log to the new one).
if ( previousCommitEntry != null && result == previousCommitEntry.getTxId() ) continue;
if ( result != nextExpectedTxId )
{
throw new RuntimeException( "Expected txId " + nextExpectedTxId + ", but got " + result +
" (starting from " + startTxId + ")" + " " + counter + ", "
+ previousCommitEntry + ", " + lastCommitEntry );
}
nextExpectedTxId++;
counter++;
return result;
}
// Current log exhausted: move on to the next version if there is one.
if ( this.version < logLoader.getHighestLogVersion() )
{
continueInNextLog();
}
else break;
}
return -1;
}
catch ( IOException e )
{
// Deliberate rethrow: I/O failures must NOT trigger the cache clearing below.
throw e;
}
catch ( Exception e )
{
// Something is wrong with the cached tx start position for this (expected) tx,
// remove it from cache so that next request will have to bypass the cache
cache.clear();
throw Exceptions.launderedException( e );
}
}
/**
 * Closes the current log channel and opens the next version, skipping past
 * its header so reading continues at the first entry.
 */
private void continueInNextLog() throws IOException
{
ensureSourceIsClosed();
this.source = logLoader.getLogicalLogOrMyselfCommitted( ++version, 0 );
readAndAssertLogHeader( localBuffer, source, version ); // To get to the right position to start reading entries from
}
/**
 * Reads entries from the current log source and returns the txId of the next
 * completed transaction, or -1 when this log is exhausted.
 */
private long collectNextFromCurrentSource( LogBuffer target ) throws IOException
{
LogEntry entry = null;
// Note the assignment inside the condition: a fresh entry is only read when
// the collector has nothing already queued for the next expected txId.
while ( collector.hasInFutureQueue() || // if something in queue then don't read next entry
(entry = LogIoUtils.readEntry( localBuffer, source, commandFactory )) != null )
{
LogEntry foundEntry = collector.collect( entry, target );
if ( foundEntry != null )
{ // It just wrote the transaction, w/o the done record though. Add it
// NOTE(review): target is passed unconditionally here, unlike the null-guarded
// writes inside the collector — presumably never null on this path; confirm.
previousCommitEntry = lastCommitEntry;
LogIoUtils.writeLogEntry( new LogEntry.Done( collector.getIdentifier() ), target );
lastCommitEntry = (LogEntry.Commit)foundEntry;
return lastCommitEntry.getTxId();
}
}
return -1;
}
/**
 * Releases the underlying log channel. Safe to call more than once.
 */
public void close()
{
    ensureSourceIsClosed();
}

/**
 * Safety net for callers that forget to call {@link #close()}.
 * Always invokes super.finalize(), as the finalize() contract requires,
 * even if closing the channel throws.
 */
@Override
protected void finalize() throws Throwable
{
    try
    {
        ensureSourceIsClosed();
    }
    finally
    {
        super.finalize();
    }
}

/**
 * Closes and nulls out 'source' if it is still open. Failures are logged
 * and swallowed on purpose: at this point the extraction is already done.
 */
private void ensureSourceIsClosed()
{
    try
    {
        if ( source != null )
        {
            source.close();
            source = null;
        }
    }
    catch ( IOException e )
    { // OK?
        System.out.println( "Couldn't close logical after extracting transactions from it" );
        e.printStackTrace();
    }
}
/** @return the commit entry of the most recently extracted transaction. */
public LogEntry.Commit getLastCommitEntry()
{
    return lastCommitEntry;
}

/**
 * @return the checksum of the most recently extracted transaction's start entry.
 */
public long getLastTxChecksum()
{
    Start lastStart = getLastStartEntry();
    return lastStart.getChecksum();
}

/** @return the start entry of the most recently extracted transaction. */
public Start getLastStartEntry()
{
    return collector.getLastStartEntry();
}
/**
 * Walks log versions from the highest downwards, reading each log's header,
 * until it finds the most recent log whose header committed-tx value lies
 * below {@code txId} — the log in which txId's records must start.
 *
 * @return {@code { logVersion, headerCommittedTx }} for that log.
 * @throws NoSuchTransactionException if every log was searched without success.
 */
private long[] findLogContainingTxId( long txId ) throws IOException
{
long version = logLoader.getHighestLogVersion();
long committedTx = 1;
while ( version >= 0 )
{
// Header values are cached per version so repeated searches skip file I/O.
Long cachedLastTx = cache.getHeader( version );
if ( cachedLastTx != null )
{
committedTx = cachedLastTx.longValue();
}
else
{
ReadableByteChannel logChannel = logLoader.getLogicalLogOrMyselfCommitted( version, 0 );
try
{
ByteBuffer buf = ByteBuffer.allocate( 16 );
long[] header = readAndAssertLogHeader( buf, logChannel, version );
// header[1] is the header's committed-tx field for this log version.
committedTx = header[1];
cache.putHeader( version, committedTx );
}
finally
{
logChannel.close();
}
}
if ( committedTx < txId )
{
break;
}
version--;
}
// Fell off the end of the loop without breaking: txId precedes all logs.
if ( version == -1 )
{
throw new NoSuchTransactionException( txId, "started at " + logLoader.getHighestLogVersion() + " searching backwards" );
}
return new long[] { version, committedTx };
}
/**
 * Strategy for assembling raw log entries into complete transactions.
 */
private interface LogEntryCollector
{
// Returns the commit entry of a completed tx, or null while more entries are needed.
LogEntry collect( LogEntry entry, LogBuffer target ) throws IOException;
// Start entry of the most recently completed transaction.
LogEntry.Start getLastStartEntry();
// Whether a fully collected tx is already queued for the next expected txId.
boolean hasInFutureQueue();
// Identifier of the most recently completed transaction.
int getIdentifier();
}
/**
 * Collects individual log entries into whole transactions and returns them
 * strictly in ascending txId order, starting at startTxId. Transactions whose
 * commit record is encountered ahead of order are parked in futureQueue until
 * every lower txId has been handed out first.
 */
private static class KnownTxIdCollector implements LogEntryCollector
{
// In-progress transactions, keyed by the entry identifier shared by all entries of one tx.
private final Map<Integer,List<LogEntry>> transactions = new HashMap<Integer,List<LogEntry>>();
// First txId (inclusive) this collector may return.
private final long startTxId;
// Identifier of the most recently completed (returned) transaction.
private int identifier;
// Fully collected transactions with txId ahead of nextExpectedTxId, keyed by txId.
private final Map<Long, List<LogEntry>> futureQueue = new HashMap<Long, List<LogEntry>>();
// The txId the next returned transaction must carry.
private long nextExpectedTxId;
// Start entry of the most recently returned transaction.
private LogEntry.Start lastStartEntry;
KnownTxIdCollector( long startTxId )
{
this.startTxId = startTxId;
this.nextExpectedTxId = startTxId;
}
@Override
public int getIdentifier()
{
return identifier;
}
@Override
public boolean hasInFutureQueue()
{
// True when the next expected tx has already been fully collected.
return futureQueue.containsKey( nextExpectedTxId );
}
@Override
public LogEntry.Start getLastStartEntry()
{
return lastStartEntry;
}
/**
 * Feeds one entry into the collector. Returns the commit entry of a completed
 * transaction when this call completes the tx with txId == nextExpectedTxId
 * (writing the whole tx to target when target is non-null); otherwise null.
 */
@Override
public LogEntry collect( LogEntry entry, LogBuffer target ) throws IOException
{
// Drain the future queue first: a previously out-of-order tx may now be due.
// 'entry' is ignored on this path — callers check hasInFutureQueue() before
// reading the next entry, so no entry is lost (see collectNextFromCurrentSource).
if ( futureQueue.containsKey( nextExpectedTxId ) )
{
long txId = nextExpectedTxId++;
List<LogEntry> list = futureQueue.remove( txId );
lastStartEntry = (LogEntry.Start)list.get( 0 );
writeToBuffer( list, target );
return commitEntryOf( txId, list );
}
if ( entry instanceof LogEntry.Start )
{
List<LogEntry> list = new LinkedList<LogEntry>();
list.add( entry );
transactions.put( entry.getIdentifier(), list );
}
else if ( entry instanceof LogEntry.Commit )
{
long commitTxId = ((LogEntry.Commit) entry).getTxId();
// Older than the range we were asked for: not interesting.
if ( commitTxId < startTxId ) return null;
identifier = entry.getIdentifier();
List<LogEntry> entries = transactions.get( identifier );
// The start record was before our read position; skip this tx.
if ( entries == null ) return null;
entries.add( entry );
if ( nextExpectedTxId != startTxId && commitTxId < nextExpectedTxId )
{ // Have returned some previous tx
// If we encounter an already extracted tx in the middle of the stream
// then just ignore it. This can happen when we do log rotation,
// where records are copied over from the active log to the new.
return null;
}
if ( commitTxId != nextExpectedTxId )
{ // There seems to be a hole in the tx stream, or out-of-ordering
futureQueue.put( commitTxId, entries );
return null;
}
writeToBuffer( entries, target );
nextExpectedTxId = commitTxId+1;
lastStartEntry = (LogEntry.Start)entries.get( 0 );
return entry;
}
else if ( entry instanceof LogEntry.Command || entry instanceof LogEntry.Prepare )
{
List<LogEntry> list = transactions.get( entry.getIdentifier() );
// Since we can start reading at any position in the log it might be the case
// that we come across a record which corresponding start record resides
// before the position we started reading from. If that should be the case
// then skip it since it isn't an important record for us here.
if ( list != null )
{
list.add( entry );
}
}
else if ( entry instanceof LogEntry.Done )
{
transactions.remove( entry.getIdentifier() );
}
else
{
throw new RuntimeException( "Unknown entry: " + entry );
}
return null;
}
// Returns the commit entry of an already collected transaction, or throws if absent.
private LogEntry commitEntryOf( long txId, List<LogEntry> list ) throws IOException
{
for ( LogEntry entry : list )
{
if ( entry instanceof LogEntry.Commit ) return entry;
}
throw new NoSuchTransactionException( txId, "No commit entry in " + list );
}
// Writes all entries to target; a null target means "extract only, copy nothing".
private void writeToBuffer( List<LogEntry> entries, LogBuffer target ) throws IOException
{
if ( target != null )
{
for ( LogEntry entry : entries )
{
LogIoUtils.writeLogEntry( entry, target );
}
}
}
}
/**
 * Immutable pointer to where a transaction's start record lives: which log
 * version and at what offset, plus identifying data (masterId, identifier,
 * checksum) for that transaction.
 */
public static class TxPosition
{
    final long version;
    final int masterId;
    final int identifier;
    final long position;
    final long checksum;

    public TxPosition( long version, int masterId, int identifier, long position, long checksum )
    {
        this.version = version;
        this.masterId = masterId;
        this.identifier = identifier;
        this.position = position;
        this.checksum = checksum;
    }

    /**
     * @return true if this position occurs strictly before {@code other} in the
     * log stream: an earlier log version, or the same version at a lower offset.
     */
    public boolean earlierThan( TxPosition other )
    {
        if ( version != other.version )
        {
            return version < other.version;
        }
        return position < other.position;
    }

    @Override
    public String toString()
    {
        return "TxPosition[version:" + version + ", pos:" + position + "]";
    }
}
/**
 * Convenience factories building a LogExtractor over the logical logs in
 * {@code storeDir}, defaulting the command factory and/or first txId.
 */
public static LogExtractor from( FileSystemAbstraction fileSystem, File storeDir, ByteCounterMonitor monitor ) throws IOException
{
return from( fileSystem, storeDir, NIONEO_COMMAND_FACTORY, monitor );
}
public static LogExtractor from( FileSystemAbstraction fileSystem, File storeDir, ByteCounterMonitor monitor,
long startTxId ) throws IOException
{
return from( fileSystem, storeDir, NIONEO_COMMAND_FACTORY, monitor, startTxId );
}
public static LogExtractor from( FileSystemAbstraction fileSystem, File storeDir,
XaCommandFactory commandFactory, ByteCounterMonitor monitor ) throws IOException
{
// 2 is a "magic" first tx :)
// (txId 1 appears to be reserved — TODO confirm against the tx id allocation scheme)
return from( fileSystem, storeDir, commandFactory, monitor, 2 );
}
/**
 * Builds a LogExtractor backed by a LogLoader that resolves both historical
 * (.v&lt;version&gt;) log files and the active log files (.1/.2) in storeDir.
 */
public static LogExtractor from( final FileSystemAbstraction fileSystem, final File storeDir,
XaCommandFactory commandFactory, final ByteCounterMonitor monitor, long startTxId ) throws IOException
{
LogLoader loader = new LogLoader()
{
// Active log files keyed by the log version found in their headers.
private final Map<Long, File> activeLogFiles = getActiveLogs( storeDir );
private final long highestLogVersion = max( getHighestHistoryLogVersion( fileSystem, storeDir,
LOGICAL_LOG_DEFAULT_NAME ), maxKey( activeLogFiles ) );
@Override
public ReadableByteChannel getLogicalLogOrMyselfCommitted( long version, long position )
throws IOException
{
// Prefer the archived .v<version> file; fall back to an active log.
File name = getFileName( version );
if ( !fileSystem.fileExists( name ) )
{
name = activeLogFiles.get( version );
if ( name == null ) throw new NoSuchLogVersionException( version );
}
StoreChannel channel = fileSystem.open( name, "r" );
channel.position( position );
return new BufferedFileChannel( channel, monitor );
}
// Highest version among the active logs, or 0 when there are none.
private long maxKey( Map<Long, File> activeLogFiles )
{
long max = 0;
for ( Long key : activeLogFiles.keySet() ) max = max( max, key );
return max;
}
// Reads each existing active log's header to learn which version it holds.
private Map<Long, File> getActiveLogs( File storeDir ) throws IOException
{
Map<Long, File> result = new HashMap<Long, File>();
for ( String postfix : ACTIVE_POSTFIXES )
{
File candidateFile = new File( storeDir, LOGICAL_LOG_DEFAULT_NAME + postfix );
if ( !fileSystem.fileExists( candidateFile ) )
continue;
long[] header = LogIoUtils.readLogHeader( fileSystem, candidateFile );
result.put( header[0], candidateFile );
}
return result;
}
@Override
public File getFileName( long version )
{
return new File( storeDir, LOGICAL_LOG_DEFAULT_NAME + ".v" + version );
}
@Override
public long getHighestLogVersion()
{
return highestLogVersion;
}
// Not needed for extraction; deliberately unsupported on this loader.
@Override
public Long getFirstCommittedTxId( long version )
{
throw new UnsupportedOperationException();
}
@Override
public Long getFirstStartRecordTimestamp( long version )
{
throw new UnsupportedOperationException();
}
@Override
public long getLastCommittedTxId()
{
throw new UnsupportedOperationException();
}
@Override
public String toString()
{
return getClass().getSimpleName() + "[" + storeDir + "]";
}
};
return new LogExtractor( new LogPositionCache(), loader, commandFactory, startTxId, Long.MAX_VALUE );
}
/**
 * Default command factory: deserializes neo-store commands from a channel.
 */
public static final XaCommandFactory NIONEO_COMMAND_FACTORY = new XaCommandFactory()
{
@Override
public XaCommand readCommand( ReadableByteChannel byteChannel,
ByteBuffer buffer ) throws IOException
{
// The two nulls are Command.readCommand's store hooks — presumably unused
// when only deserializing; confirm in Command.readCommand.
return Command.readCommand( null, null, byteChannel, buffer );
}
};
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_transaction_xaframework_LogExtractor.java
|
204
|
/**
 * Commit record for a transaction completed with two-phase commit
 * (rendered as "2PC" in toString()).
 */
public static class TwoPhaseCommit extends Commit
{
TwoPhaseCommit( int identifier, long txId, long timeWritten )
{
super( identifier, txId, timeWritten, "2PC" );
}
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_transaction_xaframework_LogEntry.java
|
205
|
/**
 * Start record of a transaction: carries the Xid, origin ids (masterId/myId),
 * write time, the last committed tx at the time the tx started, and the
 * position in the log where this record begins.
 */
public static class
Start extends LogEntry
{
private final Xid xid;
private final int masterId;
private final int myId;
private final long timeWritten;
private final long lastCommittedTxWhenTransactionStarted;
// Mutable on purpose: patched via setStartPosition() once the real position is known.
private long startPosition;
Start( Xid xid, int identifier, int masterId, int myId, long startPosition, long timeWritten,
long lastCommittedTxWhenTransactionStarted )
{
super( identifier );
this.xid = xid;
this.masterId = masterId;
this.myId = myId;
this.startPosition = startPosition;
this.timeWritten = timeWritten;
this.lastCommittedTxWhenTransactionStarted = lastCommittedTxWhenTransactionStarted;
}
public Xid getXid()
{
return xid;
}
public int getMasterId()
{
return masterId;
}
public int getLocalId()
{
return myId;
}
public long getStartPosition()
{
return startPosition;
}
void setStartPosition( long position )
{
this.startPosition = position;
}
public long getTimeWritten()
{
return timeWritten;
}
public long getLastCommittedTxWhenTransactionStarted()
{
return lastCommittedTxWhenTransactionStarted;
}
/**
 * @return combines necessary state to get a unique checksum to identify this transaction uniquely.
 */
public long getChecksum()
{
// [upper 32 bits: masterId*37 + myId][lower 32 bits: xid hashcode, which combines time/randomness]
long lowBits = xid.hashCode();
long highBits = masterId*37 + myId;
return (highBits << 32) | (lowBits & 0xFFFFFFFFL);
}
@Override
public String toString()
{
return toString( Format.DEFAULT_TIME_ZONE );
}
@Override
public String toString( TimeZone timeZone )
{
return "Start[" + getIdentifier() + ",xid=" + xid + ",master=" + masterId + ",me=" + myId + ",time=" +
timestamp( timeWritten, timeZone ) + ",lastCommittedTxWhenTransactionStarted="+
lastCommittedTxWhenTransactionStarted+"]";
}
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_transaction_xaframework_LogEntry.java
|
206
|
/**
 * Log record marking that a transaction has been prepared (2PC phase one).
 */
public static class Prepare extends LogEntry
{
    private final long timeWritten;

    Prepare( int identifier, long timeWritten )
    {
        super( identifier );
        this.timeWritten = timeWritten;
    }

    public long getTimeWritten()
    {
        return timeWritten;
    }

    @Override
    public String toString()
    {
        return toString( Format.DEFAULT_TIME_ZONE );
    }

    @Override
    public String toString( TimeZone timeZone )
    {
        StringBuilder builder = new StringBuilder( "Prepare[" );
        builder.append( getIdentifier() )
                .append( ", " )
                .append( timestamp( timeWritten, timeZone ) )
                .append( ']' );
        return builder.toString();
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_transaction_xaframework_LogEntry.java
|
207
|
/**
 * Marker record: the transaction with this identifier is fully applied.
 */
public static class Done extends LogEntry
{
    Done( int identifier )
    {
        super( identifier );
    }

    @Override
    public String toString()
    {
        int id = getIdentifier();
        return "Done[" + id + "]";
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_transaction_xaframework_LogEntry.java
|
208
|
/**
 * A XaLogicalLog whose deserializer exposes every entry it reads to the
 * TransactionInterceptor chain resolved from the owning data source.
 */
public class InterceptingXaLogicalLog extends XaLogicalLog
{
private final XaDataSource ds;
private final TransactionInterceptorProviders providers;
public InterceptingXaLogicalLog( File fileName, XaResourceManager xaRm,
XaCommandFactory cf, XaTransactionFactory xaTf,
TransactionInterceptorProviders providers,
Monitors monitors, FileSystemAbstraction fileSystem, Logging logging,
LogPruneStrategy pruneStrategy, TransactionStateFactory stateFactory,
KernelHealth kernelHealth, long rotateAtSize, InjectedTransactionValidator injectedTxValidator )
{
super( fileName, xaRm, cf, xaTf, fileSystem, monitors, logging, pruneStrategy,
stateFactory, kernelHealth, rotateAtSize, injectedTxValidator );
this.providers = providers;
this.ds = xaRm.getDataSource();
}
@Override
protected LogDeserializer getLogDeserializer( ReadableByteChannel byteChannel )
{
// This is created every time because transaction interceptors can be stateful
final TransactionInterceptor interceptor = providers.resolveChain( ds );
LogDeserializer toReturn = new LogDeserializer( byteChannel, bufferMonitor )
{
@Override
protected void intercept( List<LogEntry> entries )
{
// Feed each entry to the interceptor according to its type, then
// signal completion once the whole batch has been seen.
for ( LogEntry entry : entries )
{
if ( entry instanceof LogEntry.Command )
{
LogEntry.Command commandEntry = (LogEntry.Command) entry;
// Only neo-store Commands can accept() a visitor; other XaCommands are skipped.
if ( commandEntry.getXaCommand() instanceof Command )
{
( (Command) commandEntry.getXaCommand() ).accept( interceptor );
}
}
else if ( entry instanceof LogEntry.Start )
{
interceptor.setStartEntry( (LogEntry.Start) entry );
}
else if ( entry instanceof LogEntry.Commit )
{
interceptor.setCommitEntry( (LogEntry.Commit) entry );
}
}
interceptor.complete();
}
};
return toReturn;
}
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_transaction_xaframework_InterceptingXaLogicalLog.java
|
209
|
/**
 * Base class for commit records; {@code name} distinguishes the flavour
 * ("1PC"/"2PC") in the string rendering.
 */
public static abstract class Commit extends LogEntry
{
    private final long txId;
    private final long timeWritten;
    protected final String name;

    Commit( int identifier, long txId, long timeWritten, String name )
    {
        super( identifier );
        this.txId = txId;
        this.timeWritten = timeWritten;
        this.name = name;
    }

    public long getTxId()
    {
        return txId;
    }

    public long getTimeWritten()
    {
        return timeWritten;
    }

    @Override
    public String toString()
    {
        return toString( Format.DEFAULT_TIME_ZONE );
    }

    @Override
    public String toString( TimeZone timeZone )
    {
        StringBuilder builder = new StringBuilder( name );
        builder.append( '[' )
                .append( getIdentifier() )
                .append( ", txId=" )
                .append( getTxId() )
                .append( ", " )
                .append( timestamp( getTimeWritten(), timeZone ) )
                .append( ']' );
        return builder.toString();
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_transaction_xaframework_LogEntry.java
|
210
|
/**
 * Log record wrapping a single XaCommand belonging to a transaction.
 */
public static class Command extends LogEntry
{
    private final XaCommand command;

    Command( int identifier, XaCommand command )
    {
        super( identifier );
        this.command = command;
    }

    public XaCommand getXaCommand()
    {
        return command;
    }

    @Override
    public String toString()
    {
        StringBuilder builder = new StringBuilder( "Command[" );
        builder.append( getIdentifier() ).append( ", " ).append( command ).append( ']' );
        return builder.toString();
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_transaction_xaframework_LogEntry.java
|
211
|
public abstract class LogEntry
{
/* version 1 as of 2011-02-22
* version 2 as of 2011-10-17
* version 3 as of 2013-02-09: neo4j 2.0 Labels & Indexing
*/
static final byte CURRENT_VERSION = (byte) 3;
// empty record due to memory mapped file
public static final byte EMPTY = (byte) 0;
public static final byte TX_START = (byte) 1;
public static final byte TX_PREPARE = (byte) 2;
public static final byte COMMAND = (byte) 3;
public static final byte DONE = (byte) 4;
public static final byte TX_1P_COMMIT = (byte) 5;
public static final byte TX_2P_COMMIT = (byte) 6;
private int identifier;
LogEntry( int identifier )
{
this.identifier = identifier;
}
public int getIdentifier()
{
return identifier;
}
public String toString( TimeZone timeZone )
{
return toString();
}
public static class
Start extends LogEntry
{
private final Xid xid;
private final int masterId;
private final int myId;
private final long timeWritten;
private final long lastCommittedTxWhenTransactionStarted;
private long startPosition;
Start( Xid xid, int identifier, int masterId, int myId, long startPosition, long timeWritten,
long lastCommittedTxWhenTransactionStarted )
{
super( identifier );
this.xid = xid;
this.masterId = masterId;
this.myId = myId;
this.startPosition = startPosition;
this.timeWritten = timeWritten;
this.lastCommittedTxWhenTransactionStarted = lastCommittedTxWhenTransactionStarted;
}
public Xid getXid()
{
return xid;
}
public int getMasterId()
{
return masterId;
}
public int getLocalId()
{
return myId;
}
public long getStartPosition()
{
return startPosition;
}
void setStartPosition( long position )
{
this.startPosition = position;
}
public long getTimeWritten()
{
return timeWritten;
}
public long getLastCommittedTxWhenTransactionStarted()
{
return lastCommittedTxWhenTransactionStarted;
}
/**
* @return combines necessary state to get a unique checksum to identify this transaction uniquely.
*/
public long getChecksum()
{
// [4 bits combined masterId/myId][4 bits xid hashcode, which combines time/randomness]
long lowBits = xid.hashCode();
long highBits = masterId*37 + myId;
return (highBits << 32) | (lowBits & 0xFFFFFFFFL);
}
@Override
public String toString()
{
return toString( Format.DEFAULT_TIME_ZONE );
}
@Override
public String toString( TimeZone timeZone )
{
return "Start[" + getIdentifier() + ",xid=" + xid + ",master=" + masterId + ",me=" + myId + ",time=" +
timestamp( timeWritten, timeZone ) + ",lastCommittedTxWhenTransactionStarted="+
lastCommittedTxWhenTransactionStarted+"]";
}
}
public static class Prepare extends LogEntry
{
private final long timeWritten;
Prepare( int identifier, long timeWritten )
{
super( identifier );
this.timeWritten = timeWritten;
}
public long getTimeWritten()
{
return timeWritten;
}
@Override
public String toString()
{
return toString( Format.DEFAULT_TIME_ZONE );
}
@Override
public String toString( TimeZone timeZone )
{
return "Prepare[" + getIdentifier() + ", " + timestamp( timeWritten, timeZone ) + "]";
}
}
public static abstract class Commit extends LogEntry
{
private final long txId;
private final long timeWritten;
protected final String name;
Commit( int identifier, long txId, long timeWritten, String name )
{
super( identifier );
this.txId = txId;
this.timeWritten = timeWritten;
this.name = name;
}
public long getTxId()
{
return txId;
}
public long getTimeWritten()
{
return timeWritten;
}
@Override
public String toString()
{
return toString( Format.DEFAULT_TIME_ZONE );
}
@Override
public String toString( TimeZone timeZone )
{
return name + "[" + getIdentifier() + ", txId=" + getTxId() + ", " + timestamp( getTimeWritten(), timeZone ) + "]";
}
}
public static class OnePhaseCommit extends Commit
{
OnePhaseCommit( int identifier, long txId, long timeWritten )
{
super( identifier, txId, timeWritten, "1PC" );
}
}
public static class TwoPhaseCommit extends Commit
{
TwoPhaseCommit( int identifier, long txId, long timeWritten )
{
super( identifier, txId, timeWritten, "2PC" );
}
}
public static class Done extends LogEntry
{
Done( int identifier )
{
super( identifier );
}
@Override
public String toString()
{
return "Done[" + getIdentifier() + "]";
}
}
public static class Command extends LogEntry
{
private final XaCommand command;
Command( int identifier, XaCommand command )
{
super( identifier );
this.command = command;
}
public XaCommand getXaCommand()
{
return command;
}
@Override
public String toString()
{
return "Command[" + getIdentifier() + ", " + command + "]";
}
}
public void setIdentifier( int newXidIdentifier )
{
identifier = newXidIdentifier;
}
/**
 * Renders the given time value as the formatted date (per the supplied time
 * zone) followed by "/" and the raw value itself.
 */
public String timestamp( long timeWritten, TimeZone timeZone )
{
    String dateText = Format.date( timeWritten, timeZone );
    return dateText + "/" + timeWritten;
}
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_transaction_xaframework_LogEntry.java
|
212
|
return logicalLog.createLogWriter( new Function<Config, File>(){
@Override
public File apply( Config config )
{
return new File(testDir.directory(), "my.log");
}
} );
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_transaction_xaframework_LogBackedXaDataSourceLogBufferFactoryTest.java
|
213
|
{
private XaLogicalLog logicalLog = new XaLogicalLog( new File( testDir.directory(), "my.log" ), null, null,
null, new DefaultFileSystemAbstraction(), new Monitors(), new TestLogging(), null, null,
mock( KernelHealth.class ), 100, null );
@Override
public XaConnection getXaConnection()
{
return null;
}
@Override
public LogBufferFactory createLogBufferFactory()
{
return logicalLog.createLogWriter( new Function<Config, File>(){
@Override
public File apply( Config config )
{
return new File(testDir.directory(), "my.log");
}
} );
}
};
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_transaction_xaframework_LogBackedXaDataSourceLogBufferFactoryTest.java
|
214
|
/**
 * Verifies that a {@link LogBackedXaDataSource} can hand out a
 * {@link LogBufferFactory} whose active log file starts with a readable log
 * header, immediately followed by the data written through the buffer.
 */
public class LogBackedXaDataSourceLogBufferFactoryTest
{
    @Rule
    public TargetDirectory.TestDirectory testDir = TargetDirectory.testDirForTest( getClass() );

    @Test
    public void shouldAllowWritingLogicalLog() throws Exception
    {
        // Given
        ByteBuffer scratch = ByteBuffer.allocate( 1024 );
        // Minimal data source stub: its only real collaborator is the logical log
        LogBackedXaDataSource ds = new LogBackedXaDataSource("irrelephant".getBytes(), "irrelephant")
        {
            private XaLogicalLog logicalLog = new XaLogicalLog( new File( testDir.directory(), "my.log" ), null, null,
                    null, new DefaultFileSystemAbstraction(), new Monitors(), new TestLogging(), null, null,
                    mock( KernelHealth.class ), 100, null );

            @Override
            public XaConnection getXaConnection()
            {
                // Not needed by this test
                return null;
            }

            @Override
            public LogBufferFactory createLogBufferFactory()
            {
                return logicalLog.createLogWriter( new Function<Config, File>(){
                    @Override
                    public File apply( Config config )
                    {
                        return new File(testDir.directory(), "my.log");
                    }
                } );
            }
        };
        LogBufferFactory logBufferFactory = ds.createLogBufferFactory();

        // When
        LogBuffer logFile = null;
        try
        {
            logFile = logBufferFactory.createActiveLogFile(
                    new Config(stringMap( store_dir.name(), testDir.absolutePath())), -1 );
            logFile.putLong( 1337l );
            logFile.force();

            // Then the header should be correct
            StoreChannel channel = logFile.getFileChannel();
            channel.position( 0 );
            long[] headerLongs = LogIoUtils.readLogHeader( scratch, channel, true );
            assertThat(headerLongs[0], equalTo(0l));
            assertThat(headerLongs[1], equalTo(-1l));

            // And the data I wrote should immediately follow
            scratch.clear();
            channel.read( scratch );
            scratch.flip();
            assertThat(scratch.getLong(), equalTo(1337l));
        } finally
        {
            // Always release the underlying file channel, even on assertion failure
            if (logFile != null)
                logFile.getFileChannel().close();
        }
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_transaction_xaframework_LogBackedXaDataSourceLogBufferFactoryTest.java
|
215
|
/**
 * Configuration settings for data sources backed by a logical log.
 */
public static abstract class Configuration
{
    // TODO This config should be split into a boolean and a string (keep_logical_logs vs kept_logical_logs)
    public static final Setting<String> keep_logical_logs = GraphDatabaseSettings.keep_logical_logs;
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_transaction_xaframework_LogBackedXaDataSource.java
|
216
|
/**
 * An {@link XaDataSource} whose log-related operations are all delegated to a
 * backing {@link XaLogicalLog}, set once via
 * {@link #setLogicalLogAtCreationTime(XaLogicalLog)}.
 */
public abstract class LogBackedXaDataSource extends XaDataSource
{
    /**
     * Configuration settings for data sources backed by a logical log.
     */
    public static abstract class Configuration
    {
        // TODO This config should be split into a boolean and a string (keep_logical_logs vs kept_logical_logs)
        public static final Setting<String> keep_logical_logs = GraphDatabaseSettings.keep_logical_logs;
    }

    // The backing log; null until set at creation time, and nulled again on stop()
    private XaLogicalLog logicalLog;

    public LogBackedXaDataSource( byte branchId[], String name)
    {
        super( branchId, name );
    }

    /**
     * Sets the {@link XaLogicalLog} at creation time (in constructor). It is
     * done with this method because it can be so problematic in so many ways
     * to have a subclass pass in this to the constructor.
     * @param logicalLog the {@link XaLogicalLog} to set.
     */
    protected void setLogicalLogAtCreationTime( XaLogicalLog logicalLog )
    {
        if ( this.logicalLog != null )
        {
            // Guard against the log being assigned twice
            throw new RuntimeException( "Logical log already set for " + this );
        }
        this.logicalLog = logicalLog;
    }

    /**
     * Closes the backing log, if any. Idempotent: a second call is a no-op
     * since the reference is cleared after a successful close.
     */
    @Override
    public void stop()
    {
        if ( logicalLog == null )
        {
            return;
        }
        try
        {
            logicalLog.close();
        }
        catch ( IOException e )
        {
            throw new RuntimeException( e );
        }
        logicalLog = null;
    }

    @Override
    public boolean deleteLogicalLog( long version )
    {
        return logicalLog.deleteLogicalLog( version );
    }

    @Override
    public ReadableByteChannel getLogicalLog( long version ) throws IOException
    {
        return logicalLog.getLogicalLogOrMyselfCommitted( version, 0 );
    }

    @Override
    public long getLogicalLogLength( long version )
    {
        return logicalLog.getLogicalLogLength( version );
    }

    @Override
    public boolean hasLogicalLog( long version )
    {
        return logicalLog.hasLogicalLog( version );
    }

    @Override
    public long rotateLogicalLog() throws IOException
    {
        // Go through XaResourceManager so that all paths which rotates the
        // logical log will go through its lock
        return getXaContainer().getResourceManager().rotateLogicalLog();
    }

    @Override
    public void setAutoRotate( boolean rotate )
    {
        logicalLog.setAutoRotateLogs( rotate );
    }

    @Override
    public void setLogicalLogTargetSize( long size )
    {
        logicalLog.setLogicalLogTargetSize( size );
    }

    @Override
    public File getFileName( long version )
    {
        return logicalLog.getFileName( version );
    }

    @Override
    public ReadableByteChannel getPreparedTransaction( int identifier ) throws IOException
    {
        return logicalLog.getPreparedTransaction( identifier );
    }

    @Override
    public void getPreparedTransaction( int identifier, LogBuffer targetBuffer ) throws IOException
    {
        logicalLog.getPreparedTransaction( identifier, targetBuffer );
    }

    @Override
    public Pair<Integer,Long> getMasterForCommittedTx( long txId ) throws IOException
    {
        return logicalLog.getMasterForCommittedTransaction( txId );
    }

    @Override
    public LogExtractor getLogExtractor( long startTxId, long endTxIdHint ) throws IOException
    {
        return logicalLog.getLogExtractor( startTxId, endTxIdHint );
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_transaction_xaframework_LogBackedXaDataSource.java
|
217
|
{
@Override
protected void intercept( List<LogEntry> entries )
{
    // Feed each relevant log entry of the transaction to the interceptor:
    // commands are visited individually, while start/commit entries supply
    // transaction metadata.
    for ( LogEntry entry : entries )
    {
        if ( entry instanceof LogEntry.Command )
        {
            LogEntry.Command commandEntry = (LogEntry.Command) entry;
            // Only commands of this store's Command type can accept a visitor
            if ( commandEntry.getXaCommand() instanceof Command )
            {
                ( (Command) commandEntry.getXaCommand() ).accept( interceptor );
            }
        }
        else if ( entry instanceof LogEntry.Start )
        {
            interceptor.setStartEntry( (LogEntry.Start) entry );
        }
        else if ( entry instanceof LogEntry.Commit )
        {
            interceptor.setCommitEntry( (LogEntry.Commit) entry );
        }
    }
    // Signal that all entries of this transaction have been handed over
    interceptor.complete();
}
};
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_transaction_xaframework_InterceptingXaLogicalLog.java
|
218
|
/**
 * A read/write lock guarding a single resource, with deadlock detection
 * delegated to a {@link RagManager}. Waiting threads block on this object's
 * monitor and are woken via {@link Thread#interrupt()} when the lock may have
 * become available. All state is guarded by synchronization on this instance.
 *
 * Fix in this revision: {@link #info()} previously reported
 * {@code ResourceType.NODE} for {@link Relationship} resources (copy-paste
 * from the Node branch); it now reports {@code ResourceType.RELATIONSHIP}.
 */
class RWLock implements Visitor<LineLogger, RuntimeException>
{
    private final Object resource; // the resource this RWLock locks
    private final LinkedList<WaitElement> waitingThreadList = new LinkedList<>();
    private final ArrayMap<Transaction,TxLockElement> txLockElementMap = new ArrayMap<>( (byte)5, false, true );
    private final RagManager ragManager;

    // access to these is guarded by synchronized blocks
    private int totalReadCount;
    private int totalWriteCount;
    private int marked; // synch helper in LockManager

    RWLock( Object resource, RagManager ragManager )
    {
        this.resource = resource;
        this.ragManager = ragManager;
    }

    // keeps track of a transactions read and write lock count on this RWLock
    private static class TxLockElement
    {
        private final Transaction tx;

        // access to these is guarded by synchronized blocks
        private int readCount;
        private int writeCount;
        private boolean movedOn;

        TxLockElement( Transaction tx )
        {
            this.tx = tx;
        }

        boolean isFree()
        {
            return readCount == 0 && writeCount == 0;
        }
    }

    // keeps track of what type of lock a thread is waiting for
    private static class WaitElement
    {
        private final TxLockElement element;
        private final LockType lockType;
        private final Thread waitingThread;
        private final long since = System.currentTimeMillis();

        WaitElement( TxLockElement element, LockType lockType, Thread thread )
        {
            this.element = element;
            this.lockType = lockType;
            this.waitingThread = thread;
        }
    }

    synchronized void mark()
    {
        this.marked++;
    }

    synchronized boolean isMarked()
    {
        return marked > 0;
    }

    /**
     * Tries to acquire read lock for a given transaction. If
     * <CODE>this.writeCount</CODE> is greater than the currents tx's write
     * count the transaction has to wait and the {@link RagManager#checkWaitOn}
     * method is invoked for deadlock detection.
     * <p>
     * If the lock can be acquired the lock count is updated on <CODE>this</CODE>
     * and the transaction lock element (tle).
     *
     * @throws DeadlockDetectedException
     *             if a deadlock is detected
     */
    synchronized void acquireReadLock( Transaction tx ) throws DeadlockDetectedException
    {
        TxLockElement tle = getOrCreateLockElement( tx );
        try
        {
            tle.movedOn = false;
            while ( totalWriteCount > tle.writeCount )
            {
                deadlockGuardedWait( tx, tle, READ );
            }
            registerReadLockAcquired( tx, tle );
        }
        finally
        {
            // if deadlocked, remove marking so lock is removed when empty
            tle.movedOn = true;
            marked--;
        }
    }

    /**
     * Non-blocking variant of {@link #acquireReadLock(Transaction)}:
     * returns {@code false} instead of waiting when the lock is unavailable.
     */
    synchronized boolean tryAcquireReadLock( Transaction tx )
    {
        TxLockElement tle = getOrCreateLockElement( tx );
        try
        {
            tle.movedOn = false;
            if ( totalWriteCount > tle.writeCount )
            {
                return false;
            }
            registerReadLockAcquired( tx, tle );
            return true;
        }
        finally
        {
            // if deadlocked, remove marking so lock is removed when empty
            tle.movedOn = true;
            marked--;
        }
    }

    /**
     * Releases the read lock held by the provided transaction. If it is null then
     * an attempt to acquire the current transaction will be made. This is to
     * make safe calling the method from the context of an
     * <code>afterCompletion()</code> hook where the tx is locally stored and
     * not necessarily available through the tm. If there are waiting
     * transactions in the queue they will be interrupted if they can acquire
     * the lock.
     */
    synchronized void releaseReadLock( Transaction tx ) throws LockNotFoundException
    {
        TxLockElement tle = getLockElement( tx );
        if ( tle.readCount == 0 )
        {
            throw new LockNotFoundException( "" + tx + " don't have readLock" );
        }

        totalReadCount--;
        tle.readCount--;
        if ( tle.isFree() )
        {
            if ( !this.isMarked() )
            {
                txLockElementMap.remove( tx );
            }
            ragManager.lockReleased( this, tx );
        }
        if ( waitingThreadList.size() > 0 )
        {
            WaitElement we = waitingThreadList.getLast();

            if ( we.lockType == LockType.WRITE )
            {
                // this one is tricky...
                // if readCount > 0 we either have to find a waiting read lock
                // in the queue or a waiting write lock that has all read
                // locks, if none of these are found it means that there
                // is a (are) thread(s) that will release read lock(s) in the
                // near future...
                if ( totalReadCount == we.element.readCount )
                {
                    // found a write lock with all read locks
                    waitingThreadList.removeLast();
                    if ( !we.element.movedOn )
                    {
                        we.waitingThread.interrupt();
                    }
                }
                else
                {
                    ListIterator<WaitElement> listItr = waitingThreadList.listIterator(
                            waitingThreadList.lastIndexOf( we ) );
                    // hm am I doing the first all over again?
                    // think I am if cursor is at lastIndex + 0.5 oh well...
                    while ( listItr.hasPrevious() )
                    {
                        we = listItr.previous();
                        if ( we.lockType == LockType.WRITE && totalReadCount == we.element.readCount )
                        {
                            // found a write lock with all read locks
                            listItr.remove();
                            if ( !we.element.movedOn )
                            {
                                we.waitingThread.interrupt();
                                // ----
                                break;
                            }
                        }
                        else if ( we.lockType == LockType.READ )
                        {
                            // found a read lock, let it do the job...
                            listItr.remove();
                            if ( !we.element.movedOn )
                            {
                                we.waitingThread.interrupt();
                            }
                        }
                    }
                }
            }
            else
            {
                // some thread may have the write lock and released a read lock
                // if writeCount is down to zero we can interrupt the waiting
                // read lock
                if ( totalWriteCount == 0 )
                {
                    waitingThreadList.removeLast();
                    if ( !we.element.movedOn )
                    {
                        we.waitingThread.interrupt();
                    }
                }
            }
        }
    }

    /**
     * Calls {@link #acquireWriteLock(Transaction)} with the
     * transaction associated with the current thread.
     * @throws DeadlockDetectedException
     */
    void acquireWriteLock() throws DeadlockDetectedException
    {
        acquireWriteLock( null );
    }

    /**
     * Tries to acquire write lock for a given transaction. If
     * <CODE>this.writeCount</CODE> is greater than the currents tx's write
     * count or the read count is greater than the currents tx's read count the
     * transaction has to wait and the {@link RagManager#checkWaitOn} method is
     * invoked for deadlock detection.
     * <p>
     * If the lock can be acquires the lock count is updated on <CODE>this</CODE>
     * and the transaction lock element (tle).
     *
     * @throws DeadlockDetectedException
     *             if a deadlock is detected
     */
    synchronized void acquireWriteLock( Transaction tx ) throws DeadlockDetectedException
    {
        TxLockElement tle = getOrCreateLockElement( tx );
        try
        {
            tle.movedOn = false;
            while ( totalWriteCount > tle.writeCount || totalReadCount > tle.readCount )
            {
                deadlockGuardedWait( tx, tle, WRITE );
            }
            registerWriteLockAcquired( tx, tle );
        }
        finally
        {
            // if deadlocked, remove marking so lock is removed when empty
            tle.movedOn = true;
            marked--;
        }
    }

    /**
     * Non-blocking variant of {@link #acquireWriteLock(Transaction)}:
     * returns {@code false} instead of waiting when the lock is unavailable.
     */
    synchronized boolean tryAcquireWriteLock( Transaction tx )
    {
        TxLockElement tle = getOrCreateLockElement( tx );
        try
        {
            tle.movedOn = false;
            if ( totalWriteCount > tle.writeCount || totalReadCount > tle.readCount )
            {
                return false;
            }
            registerWriteLockAcquired( tx, tle );
            return true;
        }
        finally
        {
            // if deadlocked, remove marking so lock is removed when empty
            tle.movedOn = true;
            marked--;
        }
    }

    /**
     * Releases the write lock held by the provided tx. If it is null then an
     * attempt to acquire the current transaction from the transaction manager
     * will be made. This is to make safe calling this method as an
     * <code>afterCompletion()</code> hook where the transaction context is not
     * necessarily available. If write count is zero and there are waiting
     * transactions in the queue they will be interrupted if they can acquire
     * the lock.
     */
    synchronized void releaseWriteLock( Transaction tx ) throws LockNotFoundException
    {
        TxLockElement tle = getLockElement( tx );
        if ( tle.writeCount == 0 )
        {
            throw new LockNotFoundException( "" + tx + " don't have writeLock" );
        }

        totalWriteCount--;
        tle.writeCount--;
        if ( tle.isFree() )
        {
            if ( !this.isMarked() )
            {
                txLockElementMap.remove( tx );
            }
            ragManager.lockReleased( this, tx );
        }

        // the threads in the waitingList cannot be currentThread
        // so we only have to wake other elements if writeCount is down to zero
        // (that is: If writeCount > 0 a waiting thread in the queue cannot be
        // the thread that holds the write locks because then it would never
        // have been put into wait mode)
        if ( totalWriteCount == 0 && waitingThreadList.size() > 0 )
        {
            // wake elements in queue until a write lock is found or queue is
            // empty
            do
            {
                WaitElement we = waitingThreadList.removeLast();
                if ( !we.element.movedOn )
                {
                    we.waitingThread.interrupt();
                    if ( we.lockType == LockType.WRITE )
                    {
                        break;
                    }
                }
            }
            while ( waitingThreadList.size() > 0 );
        }
    }

    int getWriteCount()
    {
        return totalWriteCount;
    }

    int getReadCount()
    {
        return totalReadCount;
    }

    synchronized int getWaitingThreadsCount()
    {
        return waitingThreadList.size();
    }

    @Override
    public synchronized boolean visit( LineLogger logger )
    {
        // Dump lock counts, the wait queue and all locking transactions
        logger.logLine( "Total lock count: readCount=" + totalReadCount
                + " writeCount=" + totalWriteCount + " for " + resource );

        logger.logLine( "Waiting list:" );
        Iterator<WaitElement> wElements = waitingThreadList.iterator();
        while ( wElements.hasNext() )
        {
            WaitElement we = wElements.next();
            logger.logLine( "[" + we.waitingThread + "("
                    + we.element.readCount + "r," + we.element.writeCount + "w),"
                    + we.lockType + "]" );
            if ( wElements.hasNext() )
            {
                logger.logLine( "," );
            }
            else
            {
                logger.logLine( "" );
            }
        }

        logger.logLine( "Locking transactions:" );
        Iterator<TxLockElement> lElements = txLockElementMap.values().iterator();
        while ( lElements.hasNext() )
        {
            TxLockElement tle = lElements.next();
            logger.logLine( "" + tle.tx + "(" + tle.readCount + "r,"
                    + tle.writeCount + "w)" );
        }
        return true;
    }

    /**
     * Builds a snapshot of this lock's state: current lock counts, the
     * transactions holding locks and the threads waiting for them.
     */
    synchronized LockInfo info()
    {
        Set<LockingTransaction> lockingTxs = new HashSet<>();
        Set<WaitingThread> waitingTxs = new HashSet<>();
        for ( TxLockElement tle : txLockElementMap.values() )
        {
            lockingTxs.add( new LockingTransaction( tle.tx.toString(), tle.readCount, tle.writeCount ) );
        }
        for ( WaitElement thread : waitingThreadList )
        {
            waitingTxs.add( WaitingThread.create( thread.element.tx.toString(),
                    thread.element.readCount, thread.element.writeCount, thread.waitingThread, thread.since,
                    thread.lockType == LockType.WRITE ) );
        }
        ResourceType type;
        String id;
        if ( resource instanceof Node )
        {
            type = ResourceType.NODE;
            id = Long.toString( ( (Node) resource ).getId() );
        }
        else if ( resource instanceof Relationship )
        {
            // Fixed: this branch previously reported ResourceType.NODE
            // (copy-paste from the Node branch above).
            type = ResourceType.RELATIONSHIP;
            id = Long.toString( ( (Relationship) resource ).getId() );
        }
        else
        {
            type = ResourceType.OTHER;
            id = resource.toString();
        }
        return new LockInfo( type, id, totalReadCount, totalWriteCount,
                new ArrayList<>( lockingTxs ), new ArrayList<>( waitingTxs ) );
    }

    synchronized boolean acceptVisitorIfWaitedSinceBefore( Visitor<LockInfo, RuntimeException> visitor, long waitStart )
    {
        for ( WaitElement thread : waitingThreadList )
        {
            if ( thread.since <= waitStart )
            {
                return visitor.visit( info() );
            }
        }
        return false;
    }

    @Override
    public String toString()
    {
        return "RWLock[" + resource + "]";
    }

    private void registerReadLockAcquired( Transaction tx, TxLockElement tle )
    {
        registerLockAcquired( tx, tle );
        totalReadCount++;
        tle.readCount++;
    }

    private void registerWriteLockAcquired( Transaction tx, TxLockElement tle )
    {
        registerLockAcquired( tx, tle );
        totalWriteCount++;
        tle.writeCount++;
    }

    private void registerLockAcquired( Transaction tx, TxLockElement tle )
    {
        // First lock this tx takes on this resource: tell the RAG manager
        if ( tle.isFree() )
        {
            ragManager.lockAcquired( this, tx );
        }
    }

    private TxLockElement getLockElement( Transaction tx )
    {
        TxLockElement tle = txLockElementMap.get( tx );
        if ( tle == null )
        {
            throw new LockNotFoundException( "No transaction lock element found for " + tx );
        }
        return tle;
    }

    private void assertTransaction( Transaction tx )
    {
        if ( tx == null )
        {
            throw new IllegalArgumentException( "Null transaction" );
        }
    }

    private void deadlockGuardedWait( Transaction tx, TxLockElement tle, LockType lockType )
    {   // given: we must be in a synchronized block here
        ragManager.checkWaitOn( this, tx );
        waitingThreadList.addFirst( new WaitElement( tle, lockType, currentThread() ) );
        try
        {
            wait();
        }
        catch ( InterruptedException e )
        {
            // Being interrupted is the wake-up mechanism; clear the flag
            interrupted();
        }
        ragManager.stopWaitOn( this, tx );
    }

    private TxLockElement getOrCreateLockElement( Transaction tx )
    {
        assertTransaction( tx );
        TxLockElement tle = txLockElementMap.get( tx );
        if ( tle == null )
        {
            txLockElementMap.put( tx, tle = new TxLockElement( tx ) );
        }
        return tle;
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_transaction_RWLock.java
|
219
|
/**
 * Simple named resource used as a lock target in tests; the name is only
 * used when the resource is printed.
 */
public static class ResourceObject
{
    private final String name;

    ResourceObject( String name )
    {
        this.name = name;
    }

    @Override
    public String toString()
    {
        return name;
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_transaction_LockWorker.java
|
220
|
{
@Override
public LockElement acquireReadLock( Object resource )
{
    // Grabs a read lock on the resource for the current transaction and
    // wraps it so the caller can release it later.
    try
    {
        Transaction tx = getTransaction();
        lockManager.getReadLock( resource, tx );
        return new LockElement( resource, tx, LockType.READ, lockManager );
    }
    catch ( Exception e )
    {
        // Rethrow as unchecked, preserving the original exception
        throw launderedException( e );
    }
}
@Override
public LockElement acquireWriteLock( Object resource )
{
    // Grabs a write lock on the resource for the current transaction and
    // wraps it so the caller can release it later.
    try
    {
        Transaction tx = getTransaction();
        lockManager.getWriteLock( resource, tx );
        return new LockElement( resource, tx, LockType.WRITE, lockManager );
    }
    catch ( SystemException e )
    {
        // Rethrow as unchecked, preserving the original exception
        throw launderedException( e );
    }
}
@Override
public TxIdGenerator getTxIdGenerator()
{
    // Hands back the txIdGenerator held by the enclosing scope
    return txIdGenerator;
}
};
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_transaction_PlaceboTm.java
|
221
|
/**
 * Tests the label field of a {@link NodeRecord}: small label sets are inlined
 * directly into the field (length header in the top bits, label ids packed
 * below), while larger sets spill over into dynamic records whose first
 * record id is stored in the field with the high marker bit
 * {@code 0x8000000000L} set.
 */
public class NodeLabelsFieldTest
{
    @Test
    public void shouldInlineOneLabel() throws Exception
    {
        // GIVEN
        long labelId = 10;
        NodeRecord node = nodeRecordWithInlinedLabels();
        NodeLabels nodeLabels = NodeLabelsField.parseLabelsField( node );

        // WHEN
        nodeLabels.add( labelId, null );

        // THEN
        assertEquals( inlinedLabelsLongRepresentation( labelId ), node.getLabelField() );
    }

    @Test
    public void shouldInlineOneLabelWithHighId() throws Exception
    {
        // GIVEN
        long labelId = 10000;
        NodeRecord node = nodeRecordWithInlinedLabels();
        NodeLabels nodeLabels = NodeLabelsField.parseLabelsField( node );

        // WHEN
        nodeLabels.add( labelId, null );

        // THEN
        assertEquals( inlinedLabelsLongRepresentation( labelId ), node.getLabelField() );
    }

    @Test
    public void shouldInlineTwoSmallLabels() throws Exception
    {
        // GIVEN
        long labelId1 = 10, labelId2 = 30;
        NodeRecord node = nodeRecordWithInlinedLabels( labelId1 );
        NodeLabels nodeLabels = NodeLabelsField.parseLabelsField( node );

        // WHEN
        nodeLabels.add( labelId2, null );

        // THEN
        assertEquals( inlinedLabelsLongRepresentation( labelId1, labelId2 ), node.getLabelField() );
    }

    @Test
    public void shouldInlineThreeSmallLabels() throws Exception
    {
        // GIVEN
        long labelId1 = 10, labelId2 = 30, labelId3 = 4095;
        NodeRecord node = nodeRecordWithInlinedLabels( labelId1, labelId2 );
        NodeLabels nodeLabels = NodeLabelsField.parseLabelsField( node );

        // WHEN
        nodeLabels.add( labelId3, null );

        // THEN
        assertEquals( inlinedLabelsLongRepresentation( labelId1, labelId2, labelId3 ), node.getLabelField() );
    }

    @Test
    public void shouldInlineFourSmallLabels() throws Exception
    {
        // GIVEN
        long labelId1 = 10, labelId2 = 30, labelId3 = 45, labelId4 = 60;
        NodeRecord node = nodeRecordWithInlinedLabels( labelId1, labelId2, labelId3 );
        NodeLabels nodeLabels = NodeLabelsField.parseLabelsField( node );

        // WHEN
        nodeLabels.add( labelId4, null );

        // THEN
        assertEquals( inlinedLabelsLongRepresentation( labelId1, labelId2, labelId3, labelId4 ), node.getLabelField() );
    }

    @Test
    public void shouldInlineFiveSmallLabels() throws Exception
    {
        // GIVEN
        long labelId1 = 10, labelId2 = 30, labelId3 = 45, labelId4 = 60, labelId5 = 61;
        NodeRecord node = nodeRecordWithInlinedLabels( labelId1, labelId2, labelId3, labelId4 );
        NodeLabels nodeLabels = NodeLabelsField.parseLabelsField( node );

        // WHEN
        nodeLabels.add( labelId5, null );

        // THEN
        assertEquals( inlinedLabelsLongRepresentation( labelId1, labelId2, labelId3, labelId4, labelId5 ),
                node.getLabelField() );
    }

    @Test
    public void shouldSpillOverToDynamicRecordIfExceedsInlinedSpace() throws Exception
    {
        // GIVEN -- the upper limit for a label ID for 3 labels would be 36b/3 - 1 = 12b - 1 = 4095
        long labelId1 = 10, labelId2 = 30, labelId3 = 4096;
        NodeRecord node = nodeRecordWithInlinedLabels( labelId1, labelId2 );
        NodeLabels nodeLabels = NodeLabelsField.parseLabelsField( node );

        // WHEN
        Collection<DynamicRecord> changedDynamicRecords = nodeLabels.add( labelId3, nodeStore );

        // THEN
        assertEquals( 1, count( changedDynamicRecords ) );
        assertEquals( dynamicLabelsLongRepresentation( changedDynamicRecords ), node.getLabelField() );
        assertTrue( Arrays.equals( new long[] {labelId1, labelId2, labelId3},
                nodeStore.getDynamicLabelsArray( changedDynamicRecords ) ) );
    }

    @Test
    public void oneDynamicRecordShouldExtendIntoAnAdditionalIfTooManyLabels() throws Exception
    {
        // GIVEN
        // will occupy 60B of data, i.e. one dynamic record
        NodeRecord node = nodeRecordWithDynamicLabels( nodeStore, oneByteLongs( 56 ) );
        Collection<DynamicRecord> initialRecords = node.getDynamicLabelRecords();
        NodeLabels nodeLabels = NodeLabelsField.parseLabelsField( node );

        // WHEN
        Set<DynamicRecord> changedDynamicRecords = asSet( nodeLabels.add( 1, nodeStore ) );

        // THEN
        assertTrue( changedDynamicRecords.containsAll( initialRecords ) );
        assertEquals( initialRecords.size()+1, changedDynamicRecords.size() );
    }

    @Test
    public void oneDynamicRecordShouldStoreItsOwner() throws Exception
    {
        // GIVEN
        // will occupy 60B of data, i.e. one dynamic record
        Long nodeId = 24l;
        NodeRecord node = nodeRecordWithDynamicLabels( nodeId, nodeStore, oneByteLongs(56) );
        Collection<DynamicRecord> initialRecords = node.getDynamicLabelRecords();

        // WHEN
        Pair<Long,long[]> pair = nodeStore.getDynamicLabelsArrayAndOwner( initialRecords );

        // THEN
        assertEquals( nodeId, pair.first() );
    }

    @Test
    public void twoDynamicRecordsShouldShrinkToOneWhenRemoving() throws Exception
    {
        // GIVEN
        // will occupy 61B of data, i.e. just two dynamic records
        NodeRecord node = nodeRecordWithDynamicLabels( nodeStore, oneByteLongs( 57 ) );
        Collection<DynamicRecord> initialRecords = node.getDynamicLabelRecords();
        NodeLabels nodeLabels = NodeLabelsField.parseLabelsField( node );

        // WHEN
        List<DynamicRecord> changedDynamicRecords = addToCollection(
                nodeLabels.remove( 255 /*Initial labels go from 255 and down to 255-58*/, nodeStore ),
                new ArrayList<DynamicRecord>() );

        // THEN
        assertEquals( initialRecords, changedDynamicRecords );
        assertTrue( changedDynamicRecords.get( 0 ).inUse() );
        assertFalse( changedDynamicRecords.get( 1 ).inUse() );
    }

    @Test
    public void twoDynamicRecordsShouldShrinkToOneWhenRemovingWithoutChangingItsOwner() throws Exception
    {
        // GIVEN
        // will occupy 61B of data, i.e. just two dynamic records
        Long nodeId = 42l;
        NodeRecord node = nodeRecordWithDynamicLabels( nodeId, nodeStore, oneByteLongs( 57 ) );
        NodeLabels nodeLabels = NodeLabelsField.parseLabelsField( node );

        List<DynamicRecord> changedDynamicRecords = addToCollection(
                nodeLabels.remove( 255 /*Initial labels go from 255 and down to 255-58*/, nodeStore ),
                new ArrayList<DynamicRecord>() );

        // WHEN
        Pair<Long,long[]> changedPair = nodeStore.getDynamicLabelsArrayAndOwner( changedDynamicRecords );

        // THEN
        assertEquals( nodeId, changedPair.first() );
    }

    @Test
    public void oneDynamicRecordShouldShrinkIntoInlinedWhenRemoving() throws Exception
    {
        // GIVEN
        NodeRecord node = nodeRecordWithDynamicLabels( nodeStore, oneByteLongs( 5 ) );
        Collection<DynamicRecord> initialRecords = node.getDynamicLabelRecords();
        NodeLabels nodeLabels = NodeLabelsField.parseLabelsField( node );

        // WHEN
        Collection<DynamicRecord> changedDynamicRecords = asCollection( nodeLabels.remove( 255, nodeStore ) );

        // THEN
        assertEquals( initialRecords, changedDynamicRecords );
        assertFalse( single( changedDynamicRecords ).inUse() );
        assertEquals( inlinedLabelsLongRepresentation( 251, 252, 253, 254 ), node.getLabelField() );
    }

    @Test
    public void shouldReadIdOfDynamicRecordFromDynamicLabelsField() throws Exception
    {
        // GIVEN
        NodeRecord node = nodeRecordWithDynamicLabels( nodeStore, oneByteLongs( 5 ) );
        DynamicRecord dynamicRecord = node.getDynamicLabelRecords().iterator().next();

        // WHEN
        Long dynRecordId = NodeLabelsField.fieldDynamicLabelRecordId( node.getLabelField() );

        // THEN
        assertEquals( (Long) dynamicRecord.getLongId(), dynRecordId );
    }

    @Test
    public void shouldReadNullDynamicRecordFromInlineLabelsField() throws Exception
    {
        // GIVEN
        NodeRecord node = nodeRecordWithInlinedLabels( 23l );

        // WHEN
        Long dynRecordId = NodeLabelsField.fieldDynamicLabelRecordId( node.getLabelField() );

        // THEN
        assertNull( dynRecordId );
    }

    @Test
    public void maximumOfSevenInlinedLabels() throws Exception
    {
        // GIVEN
        long[] labels = new long[] {0, 1, 2, 3, 4, 5, 6};
        NodeRecord node = nodeRecordWithInlinedLabels( labels );
        NodeLabels nodeLabels = NodeLabelsField.parseLabelsField( node );

        // WHEN
        Iterable<DynamicRecord> changedDynamicRecords = nodeLabels.add( 23, nodeStore );

        // THEN
        assertEquals( dynamicLabelsLongRepresentation( changedDynamicRecords ), node.getLabelField() );
        assertEquals( 1, count( changedDynamicRecords ) );
    }

    @Test
    public void addingAnAlreadyAddedLabelWhenLabelsAreInlinedShouldFail() throws Exception
    {
        // GIVEN
        int labelId = 1;
        NodeRecord node = nodeRecordWithInlinedLabels( labelId );
        NodeLabels nodeLabels = NodeLabelsField.parseLabelsField( node );

        // WHEN
        try
        {
            nodeLabels.add( labelId, nodeStore );
            fail( "Should have thrown exception" );
        }
        catch ( IllegalStateException e )
        {
            // THEN
        }
    }

    @Test
    public void addingAnAlreadyAddedLabelWhenLabelsAreInDynamicRecordsShouldFail() throws Exception
    {
        // GIVEN
        long[] labels = oneByteLongs( 20 );
        NodeRecord node = nodeRecordWithDynamicLabels( nodeStore, labels );
        NodeLabels nodeLabels = NodeLabelsField.parseLabelsField( node );

        // WHEN
        try
        {
            nodeLabels.add( safeCastLongToInt( labels[0] ), nodeStore );
            fail( "Should have thrown exception" );
        }
        catch ( IllegalStateException e )
        {
            // THEN
        }
    }

    @Test
    public void removingNonExistentInlinedLabelShouldFail() throws Exception
    {
        // GIVEN
        int labelId1 = 1, labelId2 = 2;
        NodeRecord node = nodeRecordWithInlinedLabels( labelId1 );
        NodeLabels nodeLabels = NodeLabelsField.parseLabelsField( node );

        // WHEN
        try
        {
            nodeLabels.remove( labelId2, nodeStore );
            fail( "Should have thrown exception" );
        }
        catch ( IllegalStateException e )
        {
            // THEN
        }
    }

    @Test
    public void removingNonExistentLabelInDynamicRecordsShouldFail() throws Exception
    {
        // GIVEN
        long[] labels = oneByteLongs( 20 );
        NodeRecord node = nodeRecordWithDynamicLabels( nodeStore, labels );
        NodeLabels nodeLabels = NodeLabelsField.parseLabelsField( node );

        // WHEN
        try
        {
            nodeLabels.remove( 123456, nodeStore );
            fail( "Should have thrown exception" );
        }
        catch ( IllegalStateException e )
        {
            // THEN
        }
    }

    @Test
    public void shouldReallocateSomeOfPreviousDynamicRecords() throws Exception
    {
        // GIVEN
        NodeRecord node = nodeRecordWithDynamicLabels( nodeStore, oneByteLongs( 5 ) );
        Set<DynamicRecord> initialRecords = asUniqueSet( node.getDynamicLabelRecords() );
        NodeLabels nodeLabels = NodeLabelsField.parseLabelsField( node );

        // WHEN
        Set<DynamicRecord> reallocatedRecords = asUniqueSet( nodeLabels.put( fourByteLongs( 100 ), nodeStore ) );

        // THEN
        assertTrue( reallocatedRecords.containsAll( initialRecords ) );
        assertTrue( reallocatedRecords.size() > initialRecords.size() );
    }

    @Test
    public void shouldReallocateAllOfPreviousDynamicRecordsAndThenSome() throws Exception
    {
        // GIVEN
        NodeRecord node = nodeRecordWithDynamicLabels( nodeStore, fourByteLongs( 100 ) );
        Set<DynamicRecord> initialRecords = asSet( cloned( node.getDynamicLabelRecords(), DynamicRecord.class ) );
        NodeLabels nodeLabels = NodeLabelsField.parseLabelsField( node );

        // WHEN
        Set<DynamicRecord> reallocatedRecords = asUniqueSet( nodeLabels.put( fourByteLongs( 5 ), nodeStore ) );

        // THEN
        assertTrue( "initial:" + initialRecords + ", reallocated:" + reallocatedRecords ,
                initialRecords.containsAll( used( reallocatedRecords ) ) );
        assertTrue( used( reallocatedRecords ).size() < initialRecords.size() );
    }

    // Expected label field value when labels live in dynamic records: the
    // high marker bit plus the id of the first dynamic record
    private long dynamicLabelsLongRepresentation( Iterable<DynamicRecord> records )
    {
        return 0x8000000000L|first( records ).getId();
    }

    // Expected label field value when labels are inlined: label count in the
    // bits above bit 36, the ids bit-packed evenly below
    private long inlinedLabelsLongRepresentation( long... labelIds )
    {
        long header = (long)labelIds.length << 36;
        byte bitsPerLabel = (byte) (36/labelIds.length);
        Bits bits = bits( 5 );
        for ( long labelId : labelIds )
        {
            bits.put( labelId, bitsPerLabel );
        }
        return header|bits.getLongs()[0];
    }

    @Rule public EphemeralFileSystemRule fs = new EphemeralFileSystemRule();
    // Fresh in-memory node store per test, see startUp()/cleanUp()
    private NodeStore nodeStore;

    @Before
    public void startUp()
    {
        StoreFactory storeFactory = new StoreFactory( new Config(), new DefaultIdGeneratorFactory(),
                new DefaultWindowPoolFactory(), fs.get(), StringLogger.DEV_NULL,
                new DefaultTxHook() );
        File storeFile = new File( "store" );
        storeFactory.createNodeStore( storeFile );
        nodeStore = storeFactory.newNodeStore( storeFile );
    }

    @After
    public void cleanUp()
    {
        if ( nodeStore != null )
        {
            nodeStore.close();
        }
    }

    // Creates a node record whose label field inlines the given label ids
    private NodeRecord nodeRecordWithInlinedLabels( long... labels )
    {
        NodeRecord node = new NodeRecord( 0, 0, 0 );
        if ( labels.length > 0 )
        {
            node.setLabelField( inlinedLabelsLongRepresentation( labels ), Collections.<DynamicRecord>emptyList() );
        }
        return node;
    }

    private NodeRecord nodeRecordWithDynamicLabels( NodeStore nodeStore, long... labels )
    {
        return nodeRecordWithDynamicLabels( 0, nodeStore, labels );
    }

    // Creates a node record whose labels are stored in allocated dynamic records
    private NodeRecord nodeRecordWithDynamicLabels( long nodeId, NodeStore nodeStore, long... labels )
    {
        NodeRecord node = new NodeRecord( nodeId, 0, 0 );
        Collection<DynamicRecord> initialRecords = allocateAndApply( nodeStore, node.getId(), labels );
        node.setLabelField( dynamicLabelsLongRepresentation( initialRecords ), initialRecords );
        return node;
    }

    // Allocates dynamic label records for the longs and writes them to the store
    private Collection<DynamicRecord> allocateAndApply( NodeStore nodeStore, long nodeId, long[] longs )
    {
        Collection<DynamicRecord> records = nodeStore.allocateRecordsForDynamicLabels( nodeId, longs );
        nodeStore.updateDynamicLabelRecords( records );
        return records;
    }

    // Sorted label ids that each fit in one byte (counting down from 255)
    private long[] oneByteLongs( int numberOfLongs )
    {
        long[] result = new long[numberOfLongs];
        for ( int i = 0; i < numberOfLongs; i++ )
        {
            result[i] = 255-i;
        }
        Arrays.sort( result );
        return result;
    }

    // Sorted label ids that need four bytes (counting down from Integer.MAX_VALUE)
    private long[] fourByteLongs( int numberOfLongs )
    {
        long[] result = new long[numberOfLongs];
        for ( int i = 0; i < numberOfLongs; i++ )
        {
            result[i] = Integer.MAX_VALUE-i;
        }
        Arrays.sort( result );
        return result;
    }

    // Filters out the records still marked as in use
    private Set<DynamicRecord> used( Set<DynamicRecord> reallocatedRecords )
    {
        Set<DynamicRecord> used = new HashSet<>();
        for ( DynamicRecord record : reallocatedRecords )
        {
            if ( record.inUse() )
            {
                used.add( record );
            }
        }
        return used;
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_nioneo_xa_NodeLabelsFieldTest.java
|
222
|
{
@Override
public boolean matchesSafely( T item )
{
return comparison.compare( record, item );
}
@Override
public void describeTo( Description description )
{
description.appendValue( record );
}
};
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_nioneo_xa_TransactionWriterTest.java
|
223
|
public class TransactionWriterTest
{
@Test
public void shouldWriteTransaction() throws Exception
{
// given
InMemoryLogBuffer buffer = new InMemoryLogBuffer();
TransactionWriter writer = new TransactionWriter( buffer, 1, -1 );
NodeRecord node = new NodeRecord( 0, -1, -1 );
node.setLabelField( 0, Collections.<DynamicRecord>emptyList() );
RelationshipRecord relationship = new RelationshipRecord( 0, 1, 1, 6 );
// when
writer.start( 1, 1, 0 );
writer.create( node );
writer.update( relationship );
writer.delete( propertyRecordWithOneIntProperty( 3, 10, 45 ), new PropertyRecord( 3 ) );
writer.prepare();
writer.commit( false, 17 );
writer.done();
// then
TransactionReader.Visitor visitor = visited( buffer );
InOrder order = inOrder( visitor );
order.verify( visitor ).visitStart( eq( 1 ), any( byte[].class ), eq( 1 ), eq( 1 ), anyLong() );
order.verify( visitor ).visitUpdateNode( eq( 1 ), argThat( matchesRecord( node ) ) );
order.verify( visitor ).visitUpdateRelationship( eq( 1 ), argThat( matchesRecord( relationship ) ) );
order.verify( visitor ).visitDeleteProperty( eq( 1 ), eq( 3l ) );
order.verify( visitor ).visitPrepare( eq( 1 ), anyLong() );
order.verify( visitor ).visitCommit( eq( 1 ), eq( false ), eq( 17l ), anyLong() );
order.verify( visitor ).visitDone( eq( 1 ) );
verifyNoMoreInteractions( visitor );
}
private PropertyRecord propertyRecordWithOneIntProperty( long id, int keyId, int value )
{
PropertyRecord record = new PropertyRecord( id );
record.setInUse( true );
PropertyBlock block = new PropertyBlock();
// Logic copied from PropertyStore#encodeValue
block.setSingleBlock( keyId | (((long) PropertyType.INT.intValue()) << 24)
| (value << 28) );
record.addPropertyBlock( block );
return record;
}
private static TransactionReader.Visitor visited( ReadableByteChannel source ) throws IOException
{
TransactionReader.Visitor visitor = mock( TransactionReader.Visitor.class );
new TransactionReader().read( source, visitor );
return visitor;
}
private final Map<Class<?>, Comparison> comparisons = new HashMap<Class<?>, Comparison>();
@SuppressWarnings("unchecked")
private <T extends AbstractBaseRecord> Matcher<T> matchesRecord( final T record )
{
final Comparison comparison = comparison( record.getClass() );
return new TypeSafeMatcher<T>( (Class) record.getClass() )
{
@Override
public boolean matchesSafely( T item )
{
return comparison.compare( record, item );
}
@Override
public void describeTo( Description description )
{
description.appendValue( record );
}
};
}
private Comparison comparison( Class<?> type )
{
Comparison comparison = comparisons.get( type );
if ( comparison == null )
{
comparisons.put( type, comparison = new Comparison( type ) );
}
return comparison;
}
private static class Comparison
{
private final Collection<Field> fields = new ArrayList<Field>();
Comparison( Class<?> type )
{
for ( Field field : type.getDeclaredFields() )
{
if ( field.getDeclaringClass() == type )
{
field.setAccessible( true );
fields.add( field );
}
}
}
boolean compare( Object expected, Object actual )
{
try
{
for ( Field field : fields )
{
if ( !equal( field.get( expected ), field.get( actual ) ) )
{
return false;
}
}
return true;
}
catch ( Exception failure )
{
return false;
}
}
private static boolean equal( Object a, Object b )
{
return a == b || (a != null && (a.equals( b ) || (b != null && deepEquals( a, b ))));
}
private static boolean deepEquals( Object a, Object b )
{
if (a.getClass() == b.getClass() && a.getClass().isArray())
{
if ( a instanceof Object[] )
{
return Arrays.deepEquals( (Object[]) a, (Object[]) b );
}
if ( a instanceof byte[] )
{
return Arrays.equals( (byte[]) a, (byte[]) b );
}
if ( a instanceof char[] )
{
return Arrays.equals( (char[]) a, (char[])b );
}
}
return false;
}
}
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_nioneo_xa_TransactionWriterTest.java
|
224
|
public class TransactionWriter
{
private final LogBuffer buffer;
private final int identifier;
private final int localId;
public TransactionWriter( LogBuffer buffer, int identifier, int localId )
{
this.buffer = buffer;
this.identifier = identifier;
this.localId = localId;
}
// Transaction coordination
public void start( int masterId, int myId, long latestCommittedTxWhenTxStarted ) throws IOException
{
start( getNewGlobalId( DEFAULT_SEED, localId ), masterId, myId, currentTimeMillis(), latestCommittedTxWhenTxStarted );
}
public void start( byte[] globalId, int masterId, int myId, long startTimestamp,
long latestCommittedTxWhenTxStarted ) throws IOException
{
Xid xid = new XidImpl( globalId, NeoStoreXaDataSource.BRANCH_ID );
LogIoUtils.writeStart( buffer, this.identifier, xid, masterId, myId, startTimestamp, latestCommittedTxWhenTxStarted );
}
public void prepare() throws IOException
{
prepare( System.currentTimeMillis() );
}
public void prepare( long prepareTimestamp ) throws IOException
{
LogIoUtils.writePrepare( buffer, identifier, prepareTimestamp );
}
public void commit( boolean twoPhase, long txId ) throws IOException
{
commit( twoPhase, txId, System.currentTimeMillis() );
}
public void commit( boolean twoPhase, long txId, long commitTimestamp ) throws IOException
{
LogIoUtils.writeCommit( twoPhase, buffer, identifier, txId, commitTimestamp );
}
public void done() throws IOException
{
LogIoUtils.writeDone( buffer, identifier );
}
// Transaction data
public void propertyKey( int id, String key, int... dynamicIds ) throws IOException
{
write( new Command.PropertyKeyTokenCommand( null, withName( new PropertyKeyTokenRecord( id ), dynamicIds, key ) ) );
}
public void label( int id, String name, int... dynamicIds ) throws IOException
{
write( new Command.LabelTokenCommand( null, withName( new LabelTokenRecord( id ), dynamicIds, name ) ) );
}
public void relationshipType( int id, String label, int... dynamicIds ) throws IOException
{
write( new Command.RelationshipTypeTokenCommand( null,
withName( new RelationshipTypeTokenRecord( id ), dynamicIds, label ) ) );
}
public void update( NeoStoreRecord record ) throws IOException
{
write( new Command.NeoStoreCommand( null, record ) );
}
public void create( NodeRecord node ) throws IOException
{
node.setCreated();
update( new NodeRecord( node.getId(), NO_PREV_RELATIONSHIP.intValue(), NO_NEXT_PROPERTY.intValue() ), node );
}
public void update( NodeRecord before, NodeRecord node ) throws IOException
{
node.setInUse( true );
add( before, node );
}
public void delete( NodeRecord node ) throws IOException
{
node.setInUse( false );
add( node, new NodeRecord( node.getId(), NO_PREV_RELATIONSHIP.intValue(), NO_NEXT_PROPERTY.intValue() ) );
}
public void create( RelationshipRecord relationship ) throws IOException
{
relationship.setCreated();
update( relationship );
}
public void createSchema( Collection<DynamicRecord> beforeRecord, Collection<DynamicRecord> afterRecord ) throws IOException
{
for ( DynamicRecord record : afterRecord )
{
record.setCreated();
}
updateSchema( beforeRecord, afterRecord );
}
public void updateSchema(Collection<DynamicRecord> beforeRecords, Collection<DynamicRecord> afterRecords) throws IOException
{
for ( DynamicRecord record : afterRecords )
{
record.setInUse( true );
}
addSchema( beforeRecords, afterRecords );
}
public void update( RelationshipRecord relationship ) throws IOException
{
relationship.setInUse( true );
add( relationship );
}
public void delete( RelationshipRecord relationship ) throws IOException
{
relationship.setInUse( false );
add( relationship );
}
public void create( PropertyRecord property ) throws IOException
{
property.setCreated();
PropertyRecord before = new PropertyRecord( property.getLongId() );
if ( property.isNodeSet() )
before.setNodeId( property.getNodeId() );
if ( property.isRelSet() )
before.setRelId( property.getRelId() );
update( before, property );
}
public void update( PropertyRecord before, PropertyRecord after ) throws IOException
{
after.setInUse(true);
add( before, after );
}
public void delete( PropertyRecord before, PropertyRecord after ) throws IOException
{
after.setInUse(false);
add( before, after );
}
// Internals
private void addSchema( Collection<DynamicRecord> beforeRecords, Collection<DynamicRecord> afterRecords ) throws IOException
{
write( new Command.SchemaRuleCommand( null, null, null, beforeRecords, afterRecords, null, Long.MAX_VALUE ) );
}
public void add( NodeRecord before, NodeRecord after ) throws IOException
{
write( new Command.NodeCommand( null, before, after ) );
}
public void add( RelationshipRecord relationship ) throws IOException
{
write( new Command.RelationshipCommand( null, relationship ) );
}
public void add( PropertyRecord before, PropertyRecord property ) throws IOException
{
write( new Command.PropertyCommand( null, before, property ) );
}
public void add( RelationshipTypeTokenRecord record ) throws IOException
{
write( new Command.RelationshipTypeTokenCommand( null, record ) );
}
public void add( PropertyKeyTokenRecord record ) throws IOException
{
write( new Command.PropertyKeyTokenCommand( null, record ) );
}
public void add( NeoStoreRecord record ) throws IOException
{
write( new Command.NeoStoreCommand( null, record ) );
}
private void write( Command command ) throws IOException
{
LogIoUtils.writeCommand( buffer, identifier, command );
}
private static <T extends TokenRecord> T withName( T record, int[] dynamicIds, String name )
{
if ( dynamicIds == null || dynamicIds.length == 0 )
{
throw new IllegalArgumentException( "No dynamic records for storing the name." );
}
record.setInUse( true );
byte[] data = PropertyStore.encodeString( name );
if ( data.length > dynamicIds.length * NAME_STORE_BLOCK_SIZE )
{
throw new IllegalArgumentException(
String.format( "[%s] is too long to fit in %d blocks", name, dynamicIds.length ) );
}
else if ( data.length <= (dynamicIds.length - 1) * NAME_STORE_BLOCK_SIZE )
{
throw new IllegalArgumentException(
String.format( "[%s] is to short to fill %d blocks", name, dynamicIds.length ) );
}
for ( int i = 0; i < dynamicIds.length; i++ )
{
byte[] part = new byte[Math.min( NAME_STORE_BLOCK_SIZE, data.length - i * NAME_STORE_BLOCK_SIZE )];
System.arraycopy( data, i * NAME_STORE_BLOCK_SIZE, part, 0, part.length );
DynamicRecord dynamicRecord = new DynamicRecord( dynamicIds[i] );
dynamicRecord.setInUse( true );
dynamicRecord.setData( part );
dynamicRecord.setCreated();
record.addNameRecord( dynamicRecord );
}
record.setNameId( dynamicIds[0] );
return record;
}
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_xa_TransactionWriter.java
|
225
|
public class TestCommandMode
{
@Test
public void shouldInferCorrectModes() throws Exception
{
assertThat( fromRecordState( /* create */true, /* inUse */true ), equalTo(Command.Mode.CREATE));
assertThat( fromRecordState( /* create */false, /* inUse */true ), equalTo(Command.Mode.UPDATE));
assertThat( fromRecordState( /* create */false, /* inUse */false ), equalTo(Command.Mode.DELETE));
assertThat( fromRecordState( /* create */true, /* inUse */false ), equalTo(Command.Mode.DELETE));
}
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_nioneo_xa_TestCommandMode.java
|
226
|
public final class SimpleNeoStoreProvider implements NeoStoreProvider
{
private NeoStore neoStore;
public SimpleNeoStoreProvider( NeoStore neoStore )
{
this.neoStore = neoStore;
}
@Override
public NeoStore evaluate()
{
return neoStore;
}
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_xa_SimpleNeoStoreProvider.java
|
227
|
public class SchemaRuleCommandTest
{
@Test
public void shouldWriteCreatedSchemaRuleToStore() throws Exception
{
// GIVEN
Collection<DynamicRecord> beforeRecords = serialize( rule, id, false, false);
Collection<DynamicRecord> afterRecords = serialize( rule, id, true, true);
SchemaRuleCommand command = new SchemaRuleCommand( neoStore, store, indexes,
beforeRecords, afterRecords, rule, txId );
// WHEN
command.execute();
// THEN
verify( store ).updateRecord( first( afterRecords ) );
verify( indexes ).createIndex( rule );
}
@Test
public void shouldSetLatestConstraintRule() throws Exception
{
// Given
Collection<DynamicRecord> beforeRecords = serialize( rule, id, true, true);
Collection<DynamicRecord> afterRecords = serialize( rule, id, true, false);
SchemaRuleCommand command = new SchemaRuleCommand( neoStore, store, indexes, beforeRecords, afterRecords,
uniquenessConstraintRule( id, labelId, propertyKey, 0 ), txId );
// WHEN
command.execute();
// THEN
verify( store ).updateRecord( first( afterRecords ) );
verify( neoStore ).setLatestConstraintIntroducingTx( txId );
}
@Test
public void shouldDropSchemaRuleFromStore() throws Exception
{
// GIVEN
Collection<DynamicRecord> beforeRecords = serialize( rule, id, true, true);
Collection<DynamicRecord> afterRecords = serialize( rule, id, false, false);
SchemaRuleCommand command = new SchemaRuleCommand( neoStore, store, indexes, beforeRecords, afterRecords, rule, txId );
// WHEN
command.execute();
// THEN
verify( store ).updateRecord( first( afterRecords ) );
verify( indexes ).dropIndex( rule );
}
@Test
public void shouldWriteSchemaRuleToLog() throws Exception
{
// GIVEN
Collection<DynamicRecord> beforeRecords = serialize( rule, id, false, false);
Collection<DynamicRecord> afterRecords = serialize( rule, id, true, true);
SchemaRuleCommand command = new SchemaRuleCommand( neoStore, store, indexes, beforeRecords, afterRecords, rule, txId );
InMemoryLogBuffer buffer = new InMemoryLogBuffer();
when( neoStore.getSchemaStore() ).thenReturn( store );
// WHEN
command.writeToFile( buffer );
Command readCommand = readCommand( neoStore, indexes, buffer, allocate( 1000 ) );
// THEN
assertThat( readCommand, instanceOf( SchemaRuleCommand.class ) );
SchemaRuleCommand readSchemaCommand = (SchemaRuleCommand)readCommand;
assertThat(readSchemaCommand.getTxId(), equalTo(txId));
}
@Test
public void shouldRecreateSchemaRuleWhenDeleteCommandReadFromDisk() throws Exception
{
// GIVEN
Collection<DynamicRecord> beforeRecords = serialize( rule, id, true, true);
Collection<DynamicRecord> afterRecords = serialize( rule, id, false, false);
SchemaRuleCommand command = new SchemaRuleCommand( neoStore, store, indexes, beforeRecords, afterRecords, rule, txId );
InMemoryLogBuffer buffer = new InMemoryLogBuffer();
when( neoStore.getSchemaStore() ).thenReturn( store );
// WHEN
command.writeToFile( buffer );
Command readCommand = readCommand( neoStore, indexes, buffer, allocate( 1000 ) );
// THEN
assertThat( readCommand, instanceOf( SchemaRuleCommand.class ) );
SchemaRuleCommand readSchemaCommand = (SchemaRuleCommand)readCommand;
assertThat(readSchemaCommand.getTxId(), equalTo(txId));
assertThat(readSchemaCommand.getSchemaRule(), equalTo((SchemaRule)rule));
}
private final NeoStore neoStore = mock( NeoStore.class );
private final SchemaStore store = mock( SchemaStore.class );
private final IndexingService indexes = mock( IndexingService.class );
private final int labelId = 2;
private final int propertyKey = 8;
private final long id = 0;
private final long txId = 1337l;
private final IndexRule rule = IndexRule.indexRule( id, labelId, propertyKey, PROVIDER_DESCRIPTOR );
private Collection<DynamicRecord> serialize( SchemaRule rule, long id, boolean inUse, boolean created )
{
RecordSerializer serializer = new RecordSerializer();
serializer = serializer.append( rule );
DynamicRecord record = new DynamicRecord( id );
record.setData( serializer.serialize() );
if ( created )
{
record.setCreated();
}
if ( inUse )
{
record.setInUse( true );
}
return Arrays.asList( record );
}
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_nioneo_xa_SchemaRuleCommandTest.java
|
228
|
public static class RecordChange<KEY,RECORD,ADDITIONAL>
{
private final Map<KEY, RecordChange<KEY, RECORD, ADDITIONAL>> allChanges;
private final ADDITIONAL additionalData;
private RECORD before;
private final RECORD record;
private final Loader<KEY,RECORD,ADDITIONAL> loader;
private boolean changed;
private final boolean created;
private final KEY key;
private final boolean manageBeforeState;
public RecordChange(Map<KEY, RecordChange<KEY, RECORD, ADDITIONAL>> allChanges, KEY key, RECORD record,
Loader<KEY, RECORD, ADDITIONAL> loader, boolean manageBeforeState, boolean created, ADDITIONAL additionalData)
{
this.allChanges = allChanges;
this.key = key;
this.record = record;
this.loader = loader;
this.manageBeforeState = manageBeforeState;
this.created = created;
this.additionalData = additionalData;
}
KEY getKey()
{
return key;
}
RECORD forChangingLinkage()
{
return prepareForChange();
}
RECORD forChangingData()
{
ensureHeavy();
return prepareForChange();
}
private RECORD prepareForChange()
{
ensureHasBeforeRecordImage();
if ( !this.changed )
{
this.allChanges.put( key, this );
this.changed = true;
}
return this.record;
}
private void ensureHeavy()
{
if ( !created )
{
loader.ensureHeavy( record );
if ( before != null )
loader.ensureHeavy( before );
}
}
RECORD forReadingLinkage()
{
return this.record;
}
RECORD forReadingData()
{
ensureHeavy();
return this.record;
}
public boolean isChanged()
{
return this.changed;
}
public RECORD getBefore()
{
ensureHasBeforeRecordImage();
if ( !manageBeforeState )
throw new UnsupportedOperationException( "This RecordChanges instance doesn't manage before-state" );
return before;
}
@SuppressWarnings( "unchecked" )
private void ensureHasBeforeRecordImage()
{
if ( manageBeforeState && this.before == null )
{
this.before = loader.clone( record );
}
}
public boolean isCreated()
{
return created;
}
public ADDITIONAL getAdditionalData() {
return additionalData;
}
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_xa_RecordChanges.java
|
229
|
{
@Override
public boolean accept( RecordChange<KEY, RECORD, ADDITIONAL> item )
{
return item.isChanged();
}
}, recordChanges.values() );
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_xa_RecordChanges.java
|
230
|
public class RecordChanges<KEY,RECORD,ADDITIONAL>
{
private final Map<KEY, RecordChange<KEY,RECORD,ADDITIONAL>> recordChanges = new HashMap<>();
private final Loader<KEY,RECORD,ADDITIONAL> loader;
private final boolean manageBeforeState;
public RecordChanges( Loader<KEY,RECORD,ADDITIONAL> loader, boolean manageBeforeState )
{
this.loader = loader;
this.manageBeforeState = manageBeforeState;
}
public RecordChange<KEY,RECORD,ADDITIONAL> getIfLoaded( KEY key )
{
return recordChanges.get( key );
}
public RecordChange<KEY,RECORD,ADDITIONAL> getOrLoad( KEY key, ADDITIONAL additionalData )
{
RecordChange<KEY,RECORD,ADDITIONAL> result = recordChanges.get( key );
if ( result == null )
{
result = new RecordChange<>( recordChanges, key,
loader.load( key, additionalData ), loader, manageBeforeState, false, additionalData );
}
return result;
}
public int changeSize()
{
// TODO optimize?
return count( changes() );
}
public void clear()
{
recordChanges.clear();
}
public RecordChange<KEY,RECORD,ADDITIONAL> create( KEY key, ADDITIONAL additionalData )
{
if ( recordChanges.containsKey( key ) )
throw new IllegalStateException( key + " already exists" );
RECORD record = loader.newUnused( key, additionalData );
RecordChange<KEY, RECORD, ADDITIONAL> change = new RecordChange<>(
recordChanges, key, record, loader, manageBeforeState, true, additionalData);
recordChanges.put( key, change );
return change;
}
public Iterable<RecordChange<KEY,RECORD,ADDITIONAL>> changes()
{
return Iterables.filter( new Predicate<RecordChange<KEY,RECORD,ADDITIONAL>>()
{
@Override
public boolean accept( RecordChange<KEY, RECORD, ADDITIONAL> item )
{
return item.isChanged();
}
}, recordChanges.values() );
}
public static class RecordChange<KEY,RECORD,ADDITIONAL>
{
private final Map<KEY, RecordChange<KEY, RECORD, ADDITIONAL>> allChanges;
private final ADDITIONAL additionalData;
private RECORD before;
private final RECORD record;
private final Loader<KEY,RECORD,ADDITIONAL> loader;
private boolean changed;
private final boolean created;
private final KEY key;
private final boolean manageBeforeState;
public RecordChange(Map<KEY, RecordChange<KEY, RECORD, ADDITIONAL>> allChanges, KEY key, RECORD record,
Loader<KEY, RECORD, ADDITIONAL> loader, boolean manageBeforeState, boolean created, ADDITIONAL additionalData)
{
this.allChanges = allChanges;
this.key = key;
this.record = record;
this.loader = loader;
this.manageBeforeState = manageBeforeState;
this.created = created;
this.additionalData = additionalData;
}
KEY getKey()
{
return key;
}
RECORD forChangingLinkage()
{
return prepareForChange();
}
RECORD forChangingData()
{
ensureHeavy();
return prepareForChange();
}
private RECORD prepareForChange()
{
ensureHasBeforeRecordImage();
if ( !this.changed )
{
this.allChanges.put( key, this );
this.changed = true;
}
return this.record;
}
private void ensureHeavy()
{
if ( !created )
{
loader.ensureHeavy( record );
if ( before != null )
loader.ensureHeavy( before );
}
}
RECORD forReadingLinkage()
{
return this.record;
}
RECORD forReadingData()
{
ensureHeavy();
return this.record;
}
public boolean isChanged()
{
return this.changed;
}
public RECORD getBefore()
{
ensureHasBeforeRecordImage();
if ( !manageBeforeState )
throw new UnsupportedOperationException( "This RecordChanges instance doesn't manage before-state" );
return before;
}
@SuppressWarnings( "unchecked" )
private void ensureHasBeforeRecordImage()
{
if ( manageBeforeState && this.before == null )
{
this.before = loader.clone( record );
}
}
public boolean isCreated()
{
return created;
}
public ADDITIONAL getAdditionalData() {
return additionalData;
}
}
public interface Loader<KEY,RECORD,ADDITIONAL>
{
RECORD newUnused( KEY key, ADDITIONAL additionalData );
RECORD load( KEY key, ADDITIONAL additionalData );
void ensureHeavy( RECORD record );
RECORD clone( RECORD record );
}
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_xa_RecordChanges.java
|
231
|
public class NodeCommandTest
{
private NodeStore nodeStore;
@Rule
public EphemeralFileSystemRule fs = new EphemeralFileSystemRule();
@Test
public void shouldSerializeAndDeserializeUnusedRecords() throws Exception
{
// Given
NodeRecord before = new NodeRecord( 12, 1, 2 );
NodeRecord after = new NodeRecord( 12, 2, 1 );
// When
assertSerializationWorksFor( new Command.NodeCommand( null, before, after ) );
}
@Test
public void shouldSerializeCreatedRecord() throws Exception
{
// Given
NodeRecord before = new NodeRecord( 12, 1, 2 );
NodeRecord after = new NodeRecord( 12, 2, 1 );
after.setCreated();
after.setInUse( true );
// When
assertSerializationWorksFor( new Command.NodeCommand( null, before, after ) );
}
@Test
public void shouldSerializeUpdatedRecord() throws Exception
{
// Given
NodeRecord before = new NodeRecord( 12, 1, 2 );
before.setInUse( true );
NodeRecord after = new NodeRecord( 12, 2, 1 );
after.setInUse( true );
// When
assertSerializationWorksFor( new Command.NodeCommand( null, before, after ) );
}
@Test
public void shouldSerializeInlineLabels() throws Exception
{
// Given
NodeRecord before = new NodeRecord( 12, 1, 2 );
before.setInUse( true );
NodeRecord after = new NodeRecord( 12, 2, 1 );
after.setInUse( true );
NodeLabels nodeLabels = parseLabelsField( after );
nodeLabels.add( 1337, nodeStore );
// When
assertSerializationWorksFor( new Command.NodeCommand( null, before, after ) );
}
@Test
public void shouldSerializeDynamicRecordLabels() throws Exception
{
// Given
NodeRecord before = new NodeRecord( 12, 1, 2 );
before.setInUse( true );
NodeRecord after = new NodeRecord( 12, 2, 1 );
after.setInUse( true );
NodeLabels nodeLabels = parseLabelsField( after );
for ( int i = 10; i < 100; i++ )
{
nodeLabels.add( i, nodeStore );
}
// When
assertSerializationWorksFor( new Command.NodeCommand( null, before, after ) );
}
@Test
public void shouldSerializeDynamicRecordsRemoved() throws Exception
{
// Given
NodeRecord before = new NodeRecord( 12, 1, 2 );
before.setInUse( true );
List<DynamicRecord> beforeDyn = asList( dynamicRecord( 0, true, true, -1l, LONG.intValue(), new byte[]{1,2,3,4,5,6,7,8}));
before.setLabelField( dynamicPointer( beforeDyn ), beforeDyn );
NodeRecord after = new NodeRecord( 12, 2, 1 );
after.setInUse( true );
List<DynamicRecord> dynamicRecords = asList( dynamicRecord( 0, false, true, -1l, LONG.intValue(), new byte[]{1,2,3,4,5,6,7,8}));
after.setLabelField( dynamicPointer( dynamicRecords ), dynamicRecords );
// When
Command.NodeCommand cmd = new Command.NodeCommand( null, before, after );
InMemoryLogBuffer buffer = new InMemoryLogBuffer();
cmd.writeToFile( buffer );
Command.NodeCommand result = (Command.NodeCommand) readCommand( null, null, buffer, allocate( 64 ) );
// Then
assertThat( result, equalTo( cmd ) );
assertThat( result.getMode(), equalTo( cmd.getMode() ) );
assertThat( result.getBefore(), equalTo( cmd.getBefore() ) );
assertThat( result.getAfter(), equalTo( cmd.getAfter() ) );
// And dynamic records should be the same
assertThat( result.getBefore().getDynamicLabelRecords(), equalTo( cmd.getBefore().getDynamicLabelRecords()));
assertThat( result.getAfter().getDynamicLabelRecords(), equalTo( cmd.getAfter().getDynamicLabelRecords() ) );
}
private void assertSerializationWorksFor( Command.NodeCommand cmd ) throws IOException
{
InMemoryLogBuffer buffer = new InMemoryLogBuffer();
cmd.writeToFile( buffer );
Command.NodeCommand result = (Command.NodeCommand) readCommand( null, null, buffer, allocate( 64 ) );
// Then
assertThat( result, equalTo( cmd ) );
assertThat( result.getMode(), equalTo( cmd.getMode() ) );
assertThat( result.getBefore(), equalTo( cmd.getBefore() ) );
assertThat( result.getAfter(), equalTo( cmd.getAfter() ) );
// And labels should be the same
assertThat( labels( result.getBefore() ), equalTo( labels( cmd.getBefore() ) ) );
assertThat( labels( result.getAfter() ), equalTo( labels( cmd.getAfter() ) ) );
// And dynamic records should be the same
assertThat( result.getBefore().getDynamicLabelRecords(), equalTo( result.getBefore().getDynamicLabelRecords()));
assertThat( result.getAfter().getDynamicLabelRecords(), equalTo( result.getAfter().getDynamicLabelRecords() ) );
}
private Set<Integer> labels( NodeRecord record )
{
long[] rawLabels = parseLabelsField( record ).get( nodeStore );
Set<Integer> labels = new HashSet<>( rawLabels.length );
for ( long label : rawLabels )
{
labels.add( safeCastLongToInt( label ) );
}
return labels;
}
@Before
public void before() throws Exception
{
@SuppressWarnings("deprecation")
StoreFactory storeFactory = new StoreFactory( new Config(), new DefaultIdGeneratorFactory(),
new DefaultWindowPoolFactory(), fs.get(), StringLogger.DEV_NULL, new DefaultTxHook() );
File storeFile = new File( "nodestore" );
storeFactory.createNodeStore( storeFile );
nodeStore = storeFactory.newNodeStore( storeFile );
}
@After
public void after() throws Exception
{
nodeStore.close();
}
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_nioneo_xa_NodeCommandTest.java
|
232
|
public class WriteTransactionCommandOrderingTest
{
private final AtomicReference<List<String>> currentRecording = new AtomicReference<>();
private final NeoStore store = mock( NeoStore.class );
private final RecordingRelationshipStore relationshipStore = new RecordingRelationshipStore( currentRecording );
private final RecordingNodeStore nodeStore = new RecordingNodeStore( currentRecording );
private final RecordingPropertyStore propertyStore = new RecordingPropertyStore( currentRecording );
public WriteTransactionCommandOrderingTest()
{
when( store.getPropertyStore() ).thenReturn( propertyStore );
when( store.getNodeStore() ).thenReturn( nodeStore );
when( store.getRelationshipStore() ).thenReturn( relationshipStore );
}
@Test
public void shouldExecuteCommandsInTheSameOrderRegardlessOfItBeingRecoveredOrNot() throws Exception
{
// Given
List<String> nonRecoveredRecording = new ArrayList<>();
NeoStoreTransaction nonRecoveredTx = newWriteTransaction();
injectAllPossibleCommands( nonRecoveredTx );
List<String> recoveredRecording = new ArrayList<>();
NeoStoreTransaction recoveredTx = newWriteTransaction();
recoveredTx.setRecovered();
injectAllPossibleCommands( recoveredTx );
// When
currentRecording.set( nonRecoveredRecording );
nonRecoveredTx.doPrepare();
nonRecoveredTx.doCommit();
currentRecording.set( recoveredRecording );
recoveredTx.doPrepare();
recoveredTx.doCommit();
// Then
assertThat(nonRecoveredRecording, equalTo(recoveredRecording)); // ordering is the same in both cases
assertThat(new HashSet<>( recoveredRecording ).size(), is( 9 )); // we have included all possible commands
}
private void injectAllPossibleCommands( NeoStoreTransaction tx )
{
tx.injectCommand( new Command.NodeCommand( nodeStore, inUseNode(), inUseNode() ) ); // update
tx.injectCommand( new Command.NodeCommand( nodeStore, inUseNode(), missingNode() ) ); // delete
tx.injectCommand( new Command.NodeCommand( nodeStore, missingNode(), createdNode() ) ); // create
tx.injectCommand( new Command.PropertyCommand( propertyStore, inUseProperty(), inUseProperty() ) ); // update
tx.injectCommand( new Command.PropertyCommand( propertyStore, inUseProperty(), missingProperty() ) ); // delete
tx.injectCommand( new Command.PropertyCommand( propertyStore, missingProperty(), createdProperty() ) ); // create
tx.injectCommand( new Command.RelationshipCommand( relationshipStore, inUseRelationship() ) ); // update
tx.injectCommand( new Command.RelationshipCommand( relationshipStore, missingRelationship() ) ); // delete
tx.injectCommand( new Command.RelationshipCommand( relationshipStore, createdRelationship() ) ); // create
}
private static RelationshipRecord missingRelationship()
{
return new RelationshipRecord( -1 );
}
private static RelationshipRecord createdRelationship()
{
RelationshipRecord record = new RelationshipRecord( 2 );
record.setInUse( true );
record.setCreated();
return record;
}
private static RelationshipRecord inUseRelationship()
{
RelationshipRecord record = new RelationshipRecord( 1 );
record.setInUse( true );
return record;
}
private static PropertyRecord missingProperty()
{
return new PropertyRecord( -1 );
}
private static PropertyRecord createdProperty()
{
PropertyRecord record = new PropertyRecord( 2 );
record.setInUse( true );
record.setCreated();
return record;
}
private static PropertyRecord inUseProperty()
{
PropertyRecord record = new PropertyRecord( 1 );
record.setInUse( true );
return record;
}
private static NodeRecord missingNode()
{
return new NodeRecord(-1, -1, -1);
}
private static NodeRecord createdNode()
{
NodeRecord record = new NodeRecord( 2, -1, -1 );
record.setInUse( true );
record.setCreated();
return record;
}
private static NodeRecord inUseNode()
{
NodeRecord record = new NodeRecord( 1, -1, -1 );
record.setInUse( true );
return record;
}
// Builds a transaction against the recording stores; every other collaborator is a mock.
private NeoStoreTransaction newWriteTransaction()
{
    NeoStoreTransaction transaction = new NeoStoreTransaction( 0l, mock( XaLogicalLog.class ),
            TransactionState.NO_STATE, store, mock( CacheAccessBackDoor.class ),
            mock( IndexingService.class ), WriteTransactionTest.NO_LABEL_SCAN_STORE,
            mock( IntegrityValidator.class ), mock( KernelTransactionImplementation.class ),
            mock( LockService.class, RETURNS_MOCKS ) );
    // Pretend this is the transaction immediately following the last committed one.
    transaction.setCommitTxId( store.getLastCommittedTx() + 1 );
    return transaction;
}
// Classifies a record by the action a command performs on it:
// not in use -> "deleted"; in use and freshly created -> "created"; otherwise -> "updated".
private static String commandActionToken( AbstractBaseRecord record )
{
    return !record.inUse() ? "deleted"
            : record.isCreated() ? "created"
            : "updated";
}
// PropertyStore stand-in that records what would have been written instead of touching storage.
private static class RecordingPropertyStore extends PropertyStore
{
    // Shared sink that updateRecord() appends action descriptions to.
    private final AtomicReference<List<String>> recording;

    public RecordingPropertyStore( AtomicReference<List<String>> currentRecording )
    {
        super( null, null, null, null, null, null, null, null, null );
        this.recording = currentRecording;
    }

    @Override
    public void updateRecord( PropertyRecord record )
    {
        // Record the action instead of writing anything.
        recording.get().add( commandActionToken( record ) + " property" );
    }

    // Storage lifecycle checks are disabled: this store is never backed by files.
    @Override
    protected void checkStorage()
    {
    }

    @Override
    protected void checkVersion()
    {
    }

    @Override
    protected void loadStorage()
    {
    }
}
// NodeStore stand-in that records what would have been written instead of touching storage.
private static class RecordingNodeStore extends NodeStore
{
    // Shared sink that updateRecord() appends action descriptions to.
    private final AtomicReference<List<String>> recording;

    public RecordingNodeStore( AtomicReference<List<String>> currentRecording )
    {
        super( null, null, null, null, null, null, null );
        this.recording = currentRecording;
    }

    @Override
    public void updateRecord( NodeRecord record )
    {
        // Record the action instead of writing anything.
        recording.get().add( commandActionToken( record ) + " node" );
    }

    // Storage lifecycle checks are disabled: this store is never backed by files.
    @Override
    protected void checkStorage()
    {
    }

    @Override
    protected void checkVersion()
    {
    }

    @Override
    protected void loadStorage()
    {
    }

    @Override
    public NodeRecord getRecord( long id )
    {
        // Fabricate an in-use node so reads during the test always succeed.
        NodeRecord node = new NodeRecord( id, -1, -1 );
        node.setInUse( true );
        return node;
    }
}
// RelationshipStore stand-in that records what would have been written instead of touching storage.
private static class RecordingRelationshipStore extends RelationshipStore
{
    // Shared sink that updateRecord() appends action descriptions to.
    private final AtomicReference<List<String>> recording;

    public RecordingRelationshipStore( AtomicReference<List<String>> currentRecording )
    {
        super( null, null, null, null, null, null );
        this.recording = currentRecording;
    }

    @Override
    public void updateRecord( RelationshipRecord record )
    {
        // Record the action instead of writing anything.
        recording.get().add( commandActionToken( record ) + " relationship" );
    }

    // Storage lifecycle checks are disabled: this store is never backed by files.
    @Override
    protected void checkStorage()
    {
    }

    @Override
    protected void checkVersion()
    {
    }

    @Override
    protected void loadStorage()
    {
    }
}
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_nioneo_xa_WriteTransactionCommandOrderingTest.java
|
233
|
/**
 * Persistence source backed by the NeoStore data source resolved through the
 * {@link XaDataSourceManager}; also serves entity id generation by delegation.
 */
public class NioNeoDbPersistenceSource extends LifecycleAdapter implements PersistenceSource, EntityIdGenerator
{
    // NOTE(review): never assigned anything but null, so toString() always reports "[null]".
    private final String dataSourceName = null;
    private final XaDataSourceManager xaDataSourceManager;

    public NioNeoDbPersistenceSource( XaDataSourceManager xaDataSourceManager )
    {
        // Fail fast even when JVM assertions are disabled (the previous assert was a
        // no-op in production, deferring the failure to a later, harder-to-trace NPE).
        this.xaDataSourceManager =
                java.util.Objects.requireNonNull( xaDataSourceManager, "xaDataSourceManager" );
    }

    @Override
    public NeoStoreTransaction createTransaction( XaConnection connection )
    {
        // The connection handed in is always a NeoStoreXaConnection for this source.
        return ((NeoStoreXaConnection) connection).createTransaction();
    }

    @Override
    public String toString()
    {
        return "A persistence source to [" + dataSourceName + "]";
    }

    @Override
    public long nextId( Class<?> clazz )
    {
        return xaDataSourceManager.getNeoStoreDataSource().nextId( clazz );
    }

    @Override
    public long getHighestPossibleIdInUse( Class<?> clazz )
    {
        return xaDataSourceManager.getNeoStoreDataSource().getHighestPossibleIdInUse( clazz );
    }

    @Override
    public long getNumberOfIdsInUse( Class<?> clazz )
    {
        return xaDataSourceManager.getNeoStoreDataSource().getNumberOfIdsInUse( clazz );
    }

    @Override
    public XaDataSource getXaDataSource()
    {
        return xaDataSourceManager.getNeoStoreDataSource();
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_xa_NioNeoDbPersistenceSource.java
|
234
|
// Creates NeoStoreTransactions for the XA framework and hooks store maintenance
// into the recovery/version lifecycle.
private class TransactionFactory extends XaTransactionFactory
{
    @Override
    public XaTransaction create( long lastCommittedTxWhenTransactionStarted, TransactionState state )
    {
        // Each XA transaction gets a fresh kernel transaction from the kernel API.
        return new NeoStoreTransaction( lastCommittedTxWhenTransactionStarted, getLogicalLog(), state,
                neoStore, cacheAccess, indexingService, labelScanStore, integrityValidator,
                (KernelTransactionImplementation)kernel.newTransaction(), locks );
    }
    @Override
    public void recoveryComplete()
    {
        msgLog.debug( "Recovery complete, "
                + "all transactions have been resolved" );
        msgLog.debug( "Rebuilding id generators as needed. "
                + "This can take a while for large stores..." );
        // Order matters: flush everything to disk before marking the store OK and
        // bumping its version to the highest recovered log version.
        forceEverything();
        neoStore.makeStoreOk();
        neoStore.setVersion( xaContainer.getLogicalLog().getHighestLogVersion() );
        msgLog.debug( "Rebuild of id generators complete." );
    }
    @Override
    public long getCurrentVersion()
    {
        return neoStore.getVersion();
    }
    @Override
    public long getAndSetNewVersion()
    {
        return neoStore.incrementVersion();
    }
    @Override
    public void setVersion( long version )
    {
        neoStore.setVersion( version );
    }
    @Override
    public void flushAll()
    {
        forceEverything();
    }
    @Override
    public long getLastCommittedTx()
    {
        return neoStore.getLastCommittedTx();
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_xa_NeoStoreXaDataSource.java
|
235
|
// Variant of TransactionFactory that routes writes through the resolved interceptor chain.
private class InterceptingTransactionFactory extends TransactionFactory
{
    @Override
    public XaTransaction create( long lastCommittedTxWhenTransactionStarted, TransactionState state )
    {
        // Resolve the interceptor chain per transaction, so provider changes take effect.
        TransactionInterceptor interceptorChain = providers.resolveChain( NeoStoreXaDataSource.this );
        return new InterceptingWriteTransaction( lastCommittedTxWhenTransactionStarted, getLogicalLog(),
                neoStore, state, cacheAccess, indexingService, labelScanStore, interceptorChain,
                integrityValidator, (KernelTransactionImplementation)kernel.newTransaction(), locks );
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_xa_NeoStoreXaDataSource.java
|
236
|
{
@Override
public boolean visit( StringLogger.LineLogger logger )
{
dump( source, logger );
return false;
}
}, true );
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_xa_NeoStoreXaDataSource.java
|
237
|
PERSISTENCE_WINDOW_POOL_STATS( "Persistence Window Pool stats:" )
{
@Override
void dump( NeoStoreXaDataSource source, StringLogger.LineLogger log )
{
source.neoStore.logAllWindowPoolStats( log );
}
@Override
boolean applicable( DiagnosticsPhase phase )
{
return phase.isExplicitlyRequested();
}
};
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_xa_NeoStoreXaDataSource.java
|
238
|
NEO_STORE_ID_USAGE( "Id usage:" )
{
@Override
void dump( NeoStoreXaDataSource source, StringLogger.LineLogger log )
{
source.neoStore.logIdUsage( log );
}
},
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_xa_NeoStoreXaDataSource.java
|
239
|
NEO_STORE_VERSIONS( "Store versions:" )
{
@Override
void dump( NeoStoreXaDataSource source, StringLogger.LineLogger log )
{
source.neoStore.logVersions( log );
}
},
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_xa_NeoStoreXaDataSource.java
|
240
|
// Configuration settings consumed by this data source; each constant re-exports an
// existing setting so callers have a single place to look them up.
@SuppressWarnings("deprecation")
public static abstract class Configuration extends LogBackedXaDataSource.Configuration
{
    public static final Setting<Boolean> read_only= GraphDatabaseSettings.read_only;
    public static final Setting<File> store_dir = InternalAbstractGraphDatabase.Configuration.store_dir;
    public static final Setting<File> neo_store = InternalAbstractGraphDatabase.Configuration.neo_store;
    public static final Setting<File> logical_log = InternalAbstractGraphDatabase.Configuration.logical_log;
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_xa_NeoStoreXaDataSource.java
|
241
|
// Deserialises commands from the logical log, wiring in the store and index service
// that materialised commands need.
private static class CommandFactory extends XaCommandFactory
{
    private final NeoStore store;
    private final IndexingService indexing;

    CommandFactory( NeoStore neoStore, IndexingService indexingService )
    {
        this.store = neoStore;
        this.indexing = indexingService;
    }

    @Override
    public XaCommand readCommand( ReadableByteChannel byteChannel,
            ByteBuffer buffer ) throws IOException
    {
        // Reading a single command is delegated to Command's own factory method.
        return Command.readCommand( store, indexing, byteChannel, buffer );
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_xa_NeoStoreXaDataSource.java
|
242
|
private static class Comparison
{
private final Collection<Field> fields = new ArrayList<Field>();
Comparison( Class<?> type )
{
for ( Field field : type.getDeclaredFields() )
{
if ( field.getDeclaringClass() == type )
{
field.setAccessible( true );
fields.add( field );
}
}
}
boolean compare( Object expected, Object actual )
{
try
{
for ( Field field : fields )
{
if ( !equal( field.get( expected ), field.get( actual ) ) )
{
return false;
}
}
return true;
}
catch ( Exception failure )
{
return false;
}
}
private static boolean equal( Object a, Object b )
{
return a == b || (a != null && (a.equals( b ) || (b != null && deepEquals( a, b ))));
}
private static boolean deepEquals( Object a, Object b )
{
if (a.getClass() == b.getClass() && a.getClass().isArray())
{
if ( a instanceof Object[] )
{
return Arrays.deepEquals( (Object[]) a, (Object[]) b );
}
if ( a instanceof byte[] )
{
return Arrays.equals( (byte[]) a, (byte[]) b );
}
if ( a instanceof char[] )
{
return Arrays.equals( (char[]) a, (char[])b );
}
}
return false;
}
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_nioneo_xa_TransactionWriterTest.java
|
243
|
// NodeStore stand-in that records what would have been written instead of touching storage.
private static class RecordingNodeStore extends NodeStore
{
    // Shared sink that updateRecord() appends action descriptions to.
    private final AtomicReference<List<String>> recording;

    public RecordingNodeStore( AtomicReference<List<String>> currentRecording )
    {
        super( null, null, null, null, null, null, null );
        this.recording = currentRecording;
    }

    @Override
    public void updateRecord( NodeRecord record )
    {
        // Record the action instead of writing anything.
        recording.get().add( commandActionToken( record ) + " node" );
    }

    // Storage lifecycle checks are disabled: this store is never backed by files.
    @Override
    protected void checkStorage()
    {
    }

    @Override
    protected void checkVersion()
    {
    }

    @Override
    protected void loadStorage()
    {
    }

    @Override
    public NodeRecord getRecord( long id )
    {
        // Fabricate an in-use node so reads during the test always succeed.
        NodeRecord node = new NodeRecord( id, -1, -1 );
        node.setInUse( true );
        return node;
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_nioneo_xa_WriteTransactionCommandOrderingTest.java
|
244
|
{
@Override
public NeoStore instance()
{
return getNeoStore();
}
}, persistenceCache, schemaCache, providerMap, labelScanStore, readOnly ));
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_xa_NeoStoreXaDataSource.java
|
245
|
// IndexingService that captures incoming updates instead of applying them.
private class CapturingIndexingService extends IndexingService
{
    // Every update routed through updateIndexes() ends up here.
    private final Set<NodePropertyUpdate> updates = new HashSet<>();

    public CapturingIndexingService()
    {
        super( null,
                new DefaultSchemaIndexProviderMap( NO_INDEX_PROVIDER ),
                new NeoStoreIndexStoreView( locks, neoStore ),
                null,
                new KernelSchemaStateStore(),
                new SingleLoggingService( DEV_NULL ), IndexingService.NO_MONITOR
        );
    }

    @Override
    public void updateIndexes( IndexUpdates indexUpdates )
    {
        // Capture only; parameter renamed so it no longer shadows the 'updates' field.
        updates.addAll( asCollection( indexUpdates ) );
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_nioneo_xa_WriteTransactionTest.java
|
246
|
/**
 * Decides whether a store directory needs recovery, and can trigger recovery
 * by starting (and immediately shutting down) an embedded database on it.
 */
public class StoreRecoverer
{
    private final FileSystemAbstraction fs;

    public StoreRecoverer()
    {
        this( new DefaultFileSystemAbstraction() );
    }

    public StoreRecoverer( FileSystemAbstraction fs )
    {
        this.fs = fs;
    }

    /**
     * Inspects the logical log files under {@code dataDir} to decide whether
     * recovery is required.
     * NOTE(review): mutates the caller's {@code params} map by inserting the
     * store_dir setting — confirm callers are fine with that.
     */
    public boolean recoveryNeededAt( File dataDir, Map<String, String> params ) throws IOException
    {
        // Config is needed to resolve where the logical log files live.
        params.put( GraphDatabaseSettings.store_dir.name(), dataDir.getPath() );
        Config config = new Config( params, GraphDatabaseSettings.class );
        File baseLogPath = config.get( GraphDatabaseSettings.logical_log );
        XaLogicalLogFiles logFiles = new XaLogicalLogFiles( baseLogPath, fs );

        File logToInspect;
        switch ( logFiles.determineState() )
        {
        case CLEAN:
            // Nothing pending.
            return false;
        case NO_ACTIVE_FILE:
        case DUAL_LOGS_LOG_1_ACTIVE:
        case DUAL_LOGS_LOG_2_ACTIVE:
            // Ambiguous or interrupted states: always recover.
            return true;
        case LEGACY_WITHOUT_LOG_ROTATION:
            logToInspect = baseLogPath;
            break;
        case LOG_1_ACTIVE:
            logToInspect = logFiles.getLog1FileName();
            break;
        case LOG_2_ACTIVE:
            logToInspect = logFiles.getLog2FileName();
            break;
        default:
            return true;
        }

        // Exactly one log file is active; its contents decide.
        StoreChannel channel = null;
        try
        {
            channel = fs.open( logToInspect, "r" );
            return new XaLogicalLogRecoveryCheck( channel ).recoveryRequired();
        }
        finally
        {
            if ( channel != null )
            {
                channel.close();
            }
        }
    }

    public void recover( File dataDir, Map<String, String> params ) throws IOException
    {
        // For now, just launch a full embedded database on top of the directory;
        // its startup performs the recovery. Ideally this would be a dedicated
        // recovery component inside the database rather than bolted on outside it.
        GraphDatabaseService db = new GraphDatabaseFactory()
                .newEmbeddedDatabaseBuilder( dataDir.getCanonicalPath() )
                .setConfig( params )
                .newGraphDatabase();
        db.shutdown();
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_recovery_StoreRecoverer.java
|
247
|
/**
 * Thrown when a resource could not be enlisted with, or acquired for, the
 * current JTA transaction. Each constructor captures one distinct failure mode.
 */
class ResourceAcquisitionFailedException extends RuntimeException
{
    // The transaction manager refused to enlist the XA resource.
    ResourceAcquisitionFailedException( XAResource resource )
    {
        super( "Unable to enlist '" + resource + "' in transaction" );
    }

    // The transaction can only roll back; no further work may be enlisted.
    ResourceAcquisitionFailedException( RollbackException cause )
    {
        super( "The transaction is marked for rollback only.", cause );
    }

    // Any other unexpected transaction-manager failure.
    ResourceAcquisitionFailedException( Throwable cause )
    {
        super( "TM encountered an unexpected error condition.", cause );
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_persistence_ResourceAcquisitionFailedException.java
|
248
|
/**
 * Pairs a NeoStoreTransaction with its XA connection and lazily enlists the
 * connection's XAResource in the JTA transaction on first write access.
 */
public static class ResourceHolder
{
    private final TxEventSyncHookFactory syncHookFactory;
    private final Transaction tx;
    private final XaConnection connection;
    private final NeoStoreTransaction resource;
    // True once the XAResource has been enlisted in tx; guards against double enlisting.
    private boolean enlisted;
    ResourceHolder( TxEventSyncHookFactory syncHookFactory,
            Transaction tx, XaConnection connection, NeoStoreTransaction resource )
    {
        this.syncHookFactory = syncHookFactory;
        this.tx = tx;
        this.connection = connection;
        this.resource = resource;
    }
    // Reads do not require the resource to be enlisted in the transaction.
    public NeoStoreTransaction forReading()
    {
        return resource;
    }
    // Writes require enlistment so the XA resource takes part in commit/rollback.
    public NeoStoreTransaction forWriting()
    {
        if ( !enlisted )
        {
            enlist();
            enlisted = true;
        }
        return resource;
    }
    private void enlist()
    {
        try
        {
            XAResource xaResource = connection.getXaResource();
            if ( !tx.enlistResource( xaResource ) )
            {
                throw new ResourceAcquisitionFailedException( xaResource );
            }
            // Register the transaction-event hook, if the factory produces one.
            TransactionEventsSyncHook hook = syncHookFactory.create();
            if ( hook != null )
            {
                tx.registerSynchronization( hook );
            }
        }
        // NOTE: the two catches resolve to different constructor overloads with
        // different messages, so they must not be merged into a multi-catch.
        catch ( RollbackException re )
        {
            throw new ResourceAcquisitionFailedException( re );
        }
        catch ( SystemException se )
        {
            throw new ResourceAcquisitionFailedException( se );
        }
    }
    public void delist()
    {
        // Only delist what was actually enlisted.
        if ( enlisted )
        {
            try
            {
                connection.delistResource( tx, XAResource.TMSUCCESS );
            }
            catch ( SystemException e )
            {
                throw new TransactionFailureException(
                        "Failed to delist resource '" + resource + "' from current transaction.", e );
            }
        }
    }
    void destroy()
    {
        connection.destroy();
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_persistence_PersistenceManager.java
|
249
|
/**
 * JTA synchronization that delists the resource before completion and releases
 * connections, transaction-state locks and kernel-level locks after completion.
 */
private class ResourceCleanupHook implements Synchronization
{
    private final Transaction tx;
    private final TransactionState state;
    private final ResourceHolder resourceHolder;
    ResourceCleanupHook( Transaction tx, TransactionState state, ResourceHolder resourceHolder )
    {
        this.tx = tx;
        this.state = state;
        this.resourceHolder = resourceHolder;
    }
    @Override
    public void afterCompletion( int param )
    {
        try
        {
            // Destroy the XA connection first, then settle the transaction state.
            releaseConnections( tx );
            // Release locks held in the old transaction state
            if ( param == Status.STATUS_COMMITTED )
            {
                state.commit();
            }
            else
            {
                state.rollback();
            }
        }
        finally
        {
            // Release locks held by the kernel API stack
            try
            {
                resourceHolder.resource.kernelTransaction().release();
            }
            catch ( ReleaseLocksFailedKernelException e )
            {
                // Log and continue: afterCompletion must not propagate.
                msgLog.error( "Error releasing resources for " + tx, e );
            }
        }
    }
    @Override
    public void beforeCompletion()
    {
        // Delist from the JTA transaction before it completes.
        resourceHolder.delist();
    }
    private void releaseConnections( Transaction tx )
    {
        try
        {
            releaseResourceConnectionsForTransaction( tx, state );
        }
        catch ( Throwable t )
        {
            // Swallow deliberately: cleanup must proceed past individual failures.
            msgLog.error( "Error releasing resources for " + tx, t );
        }
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_persistence_PersistenceManager.java
|
250
|
/**
 * Facade over the NeoStore persistence source for the current JTA transaction.
 * Every operation resolves the per-transaction ResourceHolder and delegates to
 * it, going through forReading() (no enlistment) or forWriting() (enlists the
 * XA resource on first write).
 */
public class PersistenceManager
{
    private final PersistenceSource persistenceSource;
    private final StringLogger msgLog;
    private final AbstractTransactionManager transactionManager;
    private final TxEventSyncHookFactory syncHookFactory;
    public PersistenceManager( StringLogger msgLog, AbstractTransactionManager transactionManager,
            PersistenceSource persistenceSource,
            TxEventSyncHookFactory syncHookFactory )
    {
        this.msgLog = msgLog;
        this.transactionManager = transactionManager;
        this.persistenceSource = persistenceSource;
        this.syncHookFactory = syncHookFactory;
    }
    // ---- Read operations: delegate without enlisting the resource ----
    public NodeRecord loadLightNode( long id )
    {
        return getResource().forReading().nodeLoadLight( id );
    }
    public long getRelationshipChainPosition( long nodeId )
    {
        return getResource().forReading().getRelationshipChainPosition( nodeId );
    }
    public Pair<Map<DirectionWrapper, Iterable<RelationshipRecord>>, Long> getMoreRelationships(
            long nodeId, long position )
    {
        return getResource().forReading().getMoreRelationships( nodeId, position );
    }
    public void loadNodeProperties( long nodeId, boolean light, PropertyReceiver receiver )
    {
        getResource().forReading().nodeLoadProperties( nodeId, light, receiver );
    }
    public void loadRelProperties( long relId, boolean light, PropertyReceiver receiver )
    {
        getResource().forReading().relLoadProperties( relId, light, receiver );
    }
    public RelationshipRecord loadLightRelationship( long id )
    {
        return getResource().forReading().relLoadLight( id );
    }
    // ---- Write operations: enlist the XA resource first via forWriting() ----
    public ArrayMap<Integer,DefinedProperty> nodeDelete( long nodeId )
    {
        return getResource().forWriting().nodeDelete( nodeId );
    }
    public DefinedProperty nodeAddProperty( long nodeId, int propertyKey, Object value )
    {
        return getResource().forWriting().nodeAddProperty( nodeId, propertyKey, value );
    }
    public DefinedProperty nodeChangeProperty( long nodeId, int propertyKey, Object value )
    {
        return getResource().forWriting().nodeChangeProperty( nodeId, propertyKey, value );
    }
    public void nodeRemoveProperty( long nodeId, int propertyKey )
    {
        getResource().forWriting().nodeRemoveProperty( nodeId, propertyKey );
    }
    public void nodeCreate( long id )
    {
        getResource().forWriting().nodeCreate( id );
    }
    public void relationshipCreate( long id, int typeId, long startNodeId,
            long endNodeId )
    {
        getResource().forWriting().relationshipCreate( id, typeId, startNodeId, endNodeId );
    }
    public ArrayMap<Integer,DefinedProperty> relDelete( long relId )
    {
        return getResource().forWriting().relDelete( relId );
    }
    public DefinedProperty relAddProperty( long relId, int propertyKey, Object value )
    {
        return getResource().forWriting().relAddProperty( relId, propertyKey, value );
    }
    public DefinedProperty relChangeProperty( long relId, int propertyKey, Object value )
    {
        return getResource().forWriting().relChangeProperty( relId, propertyKey, value );
    }
    public void relRemoveProperty( long relId, int propertyKey )
    {
        getResource().forWriting().relRemoveProperty( relId, propertyKey );
    }
    public DefinedProperty graphAddProperty( int propertyKey, Object value )
    {
        return getResource().forWriting().graphAddProperty( propertyKey, value );
    }
    public DefinedProperty graphChangeProperty( int propertyKey, Object value )
    {
        return getResource().forWriting().graphChangeProperty( propertyKey, value );
    }
    public void graphRemoveProperty( int propertyKey )
    {
        getResource().forWriting().graphRemoveProperty( propertyKey );
    }
    public void graphLoadProperties( boolean light, PropertyReceiver receiver )
    {
        getResource().forReading().graphLoadProperties( light, receiver );
    }
    public void createPropertyKeyToken( String key, int id )
    {
        getResource().forWriting().createPropertyKeyToken( key, id );
    }
    public void createLabelId( String name, int id )
    {
        getResource().forWriting().createLabelToken( name, id );
    }
    public void createRelationshipType( int id, String name )
    {
        getResource().forWriting().createRelationshipTypeToken( id, name );
    }
    public void dropSchemaRule( SchemaRule rule )
    {
        getResource().forWriting().dropSchemaRule( rule );
    }
    public void setConstraintIndexOwner( IndexRule constraintIndex, long constraintId )
    {
        getResource().forWriting().setConstraintIndexOwner( constraintIndex, constraintId );
    }
    public void createSchemaRule( SchemaRule rule )
    {
        getResource().forWriting().createSchemaRule( rule );
    }
    public void addLabelToNode( int labelId, long nodeId )
    {
        getResource().forWriting().addLabelToNode( labelId, nodeId );
    }
    public void removeLabelFromNode( int labelId, long nodeId )
    {
        getResource().forWriting().removeLabelFromNode( labelId, nodeId );
    }
    public PrimitiveLongIterator getLabelsForNode( long nodeId )
    {
        return getResource().forReading().getLabelsForNode( nodeId );
    }
    public KernelTransaction currentKernelTransactionForReading()
    {
        return getResource().forReading().kernelTransaction();
    }
    public KernelTransaction currentKernelTransactionForWriting()
    {
        return getResource().forWriting().kernelTransaction();
    }
    // Forces creation of the per-transaction resource without using it.
    public void ensureKernelIsEnlisted()
    {
        getResource();
    }
    /**
     * Returns the ResourceHolder bound to the current transaction, creating and
     * registering one (with its cleanup hook) on first access.
     */
    public ResourceHolder getResource()
    {
        TransactionState txState = transactionManager.getTransactionState();
        ResourceHolder resource = txState.getNeoStoreTransaction();
        if ( resource == null )
        {
            txState.setNeoStoreTransaction( resource = createResource( getCurrentTransaction() ) );
        }
        return resource;
    }
    private ResourceHolder createResource( Transaction tx )
    {
        try
        {
            XaConnection xaConnection = persistenceSource.getXaDataSource().getXaConnection();
            NeoStoreTransaction resource = persistenceSource.createTransaction( xaConnection );
            ResourceHolder result = new ResourceHolder( syncHookFactory, tx, xaConnection, resource );
            TransactionState state = transactionManager.getTransactionState();
            // The cleanup hook releases connections and locks when tx completes.
            tx.registerSynchronization( new ResourceCleanupHook( tx, state, result ) );
            return result;
        }
        catch ( RollbackException e )
        {
            throw new ResourceAcquisitionFailedException( e );
        }
        catch ( SystemException e )
        {
            throw new ResourceAcquisitionFailedException( e );
        }
    }
    /**
     * Returns the active JTA transaction for the calling thread.
     *
     * @throws NotInTransactionException if no transaction is active.
     */
    public Transaction getCurrentTransaction()
            throws NotInTransactionException
    {
        try
        {
            Transaction tx = transactionManager.getTransaction();
            if ( tx == null )
            {
                throw new NotInTransactionException();
            }
            return tx;
        }
        catch ( SystemException se )
        {
            throw new TransactionFailureException( "Error fetching transaction "
                    + "for current thread", se );
        }
    }
    /**
     * JTA synchronization that delists the resource before completion and
     * releases connections, state locks and kernel locks after completion.
     */
    private class ResourceCleanupHook implements Synchronization
    {
        private final Transaction tx;
        private final TransactionState state;
        private final ResourceHolder resourceHolder;
        ResourceCleanupHook( Transaction tx, TransactionState state, ResourceHolder resourceHolder )
        {
            this.tx = tx;
            this.state = state;
            this.resourceHolder = resourceHolder;
        }
        @Override
        public void afterCompletion( int param )
        {
            try
            {
                releaseConnections( tx );
                // Release locks held in the old transaction state
                if ( param == Status.STATUS_COMMITTED )
                {
                    state.commit();
                }
                else
                {
                    state.rollback();
                }
            }
            finally
            {
                // Release locks held by the kernel API stack
                try
                {
                    resourceHolder.resource.kernelTransaction().release();
                }
                catch ( ReleaseLocksFailedKernelException e )
                {
                    // Log and continue: afterCompletion must not propagate.
                    msgLog.error( "Error releasing resources for " + tx, e );
                }
            }
        }
        @Override
        public void beforeCompletion()
        {
            resourceHolder.delist();
        }
        private void releaseConnections( Transaction tx )
        {
            try
            {
                releaseResourceConnectionsForTransaction( tx, state );
            }
            catch ( Throwable t )
            {
                // Swallow deliberately: cleanup must proceed past individual failures.
                msgLog.error( "Error releasing resources for " + tx, t );
            }
        }
    }
    // Destroys the XA connection held for the given transaction, if any.
    void releaseResourceConnectionsForTransaction( Transaction tx, TransactionState state )
            throws NotInTransactionException
    {
        ResourceHolder resource = state.getNeoStoreTransaction();
        if ( resource != null )
        {
            resource.destroy();
        }
    }
    /**
     * Pairs a NeoStoreTransaction with its XA connection; lazily enlists the
     * connection's XAResource in the JTA transaction on first write access.
     */
    public static class ResourceHolder
    {
        private final TxEventSyncHookFactory syncHookFactory;
        private final Transaction tx;
        private final XaConnection connection;
        private final NeoStoreTransaction resource;
        // True once the XAResource has been enlisted; guards against double enlisting.
        private boolean enlisted;
        ResourceHolder( TxEventSyncHookFactory syncHookFactory,
                Transaction tx, XaConnection connection, NeoStoreTransaction resource )
        {
            this.syncHookFactory = syncHookFactory;
            this.tx = tx;
            this.connection = connection;
            this.resource = resource;
        }
        public NeoStoreTransaction forReading()
        {
            return resource;
        }
        public NeoStoreTransaction forWriting()
        {
            if ( !enlisted )
            {
                enlist();
                enlisted = true;
            }
            return resource;
        }
        private void enlist()
        {
            try
            {
                XAResource xaResource = connection.getXaResource();
                if ( !tx.enlistResource( xaResource ) )
                {
                    throw new ResourceAcquisitionFailedException( xaResource );
                }
                TransactionEventsSyncHook hook = syncHookFactory.create();
                if ( hook != null )
                {
                    tx.registerSynchronization( hook );
                }
            }
            // NOTE: the two catches resolve to different constructor overloads with
            // different messages, so they must not be merged into a multi-catch.
            catch ( RollbackException re )
            {
                throw new ResourceAcquisitionFailedException( re );
            }
            catch ( SystemException se )
            {
                throw new ResourceAcquisitionFailedException( se );
            }
        }
        public void delist()
        {
            if ( enlisted )
            {
                try
                {
                    connection.delistResource( tx, XAResource.TMSUCCESS );
                }
                catch ( SystemException e )
                {
                    throw new TransactionFailureException(
                            "Failed to delist resource '" + resource + "' from current transaction.", e );
                }
            }
        }
        void destroy()
        {
            connection.destroy();
        }
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_persistence_PersistenceManager.java
|
251
|
/**
 * Signals that generating a new entity id failed; wraps the underlying cause
 * and/or carries a descriptive message.
 */
public class IdGenerationFailedException extends RuntimeException
{
    public IdGenerationFailedException( String s )
    {
        super( s );
    }

    public IdGenerationFailedException( Throwable cause )
    {
        super( cause );
    }

    public IdGenerationFailedException( String s, Throwable cause )
    {
        super( s, cause );
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_persistence_IdGenerationFailedException.java
|
252
|
// Logical log stand-in that, instead of writing commands, feeds each one to a
// verification visitor so tests can assert on what would have been logged.
private static class VerifyingXaLogicalLog extends XaLogicalLog
{
    private final Visitor<XaCommand, RuntimeException> verifier;
    public VerifyingXaLogicalLog( FileSystemAbstraction fs, Visitor<XaCommand, RuntimeException> verifier )
    {
        // Minimal super wiring: dummy file, no pruning, 25MB rotation threshold.
        super( new File( "log" ), null, null, null, fs, new Monitors(), new SingleLoggingService( DEV_NULL ),
                LogPruneStrategies.NO_PRUNING, null, mock( KernelHealth.class ), 25*1024*1024, ALLOW_ALL );
        this.verifier = verifier;
    }
    @Override
    public synchronized void writeCommand( XaCommand command, int identifier ) throws IOException
    {
        // Verify instead of write; the identifier is intentionally ignored here.
        this.verifier.visit( command );
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_nioneo_xa_WriteTransactionTest.java
|
253
|
// Mockito Answer that drains an Iterator/Iterable invocation argument into a list
// so its contents can be asserted on afterwards.
private class IteratorCollector<T> implements Answer<Object>
{
    // Index of the invocation argument holding the iterator/iterable to drain.
    private final int argIndex;
    private final List<T> elements = new ArrayList<>();

    public IteratorCollector( int arg )
    {
        this.argIndex = arg;
    }

    @SafeVarargs
    public final void assertContent( T... expected )
    {
        assertEquals( Arrays.asList( expected ), elements );
    }

    @Override
    @SuppressWarnings("unchecked")
    public Object answer( InvocationOnMock invocation ) throws Throwable
    {
        Object argument = invocation.getArguments()[argIndex];
        if ( argument instanceof Iterable )
        {
            argument = ((Iterable) argument).iterator();
        }
        if ( argument instanceof Iterator )
        {
            collect( (Iterator) argument );
        }
        return null;
    }

    private void collect( Iterator<T> source )
    {
        while ( source.hasNext() )
        {
            elements.add( source.next() );
        }
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_nioneo_xa_WriteTransactionTest.java
|
254
|
// Visitor that captures every command it sees and can replay them later,
// either into a transaction or into another visitor.
private static class CommandCapturingVisitor implements Visitor<XaCommand,RuntimeException>
{
    // Captured commands, in visiting order.
    private final Collection<XaCommand> captured = new ArrayList<>();

    @Override
    public boolean visit( XaCommand element ) throws RuntimeException
    {
        captured.add( element );
        // Always keep visiting.
        return true;
    }

    /** Injects every captured command into the given transaction. */
    public void injectInto( NeoStoreTransaction tx )
    {
        for ( XaCommand command : captured )
        {
            tx.injectCommand( command );
        }
    }

    /** Feeds every captured command to the given visitor; return values are ignored. */
    public void visitCapturedCommands( Visitor<XaCommand, RuntimeException> visitor )
    {
        for ( XaCommand command : captured )
        {
            visitor.visit( command );
        }
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_nioneo_xa_WriteTransactionTest.java
|
255
|
{
@Override
public LabelScanReader newReader()
{
return LabelScanReader.EMPTY;
}
@Override
public LabelScanWriter newWriter()
{
return LabelScanWriter.EMPTY;
}
@Override
public void stop()
{ // Do nothing
}
@Override
public void start()
{ // Do nothing
}
@Override
public void shutdown()
{ // Do nothing
}
@Override
public void recover( Iterator<NodeLabelUpdate> updates )
{ // Do nothing
}
@Override
public AllEntriesLabelScanReader newAllEntriesReader()
{
return null;
}
@Override
public ResourceIterator<File> snapshotStoreFiles()
{
return emptyIterator();
}
@Override
public void init()
{ // Do nothing
}
@Override
public void force()
{ // Do nothing
}
};
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_nioneo_xa_WriteTransactionTest.java
|
256
|
// PropertyStore stand-in that records what would have been written instead of touching storage.
private static class RecordingPropertyStore extends PropertyStore
{
    // Shared sink that updateRecord() appends action descriptions to.
    private final AtomicReference<List<String>> recording;

    public RecordingPropertyStore( AtomicReference<List<String>> currentRecording )
    {
        super( null, null, null, null, null, null, null, null, null );
        this.recording = currentRecording;
    }

    @Override
    public void updateRecord( PropertyRecord record )
    {
        // Record the action instead of writing anything.
        recording.get().add( commandActionToken( record ) + " property" );
    }

    // Storage lifecycle checks are disabled: this store is never backed by files.
    @Override
    protected void checkStorage()
    {
    }

    @Override
    protected void checkVersion()
    {
    }

    @Override
    protected void loadStorage()
    {
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_nioneo_xa_WriteTransactionCommandOrderingTest.java
|
257
|
{
@Override
public boolean visit( XaCommand element )
{
for ( DynamicRecord record : ((SchemaRuleCommand) element).getRecordsAfter() )
{
assertFalse( record + " should have been heavy", record.isLight() );
}
return true;
}
};
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_nioneo_xa_WriteTransactionTest.java
|
258
|
{
@Override
public boolean visit( XaCommand element )
{
return true;
}
};
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_nioneo_xa_WriteTransactionTest.java
|
259
|
{
@Override
public synchronized Object answer( InvocationOnMock invocation ) throws Throwable
{
Lock mock = mock( Lock.class );
lockMocks.add( mock );
return mock;
}
} );
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_nioneo_xa_WriteTransactionTest.java
|
260
|
{
@Override
public boolean visit( XaCommand element )
{
if ( element instanceof PropertyCommand )
{
// THEN
PropertyCommand propertyCommand = (PropertyCommand) element;
verifyPropertyRecord( propertyCommand.getBefore() );
verifyPropertyRecord( propertyCommand.getAfter() );
return true;
}
return false;
}
private void verifyPropertyRecord( PropertyRecord record )
{
if ( record.getPrevProp() != Record.NO_NEXT_PROPERTY.intValue() )
{
for ( PropertyBlock block : record.getPropertyBlocks() )
{
assertTrue( block.isLight() );
}
}
}
};
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_nioneo_xa_WriteTransactionTest.java
|
261
|
{
@Override
public boolean visit( XaCommand element )
{
if ( element instanceof PropertyCommand )
{
PropertyRecord before = ((PropertyCommand) element).getBefore();
assertFalse( before.inUse() );
assertEquals( Collections.<PropertyBlock>emptyList(), before.getPropertyBlocks() );
PropertyRecord after = ((PropertyCommand) element).getAfter();
assertTrue( after.inUse() );
assertEquals( 1, count( after.getPropertyBlocks() ) );
}
return true;
}
};
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_nioneo_xa_WriteTransactionTest.java
|
262
|
{
@Override
public boolean visit( XaCommand element ) throws RuntimeException
{
if( element instanceof Command.NodeCommand )
{
Command.NodeCommand cmd = (Command.NodeCommand)element;
DynamicRecord before = cmd.getBefore().getDynamicLabelRecords().iterator().next();
DynamicRecord after = cmd.getAfter().getDynamicLabelRecords().iterator().next();
assertThat( before.getId(), equalTo(after.getId()) );
assertThat( after.inUse(), equalTo(true) );
}
return true;
}
});
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_nioneo_xa_WriteTransactionTest.java
|
263
|
{
@Override
public boolean visit( XaCommand element ) throws RuntimeException
{
if( element instanceof Command.NodeCommand )
{
Command.NodeCommand cmd = (Command.NodeCommand)element;
Collection<DynamicRecord> beforeDynLabels = cmd.getAfter().getDynamicLabelRecords();
assertThat( beforeDynLabels.size(), equalTo(1) );
assertThat( beforeDynLabels.iterator().next().inUse(), equalTo(false) );
}
return true;
}
});
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_nioneo_xa_WriteTransactionTest.java
|
264
|
public class WriteTransactionTest
{
public static final String LONG_STRING = "string value long enough not to be stored as a short string";
@Test
public void shouldValidateConstraintIndexAsPartOfPrepare() throws Exception
{
// GIVEN
NeoStoreTransaction writeTransaction = newWriteTransaction( mockIndexing );
final long indexId = neoStore.getSchemaStore().nextId();
final long constraintId = neoStore.getSchemaStore().nextId();
writeTransaction.createSchemaRule( uniquenessConstraintRule( constraintId, 1, 1, indexId ) );
// WHEN
writeTransaction.prepare();
// THEN
verify( mockIndexing ).validateIndex( indexId );
}
@Test
public void shouldAddSchemaRuleToCacheWhenApplyingTransactionThatCreatesOne() throws Exception
{
// GIVEN
NeoStoreTransaction writeTransaction = newWriteTransaction( mockIndexing );
// WHEN
final long ruleId = neoStore.getSchemaStore().nextId();
IndexRule schemaRule = indexRule( ruleId, 10, 8, PROVIDER_DESCRIPTOR );
writeTransaction.createSchemaRule( schemaRule );
writeTransaction.prepare();
writeTransaction.commit();
// THEN
verify( cacheAccessBackDoor ).addSchemaRule( schemaRule );
}
@Test
public void shouldRemoveSchemaRuleFromCacheWhenApplyingTransactionThatDeletesOne() throws Exception
{
// GIVEN
SchemaStore schemaStore = neoStore.getSchemaStore();
int labelId = 10, propertyKey = 10;
IndexRule rule = indexRule( schemaStore.nextId(), labelId, propertyKey, PROVIDER_DESCRIPTOR );
Collection<DynamicRecord> records = schemaStore.allocateFrom( rule );
for ( DynamicRecord record : records )
{
schemaStore.updateRecord( record );
}
long ruleId = first( records ).getId();
NeoStoreTransaction writeTransaction = newWriteTransaction( mockIndexing );
// WHEN
writeTransaction.dropSchemaRule( rule );
writeTransaction.prepare();
writeTransaction.commit();
// THEN
verify( cacheAccessBackDoor ).removeSchemaRuleFromCache( ruleId );
}
@Test
public void shouldMarkDynamicLabelRecordsAsNotInUseWhenLabelsAreReInlined() throws Exception
{
// GIVEN
final long nodeId = neoStore.getNodeStore().nextId();
// A transaction that creates labels that just barely fit to be inlined
NeoStoreTransaction writeTransaction = newWriteTransaction( mockIndexing );
writeTransaction.nodeCreate( nodeId );
writeTransaction.addLabelToNode( 7, nodeId );
writeTransaction.addLabelToNode( 11, nodeId );
writeTransaction.addLabelToNode( 12, nodeId );
writeTransaction.addLabelToNode( 15, nodeId );
writeTransaction.addLabelToNode( 23, nodeId );
writeTransaction.addLabelToNode( 27, nodeId );
writeTransaction.addLabelToNode( 50, nodeId );
writeTransaction.prepare();
writeTransaction.commit();
// And given that I now start recording the commands in the log
CommandCapturingVisitor commandCapture = new CommandCapturingVisitor();
// WHEN
// I then remove multiple labels
writeTransaction = newWriteTransaction( mockIndexing, commandCapture);
writeTransaction.removeLabelFromNode( 11, nodeId );
writeTransaction.removeLabelFromNode( 23, nodeId );
writeTransaction.prepare();
writeTransaction.commit();
// THEN
// The dynamic label record should be part of what is logged, and it should be set to not in use anymore.
commandCapture.visitCapturedCommands( new Visitor<XaCommand, RuntimeException>()
{
@Override
public boolean visit( XaCommand element ) throws RuntimeException
{
if( element instanceof Command.NodeCommand )
{
Command.NodeCommand cmd = (Command.NodeCommand)element;
Collection<DynamicRecord> beforeDynLabels = cmd.getAfter().getDynamicLabelRecords();
assertThat( beforeDynLabels.size(), equalTo(1) );
assertThat( beforeDynLabels.iterator().next().inUse(), equalTo(false) );
}
return true;
}
});
}
@Test
public void shouldReUseOriginalDynamicRecordWhenInlinedAndThenExpandedLabelsInSameTx() throws Exception
{
// GIVEN
final long nodeId = neoStore.getNodeStore().nextId();
// A transaction that creates labels that just barely fit to be inlined
NeoStoreTransaction writeTransaction = newWriteTransaction( mockIndexing );
writeTransaction.nodeCreate( nodeId );
writeTransaction.addLabelToNode( 16, nodeId );
writeTransaction.addLabelToNode( 29, nodeId );
writeTransaction.addLabelToNode( 32, nodeId );
writeTransaction.addLabelToNode( 41, nodeId );
writeTransaction.addLabelToNode( 44, nodeId );
writeTransaction.addLabelToNode( 45, nodeId );
writeTransaction.addLabelToNode( 50, nodeId );
writeTransaction.addLabelToNode( 51, nodeId );
writeTransaction.addLabelToNode( 52, nodeId );
writeTransaction.prepare();
writeTransaction.commit();
// And given that I now start recording the commands in the log
CommandCapturingVisitor commandCapture = new CommandCapturingVisitor();
// WHEN
// I remove enough labels to inline them, but then add enough new labels to expand it back to dynamic
writeTransaction = newWriteTransaction( mockIndexing, commandCapture);
writeTransaction.removeLabelFromNode( 50, nodeId );
writeTransaction.removeLabelFromNode( 51, nodeId );
writeTransaction.removeLabelFromNode( 52, nodeId );
writeTransaction.addLabelToNode( 60, nodeId );
writeTransaction.addLabelToNode( 61, nodeId );
writeTransaction.addLabelToNode( 62, nodeId );
writeTransaction.prepare();
writeTransaction.commit();
// THEN
// The dynamic label record in before should be the same id as in after, and should be in use
commandCapture.visitCapturedCommands( new Visitor<XaCommand, RuntimeException>()
{
@Override
public boolean visit( XaCommand element ) throws RuntimeException
{
if( element instanceof Command.NodeCommand )
{
Command.NodeCommand cmd = (Command.NodeCommand)element;
DynamicRecord before = cmd.getBefore().getDynamicLabelRecords().iterator().next();
DynamicRecord after = cmd.getAfter().getDynamicLabelRecords().iterator().next();
assertThat( before.getId(), equalTo(after.getId()) );
assertThat( after.inUse(), equalTo(true) );
}
return true;
}
});
}
@Test
public void shouldRemoveSchemaRuleWhenRollingBackTransaction() throws Exception
{
// GIVEN
NeoStoreTransaction writeTransaction = newWriteTransaction( mockIndexing );
// WHEN
final long ruleId = neoStore.getSchemaStore().nextId();
writeTransaction.createSchemaRule( indexRule( ruleId, 10, 7, PROVIDER_DESCRIPTOR ) );
writeTransaction.prepare();
writeTransaction.rollback();
// THEN
verifyNoMoreInteractions( cacheAccessBackDoor );
}
@Test
public void shouldWriteProperBeforeAndAfterPropertyRecordsWhenAddingProperty() throws Exception
{
// THEN
Visitor<XaCommand, RuntimeException> verifier = new Visitor<XaCommand, RuntimeException>()
{
@Override
public boolean visit( XaCommand element )
{
if ( element instanceof PropertyCommand )
{
PropertyRecord before = ((PropertyCommand) element).getBefore();
assertFalse( before.inUse() );
assertEquals( Collections.<PropertyBlock>emptyList(), before.getPropertyBlocks() );
PropertyRecord after = ((PropertyCommand) element).getAfter();
assertTrue( after.inUse() );
assertEquals( 1, count( after.getPropertyBlocks() ) );
}
return true;
}
};
// GIVEN
NeoStoreTransaction writeTransaction = newWriteTransaction( mockIndexing, verifier );
int nodeId = 1;
writeTransaction.setCommitTxId( nodeId );
writeTransaction.nodeCreate( nodeId );
int propertyKey = 1;
Object value = 5;
// WHEN
writeTransaction.nodeAddProperty( nodeId, propertyKey, value );
writeTransaction.doPrepare();
}
// TODO change property record
// TODO remove property record
@Test
public void shouldConvertAddedPropertyToNodePropertyUpdates() throws Exception
{
// GIVEN
long nodeId = 0;
CapturingIndexingService indexingService = new CapturingIndexingService();
NeoStoreTransaction writeTransaction = newWriteTransaction( indexingService );
int propertyKey1 = 1, propertyKey2 = 2;
Object value1 = "first", value2 = 4;
// WHEN
writeTransaction.nodeCreate( nodeId );
writeTransaction.nodeAddProperty( nodeId, propertyKey1, value1 );
writeTransaction.nodeAddProperty( nodeId, propertyKey2, value2 );
prepareAndCommit( writeTransaction );
// THEN
assertEquals( asSet(
add( nodeId, propertyKey1, value1, none ),
add( nodeId, propertyKey2, value2, none ) ),
indexingService.updates );
}
@Test
public void shouldConvertChangedPropertyToNodePropertyUpdates() throws Exception
{
// GIVEN
int nodeId = 0;
NeoStoreTransaction writeTransaction = newWriteTransaction( mockIndexing );
int propertyKey1 = 1, propertyKey2 = 2;
Object value1 = "first", value2 = 4;
writeTransaction.nodeCreate( nodeId );
DefinedProperty property1 = writeTransaction.nodeAddProperty( nodeId, propertyKey1, value1 );
DefinedProperty property2 = writeTransaction.nodeAddProperty( nodeId, propertyKey2, value2 );
prepareAndCommit( writeTransaction );
// WHEN
CapturingIndexingService indexingService = new CapturingIndexingService();
Object newValue1 = "new", newValue2 = "new 2";
writeTransaction = newWriteTransaction( indexingService );
writeTransaction.nodeChangeProperty( nodeId, property1.propertyKeyId(), newValue1 );
writeTransaction.nodeChangeProperty( nodeId, property2.propertyKeyId(), newValue2 );
prepareAndCommit( writeTransaction );
// THEN
assertEquals( asSet(
change( nodeId, propertyKey1, value1, none, newValue1, none ),
change( nodeId, propertyKey2, value2, none, newValue2, none ) ),
indexingService.updates );
}
@Test
public void shouldConvertRemovedPropertyToNodePropertyUpdates() throws Exception
{
// GIVEN
int nodeId = 0;
NeoStoreTransaction writeTransaction = newWriteTransaction( mockIndexing );
int propertyKey1 = 1, propertyKey2 = 2;
Object value1 = "first", value2 = 4;
writeTransaction.nodeCreate( nodeId );
DefinedProperty property1 = writeTransaction.nodeAddProperty( nodeId, propertyKey1, value1 );
DefinedProperty property2 = writeTransaction.nodeAddProperty( nodeId, propertyKey2, value2 );
prepareAndCommit( writeTransaction );
// WHEN
CapturingIndexingService indexingService = new CapturingIndexingService();
writeTransaction = newWriteTransaction( indexingService );
writeTransaction.nodeRemoveProperty( nodeId, property1.propertyKeyId() );
writeTransaction.nodeRemoveProperty( nodeId, property2.propertyKeyId() );
prepareAndCommit( writeTransaction );
// THEN
assertEquals( asSet(
remove( nodeId, propertyKey1, value1, none ),
remove( nodeId, propertyKey2, value2, none ) ),
indexingService.updates );
}
@Test
public void shouldConvertLabelAdditionToNodePropertyUpdates() throws Exception
{
// GIVEN
long nodeId = 0;
NeoStoreTransaction writeTransaction = newWriteTransaction( mockIndexing );
int propertyKey1 = 1, propertyKey2 = 2, labelId = 3;
long[] labelIds = new long[] {labelId};
Object value1 = LONG_STRING, value2 = LONG_STRING.getBytes();
writeTransaction.nodeCreate( nodeId );
writeTransaction.nodeAddProperty( nodeId, propertyKey1, value1 );
writeTransaction.nodeAddProperty( nodeId, propertyKey2, value2 );
prepareAndCommit( writeTransaction );
// WHEN
CapturingIndexingService indexingService = new CapturingIndexingService();
writeTransaction = newWriteTransaction( indexingService );
writeTransaction.addLabelToNode( labelId, nodeId );
prepareAndCommit( writeTransaction );
// THEN
assertEquals( asSet(
add( nodeId, propertyKey1, value1, labelIds ),
add( nodeId, propertyKey2, value2, labelIds ) ),
indexingService.updates );
}
@Test
public void shouldConvertMixedLabelAdditionAndSetPropertyToNodePropertyUpdates() throws Exception
{
// GIVEN
long nodeId = 0;
NeoStoreTransaction writeTransaction = newWriteTransaction( mockIndexing );
int propertyKey1 = 1, propertyKey2 = 2, labelId1 = 3, labelId2 = 4;
Object value1 = "first", value2 = 4;
writeTransaction.nodeCreate( nodeId );
writeTransaction.nodeAddProperty( nodeId, propertyKey1, value1 );
writeTransaction.addLabelToNode( labelId1, nodeId );
prepareAndCommit( writeTransaction );
// WHEN
CapturingIndexingService indexingService = new CapturingIndexingService();
writeTransaction = newWriteTransaction( indexingService );
writeTransaction.nodeAddProperty( nodeId, propertyKey2, value2 );
writeTransaction.addLabelToNode( labelId2, nodeId );
prepareAndCommit( writeTransaction );
// THEN
assertEquals( asSet(
add( nodeId, propertyKey1, value1, new long[] {labelId2} ),
add( nodeId, propertyKey2, value2, new long[]{labelId2} ),
add( nodeId, propertyKey2, value2, new long[]{labelId1, labelId2} ) ),
indexingService.updates );
}
@Test
public void shouldConvertLabelRemovalToNodePropertyUpdates() throws Exception
{
// GIVEN
long nodeId = 0;
NeoStoreTransaction writeTransaction = newWriteTransaction( mockIndexing );
int propertyKey1 = 1, propertyKey2 = 2, labelId = 3;
long[] labelIds = new long[] {labelId};
Object value1 = "first", value2 = 4;
writeTransaction.nodeCreate( nodeId );
writeTransaction.nodeAddProperty( nodeId, propertyKey1, value1 );
writeTransaction.nodeAddProperty( nodeId, propertyKey2, value2 );
writeTransaction.addLabelToNode( labelId, nodeId );
prepareAndCommit( writeTransaction );
// WHEN
CapturingIndexingService indexingService = new CapturingIndexingService();
writeTransaction = newWriteTransaction( indexingService );
writeTransaction.removeLabelFromNode( labelId, nodeId );
prepareAndCommit( writeTransaction );
// THEN
assertEquals( asSet(
remove( nodeId, propertyKey1, value1, labelIds ),
remove( nodeId, propertyKey2, value2, labelIds ) ),
indexingService.updates );
}
@Test
public void shouldConvertMixedLabelRemovalAndRemovePropertyToNodePropertyUpdates() throws Exception
{
// GIVEN
long nodeId = 0;
NeoStoreTransaction writeTransaction = newWriteTransaction( mockIndexing );
int propertyKey1 = 1, propertyKey2 = 2, labelId1 = 3, labelId2 = 4;
Object value1 = "first", value2 = 4;
writeTransaction.nodeCreate( nodeId );
DefinedProperty property1 = writeTransaction.nodeAddProperty( nodeId, propertyKey1, value1 );
writeTransaction.nodeAddProperty( nodeId, propertyKey2, value2 );
writeTransaction.addLabelToNode( labelId1, nodeId );
writeTransaction.addLabelToNode( labelId2, nodeId );
prepareAndCommit( writeTransaction );
// WHEN
CapturingIndexingService indexingService = new CapturingIndexingService();
writeTransaction = newWriteTransaction( indexingService );
writeTransaction.nodeRemoveProperty( nodeId, property1.propertyKeyId() );
writeTransaction.removeLabelFromNode( labelId2, nodeId );
prepareAndCommit( writeTransaction );
// THEN
assertEquals( asSet(
remove( nodeId, propertyKey1, value1, new long[] {labelId1, labelId2} ),
remove( nodeId, propertyKey2, value2, new long[] {labelId2} ) ),
indexingService.updates );
}
@Test
public void shouldConvertMixedLabelRemovalAndAddPropertyToNodePropertyUpdates() throws Exception
{
// GIVEN
long nodeId = 0;
NeoStoreTransaction writeTransaction = newWriteTransaction( mockIndexing );
int propertyKey1 = 1, propertyKey2 = 2, labelId1 = 3, labelId2 = 4;
Object value1 = "first", value2 = 4;
writeTransaction.nodeCreate( nodeId );
writeTransaction.nodeAddProperty( nodeId, propertyKey1, value1 );
writeTransaction.addLabelToNode( labelId1, nodeId );
writeTransaction.addLabelToNode( labelId2, nodeId );
prepareAndCommit( writeTransaction );
// WHEN
CapturingIndexingService indexingService = new CapturingIndexingService();
writeTransaction = newWriteTransaction( indexingService );
writeTransaction.nodeAddProperty( nodeId, propertyKey2, value2 );
writeTransaction.removeLabelFromNode( labelId2, nodeId );
prepareAndCommit( writeTransaction );
// THEN
assertEquals( asSet(
add( nodeId, propertyKey2, value2, new long[]{labelId1} ),
remove( nodeId, propertyKey1, value1, new long[]{labelId2} ),
remove( nodeId, propertyKey2, value2, new long[]{labelId2} ) ),
indexingService.updates );
}
@Test
public void shouldUpdateHighIdsOnRecoveredTransaction() throws Exception
{
// GIVEN
NeoStoreTransaction tx = newWriteTransaction( mockIndexing );
int nodeId = 5, relId = 10, relationshipType = 3, propertyKeyId = 4, ruleId = 8;
// WHEN
tx.nodeCreate( nodeId );
tx.createRelationshipTypeToken( relationshipType, "type" );
tx.relationshipCreate( relId, 0, nodeId, nodeId );
tx.relAddProperty( relId, propertyKeyId,
new long[] {1l << 60, 1l << 60, 1l << 60, 1l << 60, 1l << 60, 1l << 60, 1l << 60, 1l << 60, 1l << 60, 1l << 60} );
tx.createPropertyKeyToken( "key", propertyKeyId );
tx.nodeAddProperty( nodeId, propertyKeyId,
"something long and nasty that requires dynamic records for sure I would think and hope. Ok then åäö%!=" );
for ( int i = 0; i < 10; i++ )
{
tx.addLabelToNode( 10000 + i, nodeId );
}
tx.createSchemaRule( indexRule( ruleId, 100, propertyKeyId, PROVIDER_DESCRIPTOR ) );
prepareAndCommitRecovered( tx );
// THEN
assertEquals( "NodeStore", nodeId+1, neoStore.getNodeStore().getHighId() );
assertEquals( "DynamicNodeLabelStore", 2, neoStore.getNodeStore().getDynamicLabelStore().getHighId() );
assertEquals( "RelationshipStore", relId+1, neoStore.getRelationshipStore().getHighId() );
assertEquals( "RelationshipTypeStore", relationshipType+1, neoStore.getRelationshipTypeStore().getHighId() );
assertEquals( "RelationshipType NameStore", 2, neoStore.getRelationshipTypeStore().getNameStore().getHighId() );
assertEquals( "PropertyStore", 2, neoStore.getPropertyStore().getHighId() );
assertEquals( "PropertyStore DynamicStringStore", 2, neoStore.getPropertyStore().getStringStore().getHighId() );
assertEquals( "PropertyStore DynamicArrayStore", 2, neoStore.getPropertyStore().getArrayStore().getHighId() );
assertEquals( "PropertyIndexStore", propertyKeyId+1, neoStore.getPropertyStore().getPropertyKeyTokenStore().getHighId() );
assertEquals( "PropertyKeyToken NameStore", 2, neoStore.getPropertyStore().getPropertyKeyTokenStore().getNameStore().getHighId() );
assertEquals( "SchemaStore", ruleId+1, neoStore.getSchemaStore().getHighId() );
}
@Test
public void createdSchemaRuleRecordMustBeWrittenHeavy() throws Exception
{
// THEN
Visitor<XaCommand, RuntimeException> verifier = heavySchemaRuleVerifier();
// GIVEN
NeoStoreTransaction tx = newWriteTransaction( mockIndexing, verifier );
long ruleId = 0;
int labelId = 5, propertyKeyId = 7;
SchemaRule rule = indexRule( ruleId, labelId, propertyKeyId, PROVIDER_DESCRIPTOR );
// WHEN
tx.createSchemaRule( rule );
prepareAndCommit( tx );
}
@Test
public void shouldWriteProperPropertyRecordsWhenOnlyChangingLinkage() throws Exception
{
/* There was an issue where GIVEN:
*
* Legend: () = node, [] = property record
*
* ()-->[0:block{size:1}]
*
* WHEN adding a new property record in front of if, not changing any data in that record i.e:
*
* ()-->[1:block{size:4}]-->[0:block{size:1}]
*
* The state of property record 0 would be that it had loaded value records for that block,
* but those value records weren't heavy, so writing that record to the log would fail
* w/ an assertion data != null.
*/
// GIVEN
NeoStoreTransaction tx = newWriteTransaction( mockIndexing );
int nodeId = 0;
tx.nodeCreate( nodeId );
int index = 0;
tx.nodeAddProperty( nodeId, index, string( 70 ) ); // will require a block of size 1
prepareAndCommit( tx );
// WHEN
Visitor<XaCommand, RuntimeException> verifier = new Visitor<XaCommand, RuntimeException>()
{
@Override
public boolean visit( XaCommand element )
{
if ( element instanceof PropertyCommand )
{
// THEN
PropertyCommand propertyCommand = (PropertyCommand) element;
verifyPropertyRecord( propertyCommand.getBefore() );
verifyPropertyRecord( propertyCommand.getAfter() );
return true;
}
return false;
}
private void verifyPropertyRecord( PropertyRecord record )
{
if ( record.getPrevProp() != Record.NO_NEXT_PROPERTY.intValue() )
{
for ( PropertyBlock block : record.getPropertyBlocks() )
{
assertTrue( block.isLight() );
}
}
}
};
tx = newWriteTransaction( mockIndexing, verifier );
int index2 = 1;
tx.nodeAddProperty( nodeId, index2, string( 40 ) ); // will require a block of size 4
prepareAndCommit( tx );
}
@Test
public void shouldCreateEqualNodePropertyUpdatesOnRecoveryOfCreatedNode() throws Exception
{
/* There was an issue where recovering a tx where a node with a label and a property
* was created resulted in two exact copies of NodePropertyUpdates. */
// GIVEN
long nodeId = 0;
int labelId = 5, propertyKeyId = 7;
NodePropertyUpdate expectedUpdate = NodePropertyUpdate.add( nodeId, propertyKeyId, "Neo", new long[] {labelId} );
// -- an index
long ruleId = 0;
NeoStoreTransaction tx = newWriteTransaction( mockIndexing );
SchemaRule rule = indexRule( ruleId, labelId, propertyKeyId, PROVIDER_DESCRIPTOR );
tx.createSchemaRule( rule );
prepareAndCommit( tx );
// -- and a tx creating a node with that label and property key
IndexingService index = mock( IndexingService.class );
IteratorCollector<NodePropertyUpdate> indexUpdates = new IteratorCollector<>( 0 );
doAnswer( indexUpdates ).when( index ).updateIndexes( any( IndexUpdates.class ) );
CommandCapturingVisitor commandCapturingVisitor = new CommandCapturingVisitor();
tx = newWriteTransaction( index, commandCapturingVisitor );
tx.nodeCreate( nodeId );
tx.addLabelToNode( labelId, nodeId );
tx.nodeAddProperty( nodeId, propertyKeyId, "Neo" );
prepareAndCommit( tx );
verify( index, times( 1 ) ).updateIndexes( any( IndexUpdates.class ) );
indexUpdates.assertContent( expectedUpdate );
reset( index );
indexUpdates = new IteratorCollector<>( 0 );
doAnswer( indexUpdates ).when( index ).updateIndexes( any( IndexUpdates.class ) );
// WHEN
// -- later recovering that tx, there should be only one update
tx = newWriteTransaction( index );
commandCapturingVisitor.injectInto( tx );
prepareAndCommitRecovered( tx );
verify( index, times( 1 ) ).updateIndexes( any( IndexUpdates.class ) );
indexUpdates.assertContent( expectedUpdate );
}
@Test
public void shouldLockUpdatedNodes() throws Exception
{
// given
NodeStore nodeStore = neoStore.getNodeStore();
long[] nodes = { // allocate ids
nodeStore.nextId(),
nodeStore.nextId(),
nodeStore.nextId(),
nodeStore.nextId(),
nodeStore.nextId(),
nodeStore.nextId(),
nodeStore.nextId(),
};
// create the node records that we will modify in our main tx.
{
NeoStoreTransaction tx = newWriteTransaction( mockIndexing );
for ( int i = 1; i < nodes.length - 1; i++ )
{
tx.nodeCreate( nodes[i] );
}
tx.nodeAddProperty( nodes[3], 0, "old" );
tx.nodeAddProperty( nodes[4], 0, "old" );
prepareAndCommit( tx );
reset( locks ); // reset the lock counts
}
// These are the changes we want to assert locking on
NeoStoreTransaction tx = newWriteTransaction( mockIndexing );
tx.nodeCreate( nodes[0] );
tx.addLabelToNode( 0, nodes[1] );
tx.nodeAddProperty( nodes[2], 0, "value" );
tx.nodeChangeProperty( nodes[3], 0, "value" );
tx.nodeRemoveProperty( nodes[4], 0 );
tx.nodeDelete( nodes[5] );
tx.nodeCreate( nodes[6] );
tx.addLabelToNode( 0, nodes[6] );
tx.nodeAddProperty( nodes[6], 0, "value" );
// when
prepareAndCommit( tx );
// then
// create node, NodeCommand == 1 update
verify( locks, times( 1 ) ).acquireNodeLock( nodes[0], LockService.LockType.WRITE_LOCK );
// add label, NodeCommand == 1 update
verify( locks, times( 1 ) ).acquireNodeLock( nodes[1], LockService.LockType.WRITE_LOCK );
// add property, NodeCommand and PropertyCommand == 2 updates
verify( locks, times( 2 ) ).acquireNodeLock( nodes[2], LockService.LockType.WRITE_LOCK );
// update property, in place, PropertyCommand == 1 update
verify( locks, times( 1 ) ).acquireNodeLock( nodes[3], LockService.LockType.WRITE_LOCK );
// remove property, updates the Node and the Property == 2 updates
verify( locks, times( 2 ) ).acquireNodeLock( nodes[4], LockService.LockType.WRITE_LOCK );
// delete node, single NodeCommand == 1 update
verify( locks, times( 1 ) ).acquireNodeLock( nodes[5], LockService.LockType.WRITE_LOCK );
// create and add-label goes into the NodeCommand, add property is a PropertyCommand == 2 updates
verify( locks, times( 2 ) ).acquireNodeLock( nodes[6], LockService.LockType.WRITE_LOCK );
}
private String string( int length )
{
StringBuilder result = new StringBuilder();
char ch = 'a';
for ( int i = 0; i < length; i++ )
{
result.append( (char)((ch + (i%10))) );
}
return result.toString();
}
@Rule public EphemeralFileSystemRule fs = new EphemeralFileSystemRule();
private TransactionState transactionState;
private final Config config = new Config( stringMap() );
@SuppressWarnings("deprecation")
private final DefaultIdGeneratorFactory idGeneratorFactory = new DefaultIdGeneratorFactory();
private final DefaultWindowPoolFactory windowPoolFactory = new DefaultWindowPoolFactory();
private NeoStore neoStore;
private LockService locks;
private CacheAccessBackDoor cacheAccessBackDoor;
private final List<Lock> lockMocks = new ArrayList<>();
@Before
public void before() throws Exception
{
transactionState = TransactionState.NO_STATE;
@SuppressWarnings("deprecation")
StoreFactory storeFactory = new StoreFactory( config, idGeneratorFactory, windowPoolFactory,
fs.get(), DEV_NULL, new DefaultTxHook() );
neoStore = storeFactory.createNeoStore( new File( "neostore" ) );
locks = mock( LockService.class, new Answer()
{
@Override
public synchronized Object answer( InvocationOnMock invocation ) throws Throwable
{
Lock mock = mock( Lock.class );
lockMocks.add( mock );
return mock;
}
} );
cacheAccessBackDoor = mock( CacheAccessBackDoor.class );
}
@After
public void shouldReleaseAllLocks()
{
for ( Lock lock : lockMocks )
{
verify( lock ).release();
}
}
private static class VerifyingXaLogicalLog extends XaLogicalLog
{
private final Visitor<XaCommand, RuntimeException> verifier;
public VerifyingXaLogicalLog( FileSystemAbstraction fs, Visitor<XaCommand, RuntimeException> verifier )
{
super( new File( "log" ), null, null, null, fs, new Monitors(), new SingleLoggingService( DEV_NULL ),
LogPruneStrategies.NO_PRUNING, null, mock( KernelHealth.class ), 25*1024*1024, ALLOW_ALL );
this.verifier = verifier;
}
@Override
public synchronized void writeCommand( XaCommand command, int identifier ) throws IOException
{
this.verifier.visit( command );
}
}
private static class CommandCapturingVisitor implements Visitor<XaCommand,RuntimeException>
{
private final Collection<XaCommand> commands = new ArrayList<>();
@Override
public boolean visit( XaCommand element ) throws RuntimeException
{
commands.add( element );
return true;
}
public void injectInto( NeoStoreTransaction tx )
{
for ( XaCommand command : commands )
{
tx.injectCommand( command );
}
}
public void visitCapturedCommands( Visitor<XaCommand, RuntimeException> visitor )
{
for ( XaCommand command : commands )
{
visitor.visit( command );
}
}
}
private final IndexingService mockIndexing = mock( IndexingService.class );
private final KernelTransactionImplementation kernelTransaction = mock( KernelTransactionImplementation.class );
private NeoStoreTransaction newWriteTransaction( IndexingService indexing )
{
return newWriteTransaction( indexing, nullVisitor );
}
private NeoStoreTransaction newWriteTransaction( IndexingService indexing, Visitor<XaCommand,
RuntimeException> verifier )
{
VerifyingXaLogicalLog log = new VerifyingXaLogicalLog( fs.get(), verifier );
NeoStoreTransaction result = new NeoStoreTransaction( 0l, log, transactionState, neoStore,
cacheAccessBackDoor, indexing, NO_LABEL_SCAN_STORE, new IntegrityValidator(neoStore, indexing ),
kernelTransaction, locks );
result.setIdentifier( 0 );
result.setCommitTxId( neoStore.getLastCommittedTx()+1 );
return result;
}
private class CapturingIndexingService extends IndexingService
{
private final Set<NodePropertyUpdate> updates = new HashSet<>();
public CapturingIndexingService()
{
super( null,
new DefaultSchemaIndexProviderMap( NO_INDEX_PROVIDER ),
new NeoStoreIndexStoreView( locks, neoStore ),
null,
new KernelSchemaStateStore(),
new SingleLoggingService( DEV_NULL ), IndexingService.NO_MONITOR
);
}
@Override
public void updateIndexes( IndexUpdates updates )
{
this.updates.addAll( asCollection( updates ) );
}
}
private static final long[] none = new long[0];
private static final Visitor<XaCommand, RuntimeException> nullVisitor = new Visitor<XaCommand, RuntimeException>()
{
@Override
public boolean visit( XaCommand element )
{
return true;
}
};
private Visitor<XaCommand, RuntimeException> heavySchemaRuleVerifier()
{
return new Visitor<XaCommand, RuntimeException>()
{
@Override
public boolean visit( XaCommand element )
{
for ( DynamicRecord record : ((SchemaRuleCommand) element).getRecordsAfter() )
{
assertFalse( record + " should have been heavy", record.isLight() );
}
return true;
}
};
}
private void prepareAndCommitRecovered( NeoStoreTransaction tx ) throws Exception
{
tx.setRecovered();
prepareAndCommit( tx );
}
private void prepareAndCommit( NeoStoreTransaction tx ) throws Exception
{
tx.doPrepare();
tx.doCommit();
}
public static final LabelScanStore NO_LABEL_SCAN_STORE = new LabelScanStore()
{
@Override
public LabelScanReader newReader()
{
return LabelScanReader.EMPTY;
}
@Override
public LabelScanWriter newWriter()
{
return LabelScanWriter.EMPTY;
}
@Override
public void stop()
{ // Do nothing
}
@Override
public void start()
{ // Do nothing
}
@Override
public void shutdown()
{ // Do nothing
}
@Override
public void recover( Iterator<NodeLabelUpdate> updates )
{ // Do nothing
}
@Override
public AllEntriesLabelScanReader newAllEntriesReader()
{
return null;
}
@Override
public ResourceIterator<File> snapshotStoreFiles()
{
return emptyIterator();
}
@Override
public void init()
{ // Do nothing
}
@Override
public void force()
{ // Do nothing
}
};
private class IteratorCollector<T> implements Answer<Object>
{
private final int arg;
private final List<T> elements = new ArrayList<>();
public IteratorCollector( int arg )
{
this.arg = arg;
}
@SafeVarargs
public final void assertContent( T... expected )
{
assertEquals( Arrays.asList( expected ), elements );
}
@Override
@SuppressWarnings("unchecked")
public Object answer( InvocationOnMock invocation ) throws Throwable
{
Object iterator = invocation.getArguments()[arg];
if ( iterator instanceof Iterable )
{
iterator = ((Iterable) iterator).iterator();
}
if ( iterator instanceof Iterator )
{
collect( (Iterator) iterator );
}
return null;
}
private void collect( Iterator<T> iterator )
{
while ( iterator.hasNext() )
{
elements.add( iterator.next() );
}
}
}
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_nioneo_xa_WriteTransactionTest.java
|
265
|
private static class RecordingRelationshipStore extends RelationshipStore
{
private final AtomicReference<List<String>> currentRecording;
public RecordingRelationshipStore( AtomicReference<List<String>> currentRecording )
{
super( null, null, null, null, null, null );
this.currentRecording = currentRecording;
}
@Override
public void updateRecord(RelationshipRecord record) {
currentRecording.get().add(commandActionToken(record) + " relationship");
}
@Override
protected void checkStorage() {
}
@Override
protected void checkVersion() {
}
@Override
protected void loadStorage() {
}
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_nioneo_xa_WriteTransactionCommandOrderingTest.java
|
266
|
{
@Override
public File apply( Config config )
{
return config.get( Configuration.logical_log );
}
} );
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_xa_NeoStoreXaDataSource.java
|
267
|
{
@Override
public GraphPropertiesImpl evaluate()
{
return nodeManager.getGraphProperties();
}
} );
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_xa_NeoStoreXaDataSource.java
|
268
|
public class PlaceboTm extends AbstractTransactionManager
{
private LockManager lockManager;
private final TxIdGenerator txIdGenerator;
private final Transaction tx = new PlaceboTransaction();
public PlaceboTm( LockManager lockManager, TxIdGenerator txIdGenerator )
{
this.lockManager = lockManager;
this.txIdGenerator = txIdGenerator;
}
public void setLockManager( LockManager lockManager )
{
this.lockManager = lockManager;
}
@Override
public void begin() throws NotSupportedException, SystemException
{
}
@Override
public void commit() throws RollbackException, HeuristicMixedException,
HeuristicRollbackException, SecurityException, IllegalStateException,
SystemException
{
}
@Override
public int getStatus() throws SystemException
{
return Status.STATUS_ACTIVE;
}
@Override
public Transaction getTransaction() throws SystemException
{
return tx;
}
@Override
public void resume( Transaction arg0 ) throws InvalidTransactionException,
IllegalStateException, SystemException
{
}
@Override
public void rollback() throws IllegalStateException, SecurityException,
SystemException
{
}
@Override
public void setRollbackOnly() throws IllegalStateException, SystemException
{
}
@Override
public void setTransactionTimeout( int arg0 ) throws SystemException
{
}
@Override
public Transaction suspend() throws SystemException
{
return null;
}
@Override
public void init()
{
}
@Override
public void start()
throws Throwable
{
}
@Override
public void stop()
{
}
@Override
public void shutdown()
throws Throwable
{
}
@Override
public int getEventIdentifier()
{
return 0;
}
@Override
public void doRecovery() throws Throwable
{
}
@Override
public TransactionState getTransactionState()
{
return new NoTransactionState()
{
@Override
public LockElement acquireReadLock( Object resource )
{
try
{
Transaction tx = getTransaction();
lockManager.getReadLock( resource, tx );
return new LockElement( resource, tx, LockType.READ, lockManager );
}
catch ( Exception e )
{
throw launderedException( e );
}
}
@Override
public LockElement acquireWriteLock( Object resource )
{
try
{
Transaction tx = getTransaction();
lockManager.getWriteLock( resource, tx );
return new LockElement( resource, tx, LockType.WRITE, lockManager );
}
catch ( SystemException e )
{
throw launderedException( e );
}
}
@Override
public TxIdGenerator getTxIdGenerator()
{
return txIdGenerator;
}
};
}
private static class PlaceboTransaction implements Transaction
{
@Override
public void commit() throws HeuristicMixedException, HeuristicRollbackException, RollbackException,
SecurityException, SystemException
{
}
@Override
public boolean delistResource( XAResource xaRes, int flag ) throws IllegalStateException, SystemException
{
return true;
}
@Override
public boolean enlistResource( XAResource xaRes ) throws IllegalStateException, RollbackException,
SystemException
{
return true;
}
@Override
public int getStatus() throws SystemException
{
return Status.STATUS_ACTIVE;
}
@Override
public void registerSynchronization( Synchronization synch ) throws IllegalStateException, RollbackException,
SystemException
{
}
@Override
public void rollback() throws IllegalStateException, SystemException
{
}
@Override
public void setRollbackOnly() throws IllegalStateException, SystemException
{
}
}
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_transaction_PlaceboTm.java
|
269
|
public class CommandMatchers
{
public static Matcher<? extends LogEntry> nodeCommandEntry( final int identifier, final int nodeId)
{
return new TypeSafeMatcher<LogEntry.Command>() {
@Override
public boolean matchesSafely( LogEntry.Command entry )
{
if( entry != null
&& entry.getIdentifier() == identifier
&& entry.getXaCommand() != null
&& entry.getXaCommand() instanceof Command.NodeCommand)
{
Command.NodeCommand cmd = (Command.NodeCommand) entry.getXaCommand();
return cmd.getKey() == nodeId;
}
return false;
}
@Override
public void describeTo( Description description )
{
description.appendText( String.format( "Command[%d, Node[%d,used=<Any boolean>,rel=<Any relchain>,prop=<Any relchain>]]",
identifier, nodeId ) );
}
};
}
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_nioneo_xa_CommandMatchers.java
|
270
|
public class NeoStoreFileListing
{
private final XaContainer xaContainer;
private final File storeDir;
private final LabelScanStore labelScanStore;
private final IndexingService indexingService;
private Pattern logFilePattern;
public NeoStoreFileListing(XaContainer xaContainer, File storeDir, LabelScanStore labelScanStore, IndexingService indexingService)
{
this.xaContainer = xaContainer;
this.storeDir = storeDir;
this.labelScanStore = labelScanStore;
this.indexingService = indexingService;
// storing this so we only do Pattern.compile once
this.logFilePattern = xaContainer.getLogicalLog().getHistoryFileNamePattern();
}
public ResourceIterator<File> listStoreFiles( boolean includeLogicalLogs ) throws IOException
{
Collection<File> files = new ArrayList<>();
gatherNeoStoreFiles( includeLogicalLogs, files );
Resource labelScanStoreSnapshot = gatherLabelScanStoreFiles( files );
Resource schemaIndexSnapshots = gatherSchemaIndexFiles( files );
return new StoreSnapshot( files.iterator(), labelScanStoreSnapshot, schemaIndexSnapshots );
}
public ResourceIterator<File> listStoreFiles() throws IOException
{
Collection<File> files = new ArrayList<>();
gatherNeoStoreFiles( false, files );
Resource labelScanStoreSnapshot = gatherLabelScanStoreFiles( files );
Resource schemaIndexSnapshots = gatherSchemaIndexFiles( files );
return new StoreSnapshot( files.iterator(), labelScanStoreSnapshot, schemaIndexSnapshots );
}
public ResourceIterator<File> listLogicalLogs()
{
Collection<File> files = new ArrayList<>();
for ( File dbFile : nonNull( storeDir.listFiles() ) )
{
if ( dbFile.isFile() )
{
if ( isLogicalLog( dbFile ) )
{
files.add( dbFile );
}
}
}
return new StoreSnapshot( files.iterator() );
}
private boolean isLogicalLog( File dbFile )
{
return logFilePattern.matcher( dbFile.getName() ).matches();
}
private Resource gatherSchemaIndexFiles(Collection<File> targetFiles) throws IOException
{
ResourceIterator<File> snapshot = indexingService.snapshotStoreFiles();
IteratorUtil.addToCollection(snapshot, targetFiles);
// Intentionally don't close the snapshot here, return it for closing by the consumer of
// the targetFiles list.
return snapshot;
}
private Resource gatherLabelScanStoreFiles( Collection<File> targetFiles ) throws IOException
{
ResourceIterator<File> snapshot = labelScanStore.snapshotStoreFiles();
IteratorUtil.addToCollection(snapshot, targetFiles);
// Intentionally don't close the snapshot here, return it for closing by the consumer of
// the targetFiles list.
return snapshot;
}
private void gatherNeoStoreFiles( boolean includeLogicalLogs, final Collection<File> targetFiles )
{
File neostoreFile = null;
for ( File dbFile : nonNull( storeDir.listFiles() ) )
{
String name = dbFile.getName();
// To filter for "neostore" is quite future proof, but the "index.db" file
// maybe should be
if ( dbFile.isFile() )
{
if ( name.equals( NeoStore.DEFAULT_NAME ) )
{
neostoreFile = dbFile;
}
else if ( neoStoreFile( name ) )
{
targetFiles.add( dbFile );
}
else if ( includeLogicalLogs && isLogicalLog( dbFile ) )
{
targetFiles.add( dbFile );
}
}
}
targetFiles.add( neostoreFile );
}
private boolean neoStoreFile( String name )
{
return (name.startsWith( NeoStore.DEFAULT_NAME ) || name.equals( IndexStore.INDEX_DB_FILE_NAME ))
&& !name.endsWith( ".id" );
}
private static class StoreSnapshot extends PrefetchingIterator<File> implements ResourceIterator<File>
{
private final Iterator<File> files;
private final Resource[] thingsToCloseWhenDone;
StoreSnapshot( Iterator<File> files, Resource... thingsToCloseWhenDone )
{
this.files = files;
this.thingsToCloseWhenDone = thingsToCloseWhenDone;
}
@Override
protected File fetchNextOrNull()
{
return files.hasNext() ? files.next() : null;
}
@Override
public void close()
{
for ( Resource resource : thingsToCloseWhenDone )
{
resource.close();
}
}
}
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_xa_NeoStoreFileListing.java
|
271
|
public class InMemoryLogBuffer implements LogBuffer, ReadableByteChannel
{
private byte[] bytes = new byte[1000];
private int writeIndex;
private int readIndex;
private ByteBuffer bufferForConversions = ByteBuffer.wrap( new byte[100] );
public InMemoryLogBuffer()
{
}
public void reset()
{
writeIndex = readIndex = 0;
}
public void truncateTo( int bytes )
{
writeIndex = bytes;
}
public int bytesWritten()
{
return writeIndex;
}
private void ensureArrayCapacityPlus( int plus )
{
while ( writeIndex+plus > bytes.length )
{
byte[] tmp = bytes;
bytes = new byte[bytes.length*2];
System.arraycopy( tmp, 0, bytes, 0, tmp.length );
}
}
private LogBuffer flipAndPut()
{
ensureArrayCapacityPlus( bufferForConversions.limit() );
System.arraycopy( bufferForConversions.flip().array(), 0, bytes, writeIndex,
bufferForConversions.limit() );
writeIndex += bufferForConversions.limit();
return this;
}
public LogBuffer put( byte b ) throws IOException
{
ensureArrayCapacityPlus( 1 );
bytes[writeIndex++] = b;
return this;
}
public LogBuffer putShort( short s ) throws IOException
{
((ByteBuffer) bufferForConversions.clear()).putShort( s );
return flipAndPut();
}
public LogBuffer putInt( int i ) throws IOException
{
((ByteBuffer) bufferForConversions.clear()).putInt( i );
return flipAndPut();
}
public LogBuffer putLong( long l ) throws IOException
{
((ByteBuffer) bufferForConversions.clear()).putLong( l );
return flipAndPut();
}
public LogBuffer putFloat( float f ) throws IOException
{
((ByteBuffer) bufferForConversions.clear()).putFloat( f );
return flipAndPut();
}
public LogBuffer putDouble( double d ) throws IOException
{
((ByteBuffer) bufferForConversions.clear()).putDouble( d );
return flipAndPut();
}
public LogBuffer put( byte[] bytes ) throws IOException
{
ensureArrayCapacityPlus( bytes.length );
System.arraycopy( bytes, 0, this.bytes, writeIndex, bytes.length );
writeIndex += bytes.length;
return this;
}
public LogBuffer put( char[] chars ) throws IOException
{
ensureConversionBufferCapacity( chars.length*2 );
bufferForConversions.clear();
for ( char ch : chars )
{
bufferForConversions.putChar( ch );
}
return flipAndPut();
}
private void ensureConversionBufferCapacity( int length )
{
if ( bufferForConversions.capacity() < length )
{
bufferForConversions = ByteBuffer.wrap( new byte[length*2] );
}
}
@Override
public void writeOut() throws IOException
{
}
public void force() throws IOException
{
}
public long getFileChannelPosition() throws IOException
{
return this.readIndex;
}
public StoreChannel getFileChannel()
{
throw new UnsupportedOperationException();
}
public boolean isOpen()
{
return true;
}
public void close() throws IOException
{
}
public int read( ByteBuffer dst ) throws IOException
{
if ( readIndex >= writeIndex )
{
return -1;
}
int actualLengthToRead = Math.min( dst.limit(), writeIndex-readIndex );
try
{
dst.put( bytes, readIndex, actualLengthToRead );
return actualLengthToRead;
}
finally
{
readIndex += actualLengthToRead;
}
}
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_nioneo_xa_LogTruncationTest.java
|
272
|
public class LogTruncationTest
{
InMemoryLogBuffer inMemoryBuffer = new InMemoryLogBuffer();
@Test
public void testSerializationInFaceOfLogTruncation() throws Exception
{
// TODO: add support for other commands and permutations as well...
assertHandlesLogTruncation( new Command.NodeCommand( null,
new NodeRecord( 12l, 13l, 13l ),
new NodeRecord( 0,0,0 ) ) );
assertHandlesLogTruncation( new Command.LabelTokenCommand( null, new LabelTokenRecord( 1 )) );
assertHandlesLogTruncation( new Command.NeoStoreCommand( null, new NeoStoreRecord() ) );
// assertHandlesLogTruncation( new Command.PropertyCommand( null,
// new PropertyRecord( 1, true, new NodeRecord(1, 12, 12, true) ),
// new PropertyRecord( 1, true, new NodeRecord(1, 12, 12, true) ) ) );
}
private void assertHandlesLogTruncation( XaCommand cmd ) throws IOException
{
inMemoryBuffer.reset();
cmd.writeToFile( inMemoryBuffer );
int bytesSuccessfullyWritten = inMemoryBuffer.bytesWritten();
assertEquals( cmd, Command.readCommand( null, null, inMemoryBuffer, ByteBuffer.allocate( 100 ) ));
bytesSuccessfullyWritten--;
while(bytesSuccessfullyWritten --> 0)
{
inMemoryBuffer.reset();
cmd.writeToFile( inMemoryBuffer );
inMemoryBuffer.truncateTo( bytesSuccessfullyWritten );
Command deserialized = Command.readCommand( null, null, inMemoryBuffer, ByteBuffer.allocate( 100 ) );
assertNull( "Deserialization did not detect log truncation! Record: " + cmd +
", deserialized: " + deserialized, deserialized );
}
}
public class InMemoryLogBuffer implements LogBuffer, ReadableByteChannel
{
private byte[] bytes = new byte[1000];
private int writeIndex;
private int readIndex;
private ByteBuffer bufferForConversions = ByteBuffer.wrap( new byte[100] );
public InMemoryLogBuffer()
{
}
public void reset()
{
writeIndex = readIndex = 0;
}
public void truncateTo( int bytes )
{
writeIndex = bytes;
}
public int bytesWritten()
{
return writeIndex;
}
private void ensureArrayCapacityPlus( int plus )
{
while ( writeIndex+plus > bytes.length )
{
byte[] tmp = bytes;
bytes = new byte[bytes.length*2];
System.arraycopy( tmp, 0, bytes, 0, tmp.length );
}
}
private LogBuffer flipAndPut()
{
ensureArrayCapacityPlus( bufferForConversions.limit() );
System.arraycopy( bufferForConversions.flip().array(), 0, bytes, writeIndex,
bufferForConversions.limit() );
writeIndex += bufferForConversions.limit();
return this;
}
public LogBuffer put( byte b ) throws IOException
{
ensureArrayCapacityPlus( 1 );
bytes[writeIndex++] = b;
return this;
}
public LogBuffer putShort( short s ) throws IOException
{
((ByteBuffer) bufferForConversions.clear()).putShort( s );
return flipAndPut();
}
public LogBuffer putInt( int i ) throws IOException
{
((ByteBuffer) bufferForConversions.clear()).putInt( i );
return flipAndPut();
}
public LogBuffer putLong( long l ) throws IOException
{
((ByteBuffer) bufferForConversions.clear()).putLong( l );
return flipAndPut();
}
public LogBuffer putFloat( float f ) throws IOException
{
((ByteBuffer) bufferForConversions.clear()).putFloat( f );
return flipAndPut();
}
public LogBuffer putDouble( double d ) throws IOException
{
((ByteBuffer) bufferForConversions.clear()).putDouble( d );
return flipAndPut();
}
public LogBuffer put( byte[] bytes ) throws IOException
{
ensureArrayCapacityPlus( bytes.length );
System.arraycopy( bytes, 0, this.bytes, writeIndex, bytes.length );
writeIndex += bytes.length;
return this;
}
public LogBuffer put( char[] chars ) throws IOException
{
ensureConversionBufferCapacity( chars.length*2 );
bufferForConversions.clear();
for ( char ch : chars )
{
bufferForConversions.putChar( ch );
}
return flipAndPut();
}
private void ensureConversionBufferCapacity( int length )
{
if ( bufferForConversions.capacity() < length )
{
bufferForConversions = ByteBuffer.wrap( new byte[length*2] );
}
}
@Override
public void writeOut() throws IOException
{
}
public void force() throws IOException
{
}
public long getFileChannelPosition() throws IOException
{
return this.readIndex;
}
public StoreChannel getFileChannel()
{
throw new UnsupportedOperationException();
}
public boolean isOpen()
{
return true;
}
public void close() throws IOException
{
}
public int read( ByteBuffer dst ) throws IOException
{
if ( readIndex >= writeIndex )
{
return -1;
}
int actualLengthToRead = Math.min( dst.limit(), writeIndex-readIndex );
try
{
dst.put( bytes, readIndex, actualLengthToRead );
return actualLengthToRead;
}
finally
{
readIndex += actualLengthToRead;
}
}
}
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_nioneo_xa_LogTruncationTest.java
|
273
|
class LazyIndexUpdates implements IndexUpdates
{
private final NodeStore nodeStore;
private final PropertyStore propertyStore;
private final Collection<List<PropertyCommand>> propCommands;
private final Map<Long, NodeCommand> nodeCommands;
private Collection<NodePropertyUpdate> updates;
public LazyIndexUpdates( NodeStore nodeStore, PropertyStore propertyStore,
Collection<List<PropertyCommand>> propCommands, Map<Long, NodeCommand> nodeCommands )
{
this.nodeStore = nodeStore;
this.propertyStore = propertyStore;
this.propCommands = propCommands;
this.nodeCommands = nodeCommands;
}
@Override
public Iterator<NodePropertyUpdate> iterator()
{
if ( updates == null )
{
updates = gatherPropertyAndLabelUpdates();
}
return updates.iterator();
}
@Override
public Set<Long> changedNodeIds()
{
Set<Long> nodeIds = new HashSet<>( nodeCommands.keySet() );
for ( List<PropertyCommand> propCmd : propCommands )
{
PropertyRecord record = propCmd.get( 0 ).getAfter();
if ( record.isNodeSet() )
{
nodeIds.add( record.getNodeId() );
}
}
return nodeIds;
}
private Collection<NodePropertyUpdate> gatherPropertyAndLabelUpdates()
{
Collection<NodePropertyUpdate> propertyUpdates = new HashSet<>();
Map<Pair<Long, Integer>, NodePropertyUpdate> propertyChanges = new HashMap<>();
gatherUpdatesFromPropertyCommands( propertyUpdates, propertyChanges );
gatherUpdatesFromNodeCommands( propertyUpdates, propertyChanges );
return propertyUpdates;
}
private void gatherUpdatesFromPropertyCommands( Collection<NodePropertyUpdate> updates,
Map<Pair<Long, Integer>, NodePropertyUpdate> propertyLookup )
{
for ( List<PropertyCommand> propertyCommands : propCommands )
{
// Let after state of first command here be representative of the whole group
PropertyRecord representative = propertyCommands.get( 0 ).getAfter();
if ( !representative.isNodeSet() )
{ // These changes wasn't for a node, skip them
continue;
}
long nodeId = representative.getNodeId();
long[] nodeLabelsBefore, nodeLabelsAfter;
NodeCommand nodeChanges = nodeCommands.get( nodeId );
if ( nodeChanges != null )
{
nodeLabelsBefore = parseLabelsField( nodeChanges.getBefore() ).get( nodeStore );
nodeLabelsAfter = parseLabelsField( nodeChanges.getAfter() ).get( nodeStore );
}
else
{
/* If the node doesn't exist here then we've most likely encountered this scenario:
* - TX1: Node N exists and has property record P
* - rotate log
* - TX2: P gets changed
* - TX3: N gets deleted (also P, but that's irrelevant for this scenario)
* - N is persisted to disk for some reason
* - crash
* - recover
* - TX2: P has changed and updates to indexes are gathered. As part of that it tries to read
* the labels of N (which does not exist a.t.m.).
*
* We can actually (if we disregard any potential inconsistencies) just assume that
* if this happens and we're in recovery mode that the node in question will be deleted
* in an upcoming transaction, so just skip this update.
*/
NodeRecord nodeRecord = nodeStore.getRecord( nodeId );
nodeLabelsBefore = nodeLabelsAfter = parseLabelsField( nodeRecord ).get( nodeStore );
}
propertyStore.toLogicalUpdates( updates,
Iterables.<PropertyRecordChange,PropertyCommand>cast( propertyCommands ),
nodeLabelsBefore, nodeLabelsAfter );
}
for ( NodePropertyUpdate update : updates )
{
if ( update.getUpdateMode() == UpdateMode.CHANGED )
{
propertyLookup.put( Pair.of( update.getNodeId(), update.getPropertyKeyId() ), update );
}
}
}
private void gatherUpdatesFromNodeCommands( Collection<NodePropertyUpdate> propertyUpdates,
Map<Pair<Long, Integer>, NodePropertyUpdate> propertyLookup )
{
for ( NodeCommand nodeCommand : nodeCommands.values() )
{
long nodeId = nodeCommand.getKey();
long[] labelsBefore = parseLabelsField( nodeCommand.getBefore() ).get( nodeStore );
long[] labelsAfter = parseLabelsField( nodeCommand.getAfter() ).get( nodeStore );
if ( nodeCommand.getMode() != Mode.UPDATE )
{
// For created and deleted nodes rely on the updates from the perspective of properties to cover it all
// otherwise we'll get duplicate update during recovery, or cannot load properties if deleted.
continue;
}
LabelChangeSummary summary = new LabelChangeSummary( labelsBefore, labelsAfter );
Iterator<DefinedProperty> properties = nodeFullyLoadProperties( nodeId );
while ( properties.hasNext() )
{
DefinedProperty property = properties.next();
int propertyKeyId = property.propertyKeyId();
if ( summary.hasAddedLabels() )
{
Object value = property.value();
propertyUpdates.add( add( nodeId, propertyKeyId, value, summary.getAddedLabels() ) );
}
if ( summary.hasRemovedLabels() )
{
NodePropertyUpdate propertyChange = propertyLookup.get( Pair.of( nodeId, propertyKeyId ) );
Object value = propertyChange == null ? property.value() : propertyChange.getValueBefore();
propertyUpdates.add( remove( nodeId, propertyKeyId, value, summary.getRemovedLabels() ) );
}
}
}
}
private Iterator<DefinedProperty> nodeFullyLoadProperties( long nodeId )
{
IteratingPropertyReceiver receiver = new IteratingPropertyReceiver();
NeoStoreTransaction.loadProperties( propertyStore, nodeCommands.get( nodeId ).getAfter().getNextProp(), receiver );
return receiver;
}
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_xa_LazyIndexUpdates.java
|
274
|
public class InterceptingWriteTransaction extends NeoStoreTransaction
{
private final TransactionInterceptor interceptor;
InterceptingWriteTransaction( long lastCommittedTxWhenTransactionStarted, XaLogicalLog log,
NeoStore neoStore, TransactionState state, CacheAccessBackDoor cacheAccess,
IndexingService indexingService, LabelScanStore labelScanStore,
TransactionInterceptor interceptor, IntegrityValidator validator,
KernelTransactionImplementation kernelTransaction, LockService locks )
{
super( lastCommittedTxWhenTransactionStarted, log, state, neoStore, cacheAccess, indexingService,
labelScanStore, validator, kernelTransaction, locks );
this.interceptor = interceptor;
}
@Override
protected void intercept( List<Command> commands )
{
super.intercept( commands );
for ( Command command : commands )
{
command.accept( interceptor );
}
interceptor.complete();
}
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_xa_InterceptingWriteTransaction.java
|
275
|
public class IntegrityValidatorTest
{
@Test
public void shouldValidateUniquenessIndexes() throws Exception
{
// Given
NeoStore store = mock( NeoStore.class );
IndexingService indexes = mock(IndexingService.class);
IntegrityValidator validator = new IntegrityValidator(store, indexes);
doThrow( new ConstraintVerificationFailedKernelException( null, new RuntimeException() ))
.when( indexes ).validateIndex( 2l );
UniquenessConstraintRule record = uniquenessConstraintRule( 1l, 1, 1, 2l );
// When
try
{
validator.validateSchemaRule( record );
fail("Should have thrown integrity error.");
}
catch(XAException e)
{
assertThat(e.errorCode, equalTo(XAException.XA_RBINTEGRITY));
}
}
@Test
public void deletingNodeWithRelationshipsIsNotAllowed() throws Exception
{
// Given
NeoStore store = mock( NeoStore.class );
IndexingService indexes = mock(IndexingService.class);
IntegrityValidator validator = new IntegrityValidator(store, indexes );
NodeRecord record = new NodeRecord( 1l, 1l, -1l );
record.setInUse( false );
// When
try
{
validator.validateNodeRecord( record );
fail("Should have thrown integrity error.");
}
catch(XAException e)
{
assertThat(e.errorCode, equalTo(XAException.XA_RBINTEGRITY));
}
}
@Test
public void transactionsStartedBeforeAConstraintWasCreatedAreDisallowed() throws Exception
{
// Given
NeoStore store = mock( NeoStore.class );
IndexingService indexes = mock(IndexingService.class);
when(store.getLatestConstraintIntroducingTx()).thenReturn( 10l );
IntegrityValidator validator = new IntegrityValidator( store, indexes );
// When
try
{
validator.validateTransactionStartKnowledge( 1 );
fail("Should have thrown integrity error.");
}
catch(XAException e)
{
assertThat(e.errorCode, equalTo(XAException.XA_RBINTEGRITY));
}
}
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_nioneo_xa_IntegrityValidatorTest.java
|
276
|
public class IntegrityValidator
{
private final NeoStore neoStore;
private final IndexingService indexes;
public IntegrityValidator( NeoStore neoStore, IndexingService indexes )
{
this.neoStore = neoStore;
this.indexes = indexes;
}
public void validateNodeRecord( NodeRecord record ) throws XAException
{
if ( !record.inUse() && record.getNextRel() != Record.NO_NEXT_RELATIONSHIP.intValue() )
{
throw Exceptions.withCause( new XAException( XAException.XA_RBINTEGRITY ),
new ConstraintViolationException(
"Node record " + record + " still has relationships" ) );
}
}
public void validateTransactionStartKnowledge( long lastCommittedTxWhenTransactionStarted )
throws XAException
{
if( lastCommittedTxWhenTransactionStarted < neoStore.getLatestConstraintIntroducingTx() )
{
// Constraints have changed since the transaction begun
// This should be a relatively uncommon case, window for this happening is a few milliseconds when an admin
// explicitly creates a constraint, after the index has been populated. We can improve this later on by
// replicating the constraint validation logic down here, or rethinking where we validate constraints.
// For now, we just kill these transactions.
throw Exceptions.withCause( new XAException( XAException.XA_RBINTEGRITY ),
new ConstraintViolationException(
"Database constraints have changed after this transaction started, which is not yet " +
"supported. Please retry your transaction to ensure all constraints are executed." ) );
}
}
public void validateSchemaRule( SchemaRule schemaRule ) throws XAException
{
if(schemaRule instanceof UniquenessConstraintRule )
{
try
{
indexes.validateIndex( ((UniquenessConstraintRule)schemaRule).getOwnedIndex() );
}
catch ( ConstraintVerificationFailedKernelException e )
{
throw Exceptions.withCause( new XAException( XAException.XA_RBINTEGRITY ), e);
}
catch ( IndexNotFoundKernelException | IndexPopulationFailedKernelException e )
{
// We don't expect this to occur, and if they do, it is because we are in a very bad state - out of
// disk or index corruption, or similar. This will kill the database such that it can be shut down
// and have recovery performed. It's the safest bet to avoid loosing data.
throw Exceptions.withCause( new XAException( XAException.XAER_RMERR ), e);
}
}
}
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_xa_IntegrityValidator.java
|
277
|
public class DefaultSchemaIndexProviderMap implements SchemaIndexProviderMap
{
private final SchemaIndexProvider indexProvider;
public DefaultSchemaIndexProviderMap( SchemaIndexProvider indexProvider )
{
this.indexProvider = indexProvider;
}
@Override
public SchemaIndexProvider getDefaultProvider()
{
return indexProvider;
}
@Override
public SchemaIndexProvider apply( SchemaIndexProvider.Descriptor descriptor )
{
if ( indexProvider.getProviderDescriptor().getKey().equals( descriptor.getKey() ) )
return indexProvider;
throw new IllegalArgumentException( "Tried to get index provider for an existing index with provider " +
descriptor + " whereas the default and only supported provider in this session is " +
indexProvider.getProviderDescriptor() );
}
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_xa_DefaultSchemaIndexProviderMap.java
|
278
|
return new TypeSafeMatcher<LogEntry.Command>() {
@Override
public boolean matchesSafely( LogEntry.Command entry )
{
if( entry != null
&& entry.getIdentifier() == identifier
&& entry.getXaCommand() != null
&& entry.getXaCommand() instanceof Command.NodeCommand)
{
Command.NodeCommand cmd = (Command.NodeCommand) entry.getXaCommand();
return cmd.getKey() == nodeId;
}
return false;
}
@Override
public void describeTo( Description description )
{
description.appendText( String.format( "Command[%d, Node[%d,used=<Any boolean>,rel=<Any relchain>,prop=<Any relchain>]]",
identifier, nodeId ) );
}
};
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_nioneo_xa_CommandMatchers.java
|
279
|
static class SchemaRuleCommand extends Command
{
private final NeoStore neoStore;
private final IndexingService indexes;
private final SchemaStore store;
private final Collection<DynamicRecord> recordsBefore;
private final Collection<DynamicRecord> recordsAfter;
private final SchemaRule schemaRule;
private long txId;
SchemaRuleCommand( NeoStore neoStore, SchemaStore store, IndexingService indexes,
Collection<DynamicRecord> recordsBefore, Collection<DynamicRecord> recordsAfter,
SchemaRule schemaRule, long txId )
{
super( first( recordsAfter ).getId(), Mode.fromRecordState( first( recordsAfter ) ) );
this.neoStore = neoStore;
this.indexes = indexes;
this.store = store;
this.recordsBefore = recordsBefore;
this.recordsAfter = recordsAfter;
this.schemaRule = schemaRule;
this.txId = txId;
}
@Override
public void accept( CommandRecordVisitor visitor )
{
visitor.visitSchemaRule( recordsAfter );
}
@Override
public String toString()
{
if ( schemaRule != null )
{
return getMode() + ":" + schemaRule.toString();
}
return "SchemaRule" + recordsAfter;
}
@Override
void removeFromCache( CacheAccessBackDoor cacheAccess )
{
cacheAccess.removeSchemaRuleFromCache( getKey() );
}
Collection<DynamicRecord> getRecordsAfter()
{
return unmodifiableCollection( recordsAfter );
}
@Override
public void execute()
{
for ( DynamicRecord record : recordsAfter )
{
store.updateRecord( record );
}
if ( schemaRule instanceof IndexRule )
{
switch ( getMode() )
{
case UPDATE:
// Shouldn't we be more clear about that we are waiting for an index to come online here?
// right now we just assume that an update to index records means wait for it to be online.
if ( ((IndexRule) schemaRule).isConstraintIndex() )
{
try
{
indexes.activateIndex( schemaRule.getId() );
}
catch ( IndexNotFoundKernelException | IndexActivationFailedKernelException |
IndexPopulationFailedKernelException e )
{
throw new IllegalStateException( "Unable to enable constraint, backing index is not online.", e );
}
}
break;
case CREATE:
indexes.createIndex( (IndexRule) schemaRule );
break;
case DELETE:
indexes.dropIndex( (IndexRule)schemaRule );
break;
default:
throw new IllegalStateException( getMode().name() );
}
}
if( schemaRule instanceof UniquenessConstraintRule )
{
switch ( getMode() )
{
case UPDATE:
case CREATE:
neoStore.setLatestConstraintIntroducingTx( txId );
break;
case DELETE:
break;
default:
throw new IllegalStateException( getMode().name() );
}
}
}
@Override
public void writeToFile( LogBuffer buffer ) throws IOException
{
buffer.put( SCHEMA_RULE_COMMAND );
writeDynamicRecords( buffer, recordsBefore );
writeDynamicRecords( buffer, recordsAfter );
buffer.put( first( recordsAfter ).isCreated() ? (byte) 1 : 0);
buffer.putLong( txId );
}
public SchemaRule getSchemaRule()
{
return schemaRule;
}
public long getTxId()
{
return txId;
}
public void setTxId( long txId )
{
this.txId = txId;
}
static Command readFromFile( NeoStore neoStore, IndexingService indexes, ReadableByteChannel byteChannel,
ByteBuffer buffer ) throws IOException
{
Collection<DynamicRecord> recordsBefore = new ArrayList<>();
readDynamicRecords( byteChannel, buffer, recordsBefore, COLLECTION_DYNAMIC_RECORD_ADDER );
Collection<DynamicRecord> recordsAfter = new ArrayList<>();
readDynamicRecords( byteChannel, buffer, recordsAfter, COLLECTION_DYNAMIC_RECORD_ADDER );
if ( !readAndFlip( byteChannel, buffer, 1 ) )
{
throw new IllegalStateException( "Missing SchemaRule.isCreated flag in deserialization" );
}
byte isCreated = buffer.get();
if ( 1 == isCreated )
{
for ( DynamicRecord record : recordsAfter )
{
record.setCreated();
}
}
if ( !readAndFlip( byteChannel, buffer, 8 ) )
{
throw new IllegalStateException( "Missing SchemaRule.txId in deserialization" );
}
long txId = buffer.getLong();
SchemaRule rule = first( recordsAfter ).inUse() ?
readSchemaRule( recordsAfter ) :
readSchemaRule( recordsBefore );
return new SchemaRuleCommand( neoStore, neoStore != null ? neoStore.getSchemaStore() : null,
indexes, recordsBefore, recordsAfter, rule, txId );
}
private static SchemaRule readSchemaRule( Collection<DynamicRecord> recordsBefore )
{
assert first(recordsBefore).inUse() : "Asked to deserialize schema records that were not in use.";
SchemaRule rule;
ByteBuffer deserialized = AbstractDynamicStore.concatData( recordsBefore, new byte[100] );
try
{
rule = SchemaRule.Kind.deserialize( first( recordsBefore ).getId(), deserialized );
}
catch ( MalformedSchemaRuleException e )
{
// TODO This is bad. We should probably just shut down if that happens
throw launderedException( e );
}
return rule;
}
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_xa_Command.java
|
280
|
/**
 * Unit tests for NeoStoreFileListing. All file system interaction is mocked:
 * the store directory, its files and the label-scan/index snapshot iterators
 * are Mockito mocks, so the tests only verify *which* paths are reported for
 * the different listing operations and that snapshot resources are closed.
 */
public class NeoStoreFileListingTest
{
    private XaContainer xaContainer;
    private LabelScanStore labelScanStore;
    private IndexingService indexingService;
    private File storeDir;

    // The files one typically finds in a store directory; shared fixture for
    // all tests below. Listing operations must pick the right subset of these.
    private final static String[] STANDARD_STORE_DIR_FILES = new String[]{
            "active_tx_log",
            "lock",
            "messages.log",
            "neostore",
            "neostore.id",
            "neostore.labeltokenstore.db",
            "neostore.labeltokenstore.db.id",
            "neostore.labeltokenstore.db.names",
            "neostore.labeltokenstore.db.names.id",
            "neostore.nodestore.db",
            "neostore.nodestore.db.id",
            "neostore.nodestore.db.labels",
            "neostore.nodestore.db.labels.id",
            "neostore.propertystore.db",
            "neostore.propertystore.db.arrays",
            "neostore.propertystore.db.arrays.id",
            "neostore.propertystore.db.id",
            "neostore.propertystore.db.index",
            "neostore.propertystore.db.index.id",
            "neostore.propertystore.db.index.keys",
            "neostore.propertystore.db.index.keys.id",
            "neostore.propertystore.db.strings",
            "neostore.propertystore.db.strings.id",
            "neostore.relationshipstore.db",
            "neostore.relationshipstore.db.id",
            "neostore.relationshiptypestore.db",
            "neostore.relationshiptypestore.db.id",
            "neostore.relationshiptypestore.db.names",
            "neostore.relationshiptypestore.db.names.id",
            "neostore.schemastore.db",
            "neostore.schemastore.db.id",
            "nioneo_logical.log.1",
            "nioneo_logical.log.active",
            "nioneo_logical.log.v0",
            "nioneo_logical.log.v1",
            "nioneo_logical.log.v2",
            "store_lock",
            "tm_tx_log.1"};

    // Directories typically present in a store directory.
    private final static String[] STANDARD_STORE_DIR_DIRECTORIES = new String[]{ "schema", "index", "branched"};

    /**
     * Wires up all mocks. The logical log mock answers with a real history file
     * name pattern so the listing code can match versioned log files.
     */
    @Before
    public void setUp() throws IOException
    {
        xaContainer = mock( XaContainer.class );
        labelScanStore = mock( LabelScanStore.class );
        indexingService = mock( IndexingService.class );
        storeDir = mock( File.class );
        XaLogicalLog xaLogicalLog = mock( XaLogicalLog.class );
        when( xaLogicalLog.getHistoryFileNamePattern()).thenReturn( getHistoryFileNamePattern( "nioneo_logical.log" ) );
        when( xaContainer.getLogicalLog() ).thenReturn( xaLogicalLog );
        // Defaults, overridden in individual tests
        filesInStoreDirAre( new String[]{}, new String[]{} );
        scanStoreFilesAre( new String[]{} );
        indexFilesAre( new String[]{} );
    }

    /** listLogicalLogs() must return only the versioned logical log files. */
    @Test
    public void shouldOnlyListLogicalLogs() throws Exception
    {
        // Given
        filesInStoreDirAre( STANDARD_STORE_DIR_FILES, STANDARD_STORE_DIR_DIRECTORIES );
        NeoStoreFileListing fileListing = newFileListing();
        // When
        ResourceIterator<File> result = fileListing.listLogicalLogs();
        // Then
        assertThat( asSetOfPaths( result ), equalTo( asSet(
                "nioneo_logical.log.v0",
                "nioneo_logical.log.v1",
                "nioneo_logical.log.v2") ) );
    }

    /** No-arg listStoreFiles() must return the store files and nothing else. */
    @Test
    public void shouldOnlyListNeoStoreFiles() throws Exception
    {
        // Given
        filesInStoreDirAre( STANDARD_STORE_DIR_FILES, STANDARD_STORE_DIR_DIRECTORIES );
        NeoStoreFileListing fileListing = newFileListing();
        // When
        ResourceIterator<File> result = fileListing.listStoreFiles( );
        // Then
        assertThat( asSetOfPaths( result ), equalTo( asSet(
                "neostore.labeltokenstore.db",
                "neostore.labeltokenstore.db.names",
                "neostore.nodestore.db",
                "neostore.nodestore.db.labels",
                "neostore.propertystore.db",
                "neostore.propertystore.db.arrays",
                "neostore.propertystore.db.index",
                "neostore.propertystore.db.index.keys",
                "neostore.propertystore.db.strings",
                "neostore.relationshipstore.db",
                "neostore.relationshiptypestore.db",
                "neostore.relationshiptypestore.db.names",
                "neostore.schemastore.db",
                "neostore" ) ) );
    }

    /** listStoreFiles( false ) excludes logical logs. */
    @Test
    public void shouldListNeoStoreFiles() throws Exception
    {
        // Given
        filesInStoreDirAre( STANDARD_STORE_DIR_FILES, STANDARD_STORE_DIR_DIRECTORIES );
        NeoStoreFileListing fileListing = newFileListing();
        // When
        ResourceIterator<File> result = fileListing.listStoreFiles( false );
        // Then
        assertThat( asSetOfPaths( result ), equalTo( asSet(
                "neostore.labeltokenstore.db",
                "neostore.labeltokenstore.db.names",
                "neostore.nodestore.db",
                "neostore.nodestore.db.labels",
                "neostore.propertystore.db",
                "neostore.propertystore.db.arrays",
                "neostore.propertystore.db.index",
                "neostore.propertystore.db.index.keys",
                "neostore.propertystore.db.strings",
                "neostore.relationshipstore.db",
                "neostore.relationshiptypestore.db",
                "neostore.relationshiptypestore.db.names",
                "neostore.schemastore.db",
                "neostore" ) ) );
    }

    /** listStoreFiles( true ) includes the versioned logical logs as well. */
    @Test
    public void shouldListNeoStoreFilesAndLogicalLogs() throws Exception
    {
        // Given
        filesInStoreDirAre( STANDARD_STORE_DIR_FILES, STANDARD_STORE_DIR_DIRECTORIES );
        NeoStoreFileListing fileListing = newFileListing();
        // When
        ResourceIterator<File> result = fileListing.listStoreFiles( true );
        // Then
        assertThat( asSetOfPaths( result ), equalTo(asSet(
                "neostore.labeltokenstore.db",
                "neostore.labeltokenstore.db.names",
                "neostore.nodestore.db",
                "neostore.nodestore.db.labels",
                "neostore.propertystore.db",
                "neostore.propertystore.db.arrays",
                "neostore.propertystore.db.index",
                "neostore.propertystore.db.index.keys",
                "neostore.propertystore.db.strings",
                "neostore.relationshipstore.db",
                "neostore.relationshiptypestore.db",
                "neostore.relationshiptypestore.db.names",
                "neostore.schemastore.db",
                "neostore",
                "nioneo_logical.log.v0",
                "nioneo_logical.log.v1",
                "nioneo_logical.log.v2" )));
    }

    /** Label scan store and schema index snapshot files are merged into the store file listing. */
    @Test
    public void shouldListLabelScanStoreAndSchemaIndexes() throws Exception
    {
        // Given
        filesInStoreDirAre( STANDARD_STORE_DIR_FILES, STANDARD_STORE_DIR_DIRECTORIES );
        scanStoreFilesAre( new String[]{"blah/scan.store", "scan.more"} );
        indexFilesAre( new String[]{"schema/index/my.index", "schema/index/their.index"} );
        NeoStoreFileListing fileListing = newFileListing();
        // When
        ResourceIterator<File> result = fileListing.listStoreFiles( false );
        // Then
        assertThat( asSetOfPaths( result ), equalTo(asSet(
                "blah/scan.store",
                "scan.more",
                "schema/index/my.index",
                "schema/index/their.index",
                "neostore.labeltokenstore.db",
                "neostore.labeltokenstore.db.names",
                "neostore.nodestore.db",
                "neostore.nodestore.db.labels",
                "neostore.propertystore.db",
                "neostore.propertystore.db.arrays",
                "neostore.propertystore.db.index",
                "neostore.propertystore.db.index.keys",
                "neostore.propertystore.db.strings",
                "neostore.relationshipstore.db",
                "neostore.relationshiptypestore.db",
                "neostore.relationshiptypestore.db.names",
                "neostore.schemastore.db",
                "neostore")));
    }

    /** Closing the returned iterator must close the underlying snapshot iterators. */
    @Test
    public void shouldCloseIndexAndLabelScanSnapshots() throws Exception
    {
        // Given
        filesInStoreDirAre( STANDARD_STORE_DIR_FILES, STANDARD_STORE_DIR_DIRECTORIES );
        ResourceIterator<File> scanSnapshot = scanStoreFilesAre( new String[]{"blah/scan.store", "scan.more"} );
        ResourceIterator<File> indexSnapshot = indexFilesAre( new String[]{"schema/index/my.index" } );
        NeoStoreFileListing fileListing = newFileListing();
        ResourceIterator<File> result = fileListing.listStoreFiles( false );
        // When
        result.close();
        // Then
        verify( scanSnapshot ).close();
        verify( indexSnapshot ).close();
    }

    // Creates the object under test, wired to the mocks configured in setUp().
    private NeoStoreFileListing newFileListing()
    {
        return new NeoStoreFileListing( xaContainer, storeDir, labelScanStore, indexingService );
    }

    // Drains the iterator and collects the unique paths it produced.
    private Set<String> asSetOfPaths( ResourceIterator<File> result )
    {
        List<String> fnames = new ArrayList<>();
        while(result.hasNext())
        {
            fnames.add( result.next().getPath() );
        }
        return asUniqueSet( fnames );
    }

    // Makes the mocked store directory list the given file and directory names.
    private void filesInStoreDirAre( String[] filenames, String[] dirs )
    {
        ArrayList<File> files = new ArrayList<>();
        mockFiles( filenames, files, false );
        mockFiles( dirs, files, true );
        when(storeDir.listFiles()).thenReturn( files.toArray( new File[files.size()] ) );
    }

    // Makes the mocked label scan store return a (spied) snapshot of the given files.
    private ResourceIterator<File> scanStoreFilesAre( String[] fileNames ) throws IOException
    {
        ArrayList<File> files = new ArrayList<>();
        mockFiles( fileNames, files, false );
        ResourceIterator<File> snapshot = spy( asResourceIterator( files.iterator() ) );
        when(labelScanStore.snapshotStoreFiles()).thenReturn( snapshot );
        return snapshot;
    }

    // Makes the mocked indexing service return a (spied) snapshot of the given files.
    private ResourceIterator<File> indexFilesAre( String[] fileNames ) throws IOException
    {
        ArrayList<File> files = new ArrayList<>();
        mockFiles( fileNames, files, false );
        ResourceIterator<File> snapshot = spy(asResourceIterator( files.iterator() ));
        when(indexingService.snapshotStoreFiles()).thenReturn( snapshot );
        return snapshot;
    }

    // Builds File mocks for the given path names. getName() returns the last
    // path segment, getPath() the full (relative) path.
    private void mockFiles( String[] filenames, ArrayList<File> files, boolean isDirectories )
    {
        for ( String filename : filenames )
        {
            File file = mock( File.class );
            String[] fileNameParts = filename.split( "/" );
            when(file.getName()).thenReturn( fileNameParts[fileNameParts.length-1] );
            when(file.isFile()).thenReturn( !isDirectories );
            when(file.isDirectory()).thenReturn( isDirectories );
            when(file.exists()).thenReturn( true );
            when(file.getPath()).thenReturn( filename );
            files.add( file );
        }
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_nioneo_xa_NeoStoreFileListingTest.java
|
281
|
/**
 * Command for creating/updating a relationship type token record.
 * Serialized as: command byte, id (int), in-use flag (byte), name id (int),
 * followed by the token's dynamic name records.
 */
static class RelationshipTypeTokenCommand extends Command
{
    private final RelationshipTypeTokenRecord record;
    private final RelationshipTypeTokenStore store;

    RelationshipTypeTokenCommand( RelationshipTypeTokenStore store,
                                  RelationshipTypeTokenRecord record )
    {
        super( record.getId(), Mode.fromRecordState( record ) );
        this.record = record;
        this.store = store;
    }

    @Override
    public void accept( CommandRecordVisitor visitor )
    {
        visitor.visitRelationshipTypeToken( record );
    }

    @Override
    public String toString()
    {
        return record.toString();
    }

    @Override
    void removeFromCache( CacheAccessBackDoor cacheAccess )
    {
        // no-op: token records are not kept in the entity cache
    }

    /** Applies the command by writing the record to the token store. */
    @Override
    public void execute()
    {
        store.updateRecord( record );
    }

    @Override
    public void writeToFile( LogBuffer buffer ) throws IOException
    {
        // id+in_use(byte)+type_blockId(int)+nr_type_records(int)
        byte inUse = record.inUse() ? Record.IN_USE.byteValue()
                : Record.NOT_IN_USE.byteValue();
        buffer.put( REL_TYPE_COMMAND );
        buffer.putInt( record.getId() ).put( inUse ).putInt( record.getNameId() );
        writeDynamicRecords( buffer, record.getNameRecords() );
    }

    /**
     * Reads a command in the format produced by {@link #writeToFile(LogBuffer)}.
     *
     * @return the command, or null if the channel ended mid-record
     * @throws IOException on read failure or an illegal in-use flag
     */
    public static Command readFromFile( NeoStore neoStore,
        ReadableByteChannel byteChannel, ByteBuffer buffer )
        throws IOException
    {
        // id+in_use(byte)+type_blockId(int)+nr_type_records(int)
        if ( !readAndFlip( byteChannel, buffer, 13 ) )
        {
            return null;
        }
        int id = buffer.getInt();
        byte inUseFlag = buffer.get();
        boolean inUse = false;
        if ( (inUseFlag & Record.IN_USE.byteValue()) ==
            Record.IN_USE.byteValue() )
        {
            inUse = true;
        }
        else if ( inUseFlag != Record.NOT_IN_USE.byteValue() )
        {
            throw new IOException( "Illegal in use flag: " + inUseFlag );
        }
        RelationshipTypeTokenRecord record = new RelationshipTypeTokenRecord( id );
        record.setInUse( inUse );
        record.setNameId( buffer.getInt() );
        int nrTypeRecords = buffer.getInt();
        for ( int i = 0; i < nrTypeRecords; i++ )
        {
            DynamicRecord dr = readDynamicRecord( byteChannel, buffer );
            if ( dr == null )
            {
                // Truncated stream: bail out and let the caller treat the
                // log as ending here.
                return null;
            }
            record.addNameRecord( dr );
        }
        return new RelationshipTypeTokenCommand(
                neoStore == null ? null : neoStore.getRelationshipTypeStore(), record );
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_xa_Command.java
|
282
|
/**
 * Command for creating/updating/deleting a relationship record. Also patches
 * the node caches for deleted relationships, which is why it keeps a
 * "before update" snapshot of the record.
 */
static class RelationshipCommand extends Command
{
    private final RelationshipRecord record;
    // before update stores the record as it looked before the command is executed
    private RelationshipRecord beforeUpdate;
    private final RelationshipStore store;

    RelationshipCommand( RelationshipStore store, RelationshipRecord record )
    {
        super( record.getId(), Mode.fromRecordState( record ) );
        this.record = record;
        // the default (common) case is that the record to be written is complete and not from recovery or HA
        this.beforeUpdate = record;
        this.store = store;
    }

    @Override
    public void accept( CommandRecordVisitor visitor )
    {
        visitor.visitRelationship( record );
    }

    @Override
    public String toString()
    {
        return record.toString();
    }

    /**
     * Evicts this relationship (and, for deletions, its end nodes) from the
     * cache, patching the nodes' relationship chains where possible.
     */
    @Override
    void removeFromCache( CacheAccessBackDoor cacheAccess )
    {
        cacheAccess.removeRelationshipFromCache( getKey() );
        /*
         * If isRecovered() then beforeUpdate is the correct one UNLESS this is the second time this command
         * is executed, where it might have been actually written out to disk so the fields are already -1. So
         * we still need to check.
         * If !isRecovered() then beforeUpdate is the same as record, so we are still ok.
         * We don't check for !inUse() though because that is implicit in the call of this method.
         * The above is a hand waiving proof that the conditions that lead to the patchDeletedRelationshipNodes()
         * in the if below are the same as in RelationshipCommand.execute() so it should be safe.
         */
        if ( beforeUpdate.getFirstNode() != -1 || beforeUpdate.getSecondNode() != -1 )
        {
            cacheAccess.patchDeletedRelationshipNodes( getKey(), beforeUpdate.getFirstNode(),
                    beforeUpdate.getFirstNextRel(), beforeUpdate.getSecondNode(), beforeUpdate.getSecondNextRel() );
        }
        if ( record.getFirstNode() != -1 || record.getSecondNode() != -1 )
        {
            cacheAccess.removeNodeFromCache( record.getFirstNode() );
            cacheAccess.removeNodeFromCache( record.getSecondNode() );
        }
    }

    @Override
    public void execute()
    {
        if ( isRecovered() && !record.inUse() )
        {
            /*
             * If read from a log (either on recovery or HA) then all the fields but for the Id are -1. If the
             * record is deleted, then we'll need to invalidate the cache and patch the node's relationship chains.
             * Therefore, we need to read the record from the store. This is not too expensive, since the window
             * will be either in memory or will soon be anyway and we are just saving the write the trouble.
             */
            beforeUpdate = store.forceGetRaw( record.getId() );
        }
        store.updateRecord( record );
    }

    /**
     * Serialized as: command byte, id (long), in-use flag (byte) and, only for
     * in-use records, the two node ids, type and the four chain pointers plus
     * the first property id (7 longs + 1 int).
     */
    @Override
    public void writeToFile( LogBuffer buffer ) throws IOException
    {
        byte inUse = record.inUse() ? Record.IN_USE.byteValue()
            : Record.NOT_IN_USE.byteValue();
        buffer.put( REL_COMMAND );
        buffer.putLong( record.getId() );
        buffer.put( inUse );
        if ( record.inUse() )
        {
            buffer.putLong( record.getFirstNode() )
                    .putLong( record.getSecondNode() )
                    .putInt( record.getType() )
                    .putLong( record.getFirstPrevRel() )
                    .putLong( record.getFirstNextRel() )
                    .putLong( record.getSecondPrevRel() )
                    .putLong( record.getSecondNextRel() )
                    .putLong( record.getNextProp() )
                    ;
        }
    }

    /**
     * Reads a command in the format produced by {@link #writeToFile(LogBuffer)}.
     *
     * @return the command, or null if the channel ended mid-record
     */
    public static Command readFromFile( NeoStore neoStore,
        ReadableByteChannel byteChannel, ByteBuffer buffer )
        throws IOException
    {
        if ( !readAndFlip( byteChannel, buffer, 9 ) )
        {
            return null;
        }
        long id = buffer.getLong();
        byte inUseFlag = buffer.get();
        boolean inUse = false;
        if ( (inUseFlag & Record.IN_USE.byteValue()) == Record.IN_USE
            .byteValue() )
        {
            inUse = true;
        }
        // NOTE(review): when the IN_USE bit is clear, (inUseFlag & IN_USE) is 0;
        // if NOT_IN_USE is 0 this branch can never fire, so illegal flags would
        // be silently accepted here — confirm whether that is intended.
        else if ( (inUseFlag & Record.IN_USE.byteValue()) != Record.NOT_IN_USE
            .byteValue() )
        {
            throw new IOException( "Illegal in use flag: " + inUseFlag );
        }
        RelationshipRecord record;
        if ( inUse )
        {
            // 2 node ids + 4 chain pointers + next prop (7 longs) + type (int) = 60 bytes
            if ( !readAndFlip( byteChannel, buffer, 60 ) )
            {
                return null;
            }
            record = new RelationshipRecord( id, buffer.getLong(), buffer
                .getLong(), buffer.getInt() );
            record.setInUse( inUse );
            record.setFirstPrevRel( buffer.getLong() );
            record.setFirstNextRel( buffer.getLong() );
            record.setSecondPrevRel( buffer.getLong() );
            record.setSecondNextRel( buffer.getLong() );
            record.setNextProp( buffer.getLong() );
        }
        else
        {
            // Deleted record: only the id is known; all other fields are -1
            record = new RelationshipRecord( id, -1, -1, -1 );
            record.setInUse( false );
        }
        return new RelationshipCommand( neoStore == null ? null : neoStore.getRelationshipStore(),
                record );
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_xa_Command.java
|
283
|
/**
 * Command for creating/updating a property key token record.
 * Serialized as: command byte, id (int), in-use flag (byte), property count
 * (int), name id (int), followed by the dynamic name records (or a 0 count
 * for a light record).
 */
static class PropertyKeyTokenCommand extends Command
{
    private final PropertyKeyTokenRecord record;
    private final PropertyKeyTokenStore store;

    PropertyKeyTokenCommand( PropertyKeyTokenStore store,
                             PropertyKeyTokenRecord record )
    {
        super( record.getId(), Mode.fromRecordState( record ) );
        this.record = record;
        this.store = store;
    }

    @Override
    public void accept( CommandRecordVisitor visitor )
    {
        visitor.visitPropertyKeyToken( record );
    }

    @Override
    public String toString()
    {
        return record.toString();
    }

    @Override
    void removeFromCache( CacheAccessBackDoor cacheAccess )
    {
        // no-op: token records are not kept in the entity cache
    }

    /** Applies the command by writing the record to the token store. */
    @Override
    public void execute()
    {
        store.updateRecord( record );
    }

    @Override
    public void writeToFile( LogBuffer buffer ) throws IOException
    {
        // id+in_use(byte)+count(int)+key_blockId(int)+nr_key_records(int)
        byte inUse = record.inUse() ? Record.IN_USE.byteValue()
            : Record.NOT_IN_USE.byteValue();
        buffer.put( PROP_INDEX_COMMAND );
        buffer.putInt( record.getId() );
        buffer.put( inUse );
        buffer.putInt( record.getPropertyCount() ).putInt( record.getNameId() );
        if ( record.isLight() )
        {
            // Light record: name records were never loaded; write a zero count
            buffer.putInt( 0 );
        }
        else
        {
            writeDynamicRecords( buffer, record.getNameRecords() );
        }
    }

    /**
     * Reads a command in the format produced by {@link #writeToFile(LogBuffer)}.
     *
     * @return the command, or null if the channel ended mid-record
     * @throws IOException on read failure or an illegal in-use flag
     */
    public static Command readFromFile( NeoStore neoStore, ReadableByteChannel byteChannel,
                                        ByteBuffer buffer ) throws IOException
    {
        // id+in_use(byte)+count(int)+key_blockId(int)
        if ( !readAndFlip( byteChannel, buffer, 13 ) )
        {
            return null;
        }
        int id = buffer.getInt();
        byte inUseFlag = buffer.get();
        boolean inUse = false;
        if ( (inUseFlag & Record.IN_USE.byteValue()) == Record.IN_USE
            .byteValue() )
        {
            inUse = true;
        }
        else if ( inUseFlag != Record.NOT_IN_USE.byteValue() )
        {
            throw new IOException( "Illegal in use flag: " + inUseFlag );
        }
        PropertyKeyTokenRecord record = new PropertyKeyTokenRecord( id );
        record.setInUse( inUse );
        record.setPropertyCount( buffer.getInt() );
        record.setNameId( buffer.getInt() );
        if ( !readDynamicRecords( byteChannel, buffer, record, PROPERTY_INDEX_DYNAMIC_RECORD_ADDER ) )
        {
            return null;
        }
        return new PropertyKeyTokenCommand( neoStore == null ? null : neoStore.getPropertyStore()
                .getPropertyKeyTokenStore(), record );
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_xa_Command.java
|
284
|
/**
 * Command for creating/updating/deleting a property record. Carries both the
 * before and after state of the record so the change can be inspected and the
 * right entity (node or relationship) evicted from the cache.
 */
static class PropertyCommand extends Command implements PropertyRecordChange
{
    private final PropertyStore store;
    private final PropertyRecord before;
    private final PropertyRecord after;

    // TODO as optimization the deserialized key/values could be passed in here
    // so that the cost of deserializing them only applies in recovery/HA
    PropertyCommand( PropertyStore store, PropertyRecord before, PropertyRecord after )
    {
        super( after.getId(), Mode.fromRecordState( after ) );
        this.store = store;
        this.before = before;
        this.after = after;
    }

    @Override
    public void accept( CommandRecordVisitor visitor )
    {
        visitor.visitProperty( after );
    }

    @Override
    public String toString()
    {
        return beforeAndAfterToString( before, after );
    }

    /** Evicts the owning node or relationship (whichever is set) from the cache. */
    @Override
    void removeFromCache( CacheAccessBackDoor cacheAccess )
    {
        long nodeId = this.getNodeId();
        long relId = this.getRelId();
        if ( nodeId != -1 )
        {
            cacheAccess.removeNodeFromCache( nodeId );
        }
        else if ( relId != -1 )
        {
            cacheAccess.removeRelationshipFromCache( relId );
        }
    }

    @Override
    public PropertyRecord getBefore()
    {
        return before;
    }

    @Override
    public PropertyRecord getAfter()
    {
        return after;
    }

    /** Applies the command by writing the after-state to the property store. */
    @Override
    public void execute()
    {
        store.updateRecord( after );
    }

    /** @return owning node id of the after-record, or -1 if not a node property */
    public long getNodeId()
    {
        return after.getNodeId();
    }

    /** @return owning relationship id of the after-record, or -1 if not a rel property */
    public long getRelId()
    {
        return after.getRelId();
    }

    /**
     * Serialized as: command byte, record id (long), then the before-record
     * followed by the after-record, each in the format of
     * {@link #writeToFile(LogBuffer, PropertyRecord)}.
     */
    @Override
    public void writeToFile( LogBuffer buffer ) throws IOException
    {
        // COMMAND + ID
        buffer.put( PROP_COMMAND );
        buffer.putLong( getKey() ); // 8
        // BEFORE
        writeToFile( buffer, before );
        // AFTER
        writeToFile( buffer, after );
    }

    /**
     * Writes one property record: in-use flag (with the REL_PROPERTY bit set
     * for relationship properties), next/prev prop pointers, owner id,
     * property blocks and deleted dynamic records.
     */
    private void writeToFile( LogBuffer buffer, PropertyRecord record ) throws IOException
    {
        byte inUse = record.inUse() ? Record.IN_USE.byteValue()
            : Record.NOT_IN_USE.byteValue();
        if ( record.getRelId() != -1 )
        {
            // Here we add 2, i.e. set the second lsb.
            inUse += Record.REL_PROPERTY.byteValue();
        }
        buffer.put( inUse ); // 1
        buffer.putLong( record.getNextProp() ).putLong(
                record.getPrevProp() ); // 8 + 8
        long nodeId = record.getNodeId();
        long relId = record.getRelId();
        if ( nodeId != -1 )
        {
            buffer.putLong( nodeId ); // 8 or
        }
        else if ( relId != -1 )
        {
            buffer.putLong( relId ); // 8 or
        }
        else
        {
            // means this records value has not changed, only place in
            // prop chain
            buffer.putLong( -1 ); // 8
        }
        buffer.put( (byte) record.getPropertyBlocks().size() ); // 1
        for ( int i = 0; i < record.getPropertyBlocks().size(); i++ )
        {
            PropertyBlock block = record.getPropertyBlocks().get( i );
            assert block.getSize() > 0 : record + " seems kinda broken";
            writePropertyBlock( buffer, block );
        }
        writeDynamicRecords( buffer, record.getDeletedRecords() );
    }

    /**
     * Reads a command in the format produced by {@link #writeToFile(LogBuffer)}.
     *
     * @return the command, or null if the channel ended mid-record
     */
    public static Command readFromFile( NeoStore neoStore,
        ReadableByteChannel byteChannel, ByteBuffer buffer )
        throws IOException
    {
        // ID
        if ( !readAndFlip( byteChannel, buffer, 8 ) )
        {
            return null;
        }
        long id = buffer.getLong(); // 8
        // BEFORE
        PropertyRecord before = readPropertyRecord( id, byteChannel, buffer );
        if ( before == null )
        {
            return null;
        }
        // AFTER
        PropertyRecord after = readPropertyRecord( id, byteChannel, buffer );
        if ( after == null )
        {
            return null;
        }
        return new PropertyCommand( neoStore == null ? null
                : neoStore.getPropertyStore(), before, after );
    }

    /**
     * Reads a single property record written by
     * {@link #writeToFile(LogBuffer, PropertyRecord)}.
     *
     * @return the record, or null if the channel ended mid-record
     * @throws IllegalStateException if the stored in-use flag contradicts the
     *         in-use state implied by the presence of property blocks
     */
    private static PropertyRecord readPropertyRecord( long id, ReadableByteChannel byteChannel, ByteBuffer buffer )
        throws IOException
    {
        // in_use(byte)+type(int)+key_indexId(int)+prop_blockId(long)+
        // prev_prop_id(long)+next_prop_id(long)
        if ( !readAndFlip( byteChannel, buffer, 1 + 8 + 8 + 8 ) )
        {
            return null;
        }
        PropertyRecord record = new PropertyRecord( id );
        byte inUseFlag = buffer.get(); // 1
        long nextProp = buffer.getLong(); // 8
        long prevProp = buffer.getLong(); // 8
        record.setNextProp( nextProp );
        record.setPrevProp( prevProp );
        boolean inUse = false;
        if ( ( inUseFlag & Record.IN_USE.byteValue() ) == Record.IN_USE.byteValue() )
        {
            inUse = true;
        }
        boolean nodeProperty = true;
        if ( ( inUseFlag & Record.REL_PROPERTY.byteValue() ) == Record.REL_PROPERTY.byteValue() )
        {
            nodeProperty = false;
        }
        long primitiveId = buffer.getLong(); // 8
        if ( primitiveId != -1 && nodeProperty )
        {
            record.setNodeId( primitiveId );
        }
        else if ( primitiveId != -1 )
        {
            record.setRelId( primitiveId );
        }
        if ( !readAndFlip( byteChannel, buffer, 1 ) )
        {
            return null;
        }
        int nrPropBlocks = buffer.get();
        assert nrPropBlocks >= 0;
        if ( nrPropBlocks > 0 )
        {
            // Presence of property blocks implies the record is in use
            record.setInUse( true );
        }
        while ( nrPropBlocks-- > 0 )
        {
            PropertyBlock block = readPropertyBlock( byteChannel, buffer );
            if ( block == null )
            {
                return null;
            }
            record.addPropertyBlock( block );
        }
        if ( !readDynamicRecords( byteChannel, buffer, record, PROPERTY_DELETED_DYNAMIC_RECORD_ADDER ) )
        {
            return null;
        }
        // NOTE(review): a second deleted-records count is read here after
        // readDynamicRecords above already consumed deleted records via the
        // adder — confirm against the write path that this is not a
        // double-read of the same section.
        buffer.flip();
        int deletedRecords = buffer.getInt(); // 4
        assert deletedRecords >= 0;
        while ( deletedRecords-- > 0 )
        {
            DynamicRecord read = readDynamicRecord( byteChannel, buffer );
            if ( read == null )
            {
                return null;
            }
            record.addDeletedRecord( read );
        }
        if ( ( inUse && !record.inUse() ) || ( !inUse && record.inUse() ) )
        {
            throw new IllegalStateException( "Weird, inUse was read in as "
                                             + inUse
                                             + " but the record is "
                                             + record );
        }
        return record;
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_xa_Command.java
|
285
|
/**
 * Command for creating/updating/deleting a node record. Carries both the
 * before and after state so that dynamic label records removed by the change
 * can be deleted on execute.
 */
static class NodeCommand extends Command
{
    private final NodeStore store;
    private final NodeRecord before;
    private final NodeRecord after;

    NodeCommand( NodeStore store, NodeRecord before, NodeRecord after )
    {
        super( after.getId(), Mode.fromRecordState( after ) );
        this.store = store;
        this.before = before;
        this.after = after;
    }

    @Override
    public void accept( CommandRecordVisitor visitor )
    {
        visitor.visitNode( after );
    }

    @Override
    public String toString()
    {
        return beforeAndAfterToString( before, after );
    }

    @Override
    void removeFromCache( CacheAccessBackDoor cacheAccess )
    {
        cacheAccess.removeNodeFromCache( getKey() );
    }

    /**
     * Writes the after-state to the node store, updating the dynamic label
     * records that are part of the after-state plus deleting those only
     * present in the before-state.
     */
    @Override
    public void execute()
    {
        store.updateRecord( after );
        // Dynamic Label Records
        Collection<DynamicRecord> toUpdate = new ArrayList<>( after.getDynamicLabelRecords() );
        addRemoved( toUpdate );
        store.updateDynamicLabelRecords( toUpdate );
    }

    private void addRemoved( Collection<DynamicRecord> toUpdate )
    {
        // the dynamic label records that exist in before, but not in after should be deleted.
        Set<Long> idsToRemove = new HashSet<>();
        for ( DynamicRecord record : before.getDynamicLabelRecords() )
        {
            idsToRemove.add( record.getId() );
        }
        for ( DynamicRecord record : after.getDynamicLabelRecords() )
        {
            idsToRemove.remove( record.getId() );
        }
        for ( long id : idsToRemove )
        {
            // A DynamicRecord created from just an id is not in use, i.e. a deletion
            toUpdate.add( new DynamicRecord( id ) );
        }
    }

    /**
     * Serialized as: command byte, id (long), before-record, after-record,
     * each in the format of {@link #writeNodeRecord(LogBuffer, NodeRecord)}.
     */
    @Override
    public void writeToFile( LogBuffer buffer ) throws IOException
    {
        buffer.put( NODE_COMMAND );
        buffer.putLong( after.getId() );
        writeNodeRecord( buffer, before );
        writeNodeRecord( buffer, after );
    }

    /**
     * Writes one node record: in-use flag (byte) and, only for in-use records,
     * next rel, next prop and label field (3 longs) plus dynamic label records.
     */
    private void writeNodeRecord( LogBuffer buffer, NodeRecord record ) throws IOException
    {
        byte inUse = record.inUse() ? Record.IN_USE.byteValue()
            : Record.NOT_IN_USE.byteValue();
        buffer.put( inUse );
        if ( record.inUse() )
        {
            buffer.putLong( record.getNextRel() ).putLong( record.getNextProp() );
            // labels
            buffer.putLong( record.getLabelField() );
            writeDynamicRecords( buffer, record.getDynamicLabelRecords() );
        }
    }

    /**
     * Reads a command in the format produced by {@link #writeToFile(LogBuffer)}.
     *
     * @return the command, or null if the channel ended mid-record
     */
    public static Command readFromFile( NeoStore neoStore, ReadableByteChannel byteChannel, ByteBuffer buffer )
        throws IOException
    {
        if ( !readAndFlip( byteChannel, buffer, 8 ) )
        {
            return null;
        }
        long id = buffer.getLong();
        NodeRecord before = readNodeRecord( id, byteChannel, buffer );
        if ( before == null )
        {
            return null;
        }
        NodeRecord after = readNodeRecord( id, byteChannel, buffer );
        if ( after == null )
        {
            return null;
        }
        if ( !before.inUse() && after.inUse() )
        {
            // Transition from not-in-use to in-use means this command creates the node
            after.setCreated();
        }
        return new NodeCommand( neoStore == null ? null : neoStore.getNodeStore(), before, after );
    }

    /**
     * Reads one node record written by {@link #writeNodeRecord(LogBuffer, NodeRecord)}.
     *
     * @return the record, or null if the channel ended mid-record
     * @throws IOException on read failure or an illegal in-use flag
     */
    private static NodeRecord readNodeRecord( long id, ReadableByteChannel byteChannel, ByteBuffer buffer )
        throws IOException
    {
        if ( !readAndFlip( byteChannel, buffer, 1 ) )
        {
            return null;
        }
        byte inUseFlag = buffer.get();
        boolean inUse = false;
        if ( inUseFlag == Record.IN_USE.byteValue() )
        {
            inUse = true;
        }
        else if ( inUseFlag != Record.NOT_IN_USE.byteValue() )
        {
            throw new IOException( "Illegal in use flag: " + inUseFlag );
        }
        NodeRecord record;
        if ( inUse )
        {
            if ( !readAndFlip( byteChannel, buffer, 8*3 ) )
            {
                return null;
            }
            record = new NodeRecord( id, buffer.getLong(), buffer.getLong() );
            // labels
            long labelField = buffer.getLong();
            Collection<DynamicRecord> dynamicLabelRecords = new ArrayList<>();
            readDynamicRecords( byteChannel, buffer, dynamicLabelRecords, COLLECTION_DYNAMIC_RECORD_ADDER );
            record.setLabelField( labelField, dynamicLabelRecords );
        }
        else
        {
            record = new NodeRecord( id, Record.NO_NEXT_RELATIONSHIP.intValue(),
                    Record.NO_NEXT_PROPERTY.intValue() );
        }
        record.setInUse( inUse );
        return record;
    }

    /** @return the record as it looked before this command */
    public NodeRecord getBefore()
    {
        return before;
    }

    /** @return the record as it will look after this command */
    public NodeRecord getAfter()
    {
        return after;
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_xa_Command.java
|
286
|
/**
 * Command updating the graph-global next-property pointer kept in the neo store.
 * Serialized as the command byte followed by the next-prop id (long).
 */
static class NeoStoreCommand extends Command
{
    private final NeoStoreRecord record;
    private final NeoStore neoStore;

    NeoStoreCommand( NeoStore neoStore, NeoStoreRecord record )
    {
        super( record.getId(), Mode.fromRecordState( record ) );
        this.neoStore = neoStore;
        this.record = record;
    }

    /** Applies the command by pushing the next-prop pointer into the neo store. */
    @Override
    public void execute()
    {
        neoStore.setGraphNextProp( record.getNextProp() );
    }

    @Override
    public void accept( CommandRecordVisitor visitor )
    {
        visitor.visitNeoStore( record );
    }

    @Override
    public String toString()
    {
        return record.toString();
    }

    @Override
    void removeFromCache( CacheAccessBackDoor cacheAccess )
    {
        // no-op: nothing cached for the neo store record
    }

    @Override
    public void writeToFile( LogBuffer buffer ) throws IOException
    {
        buffer.put( NEOSTORE_COMMAND ).putLong( record.getNextProp() );
    }

    /**
     * Reads a command in the format produced by {@link #writeToFile(LogBuffer)}.
     *
     * @return the command, or null if the channel ended mid-record
     */
    public static Command readFromFile( NeoStore neoStore,
        ReadableByteChannel byteChannel, ByteBuffer buffer )
        throws IOException
    {
        if ( !readAndFlip( byteChannel, buffer, 8 ) )
        {
            return null;
        }
        NeoStoreRecord record = new NeoStoreRecord();
        record.setNextProp( buffer.getLong() );
        return new NeoStoreCommand( neoStore, record );
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_xa_Command.java
|
287
|
/**
 * Command for creating/updating a label token record.
 * Serialized as: command byte, id (int), in-use flag (byte), name id (int),
 * followed by the token's dynamic name records.
 */
static class LabelTokenCommand extends Command
{
    private final LabelTokenRecord record;
    private final LabelTokenStore store;

    LabelTokenCommand( LabelTokenStore store,
                       LabelTokenRecord record )
    {
        super( record.getId(), Mode.fromRecordState( record ) );
        this.record = record;
        this.store = store;
    }

    @Override
    public void accept( CommandRecordVisitor visitor )
    {
        visitor.visitLabelToken( record );
    }

    @Override
    public String toString()
    {
        return record.toString();
    }

    @Override
    void removeFromCache( CacheAccessBackDoor cacheAccess )
    {
        // no-op: token records are not kept in the entity cache
    }

    /** Applies the command by writing the record to the token store. */
    @Override
    public void execute()
    {
        store.updateRecord( record );
    }

    @Override
    public void writeToFile( LogBuffer buffer ) throws IOException
    {
        // id+in_use(byte)+type_blockId(int)+nr_type_records(int)
        byte inUse = record.inUse() ? Record.IN_USE.byteValue()
            : Record.NOT_IN_USE.byteValue();
        buffer.put( LABEL_KEY_COMMAND );
        buffer.putInt( record.getId() ).put( inUse ).putInt( record.getNameId() );
        writeDynamicRecords( buffer, record.getNameRecords() );
    }

    /**
     * Reads a command in the format produced by {@link #writeToFile(LogBuffer)}.
     *
     * @return the command, or null if the channel ended mid-record
     * @throws IOException on read failure or an illegal in-use flag
     */
    public static Command readFromFile( NeoStore neoStore,
        ReadableByteChannel byteChannel, ByteBuffer buffer )
        throws IOException
    {
        // id+in_use(byte)+type_blockId(int)+nr_type_records(int)
        if ( !readAndFlip( byteChannel, buffer, 13 ) )
        {
            return null;
        }
        int id = buffer.getInt();
        byte inUseFlag = buffer.get();
        boolean inUse = false;
        if ( (inUseFlag & Record.IN_USE.byteValue()) ==
            Record.IN_USE.byteValue() )
        {
            inUse = true;
        }
        else if ( inUseFlag != Record.NOT_IN_USE.byteValue() )
        {
            throw new IOException( "Illegal in use flag: " + inUseFlag );
        }
        LabelTokenRecord record = new LabelTokenRecord( id );
        record.setInUse( inUse );
        record.setNameId( buffer.getInt() );
        int nrTypeRecords = buffer.getInt();
        for ( int i = 0; i < nrTypeRecords; i++ )
        {
            DynamicRecord dr = readDynamicRecord( byteChannel, buffer );
            if ( dr == null )
            {
                // Truncated stream: treat the log as ending here
                return null;
            }
            record.addNameRecord( dr );
        }
        return new LabelTokenCommand(
                neoStore == null ? null : neoStore.getLabelTokenStore(), record );
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_xa_Command.java
|
288
|
{
@Override
public void add( Collection<DynamicRecord> target, DynamicRecord record )
{
target.add( record );
}
};
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_xa_Command.java
|
289
|
{
@Override
public void add( PropertyRecord target, DynamicRecord record )
{
assert !record.inUse() : record + " is kinda weird";
target.addDeletedRecord( record );
}
};
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_xa_Command.java
|
290
|
/**
 * A resource iterator over snapshot files which, on close, also releases a set
 * of associated resources (e.g. the underlying store snapshots).
 */
private static class StoreSnapshot extends PrefetchingIterator<File> implements ResourceIterator<File>
{
    private final Iterator<File> source;
    private final Resource[] resources;

    StoreSnapshot( Iterator<File> files, Resource... thingsToCloseWhenDone )
    {
        this.source = files;
        this.resources = thingsToCloseWhenDone;
    }

    @Override
    protected File fetchNextOrNull()
    {
        if ( !source.hasNext() )
        {
            return null;
        }
        return source.next();
    }

    @Override
    public void close()
    {
        for ( int i = 0; i < resources.length; i++ )
        {
            resources[i].close();
        }
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_xa_NeoStoreFileListing.java
|
291
|
public class NeoStoreIndexStoreView implements IndexStoreView
{
private final PropertyStore propertyStore;
private final NodeStore nodeStore;
private final LockService locks;
/**
 * Creates a store view backed by the node and property stores of the given
 * {@code neoStore}.
 */
public NeoStoreIndexStoreView( LockService locks, NeoStore neoStore )
{
    this.locks = locks;
    this.propertyStore = neoStore.getPropertyStore();
    this.nodeStore = neoStore.getNodeStore();
}
/**
 * Creates a scan over all nodes which feeds the visitor an add-update for
 * every node that has the descriptor's label and a value for its property
 * key. Nodes missing the label or the property are skipped.
 */
@Override
public <FAILURE extends Exception> StoreScan<FAILURE> visitNodesWithPropertyAndLabel(
        IndexDescriptor descriptor, final Visitor<NodePropertyUpdate, FAILURE> visitor )
{
    final int soughtLabelId = descriptor.getLabelId();
    final int soughtPropertyKeyId = descriptor.getPropertyKeyId();
    return new NodeStoreScan<NodePropertyUpdate, FAILURE>()
    {
        @Override
        protected NodePropertyUpdate read( NodeRecord node )
        {
            long[] labels = parseLabelsField( node ).get( nodeStore );
            if ( !containsLabel( soughtLabelId, labels ) )
            {
                // Node doesn't carry the sought label — nothing to index
                return null;
            }
            for ( PropertyBlock property : properties( node ) )
            {
                int propertyKeyId = property.getKeyIndexId();
                if ( soughtPropertyKeyId == propertyKeyId )
                {
                    return NodePropertyUpdate.add( node.getId(), propertyKeyId, valueOf( property ), labels );
                }
            }
            // Node has the label but not the sought property
            return null;
        }

        @Override
        protected void process( NodePropertyUpdate update ) throws FAILURE
        {
            visitor.visit( update );
        }
    };
}
/**
 * Creates a scan over all nodes that produces, per node, a label update plus
 * property add-updates for every property whose key is in
 * {@code propertyKeyIds} — the latter only for nodes carrying at least one of
 * {@code labelIds}. Both visitors are invoked from {@code process}.
 */
@Override
public <FAILURE extends Exception> StoreScan<FAILURE> visitNodes(
        final int[] labelIds, final int[] propertyKeyIds,
        final Visitor<NodePropertyUpdate, FAILURE> propertyUpdateVisitor,
        final Visitor<NodeLabelUpdate, FAILURE> labelUpdateVisitor )
{
    return new NodeStoreScan<Update, FAILURE>()
    {
        @Override
        protected Update read( NodeRecord node )
        {
            long[] labels = parseLabelsField( node ).get( nodeStore );
            Update update = new Update( node.getId(), labels );
            if ( !containsAnyLabel( labelIds, labels ) )
            {
                // No relevant label: emit only the label update, no property updates
                return update;
            }
            properties: for ( PropertyBlock property : properties( node ) )
            {
                int propertyKeyId = property.getKeyIndexId();
                for ( int sought : propertyKeyIds )
                {
                    if ( propertyKeyId == sought )
                    {
                        update.add( NodePropertyUpdate
                                .add( node.getId(), propertyKeyId, valueOf( property ), labels ) );
                        continue properties;
                    }
                }
            }
            return update;
        }

        @Override
        protected void process( Update update ) throws FAILURE
        {
            labelUpdateVisitor.visit( update.labels );
            for ( NodePropertyUpdate propertyUpdate : update )
            {
                propertyUpdateVisitor.visit( propertyUpdate );
            }
        }
    };
}
@Override
public Iterable<NodePropertyUpdate> nodeAsUpdates( long nodeId )
{
NodeRecord node = nodeStore.forceGetRecord( nodeId );
if ( !node.inUse() )
{
return Iterables.empty(); // node not in use => no updates
}
long firstPropertyId = node.getCommittedNextProp();
if ( firstPropertyId == Record.NO_NEXT_PROPERTY.intValue() )
{
return Iterables.empty(); // no properties => no updates (it's not going to be in any index)
}
long[] labels = parseLabelsField( node ).get( nodeStore );
if ( labels.length == 0 )
{
return Iterables.empty(); // no labels => no updates (it's not going to be in any index)
}
ArrayList<NodePropertyUpdate> updates = new ArrayList<>();
for ( PropertyRecord propertyRecord : propertyStore.getPropertyRecordChain( firstPropertyId ) )
{
for ( PropertyBlock property : propertyRecord.getPropertyBlocks() )
{
Object value = property.getType().getValue( property, propertyStore );
updates.add( NodePropertyUpdate.add( node.getId(), property.getKeyIndexId(), value, labels ) );
}
}
return updates;
}
@Override
public Property getProperty( long nodeId, int propertyKeyId ) throws EntityNotFoundException, PropertyNotFoundException
{
NodeRecord node = nodeStore.forceGetRecord( nodeId );
if ( !node.inUse() )
{
throw new EntityNotFoundException( EntityType.NODE, nodeId );
}
long firstPropertyId = node.getCommittedNextProp();
if ( firstPropertyId == Record.NO_NEXT_PROPERTY.intValue() )
{
throw new PropertyNotFoundException( propertyKeyId, EntityType.NODE, nodeId );
}
for ( PropertyRecord propertyRecord : propertyStore.getPropertyRecordChain( firstPropertyId ) )
{
PropertyBlock propertyBlock = propertyRecord.getPropertyBlock( propertyKeyId );
if ( propertyBlock != null )
{
return propertyBlock.newPropertyData( propertyStore );
}
}
throw new PropertyNotFoundException( propertyKeyId, EntityType.NODE, nodeId );
}
private Object valueOf( PropertyBlock property )
{
// Make sure the value is loaded, even if it's of a "heavy" kind.
propertyStore.ensureHeavy( property );
return property.getType().getValue( property, propertyStore );
}
private Iterable<PropertyBlock> properties( final NodeRecord node )
{
return new Iterable<PropertyBlock>()
{
@Override
public Iterator<PropertyBlock> iterator()
{
return new PropertyBlockIterator( node );
}
};
}
private static boolean containsLabel( int sought, long[] labels )
{
for ( long label : labels )
{
if ( label == sought )
{
return true;
}
}
return false;
}
private static boolean containsAnyLabel( int[] soughtIds, long[] labels )
{
for ( int soughtId : soughtIds )
{
if ( containsLabel( soughtId, labels ) )
{
return true;
}
}
return false;
}
private static class Update implements Iterable<NodePropertyUpdate>
{
private final NodeLabelUpdate labels;
private final List<NodePropertyUpdate> propertyUpdates = new ArrayList<>();
Update( long nodeId, long[] labels )
{
this.labels = labelChanges( nodeId, EMPTY_LONG_ARRAY, labels );
}
void add( NodePropertyUpdate update )
{
propertyUpdates.add( update );
}
@Override
public Iterator<NodePropertyUpdate> iterator()
{
return propertyUpdates.iterator();
}
}
private class PropertyBlockIterator extends PrefetchingIterator<PropertyBlock>
{
private final Iterator<PropertyRecord> records;
private Iterator<PropertyBlock> blocks = IteratorUtil.emptyIterator();
PropertyBlockIterator( NodeRecord node )
{
long firstPropertyId = node.getCommittedNextProp();
if ( firstPropertyId == Record.NO_NEXT_PROPERTY.intValue() )
{
records = IteratorUtil.emptyIterator();
}
else
{
records = propertyStore.getPropertyRecordChain( firstPropertyId ).iterator();
}
}
@Override
protected PropertyBlock fetchNextOrNull()
{
for (; ; )
{
if ( blocks.hasNext() )
{
return blocks.next();
}
if ( !records.hasNext() )
{
return null;
}
blocks = records.next().getPropertyBlocks().iterator();
}
}
}
private abstract class NodeStoreScan<RESULT, FAILURE extends Exception> implements StoreScan<FAILURE>
{
private volatile boolean continueScanning;
protected abstract RESULT read( NodeRecord node );
protected abstract void process( RESULT result ) throws FAILURE;
@Override
public void run() throws FAILURE
{
PrimitiveLongIterator nodeIds = new StoreIdIterator( nodeStore );
continueScanning = true;
while ( continueScanning && nodeIds.hasNext() )
{
long id = nodeIds.next();
RESULT result = null;
try ( Lock ignored = locks.acquireNodeLock( id, LockService.LockType.READ_LOCK ) )
{
NodeRecord record = nodeStore.forceGetRecord( id );
if ( record.inUse() )
{
result = read( record );
}
}
if ( result != null )
{
process( result );
}
}
}
@Override
public void stop()
{
continueScanning = false;
}
}
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_xa_NeoStoreIndexStoreView.java
|
292
|
/**
 * The XA data source fronting the neo store. Owns the {@link NeoStore}, the
 * {@link IndexingService}, the {@link LabelScanStore} and the {@link Kernel},
 * and manages their life cycle internally via its own {@link LifeSupport}
 * (see the comments on {@link #init()} and {@link #shutdown()}).
 */
public class NeoStoreXaDataSource extends LogBackedXaDataSource implements NeoStoreProvider
{
public static final String DEFAULT_DATA_SOURCE_NAME = "nioneodb";
// Settings this data source reads from the global configuration.
@SuppressWarnings("deprecation")
public static abstract class Configuration extends LogBackedXaDataSource.Configuration
{
public static final Setting<Boolean> read_only= GraphDatabaseSettings.read_only;
public static final Setting<File> store_dir = InternalAbstractGraphDatabase.Configuration.store_dir;
public static final Setting<File> neo_store = InternalAbstractGraphDatabase.Configuration.neo_store;
public static final Setting<File> logical_log = InternalAbstractGraphDatabase.Configuration.logical_log;
}
public static final byte BRANCH_ID[] = UTF8.encode( "414141" );
public static final String LOGICAL_LOG_DEFAULT_NAME = "nioneo_logical.log";
private final StringLogger msgLog;
private final Logging logging;
private final AbstractTransactionManager txManager;
private final DependencyResolver dependencyResolver;
private final TransactionStateFactory stateFactory;
@SuppressWarnings("deprecation")
private final TransactionInterceptorProviders providers;
private final TokenNameLookup tokenNameLookup;
private final PropertyKeyTokenHolder propertyKeyTokens;
private final LabelTokenHolder labelTokens;
private final RelationshipTypeTokenHolder relationshipTypeTokens;
private final PersistenceManager persistenceManager;
private final LockManager lockManager;
private final SchemaWriteGuard schemaWriteGuard;
private final StoreFactory storeFactory;
private final XaFactory xaFactory;
private final JobScheduler scheduler;
private final UpdateableSchemaState updateableSchemaState;
private final Config config;
private final LockService locks;
// State below is created in start() and torn down in stop().
private LifeSupport life;
private KernelAPI kernel;
private NeoStore neoStore;
private IndexingService indexingService;
private SchemaIndexProvider indexProvider;
private XaContainer xaContainer;
private ArrayMap<Class<?>,Store> idGenerators;
private IntegrityValidator integrityValidator;
private NeoStoreFileListing fileListing;
private File storeDir;
private boolean readOnly;
private boolean logApplied = false;
private CacheAccessBackDoor cacheAccess;
private PersistenceCache persistenceCache;
private SchemaCache schemaCache;
private LabelScanStore labelScanStore;
private final IndexingService.Monitor indexingServiceMonitor;
// Diagnostics dumped through DiagnosticsManager; see registerDiagnosticsWith().
private enum Diagnostics implements DiagnosticsExtractor<NeoStoreXaDataSource>
{
NEO_STORE_VERSIONS( "Store versions:" )
{
@Override
void dump( NeoStoreXaDataSource source, StringLogger.LineLogger log )
{
source.neoStore.logVersions( log );
}
},
NEO_STORE_ID_USAGE( "Id usage:" )
{
@Override
void dump( NeoStoreXaDataSource source, StringLogger.LineLogger log )
{
source.neoStore.logIdUsage( log );
}
},
PERSISTENCE_WINDOW_POOL_STATS( "Persistence Window Pool stats:" )
{
@Override
void dump( NeoStoreXaDataSource source, StringLogger.LineLogger log )
{
source.neoStore.logAllWindowPoolStats( log );
}
@Override
boolean applicable( DiagnosticsPhase phase )
{
// Window pool stats are verbose; only dump when explicitly requested.
return phase.isExplicitlyRequested();
}
};
private final String message;
private Diagnostics( String message )
{
this.message = message;
}
@Override
public void dumpDiagnostics( final NeoStoreXaDataSource source, DiagnosticsPhase phase, StringLogger log )
{
if ( applicable( phase ) )
{
log.logLongMessage( message, new Visitor<StringLogger.LineLogger, RuntimeException>()
{
@Override
public boolean visit( StringLogger.LineLogger logger )
{
dump( source, logger );
return false;
}
}, true );
}
}
boolean applicable( DiagnosticsPhase phase )
{
return phase.isInitialization() || phase.isExplicitlyRequested();
}
abstract void dump( NeoStoreXaDataSource source, StringLogger.LineLogger log );
}
/**
 * Creates a <CODE>NeoStoreXaDataSource</CODE> using configuration from
 * <CODE>params</CODE>. First the map is checked for the parameter
 * <CODE>config</CODE>.
 * If that parameter exists a config file with that value is loaded (via
 * {@link Properties#load}). Any parameter that exist in the config file
 * and in the map passed into this constructor will take the value from the
 * map.
 * <p>
 * If <CODE>config</CODE> parameter is set but file doesn't exist an
 * <CODE>IOException</CODE> is thrown. If any problem is found with that
 * configuration file or Neo4j store can't be loaded an <CODE>IOException is
 * thrown</CODE>.
 *
 * Note that the tremendous number of dependencies for this class, clearly, is an architecture smell. It is part
 * of the ongoing work on introducing the Kernel API, where components that were previously spread throughout the
 * core API are now slowly accumulating in the Kernel implementation. Over time, these components should be
 * refactored into bigger components that wrap the very granular things we depend on here.
 */
public NeoStoreXaDataSource( Config config, StoreFactory sf,
StringLogger stringLogger, XaFactory xaFactory, TransactionStateFactory stateFactory,
@SuppressWarnings("deprecation") TransactionInterceptorProviders providers,
JobScheduler scheduler, Logging logging,
UpdateableSchemaState updateableSchemaState,
TokenNameLookup tokenNameLookup,
DependencyResolver dependencyResolver, AbstractTransactionManager txManager,
PropertyKeyTokenHolder propertyKeyTokens, LabelTokenHolder labelTokens,
RelationshipTypeTokenHolder relationshipTypeTokens,
PersistenceManager persistenceManager, LockManager lockManager,
SchemaWriteGuard schemaWriteGuard, IndexingService.Monitor indexingServiceMonitor )
{
super( BRANCH_ID, DEFAULT_DATA_SOURCE_NAME );
this.config = config;
this.stateFactory = stateFactory;
this.tokenNameLookup = tokenNameLookup;
this.dependencyResolver = dependencyResolver;
this.providers = providers;
this.scheduler = scheduler;
this.logging = logging;
this.txManager = txManager;
this.propertyKeyTokens = propertyKeyTokens;
this.labelTokens = labelTokens;
this.relationshipTypeTokens = relationshipTypeTokens;
this.persistenceManager = persistenceManager;
this.lockManager = lockManager;
this.schemaWriteGuard = schemaWriteGuard;
this.indexingServiceMonitor = indexingServiceMonitor;
readOnly = config.get( Configuration.read_only );
msgLog = stringLogger;
this.storeFactory = sf;
this.xaFactory = xaFactory;
this.updateableSchemaState = updateableSchemaState;
this.locks = new ReentrantLockService();
}
@Override
public void init()
{ // We do our own internal life management:
// start() does life.init() and life.start(),
// stop() does life.stop() and life.shutdown().
}
/**
 * Builds the whole component graph (store, caches, indexing, logs, kernel)
 * and starts the internal life. On any failure the neo store is closed so
 * file locks are released, and the original error is rethrown.
 */
@Override
public void start() throws IOException
{
life = new LifeSupport();
readOnly = config.get( Configuration.read_only );
storeDir = config.get( Configuration.store_dir );
File store = config.get( Configuration.neo_store );
storeFactory.ensureStoreExists();
// Use an intercepting transaction factory only if interceptors are configured.
final TransactionFactory tf;
if ( providers.shouldInterceptCommitting() )
{
tf = new InterceptingTransactionFactory();
}
else
{
tf = new TransactionFactory();
}
neoStore = storeFactory.newNeoStore( store );
schemaCache = new SchemaCache( Collections.<SchemaRule>emptyList() );
final NodeManager nodeManager = dependencyResolver.resolveDependency( NodeManager.class );
// The node manager exposes its caches in order: nodes first, then relationships
// (see the casts below).
Iterator<? extends Cache<?>> caches = nodeManager.caches().iterator();
persistenceCache = new PersistenceCache(
(AutoLoadingCache<NodeImpl>)caches.next(),
(AutoLoadingCache<RelationshipImpl>)caches.next(), new Thunk<GraphPropertiesImpl>()
{
@Override
public GraphPropertiesImpl evaluate()
{
return nodeManager.getGraphProperties();
}
} );
cacheAccess = new BridgingCacheAccess( nodeManager, schemaCache, updateableSchemaState, persistenceCache );
try
{
// Pick the highest-prioritized schema index provider available.
indexProvider = dependencyResolver.resolveDependency( SchemaIndexProvider.class,
SchemaIndexProvider.HIGHEST_PRIORITIZED_OR_NONE );
// TODO: Build a real provider map
DefaultSchemaIndexProviderMap providerMap = new DefaultSchemaIndexProviderMap( indexProvider );
indexingService = life.add(
new IndexingService(
scheduler,
providerMap,
new NeoStoreIndexStoreView( locks, neoStore ),
tokenNameLookup, updateableSchemaState,
logging, indexingServiceMonitor ) );
integrityValidator = new IntegrityValidator( neoStore, indexingService );
xaContainer = xaFactory.newXaContainer(this, config.get( Configuration.logical_log ),
new CommandFactory( neoStore, indexingService ),
new NeoStoreInjectedTransactionValidator(integrityValidator), tf,
stateFactory, providers, readOnly );
labelScanStore = life.add( dependencyResolver.resolveDependency( LabelScanStoreProvider.class,
LabelScanStoreProvider.HIGHEST_PRIORITIZED ).getLabelScanStore() );
fileListing = new NeoStoreFileListing( xaContainer, storeDir, labelScanStore, indexingService );
kernel = life.add( new Kernel( txManager, propertyKeyTokens, labelTokens, relationshipTypeTokens,
persistenceManager, lockManager, updateableSchemaState, schemaWriteGuard,
indexingService, nodeManager, new Provider<NeoStore>()
{
@Override
public NeoStore instance()
{
return getNeoStore();
}
}, persistenceCache, schemaCache, providerMap, labelScanStore, readOnly ));
life.init();
// TODO: Why isn't this done in the init() method of the indexing service?
if ( !readOnly )
{
// Temporarily flag the store as recovering while indexes are initialized
// and the logical log is opened.
neoStore.setRecoveredStatus( true );
try
{
indexingService.initIndexes( loadIndexRules() );
xaContainer.openLogicalLog();
}
finally
{
neoStore.setRecoveredStatus( false );
}
}
if ( !xaContainer.getResourceManager().hasRecoveredTransactions() )
{
neoStore.makeStoreOk();
}
else
{
msgLog.debug( "Waiting for TM to take care of recovered " +
"transactions." );
}
idGenerators = new ArrayMap<>( (byte)5, false, false );
this.idGenerators.put( Node.class, neoStore.getNodeStore() );
this.idGenerators.put( Relationship.class, neoStore.getRelationshipStore() );
this.idGenerators.put( RelationshipType.class, neoStore.getRelationshipTypeStore() );
this.idGenerators.put( Label.class, neoStore.getLabelTokenStore() );
this.idGenerators.put( PropertyStore.class, neoStore.getPropertyStore() );
this.idGenerators.put( PropertyKeyTokenRecord.class,
neoStore.getPropertyStore().getPropertyKeyTokenStore() );
setLogicalLogAtCreationTime( xaContainer.getLogicalLog() );
// TODO Problem here is that we don't know if recovery has been performed at this point
// if it hasn't then no index recovery will be performed since the node store is still in
// "not ok" state and forceGetRecord will always return place holder node records that are not in use.
// This issue will certainly introduce index inconsistencies.
life.start();
}
catch ( Throwable e )
{ // Something unexpected happened during startup
try
{ // Close the neostore, so that locks are released properly
neoStore.close();
}
catch ( Exception closeException )
{
msgLog.logMessage( "Couldn't close neostore after startup failure" );
}
throw Exceptions.launderedException( e );
}
}
public NeoStore getNeoStore()
{
return neoStore;
}
public IndexingService getIndexService()
{
return indexingService;
}
public SchemaIndexProvider getIndexProvider()
{
return indexProvider;
}
public LabelScanStore getLabelScanStore()
{
return labelScanStore;
}
public LockService getLockService()
{
return locks;
}
@Override
public void stop()
{
super.stop();
// Flush everything to disk before tearing components down, unless read-only.
if ( !readOnly )
{
forceEverything();
}
life.shutdown();
xaContainer.close();
if ( logApplied )
{
// Id generators may be stale after a log has been applied; rebuild them.
neoStore.rebuildIdGenerators();
logApplied = false;
}
neoStore.close();
msgLog.info( "NeoStore closed" );
}
// Forces the store, the indexes and the label scan store to disk.
private void forceEverything()
{
neoStore.flushAll();
indexingService.flushAll();
labelScanStore.force();
}
@Override
public void shutdown()
{ // We do our own internal life management:
// start() does life.init() and life.start(),
// stop() does life.stop() and life.shutdown().
}
public StoreId getStoreId()
{
return neoStore.getStoreId();
}
@Override
public NeoStoreXaConnection getXaConnection()
{
return new NeoStoreXaConnection( neoStore,
xaContainer.getResourceManager(), getBranchId() );
}
// Deserializes commands from the logical log using the store and indexing service.
private static class CommandFactory extends XaCommandFactory
{
private final NeoStore neoStore;
private final IndexingService indexingService;
CommandFactory( NeoStore neoStore, IndexingService indexingService )
{
this.neoStore = neoStore;
this.indexingService = indexingService;
}
@Override
public XaCommand readCommand( ReadableByteChannel byteChannel,
ByteBuffer buffer ) throws IOException
{
return Command.readCommand( neoStore, indexingService, byteChannel, buffer );
}
}
// Transaction factory used when commit interceptors are configured.
private class InterceptingTransactionFactory extends TransactionFactory
{
@Override
public XaTransaction create( long lastCommittedTxWhenTransactionStarted, TransactionState state )
{
TransactionInterceptor first = providers.resolveChain( NeoStoreXaDataSource.this );
return new InterceptingWriteTransaction( lastCommittedTxWhenTransactionStarted, getLogicalLog(),
neoStore, state, cacheAccess, indexingService, labelScanStore, first, integrityValidator,
(KernelTransactionImplementation)kernel.newTransaction(), locks );
}
}
// Default transaction factory; also bridges version/flush callbacks to the store.
private class TransactionFactory extends XaTransactionFactory
{
@Override
public XaTransaction create( long lastCommittedTxWhenTransactionStarted, TransactionState state )
{
return new NeoStoreTransaction( lastCommittedTxWhenTransactionStarted, getLogicalLog(), state,
neoStore, cacheAccess, indexingService, labelScanStore, integrityValidator,
(KernelTransactionImplementation)kernel.newTransaction(), locks );
}
@Override
public void recoveryComplete()
{
msgLog.debug( "Recovery complete, "
+ "all transactions have been resolved" );
msgLog.debug( "Rebuilding id generators as needed. "
+ "This can take a while for large stores..." );
forceEverything();
neoStore.makeStoreOk();
neoStore.setVersion( xaContainer.getLogicalLog().getHighestLogVersion() );
msgLog.debug( "Rebuild of id generators complete." );
}
@Override
public long getCurrentVersion()
{
return neoStore.getVersion();
}
@Override
public long getAndSetNewVersion()
{
return neoStore.incrementVersion();
}
@Override
public void setVersion( long version )
{
neoStore.setVersion( version );
}
@Override
public void flushAll()
{
forceEverything();
}
@Override
public long getLastCommittedTx()
{
return neoStore.getLastCommittedTx();
}
}
/**
 * Next free id for the given entity class.
 * @throws IdGenerationFailedException if no id generator is registered for the class.
 */
public long nextId( Class<?> clazz )
{
Store store = idGenerators.get( clazz );
if ( store == null )
{
throw new IdGenerationFailedException( "No IdGenerator for: "
+ clazz );
}
return store.nextId();
}
public long getHighestPossibleIdInUse( Class<?> clazz )
{
Store store = idGenerators.get( clazz );
if ( store == null )
{
throw new IdGenerationFailedException( "No IdGenerator for: "
+ clazz );
}
return store.getHighestPossibleIdInUse();
}
public long getNumberOfIdsInUse( Class<?> clazz )
{
Store store = idGenerators.get( clazz );
if ( store == null )
{
throw new IdGenerationFailedException( "No IdGenerator for: "
+ clazz );
}
return store.getNumberOfIdsInUse();
}
public String getStoreDir()
{
return storeDir.getPath();
}
@Override
public long getCreationTime()
{
return neoStore.getCreationTime();
}
@Override
public long getRandomIdentifier()
{
return neoStore.getRandomNumber();
}
@Override
public long getCurrentLogVersion()
{
return neoStore.getVersion();
}
public long incrementAndGetLogVersion()
{
return neoStore.incrementVersion();
}
// used for testing, do not use.
@Override
public void setLastCommittedTxId( long txId )
{
// Setting the committed tx id is only allowed while flagged as recovering.
neoStore.setRecoveredStatus( true );
try
{
neoStore.setLastCommittedTx( txId );
}
finally
{
neoStore.setRecoveredStatus( false );
}
}
public boolean isReadOnly()
{
return readOnly;
}
public List<WindowPoolStats> getWindowPoolStats()
{
return neoStore.getAllWindowPoolStats();
}
@Override
public long getLastCommittedTxId()
{
return neoStore.getLastCommittedTx();
}
@Override
public XaContainer getXaContainer()
{
return xaContainer;
}
@Override
public boolean setRecovered( boolean recovered )
{
boolean currentValue = neoStore.isInRecoveryMode();
// NOTE(review): the 'recovered' argument is ignored — the status is always
// set to true here. Confirm whether setRecoveredStatus( recovered ) was intended.
neoStore.setRecoveredStatus( true );
return currentValue;
}
@Override
public ResourceIterator<File> listStoreFiles( boolean includeLogicalLogs ) throws IOException
{
return fileListing.listStoreFiles( includeLogicalLogs );
}
@Override
public ResourceIterator<File> listStoreFiles() throws IOException
{
return fileListing.listStoreFiles();
}
@Override
public ResourceIterator<File> listLogicalLogs() throws IOException
{
return fileListing.listLogicalLogs();
}
public void registerDiagnosticsWith( DiagnosticsManager manager )
{
manager.registerAll( Diagnostics.class, this );
}
// All index rules currently persisted in the schema store.
private Iterator<IndexRule> loadIndexRules()
{
return new SchemaStorage( neoStore.getSchemaStore() ).allIndexRules();
}
// NeoStoreProvider implementation.
@Override
public NeoStore evaluate()
{
return neoStore;
}
@Override
public void recoveryCompleted() throws IOException
{
// Indexes are only started once recovery is known to be done.
indexingService.startIndexes();
}
// Log writer whose target file is resolved from configuration at write time.
public LogBufferFactory createLogBufferFactory()
{
return xaContainer.getLogicalLog().createLogWriter( new Function<Config, File>()
{
@Override
public File apply( Config config )
{
return config.get( Configuration.logical_log );
}
} );
}
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_xa_NeoStoreXaDataSource.java
|
293
|
{
// Anonymous record loader for node records. The loader interface and the
// call this argument belongs to are declared outside this view — presumably
// a RecordChanges-style tracker; confirm against the surrounding code.
@Override
public NodeRecord newUnused( Long key, Void additionalData )
{
// Fresh record with no relationship chain or property chain attached yet.
return new NodeRecord( key, Record.NO_NEXT_RELATIONSHIP.intValue(),
Record.NO_NEXT_PROPERTY.intValue() );
}
@Override
public NodeRecord load( Long key, Void additionalData )
{
return getNodeStore().getRecord( key );
}
@Override
public void ensureHeavy( NodeRecord record )
{
// Delegate to the node store to fully load any lazily-loaded parts.
getNodeStore().ensureHeavy( record );
}
@Override
public NodeRecord clone(NodeRecord nodeRecord)
{
// Used to capture before-state snapshots of records.
return nodeRecord.clone();
}
}, true );
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_xa_NeoStoreTransaction.java
|
294
|
/**
 * XAResource for the neo store. Two instances represent the same resource
 * manager exactly when their identifiers (the store file name) are equal.
 */
private static class NeoStoreXaResource extends XaResourceHelpImpl
{
    private final Object identifier;

    NeoStoreXaResource( Object identifier, XaResourceManager xaRm,
            byte branchId[] )
    {
        super( xaRm, branchId );
        this.identifier = identifier;
    }

    @Override
    public boolean isSameRM( XAResource other )
    {
        // Same RM iff the other resource is of our kind and identifies the same store.
        return other instanceof NeoStoreXaResource
                && identifier.equals( ((NeoStoreXaResource) other).identifier );
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_xa_NeoStoreXaConnection.java
|
295
|
/**
 * XA connection for the graph store: hands out {@link NeoStoreTransaction}s
 * and exposes the store's {@link XAResource}. Implements
 * {@link IndexXaConnection} purely as a temporary workaround — see
 * {@link #createIndex(Class, String, Map)}.
 */
public class NeoStoreXaConnection extends XaConnectionHelpImpl
        implements IndexXaConnection // Implements this to enable a temporary workaround, see #createIndex
{
    private final NeoStore neoStore;
    private final NeoStoreXaResource xaResource;

    NeoStoreXaConnection( NeoStore neoStore, XaResourceManager xaRm,
            byte branchId[] )
    {
        super( xaRm );
        this.neoStore = neoStore;
        // The store's file name uniquely identifies this resource manager.
        this.xaResource = new NeoStoreXaResource(
                neoStore.getStorageFileName(), xaRm, branchId );
    }

    @Override
    public XAResource getXaResource()
    {
        return xaResource;
    }

    /**
     * @return the current transaction, cast to {@link NeoStoreTransaction}.
     * @throws TransactionFailureException if the underlying XA call fails.
     */
    @Override
    public NeoStoreTransaction getTransaction()
    {
        try
        {
            return (NeoStoreTransaction) super.getTransaction();
        }
        catch ( XAException cause )
        {
            throw new TransactionFailureException( "Unable to create transaction.", cause );
        }
    }

    /**
     * Creates a new write transaction. Only called once per write transaction,
     * so the result is not cached here.
     */
    @Override
    public NeoStoreTransaction createTransaction()
    {
        try
        {
            return (NeoStoreTransaction) super.createTransaction();
        }
        catch ( XAException cause )
        {
            throw new TransactionFailureException( "Unable to create transaction.", cause );
        }
    }

    private static class NeoStoreXaResource extends XaResourceHelpImpl
    {
        private final Object identifier;

        NeoStoreXaResource( Object identifier, XaResourceManager xaRm,
                byte branchId[] )
        {
            super( xaRm, branchId );
            this.identifier = identifier;
        }

        @Override
        public boolean isSameRM( XAResource other )
        {
            // Same RM iff the other resource identifies the same store.
            return other instanceof NeoStoreXaResource
                    && identifier.equals( ((NeoStoreXaResource) other).identifier );
        }
    }

    // TEST These methods are only used by tests - refactor away if possible
    public PropertyStore getPropertyStore()
    {
        return neoStore.getPropertyStore();
    }

    public RelationshipTypeTokenStore getRelationshipTypeStore()
    {
        return neoStore.getRelationshipTypeStore();
    }

    /**
     * Deliberate no-op. In-graph indexes currently have no persisted life-cycle
     * commands in the neo store, so index "creation" records nothing here. Once
     * the graph data source is unified with the index data sources (one log and
     * data source fronting both) this workaround — and this class implementing
     * IndexXaConnection at all — can be removed.
     */
    @Override
    public void createIndex( Class<? extends PropertyContainer> entityType, String indexName,
            Map<String, String> config )
    {
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_xa_NeoStoreXaConnection.java
|
296
|
/**
 * A placeholder {@link Relationship} identified only by id, used purely as a
 * lock target. Every graph operation throws {@link UnsupportedOperationException};
 * only identity ({@link #getId()}, {@link #equals(Object)}, {@link #hashCode()})
 * is functional.
 */
private static class LockableRelationship implements Relationship
{
    private final long id;

    LockableRelationship( long id )
    {
        this.id = id;
    }

    // Single source for the exception every unsupported operation throws.
    private static UnsupportedOperationException unsupported()
    {
        return new UnsupportedOperationException( "Lockable rel" );
    }

    @Override
    public long getId()
    {
        return id;
    }

    @Override
    public void delete()
    {
        throw unsupported();
    }

    @Override
    public Node getEndNode()
    {
        throw unsupported();
    }

    @Override
    public GraphDatabaseService getGraphDatabase()
    {
        throw unsupported();
    }

    @Override
    public Node[] getNodes()
    {
        throw unsupported();
    }

    @Override
    public Node getOtherNode( Node node )
    {
        throw unsupported();
    }

    @Override
    public Object getProperty( String key )
    {
        throw unsupported();
    }

    @Override
    public Object getProperty( String key, Object defaultValue )
    {
        throw unsupported();
    }

    @Override
    public Iterable<String> getPropertyKeys()
    {
        throw unsupported();
    }

    @Override
    public Node getStartNode()
    {
        throw unsupported();
    }

    @Override
    public RelationshipType getType()
    {
        throw unsupported();
    }

    @Override
    public boolean isType( RelationshipType type )
    {
        throw unsupported();
    }

    @Override
    public boolean hasProperty( String key )
    {
        throw unsupported();
    }

    @Override
    public Object removeProperty( String key )
    {
        throw unsupported();
    }

    @Override
    public void setProperty( String key, Object value )
    {
        throw unsupported();
    }

    @Override
    public boolean equals( Object o )
    {
        if ( !(o instanceof Relationship) )
        {
            return false;
        }
        return getId() == ((Relationship) o).getId();
    }

    @Override
    public int hashCode()
    {
        // Fold the two halves of the long id into an int.
        return (int) ((id >>> 32) ^ id);
    }

    @Override
    public String toString()
    {
        return "Lockable relationship #" + getId();
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_xa_NeoStoreTransaction.java
|
297
|
/**
 * Computes the delta between a node's label set before and after a change.
 * Both input arrays must be sorted ascending (label ids are sorted in the
 * store), since membership is tested with binary search.
 */
static class LabelChangeSummary
{
    private static final long[] NO_LABELS = new long[0];

    private final long[] addedLabels;
    private final long[] removedLabels;

    /**
     * @param labelsBefore sorted label ids on the node before the change.
     * @param labelsAfter sorted label ids on the node after the change.
     */
    LabelChangeSummary( long[] labelsBefore, long[] labelsAfter )
    {
        // Ids are sorted in the store, so binarySearch is applicable.
        long[] addedLabels = new long[labelsAfter.length];
        long[] removedLabels = new long[labelsBefore.length];
        int addedLabelsCursor = 0, removedLabelsCursor = 0;
        // Added: present after, absent before.
        for ( long labelAfter : labelsAfter )
        {
            if ( binarySearch( labelsBefore, labelAfter ) < 0 )
            {
                addedLabels[addedLabelsCursor++] = labelAfter;
            }
        }
        // Removed: present before, absent after.
        for ( long labelBefore : labelsBefore )
        {
            if ( binarySearch( labelsAfter, labelBefore ) < 0 )
            {
                removedLabels[removedLabelsCursor++] = labelBefore;
            }
        }
        // Trim the over-allocated scratch arrays down to their populated prefix.
        this.addedLabels = shrink( addedLabels, addedLabelsCursor );
        this.removedLabels = shrink( removedLabels, removedLabelsCursor );
    }

    // static: uses no instance state. Returns the shared NO_LABELS for empty results.
    private static long[] shrink( long[] array, int toLength )
    {
        if ( toLength == 0 )
        {
            return NO_LABELS;
        }
        return array.length == toLength ? array : copyOf( array, toLength );
    }

    public boolean hasAddedLabels()
    {
        return addedLabels.length > 0;
    }

    public boolean hasRemovedLabels()
    {
        return removedLabels.length > 0;
    }

    public long[] getAddedLabels()
    {
        return addedLabels;
    }

    public long[] getRemovedLabels()
    {
        return removedLabels;
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_xa_NeoStoreTransaction.java
|
298
|
/**
 * Orders commands by their record key (id), ascending.
 * All instances are interchangeable, hence the constant equals/hashCode.
 */
static class CommandSorter implements Comparator<Command>, Serializable
{
    @Override
    public int compare( Command o1, Command o2 )
    {
        // Long.compare avoids the overflow that plain subtraction (id1 - id2)
        // suffers when the two long keys are more than Long.MAX_VALUE apart,
        // which could invert the sign of the result.
        return Long.compare( o1.getKey(), o2.getKey() );
    }

    @Override
    public boolean equals( Object o )
    {
        return o instanceof CommandSorter;
    }

    @Override
    public int hashCode()
    {
        return 3217;
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_xa_NeoStoreTransaction.java
|
299
|
{
// Anonymous record loader for the single NeoStoreRecord (graph-wide properties).
// The loader interface and the call this argument belongs to are declared
// outside this view.
@Override
public NeoStoreRecord newUnused( Long key, Void additionalData )
{
// There is exactly one NeoStoreRecord; creating "unused" ones is not supported.
throw new UnsupportedOperationException();
}
@Override
public NeoStoreRecord load( Long key, Void additionalData )
{
return neoStore.asRecord();
}
@Override
public void ensureHeavy( NeoStoreRecord record )
{
// Nothing lazily loaded on this record; intentionally a no-op.
}
@Override
public NeoStoreRecord clone(NeoStoreRecord neoStoreRecord) {
// We do not expect to manage the before state, so this operation will not be called.
throw new UnsupportedOperationException("Clone on NeoStoreRecord");
}
}, false );
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_xa_NeoStoreTransaction.java
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.