Unnamed: 0
int64 0
6.7k
| func
stringlengths 12
89.6k
| target
bool 2
classes | project
stringlengths 45
151
|
|---|---|---|---|
1,200
|
/**
 * Token record for a label name. Behaves exactly like its {@link TokenRecord} base;
 * the only specialization is the name used in toString()-style output.
 */
public class LabelTokenRecord extends TokenRecord
{
    public LabelTokenRecord( int id )
    {
        super( id );
    }

    /** Short human-readable type name used by the base class when rendering this record. */
    @Override
    protected String simpleName()
    {
        return "Label";
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_store_LabelTokenRecord.java
|
1,201
|
/**
 * Thrown when a store record is found to be invalid, e.g. when a record id does not match
 * the expected position. Plain carrier exception: all constructors delegate to
 * {@link StoreFailureException}.
 */
public class InvalidRecordException extends StoreFailureException
{
    public InvalidRecordException( String msg )
    {
        super( msg );
    }

    public InvalidRecordException( Throwable cause )
    {
        super( cause );
    }

    public InvalidRecordException( String msg, Throwable cause )
    {
        super( msg, cause );
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_store_InvalidRecordException.java
|
1,202
|
/**
 * Thrown when an id generator file is found to be invalid. Plain carrier exception:
 * all constructors delegate to {@link StoreFailureException}.
 */
public class InvalidIdGeneratorException extends StoreFailureException
{
    public InvalidIdGeneratorException( String msg )
    {
        super( msg );
    }

    public InvalidIdGeneratorException( Throwable cause )
    {
        super( cause );
    }

    public InvalidIdGeneratorException( String msg, Throwable cause )
    {
        super( msg, cause );
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_store_InvalidIdGeneratorException.java
|
1,203
|
/**
 * Schema rule describing an index on a (label, property key) pair. The rule is either a
 * plain index or a constraint index (an index backing a uniqueness constraint); a
 * constraint index additionally carries the id of its owning constraint record.
 * Handles (de)serialization of the rule to and from a {@link ByteBuffer}.
 */
public class IndexRule extends AbstractSchemaRule
{
    // On-disk sentinel meaning "constraint index with no owning constraint linked yet".
    private static final long NO_OWNING_CONSTRAINT = -1;
    private final SchemaIndexProvider.Descriptor providerDescriptor;
    private final int propertyKey;
    /**
     * Non-null for constraint indexes, equal to {@link #NO_OWNING_CONSTRAINT} for
     * constraint indexes with no owning constraint record; null for plain indexes.
     */
    private final Long owningConstraint;

    /**
     * Deserializes an index rule. The buffer is expected to hold, in order: the provider
     * descriptor, the property key(s), and — for constraint indexes only — the owning
     * constraint id (mirrors the layout written by {@link #serialize(ByteBuffer)}).
     */
    static IndexRule readIndexRule( long id, boolean constraintIndex, int label, ByteBuffer serialized )
    {
        SchemaIndexProvider.Descriptor providerDescriptor = readProviderDescriptor( serialized );
        int propertyKeyId = readPropertyKey( serialized );
        if ( constraintIndex )
        {
            long owningConstraint = readOwningConstraint( serialized );
            return constraintIndexRule( id, label, propertyKeyId, providerDescriptor, owningConstraint );
        }
        else
        {
            return indexRule( id, label, propertyKeyId, providerDescriptor );
        }
    }

    /** Creates a plain (non-constraint) index rule; owningConstraint is null for these. */
    public static IndexRule indexRule( long id, int label, int propertyKeyId,
                                       SchemaIndexProvider.Descriptor providerDescriptor )
    {
        return new IndexRule( id, label, propertyKeyId, providerDescriptor, null );
    }

    /**
     * Creates a constraint index rule. A null {@code owningConstraint} is normalized to
     * {@link #NO_OWNING_CONSTRAINT}, i.e. "no owner linked yet".
     */
    public static IndexRule constraintIndexRule( long id, int label, int propertyKeyId,
                                                 SchemaIndexProvider.Descriptor providerDescriptor,
                                                 Long owningConstraint )
    {
        return new IndexRule( id, label, propertyKeyId, providerDescriptor,
                              owningConstraint == null ? NO_OWNING_CONSTRAINT : owningConstraint );
    }

    private IndexRule( long id, int label, int propertyKey, SchemaIndexProvider.Descriptor providerDescriptor,
                       Long owningConstraint )
    {
        // Kind is derived from whether an owning-constraint value (even the sentinel) is present.
        super( id, label, indexKind( owningConstraint ) );
        this.owningConstraint = owningConstraint;
        if ( providerDescriptor == null )
        {
            throw new IllegalArgumentException( "null provider descriptor prohibited" );
        }
        this.providerDescriptor = providerDescriptor;
        this.propertyKey = propertyKey;
    }

    /** Null owner distinguishes plain index rules from constraint index rules. */
    private static Kind indexKind( Long owningConstraint )
    {
        return owningConstraint == null ? Kind.INDEX_RULE : Kind.CONSTRAINT_INDEX_RULE;
    }

    private static SchemaIndexProvider.Descriptor readProviderDescriptor( ByteBuffer serialized )
    {
        String providerKey = getDecodedStringFrom( serialized );
        String providerVersion = getDecodedStringFrom( serialized );
        return new SchemaIndexProvider.Descriptor( providerKey, providerVersion );
    }

    private static int readPropertyKey( ByteBuffer serialized )
    {
        // Currently only one key is supported although the data format supports multiple
        int count = serialized.getShort();
        assert count == 1;
        // Changed from being a long to an int 2013-09-10, but keeps reading a long to not change the store format.
        return safeCastLongToInt( serialized.getLong() );
    }

    private static long readOwningConstraint( ByteBuffer serialized )
    {
        return serialized.getLong();
    }

    public SchemaIndexProvider.Descriptor getProviderDescriptor()
    {
        return providerDescriptor;
    }

    public int getPropertyKey()
    {
        return propertyKey;
    }

    public boolean isConstraintIndex()
    {
        return owningConstraint != null;
    }

    /**
     * Returns the id of the owning constraint, or null if the constraint record has not been
     * linked yet. Only valid on constraint indexes.
     *
     * @throws IllegalStateException if this is not a constraint index.
     */
    public Long getOwningConstraint()
    {
        if ( !isConstraintIndex() )
        {
            throw new IllegalStateException( "Can only get owner from constraint indexes." );
        }
        // Safe comparison: the non-null Long is unboxed against the primitive sentinel.
        if ( owningConstraint == NO_OWNING_CONSTRAINT )
        {
            return null;
        }
        return owningConstraint;
    }

    /** Serialized size in bytes; must stay in sync with {@link #serialize(ByteBuffer)}. */
    @Override
    public int length()
    {
        return super.length()
               + UTF8.computeRequiredByteBufferSize( providerDescriptor.getKey() )
               + UTF8.computeRequiredByteBufferSize( providerDescriptor.getVersion() )
               + 2 * 1 /* number of property keys, for now always 1 */
               + 8 /* the property keys */
               + (isConstraintIndex() ? 8 : 0) /* constraint indexes have an owner field */;
    }

    @Override
    public void serialize( ByteBuffer target )
    {
        super.serialize( target );
        UTF8.putEncodedStringInto( providerDescriptor.getKey(), target );
        UTF8.putEncodedStringInto( providerDescriptor.getVersion(), target );
        target.putShort( (short) 1 /*propertyKeys.length*/ );
        // Written as a long for store-format compatibility; see readPropertyKey().
        target.putLong( propertyKey );
        if ( isConstraintIndex() )
        {
            target.putLong( owningConstraint );
        }
    }

    @Override
    public int hashCode()
    {
        // TODO: Think if this needs to be extended with providerDescriptor
        return 31 * super.hashCode() + propertyKey;
    }

    // NOTE(review): equals intentionally mirrors hashCode and ignores providerDescriptor
    // and owningConstraint — confirm before extending either.
    @Override
    public boolean equals( Object obj )
    {
        if ( this == obj )
        {
            return true;
        }
        if ( !super.equals( obj ) )
        {
            return false;
        }
        if ( getClass() != obj.getClass() )
        {
            return false;
        }
        IndexRule other = (IndexRule) obj;
        return propertyKey == other.propertyKey;
    }

    @Override
    protected String innerToString()
    {
        StringBuilder result = new StringBuilder( ", provider=" ).append( providerDescriptor ).append( ", properties=" )
                .append( propertyKey );
        if ( owningConstraint != null )
        {
            result.append( ", owner=" );
            if ( owningConstraint == -1 )
            {
                result.append( "<not set>" );
            }
            else
            {
                result.append( owningConstraint );
            }
        }
        return result.toString();
    }

    /**
     * Returns a copy of this constraint index rule with the given owning constraint id set.
     *
     * @throws IllegalStateException if this is a plain (non-constraint) index.
     */
    public IndexRule withOwningConstraint( long constraintId )
    {
        if ( !isConstraintIndex() )
        {
            throw new IllegalStateException( this + " is not a constraint index" );
        }
        return constraintIndexRule( getId(), getLabel(), getPropertyKey(), getProviderDescriptor(), constraintId );
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_store_IndexRule.java
|
1,204
|
// EUROPEAN encoding table constant (constructor args 9, 7 — presumably header bits and
// bits-per-character; confirm against the enclosing enum's declaration).
EUROPEAN( 9, 7 )
{
    /**
     * Decodes a stored code point back to a character. Code points below 0x40 map into the
     * Latin-1 accented range 0xC0-0xFF, except 0x17 ('.') and 0x37 ('-'). At or above 0x40,
     * dedicated slots encode space (0x40), underscore (0x60), digits 0-4 (0x5B-0x5F) and
     * digits 5-9 (0x7B-0x7F); everything else passes through unchanged.
     */
    @Override
    char decTranslate( byte codePoint )
    {
        if ( codePoint < 0x40 )
        {
            if ( codePoint == 0x17 ) return '.';
            if ( codePoint == 0x37 ) return '-';
            return (char) ( codePoint + 0xC0 );
        }
        else
        {
            if ( codePoint == 0x40 ) return ' ';
            if ( codePoint == 0x60 ) return '_';
            if ( codePoint >= 0x5B && codePoint < 0x60 ) return (char) ( '0' + codePoint - 0x5B );
            if ( codePoint >= 0x7B && codePoint < 0x80 ) return (char) ( '5' + codePoint - 0x7B );
            return (char) codePoint;
        }
    }

    /**
     * Maps a punctuation index to its code point in this encoding — the inverse of the
     * special cases in {@link #decTranslate(byte)}.
     */
    @Override
    int encPunctuation( byte b )
    {
        switch ( b )
        {
        case 0x00:
            return 0x40; // SPACE
        case 0x01:
            return 0x60; // UNDERSCORE
        case 0x02:
            return 0x17; // DOT
        case 0x03:
            return 0x37; // DASH
        case 0x07:
            // TODO
            return 0;
        default:
            throw cannotEncode( b );
        }
    }
},
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_store_LongerShortString.java
|
1,205
|
/**
 * A persistence window backed by a memory-mapped region of the store file, covering
 * {@code totalSize / recordSize} records starting at {@code position}.
 */
class MappedPersistenceWindow extends LockableWindow
{
    private long position = -1;
    private Buffer buffer = null;
    private int windowSize = -1; // number of records covered by this window
    private int recordSize = -1;
    private int totalSize = -1;  // size of the mapped region, in bytes

    /**
     * Maps {@code totalSize} bytes of the channel starting at record {@code position}.
     *
     * @throws MappedMemException if the underlying channel cannot be mapped.
     */
    MappedPersistenceWindow( long position, int recordSize, int totalSize,
            StoreChannel channel, FileChannel.MapMode mapMode )
    {
        super( channel );
        assert recordSize > 0 : "Record size[" + recordSize
                + "] must be greater then zero";
        assert totalSize >= recordSize : "Total size[" + totalSize
                + "] cannot be less than record size[" + recordSize + "]";
        // Bug fix: this assertion previously repeated the >= check above, while its message
        // claims a divisibility check. Verify the region holds a whole number of records.
        assert totalSize % recordSize == 0 : "Total size[" + totalSize
                + "] must mod to zero with record size[" + recordSize + "]";
        this.totalSize = totalSize;
        windowSize = totalSize / recordSize;
        this.recordSize = recordSize;
        this.position = position;
        try
        {
            buffer = new Buffer( this, channel.map( mapMode,
                    position * recordSize, totalSize ) );
        }
        catch ( IOException e )
        {
            // Mark this window as unusable before propagating.
            this.position = -1;
            throw new MappedMemException( "Unable to map pos=" + position +
                    " recordSize=" + recordSize + " totalSize=" + totalSize, e );
        }
    }

    @Override
    public Buffer getBuffer()
    {
        return buffer;
    }

    @Override
    public int getRecordSize()
    {
        return recordSize;
    }

    /** First record position covered by this window, or -1 if mapping failed or closed. */
    @Override
    public long position()
    {
        return position;
    }

    /** Number of records this window covers. */
    @Override
    public int size()
    {
        return windowSize;
    }

    /** Forces the mapped buffer's content out to the storage device. */
    @Override
    public void force()
    {
        ((MappedByteBuffer) buffer.getBuffer()).force();
    }

    @Override
    public boolean equals( Object o )
    {
        if ( !(o instanceof MappedPersistenceWindow) )
        {
            return false;
        }
        return position() == ((MappedPersistenceWindow) o).position();
    }

    // Lazily computed; racy publication is benign since the value is deterministic.
    private volatile int hashCode = 0;

    @Override
    public int hashCode()
    {
        if ( hashCode == 0 )
        {
            hashCode = (int) position();
        }
        return hashCode;
    }

    @Override
    public String toString()
    {
        return "MappedPersistenceWindow[p=" + position + ",rs=" + recordSize
                + ",ws=" + windowSize + ",ts=" + totalSize + "]";
    }

    @Override
    public synchronized void close()
    {
        buffer.close();
        position = -1;
        closed = true;
    }

    /**
     * Positions the shared buffer at the given record id within this window and returns it.
     *
     * @throws IllegalArgumentException if the id falls outside the mapped region.
     */
    @Override
    public Buffer getOffsettedBuffer( long id )
    {
        int offset = (int) (id - buffer.position()) * recordSize;
        try
        {
            buffer.setOffset( offset );
            return buffer;
        }
        catch ( IllegalArgumentException e )
        {
            throw new IllegalArgumentException( "Unable to set offset. id:" + id + ", position:" + buffer.position()
                    + ", recordSize:" + recordSize, e );
        }
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_store_MappedPersistenceWindow.java
|
1,206
|
{
@Override
public void run()
{
try
{
// We will lock this row, but we must wait for theTriggeringOne to write to it
waitForFirstWriterToWrite();
/*
* This would deadlock, since theTriggeringOne waits for us to grab it before releasing, and
* we cannot grab it unless theTriggeringOne releases it. This is broken by the external
* breakpoint on PersistenceRow.lock()
*/
PersistenceRow row = (PersistenceRow) pool.acquire( 1, OperationType.READ );
// And we allow theOverwrittenOne to refresh bricks and read in the memory mapped buffer
theBreakingOneHasLockedTheRow.countDown();
// Wait for it to write
theOverwrittenOneHasWrittenItsChanges.await();
// And we broke it, since releasing this row will overwrite whatever theOverwrittenOne wrote
pool.release( row );
}
catch( Exception e)
{
throw new RuntimeException( e );
}
}
});
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_nioneo_store_PersistenceRowAndWindowDirtyWriteIT.java
|
1,207
|
/**
 * Tests for {@link MappedPersistenceWindow#writeOutAndCloseIfFree(boolean)}: the window may
 * only close itself when it is neither marked as in use nor locked by another thread.
 */
public class MappedPersistenceWindowTest
{
    private static final TargetDirectory target = TargetDirectory.forTest( MappedPersistenceWindowTest.class );

    @Rule
    public final TargetDirectory.TestDirectory directory = target.testDirectory();
    @Rule
    public final ResourceCollection resources = new ResourceCollection();

    @Test
    public void shouldCloseUnusedWindow() throws Exception
    {
        // given: a freshly mapped window that nobody has marked or locked
        String path = new File( directory.directory(), "mapped.file" ).getAbsolutePath();
        RandomAccessFile raf = resources.add( new RandomAccessFile( path, "rw" ) );
        StoreChannel storeChannel = new StoreFileChannel( raf.getChannel() );
        MappedPersistenceWindow persistenceWindow = new MappedPersistenceWindow( 0, 8, 16, storeChannel, READ_WRITE );

        // when
        boolean closed = persistenceWindow.writeOutAndCloseIfFree( false );
        raf.close();

        // then: an unused window is free to close
        assertTrue( closed );
    }

    @Test
    public void shouldNotCloseMarkedWindow() throws Exception
    {
        // given: a window marked as in use
        String path = new File( directory.directory(), "mapped.file" ).getAbsolutePath();
        RandomAccessFile raf = resources.add( new RandomAccessFile( path, "rw" ) );
        StoreChannel storeChannel = new StoreFileChannel( raf.getChannel() );
        MappedPersistenceWindow persistenceWindow = new MappedPersistenceWindow( 0, 8, 16, storeChannel, READ_WRITE );
        persistenceWindow.markAsInUse();

        // when
        boolean closed = persistenceWindow.writeOutAndCloseIfFree( false );
        raf.close();

        // then: the in-use mark must keep the window open
        assertFalse( closed );
    }

    @Test
    public void shouldNotCloseLockedWindow() throws Exception
    {
        // given: a window write-locked from a different thread
        String path = new File( directory.directory(), "mapped.file" ).getAbsolutePath();
        RandomAccessFile raf = resources.add( new RandomAccessFile( path, "rw" ) );
        StoreChannel storeChannel = new StoreFileChannel( raf.getChannel() );
        final MappedPersistenceWindow persistenceWindow = new MappedPersistenceWindow( 0, 8, 16, storeChannel, READ_WRITE );
        persistenceWindow.markAsInUse();
        OtherThreadExecutor<Void> executor = new OtherThreadExecutor<>( "other thread", null );
        executor.execute( new WorkerCommand<Void, Void>()
        {
            @Override
            public Void doWork( Void state )
            {
                persistenceWindow.lock( OperationType.WRITE );
                return null;
            }
        } );

        // when
        boolean closed = persistenceWindow.writeOutAndCloseIfFree( false );
        raf.close();

        // then: the foreign lock must keep the window open
        assertFalse( closed );
        executor.close();
    }

    @Test
    public void shouldCloseReleasedWindow() throws Exception
    {
        // given: a window that was locked and then fully released again
        String path = new File( directory.directory(), "mapped.file" ).getAbsolutePath();
        RandomAccessFile raf = resources.add( new RandomAccessFile( path, "rw" ) );
        StoreChannel storeChannel = new StoreFileChannel( raf.getChannel() );
        MappedPersistenceWindow persistenceWindow = new MappedPersistenceWindow( 0, 8, 16, storeChannel, READ_WRITE );
        persistenceWindow.markAsInUse();
        persistenceWindow.lock( OperationType.WRITE );
        persistenceWindow.unLock();

        // when
        boolean closed = persistenceWindow.writeOutAndCloseIfFree( false );
        raf.close();

        // then: once released the window may close
        assertTrue( closed );
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_nioneo_store_MappedPersistenceWindowTest.java
|
1,208
|
{
@Override
public void run()
{
/*
* When we attempt to grab a brick for the first time, we always immediately memory map it if there is
* enough memory. But, we don't want to grab a window for position 1. We need a row. So, we ask for
* a position that is in the second brick of the pool (we create two in total, the first has our
* contended position, the other is the dummy we create now). That will cause a copy of the brick
* array which will get rid of the mapping for position 1.
* Look into PersistenceWindowPool.acquire() and PWP.expandBricks() for a better understanding.
*/
pool.release( pool.acquire( 13, OperationType.READ ) );
/*
* For the bug to be triggered, we'll need the position to be memory mapped. That happens only if
* enough hits have happened on the brick that covers that position. We will ask for that position
* 3 times in this test, the third of which must return a window and the first two a row. We set things
* up so that this happens here
*/
for ( int i = 0; i < PersistenceWindowPool.REFRESH_BRICK_COUNT - 3; i++ )
{
pool.release( pool.acquire( 1, OperationType.READ ) );
}
// This will grab and write lock a row, marking it as dirty
PersistenceRow row = (PersistenceRow) pool.acquire( 1, OperationType.WRITE );
row.getOffsettedBuffer( 1 ).put( new byte[]{1, 2, 3, 4} );
// Do not release, theEvilOne must mark it first
waitForBreakingToAcquire();
// Release, now it's in the hands of theEvilOne
pool.release( row );
// done
}
});
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_nioneo_store_PersistenceRowAndWindowDirtyWriteIT.java
|
1,209
|
@RunWith(SubProcessTestRunner.class)
@ForeignBreakpoints({
        @ForeignBreakpoints.BreakpointDef(type = "org.neo4j.kernel.impl.nioneo.store.PersistenceRow",
                method = "lock", on = BreakPoint.Event.ENTRY ) })
/*
 * If this test hangs, it's because a deadlock has happened between the threads. If, for example, the refreshBricks
 * does not happen in theOverwrittenOne then it will deadlock with the evil one. Look for that sort of thing if
 * this test starts breaking.
 */
@Ignore("Fix pending")
public class PersistenceRowAndWindowDirtyWriteIT
{
    @Test
    @EnabledBreakpoints( { "waitForFirstWriterToWrite", "waitForBreakingToAcquire" } )
    public void theTest() throws Exception
    {
        /*
         * If you want to change or fix this test, you sort of have to read what follows.
         *
         * I can hear you, exasperated, giving up all hope, thinking "Oh noes! Another one of those tests".
         * I know.
         * I understand.
         * I sympathise.
         * After all, I wrote it.
         * So let me, dear reader, to try and show you what is going on here.
         * The bug is the following: A thread, let's call it theTriggeringOne, will read in a record through a
         * PersistentRow. It locks it, it writes stuff to it. In the mean time, before theTriggeringOne releases the lock,
         * another thread, we name it affectionately theEvilOne, will try to *read* (hence, no locks on the object)
         * the same record. It, too, will attempt to go to the same PersistentRow, stored now in the row map in
         * PersistenceWindowPool. It will mark it, but not lock it, because theTriggeringOne still holds the lock. Now,
         * theTriggeringOne releases the lock. It does not write out the changes though, because well, theEvilOne has it
         * already marked.
         * A third thread comes in now, which we'll call theOverwrittenOne. That one grabs a brick for the same position,
         * but in doing so it triggers a refresh of the bricks (actually, any thread could have done that, but this way
         * we don't need to introduce a 4th thread). That leave the contended file position being now memory mapped. Note
         * how we are suddenly in a world of crap, since we have one PersistenceWindow *and* a PersistenceRow, which
         * btw is dirty, for the same file position. I assume you can guess the rest. theOverwrittenOne will write
         * something, release and flush it, and theEvilOne will finish its read operation right after that, release
         * the row and then its contents will be written out, overwriting theOverwrittenOne's changes.
         *
         * Now, the test below reproduces the exact scenario above. The thread names are the same. Synchronization is
         * achieved mostly through latches. There is one point though where an external breakpoint is required, and
         * that is between theEvilOne marks and attempts to lock the row that theTriggeringOne already has locked.
         * The breakpoint is added on PersistenceRow.lock() and what it does is it resumes theEvilOne which is
         * suspended by theTriggeringOne on its startup. That's what the 3 breakpoint handlers do - one is for suspending
         * theEvilOne on startup, the other is for pausing theTriggeringOne before releasing the lock and the
         * lock handler allows theTriggeringOne to continue and release the lock when theEvilOne marks the row.
         *
         * The rest is details explained inline.
         */
        File dataFile = TargetDirectory.forTest( getClass() ).file( "dataFile" );
        StoreChannel dataFileChannel = new StoreFileChannel( new RandomAccessFile( dataFile, "rw" ).getChannel() );
        final PersistenceWindowPool pool =
                new PersistenceWindowPool( dataFile, 4, // record size
                        dataFileChannel,
                        50000, // memory available, must be at least that big for 2 windows to exist
                        true, // memory map?
                        false, // read only?
                        new ConcurrentHashMap<Long, PersistenceRow>(), BrickElementFactory.DEFAULT,
                        StringLogger.DEV_NULL );
        Thread theTriggeringOne = new Thread( new Runnable()
        {
            @Override
            public void run()
            {
                /*
                 * When we attempt to grab a brick for the first time, we always immediately memory map it if there is
                 * enough memory. But, we don't want to grab a window for position 1. We need a row. So, we ask for
                 * a position that is in the second brick of the pool (we create two in total, the first has our
                 * contended position, the other is the dummy we create now). That will cause a copy of the brick
                 * array which will get rid of the mapping for position 1.
                 * Look into PersistenceWindowPool.acquire() and PWP.expandBricks() for a better understanding.
                 */
                pool.release( pool.acquire( 13, OperationType.READ ) );
                /*
                 * For the bug to be triggered, we'll need the position to be memory mapped. That happens only if
                 * enough hits have happened on the brick that covers that position. We will ask for that position
                 * 3 times in this test, the third of which must return a window and the first two a row. We set things
                 * up so that this happens here
                 */
                for ( int i = 0; i < PersistenceWindowPool.REFRESH_BRICK_COUNT - 3; i++ )
                {
                    pool.release( pool.acquire( 1, OperationType.READ ) );
                }
                // This will grab and write lock a row, marking it as dirty
                PersistenceRow row = (PersistenceRow) pool.acquire( 1, OperationType.WRITE );
                row.getOffsettedBuffer( 1 ).put( new byte[]{1, 2, 3, 4} );
                // Do not release, theEvilOne must mark it first
                waitForBreakingToAcquire();
                // Release, now it's in the hands of theEvilOne
                pool.release( row );
                // done
            }
        });
        final CountDownLatch theOverwrittenOneHasWrittenItsChanges = new CountDownLatch( 1 );
        final CountDownLatch theBreakingOneHasLockedTheRow = new CountDownLatch( 1 );
        Thread theEvilOne = new Thread( new Runnable()
        {
            @Override
            public void run()
            {
                try
                {
                    // We will lock this row, but we must wait for theTriggeringOne to write to it
                    waitForFirstWriterToWrite();
                    /*
                     * This would deadlock, since theTriggeringOne waits for us to grab it before releasing, and
                     * we cannot grab it unless theTriggeringOne releases it. This is broken by the external
                     * breakpoint on PersistenceRow.lock()
                     */
                    PersistenceRow row = (PersistenceRow) pool.acquire( 1, OperationType.READ );
                    // And we allow theOverwrittenOne to refresh bricks and read in the memory mapped buffer
                    theBreakingOneHasLockedTheRow.countDown();
                    // Wait for it to write
                    theOverwrittenOneHasWrittenItsChanges.await();
                    // And we broke it, since releasing this row will overwrite whatever theOverwrittenOne wrote
                    pool.release( row );
                }
                catch( Exception e)
                {
                    throw new RuntimeException( e );
                }
            }
        });
        Thread theOverwrittenOne = new Thread( new Runnable()
        {
            @Override
            public void run()
            {
                try
                {
                    // Wait for theEvilOne to grab the lock on the row
                    theBreakingOneHasLockedTheRow.await();
                    /*
                     * Because of the setup theTriggeringOne did, this will do a refreshBricks() and read in a
                     * LockableWindow instead of a PersistenceRow.
                     */
                    LockableWindow window = (LockableWindow) pool.acquire( 1, OperationType.WRITE );
                    // Write the new stuff in - that will be overwritten by the flush when theEvilOne releases
                    window.getOffsettedBuffer( 1 ).put( new byte[]{5, 6, 7, 8} );
                    // Release the lock - not really necessary, just good form
                    pool.release( window );
                    // Allow theEvilOne to continue
                    theOverwrittenOneHasWrittenItsChanges.countDown();
                }
                catch ( Exception e )
                {
                    throw new RuntimeException( e );
                }
            }
        });
        theEvilOne.start();
        theOverwrittenOne.start();
        theTriggeringOne.start();
        theEvilOne.join();
        theTriggeringOne.join();
        theOverwrittenOne.join();
        byte[] finalResult = new byte[4];
        pool.acquire( 1, OperationType.READ ).getOffsettedBuffer( 1 ).get( finalResult );
        /*
         * This is the assertion. The content should be the ones that theOverwrittenOne wrote, as it locked position
         * 1 after theTriggeringOne had released it. All high level locks have been respected, but still the thread
         * that has happened-before the last lock grab for position 1 is applied last.
         */
        assertTrue( Arrays.toString( finalResult ), Arrays.equals( new byte[]{5, 6, 7, 8}, finalResult ) );
        pool.close();
    }

    // Debug stuff
    // Handles on the debugged (sub-process) threads, set and consumed by the breakpoint
    // handlers below; static because the handlers are invoked statically by the runner.
    private static DebuggedThread theTriggeringOne;
    private static DebuggedThread theEvilOne;

    // Marker method: the breakpoint on it suspends theEvilOne at startup.
    @BreakpointTrigger("waitForFirstWriterToWrite")
    public void waitForFirstWriterToWrite()
    {}

    @BreakpointHandler("waitForFirstWriterToWrite")
    public static void waitForFirstWriterToWriteHandler( BreakPoint self, DebugInterface di )
    {
        theEvilOne = di.thread().suspend( null );
        self.disable();
    }

    // Marker method: the breakpoint on it pauses theTriggeringOne before it releases the lock.
    @BreakpointTrigger("waitForBreakingToAcquire")
    public void waitForBreakingToAcquire()
    {}

    @BreakpointHandler("waitForBreakingToAcquire")
    public static void waitForBreakingToAcquireHandler( BreakPoint self, DebugInterface di,
            @BreakpointHandler("lock") BreakPoint onWindowLock )
    {
        // Let theEvilOne run into PersistenceRow.lock(), then park theTriggeringOne until then.
        theEvilOne.resume();
        theTriggeringOne = di.thread().suspend( null );
        onWindowLock.enable();
        self.disable();
    }

    // Fires when theEvilOne enters PersistenceRow.lock(): lets theTriggeringOne release the lock.
    @BreakpointHandler( "lock" )
    public static void lockHandler( BreakPoint self, DebugInterface di )
    {
        theTriggeringOne.resume();
        self.disable();
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_nioneo_store_PersistenceRowAndWindowDirtyWriteIT.java
|
1,210
|
// DIRTY: the in-memory buffer holds changes not yet written back; it stays dirty for any
// further operation (absorbing sink state) until explicitly cleaned elsewhere.
DIRTY
{
    @Override
    State transition( OperationType operationType, PersistenceRow persistenceRow )
    {
        return DIRTY;
    }
};
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_store_PersistenceRow.java
|
1,211
|
// CLEAN: buffer content matches disk. Reads keep it clean; a write marks it dirty.
CLEAN
{
    @Override
    State transition( OperationType operationType, PersistenceRow persistenceRow )
    {
        switch ( operationType)
        {
        case READ:
            return CLEAN;
        case WRITE:
            return DIRTY;
        default:
            throw new IllegalStateException( "Unknown operation type: " + operationType );
        }
    }
},
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_store_PersistenceRow.java
|
1,212
|
// EMPTY: nothing loaded yet. A read first loads the record from disk and moves to CLEAN;
// a write skips the load (content will be overwritten anyway) and moves straight to DIRTY.
EMPTY
{
    @Override
    State transition( OperationType operationType, PersistenceRow persistenceRow )
    {
        switch ( operationType)
        {
        case READ:
            persistenceRow.readFullWindow();
            return CLEAN;
        case WRITE:
            return DIRTY;
        default:
            throw new IllegalStateException( "Unknown operation type: " + operationType );
        }
    }
},
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_store_PersistenceRow.java
|
1,213
|
/**
 * A persistence window covering exactly one record, backed by a heap {@link ByteBuffer}
 * instead of a memory-mapped region. Keeps a small EMPTY/CLEAN/DIRTY state machine so the
 * record is loaded lazily on first read and written back on {@link #force()}.
 */
public class PersistenceRow extends LockableWindow
{
    // EMPTY until first lock; CLEAN after a read has loaded from disk; DIRTY after a write lock.
    private State bufferState = State.EMPTY;
    private int recordSize = -1;
    private final long position;
    private final Buffer buffer;

    PersistenceRow( long position, int recordSize, StoreChannel channel )
    {
        super( channel );
        assert position >= 0 : "Illegal position[" + position + "]";
        assert recordSize > 0 : "Illegal recordSize[" + recordSize + "]";
        assert channel != null : "Null file channel";
        this.position = position;
        this.recordSize = recordSize;
        this.buffer = new Buffer( this, ByteBuffer.allocate( recordSize ) );
        markAsInUse();
    }

    /**
     * Takes the lock and advances the buffer state for the given operation (possibly loading
     * the record from disk, see {@link State}). If the transition throws, the lock is
     * released again so the row is never left locked in an inconsistent state.
     */
    @Override
    void lock( OperationType operationType )
    {
        super.lock( operationType );
        boolean success = false;
        try
        {
            bufferState = bufferState.transition( operationType, this );
            success = true;
        }
        finally
        {
            if ( !success )
            {
                unLock();
            }
        }
    }

    /** Whether the buffer holds changes not yet written back to disk. */
    public boolean isDirty()
    {
        return bufferState == State.DIRTY;
    }

    @Override
    public Buffer getBuffer()
    {
        return buffer;
    }

    @Override
    public int getRecordSize()
    {
        return recordSize;
    }

    /**
     * Returns the single-record buffer; since this window covers exactly one record, the id
     * must match the row's position.
     *
     * @throws InvalidRecordException if the id does not match this row's position.
     */
    @Override
    public Buffer getOffsettedBuffer( long id )
    {
        if ( id != buffer.position() )
        {
            throw new InvalidRecordException( "Id[" + id +
                    "] not equal to buffer position[" + buffer.position() + "]" );
        }
        return buffer;
    }

    @Override
    public long position()
    {
        return position;
    }

    /** Buffer lifecycle state machine; transitions are driven by {@link #lock(OperationType)}. */
    private static enum State
    {
        EMPTY
        {
            @Override
            State transition( OperationType operationType, PersistenceRow persistenceRow )
            {
                switch ( operationType)
                {
                case READ:
                    // First read: load the record content from disk.
                    persistenceRow.readFullWindow();
                    return CLEAN;
                case WRITE:
                    return DIRTY;
                default:
                    throw new IllegalStateException( "Unknown operation type: " + operationType );
                }
            }
        },
        CLEAN
        {
            @Override
            State transition( OperationType operationType, PersistenceRow persistenceRow )
            {
                switch ( operationType)
                {
                case READ:
                    return CLEAN;
                case WRITE:
                    return DIRTY;
                default:
                    throw new IllegalStateException( "Unknown operation type: " + operationType );
                }
            }
        },
        DIRTY
        {
            @Override
            State transition( OperationType operationType, PersistenceRow persistenceRow )
            {
                // Absorbing state: stays dirty until setClean() is called after a flush.
                return DIRTY;
            }
        };

        abstract State transition( OperationType operationType, PersistenceRow persistenceRow );
    }

    /**
     * Reads this row's record from the channel into the buffer. The clear() before positions
     * the buffer for the read; the clear() after resets it for the caller.
     */
    void readFullWindow()
    {
        try
        {
            ByteBuffer byteBuffer = buffer.getBuffer();
            byteBuffer.clear();
            getFileChannel().read( byteBuffer, position * recordSize );
            byteBuffer.clear();
        }
        catch ( IOException e )
        {
            throw new UnderlyingStorageException( "Unable to load position["
                    + position + "] @[" + position * recordSize + "]", e );
        }
    }

    @Override
    synchronized void setClean()
    {
        super.setClean();
        bufferState = State.CLEAN;
    }

    /**
     * Writes the buffer out to the record's file offset, looping until all bytes are written,
     * then marks the row clean. A no-op when the row is not dirty. Works on a duplicate so
     * the shared buffer's position/limit are untouched.
     */
    private void writeContents()
    {
        if ( isDirty() )
        {
            ByteBuffer byteBuffer = buffer.getBuffer().duplicate();
            byteBuffer.clear();
            try
            {
                int written = 0;
                while ( byteBuffer.hasRemaining() )
                {
                    int writtenThisTime = getFileChannel().write( byteBuffer, position * recordSize + written );
                    if ( writtenThisTime == 0 )
                    {
                        // Guard against an endless loop if the channel refuses to make progress.
                        throw new IOException( "Unable to write to disk, reported bytes written was 0" );
                    }
                    written += writtenThisTime;
                }
            }
            catch ( IOException e )
            {
                throw new UnderlyingStorageException( "Unable to write record["
                        + position + "] @[" + position * recordSize + "]", e );
            }
            setClean();
        }
    }

    /** A row always covers exactly one record. */
    @Override
    public int size()
    {
        return 1;
    }

    /** Flushes dirty content to disk (delegates to {@link #writeContents()}). */
    @Override
    public void force()
    {
        writeContents();
    }

    @Override
    public boolean equals( Object o )
    {
        if ( !(o instanceof PersistenceRow) )
        {
            return false;
        }
        return position() == ((PersistenceRow) o).position();
    }

    @Override
    public int hashCode()
    {
        return (int) this.position;
    }

    @Override
    public String toString()
    {
        return "PersistenceRow[" + position + "]";
    }

    // NOTE(review): close() does not flush dirty content; callers are expected to force()
    // first — confirm against PersistenceWindowPool's release path.
    @Override
    public synchronized void close()
    {
        buffer.close();
        closed = true;
    }

    public void reset()
    {
        buffer.reset();
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_store_PersistenceRow.java
|
1,214
|
/**
 * Thrown when a store file carries a version other than the one this code expects.
 * Also reports whether the store can be upgraded automatically.
 */
public class NotCurrentStoreVersionException extends StoreFailureException
{
    private final boolean possibleToAutomaticallyUpgrade;

    public NotCurrentStoreVersionException( String expectedVersion, String foundVersion, String msg,
            boolean possibleToAutomaticallyUpgrade )
    {
        super( buildMessage( expectedVersion, foundVersion, msg, possibleToAutomaticallyUpgrade ) );
        this.possibleToAutomaticallyUpgrade = possibleToAutomaticallyUpgrade;
    }

    // Builds the exception message, mentioning whether an automatic upgrade is possible.
    private static String buildMessage( String expectedVersion, String foundVersion, String msg,
            boolean possibleToAutomaticallyUpgrade )
    {
        String upgradeVerb = possibleToAutomaticallyUpgrade ? "can" : "cannot";
        return String.format( "Was expecting store version [%s] but found [%s]. Store %s be upgraded automatically. ",
                expectedVersion, foundVersion, upgradeVerb ) + msg;
    }

    /** Whether the store found can be upgraded to the expected version automatically. */
    public boolean possibleToAutomaticallyUpgrade()
    {
        return possibleToAutomaticallyUpgrade;
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_store_NotCurrentStoreVersionException.java
|
1,215
|
public class NodeStoreTest
{
@Test
public void shouldReadFirstFromSingleRecordDynamicLongArray() throws Exception
{
// GIVEN
Long expectedId = 12l;
long[] ids = new long[] { expectedId, 23l, 42l };
DynamicRecord firstRecord = new DynamicRecord( 0l );
List<DynamicRecord> dynamicRecords = asList( firstRecord );
allocateFromNumbers( ids, dynamicRecords.iterator(), new PreAllocatedRecords( 60 ) );
// WHEN
Long firstId = readOwnerFromDynamicLabelsRecord( firstRecord );
// THEN
assertEquals( expectedId, firstId );
}
@Test
public void shouldReadFirstAsNullFromEmptyDynamicLongArray() throws Exception
{
// GIVEN
Long expectedId = null;
long[] ids = new long[] { };
DynamicRecord firstRecord = new DynamicRecord( 0l );
List<DynamicRecord> dynamicRecords = asList( firstRecord );
allocateFromNumbers( ids, dynamicRecords.iterator(), new PreAllocatedRecords( 60 ) );
// WHEN
Long firstId = readOwnerFromDynamicLabelsRecord( firstRecord );
// THEN
assertEquals( expectedId, firstId );
}
@Test
public void shouldReadFirstFromTwoRecordDynamicLongArray() throws Exception
{
// GIVEN
Long expectedId = 12l;
long[] ids = new long[] { expectedId, 1l, 2l, 3l, 4l, 5l, 6l, 7l, 8l, 9l, 10l, 11l };
DynamicRecord firstRecord = new DynamicRecord( 0l );
List<DynamicRecord> dynamicRecords = asList( firstRecord, new DynamicRecord( 1l ) );
allocateFromNumbers( ids, dynamicRecords.iterator(), new PreAllocatedRecords( 8 ) );
// WHEN
Long firstId = readOwnerFromDynamicLabelsRecord( firstRecord );
// THEN
assertEquals( expectedId, firstId );
}
@Test
public void shouldCombineProperFiveByteLabelField() throws Exception
{
    // GIVEN
    // -- a store backed by an in-memory file system
    EphemeralFileSystemAbstraction fs = new EphemeralFileSystemAbstraction();
    Config config = new Config();
    IdGeneratorFactory idGeneratorFactory = new DefaultIdGeneratorFactory();
    WindowPoolFactory windowPoolFactory = new DefaultWindowPoolFactory();
    StoreFactory factory = new StoreFactory( config, idGeneratorFactory, windowPoolFactory, fs, DEV_NULL, new DefaultTxHook() );
    File nodeStoreFileName = new File( "nodestore" );
    factory.createNodeStore( nodeStoreFileName );
    NodeStore nodeStore = factory.newNodeStore( nodeStoreFileName );
    // -- a record whose 5th (most significant) label byte has its high bit set,
    //    so that a sign-extending read would corrupt the value
    long nodeId = 0, labels = 0x8000000001L;
    NodeRecord record = new NodeRecord( nodeId, NO_NEXT_RELATIONSHIP.intValue(), NO_NEXT_PROPERTY.intValue() );
    record.setInUse( true );
    record.setLabelField( labels, Collections.<DynamicRecord>emptyList() );
    nodeStore.updateRecord( record );
    // WHEN
    // -- reading that record back
    NodeRecord readRecord = nodeStore.getRecord( nodeId );
    // THEN
    // -- the label field must round-trip unchanged
    assertEquals( labels, readRecord.getLabelField() );
    // CLEANUP
    nodeStore.close();
    fs.shutdown();
}
@Test
public void shouldKeepRecordLightWhenSettingLabelFieldWithoutDynamicRecords() throws Exception
{
    // GIVEN a freshly created node record with no relationships or properties
    NodeRecord node = new NodeRecord( 0, NO_NEXT_RELATIONSHIP.intValue(), NO_NEXT_PROPERTY.intValue() );

    // WHEN assigning an in-lined label field and attaching no dynamic records
    node.setLabelField( 0, Collections.<DynamicRecord>emptyList() );

    // THEN the record must still be considered light, so it can be fully
    // loaded later on if needed
    assertTrue( node.isLight() );
}
@Test
public void shouldMarkRecordHeavyWhenSettingLabelFieldWithDynamicRecords() throws Exception
{
    // GIVEN a freshly created node record
    NodeRecord node = new NodeRecord( 0, NO_NEXT_RELATIONSHIP.intValue(), NO_NEXT_PROPERTY.intValue() );

    // WHEN assigning a label field that points at a dynamic record chain
    DynamicRecord labelRecord = new DynamicRecord( 1 );
    node.setLabelField( 0x8000000001L, asList( labelRecord ) );

    // THEN attaching dynamic records must flip the record to heavy
    assertFalse( node.isLight() );
}
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_nioneo_store_NodeStoreTest.java
|
1,216
|
/**
 * Configuration settings for the node store; currently inherits everything
 * from {@link AbstractStore.Configuration} and adds nothing of its own.
 */
public static abstract class Configuration
    extends AbstractStore.Configuration
{
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_store_NodeStore.java
|
1,217
|
/**
 * Store implementation for node records. Each record is {@link #RECORD_SIZE}
 * bytes: in_use(1) + next_rel_id(4) + next_prop_id(4) + labels(5). Labels are
 * either in-lined in the 5-byte label field or, when too many to fit, kept in
 * the associated dynamic label store with the label field pointing at the
 * first dynamic record.
 */
public class NodeStore extends AbstractRecordStore<NodeRecord> implements Store
{
    /**
     * Reads the owning node id out of a dynamic label record. The owner is
     * stored as the first long of the label array (see
     * {@link #allocateRecordsForDynamicLabels(long, long[])}).
     *
     * @param record the first dynamic record of a label chain, with its data loaded.
     * @return the owner node id, or {@code null} if the record stores zero-bit items.
     */
    public static Long readOwnerFromDynamicLabelsRecord( DynamicRecord record )
    {
        byte[] data = record.getData();
        byte[] header = PropertyType.ARRAY.readDynamicRecordHeader( data );
        byte[] array = Arrays.copyOfRange( data, header.length, data.length );

        // Third header byte holds the number of bits per item in the array.
        int requiredBits = header[2];
        if ( requiredBits == 0 )
        {
            return null;
        }
        Bits bits = Bits.bitsFromBytes( array );
        return bits.getLong( requiredBits );
    }

    /** Configuration settings; nothing beyond {@link AbstractStore.Configuration}. */
    public static abstract class Configuration
        extends AbstractStore.Configuration
    {
    }

    public static final String TYPE_DESCRIPTOR = "NodeStore";

    // in_use(byte)+next_rel_id(int)+next_prop_id(int)+labels(5)
    public static final int RECORD_SIZE = 14;

    // Backing store for label sets too large to in-line in the label field.
    private DynamicArrayStore dynamicLabelStore;

    public NodeStore(File fileName, Config config,
                     IdGeneratorFactory idGeneratorFactory, WindowPoolFactory windowPoolFactory,
                     FileSystemAbstraction fileSystemAbstraction, StringLogger stringLogger,
                     DynamicArrayStore dynamicLabelStore )
    {
        super(fileName, config, IdType.NODE, idGeneratorFactory, windowPoolFactory, fileSystemAbstraction, stringLogger);
        this.dynamicLabelStore = dynamicLabelStore;
    }

    @Override
    public <FAILURE extends Exception> void accept( Processor<FAILURE> processor, NodeRecord record ) throws FAILURE
    {
        processor.processNode( this, record );
    }

    @Override
    public String getTypeDescriptor()
    {
        return TYPE_DESCRIPTOR;
    }

    @Override
    public int getRecordSize()
    {
        return RECORD_SIZE;
    }

    @Override
    public int getRecordHeaderSize()
    {
        return getRecordSize();
    }

    /** Loads any dynamic label records referenced by the node's label field. */
    public void ensureHeavy( NodeRecord node )
    {
        parseLabelsField( node ).ensureHeavy( this );
    }

    /**
     * Loads the dynamic label chain starting at {@code firstDynamicLabelRecord}
     * into the node record. No-op if the record is already heavy.
     */
    public void ensureHeavy( NodeRecord node, long firstDynamicLabelRecord )
    {
        if ( !node.isLight() )
        {
            return;
        }

        // Load any dynamic labels and populate the node record
        node.setLabelField( node.getLabelField(), dynamicLabelStore.getRecords( firstDynamicLabelRecord ) );
    }

    @Override
    public NodeRecord getRecord( long id )
    {
        PersistenceWindow window = acquireWindow( id, OperationType.READ );
        try
        {
            return getRecord( id, window, RecordLoad.NORMAL );
        }
        finally
        {
            releaseWindow( window );
        }
    }

    @Override
    public NodeRecord forceGetRecord( long id )
    {
        PersistenceWindow window;
        try
        {
            window = acquireWindow( id, OperationType.READ );
        }
        catch ( InvalidRecordException e )
        {
            // Id beyond the end of the store: fabricate an unused record.
            return new NodeRecord( id, Record.NO_NEXT_RELATIONSHIP.intValue(), Record.NO_NEXT_PROPERTY.intValue() ); // inUse=false by default
        }
        try
        {
            return getRecord( id, window, RecordLoad.FORCE );
        }
        finally
        {
            releaseWindow( window );
        }
    }

    @Override
    public NodeRecord forceGetRaw( NodeRecord record )
    {
        return record;
    }

    @Override
    public NodeRecord forceGetRaw( long id )
    {
        return forceGetRecord( id );
    }

    @Override
    public void forceUpdateRecord( NodeRecord record )
    {
        PersistenceWindow window = acquireWindow( record.getId(),
            OperationType.WRITE );
        try
        {
            updateRecord( record, window, true );
        }
        finally
        {
            releaseWindow( window );
        }
    }

    @Override
    public void updateRecord( NodeRecord record )
    {
        PersistenceWindow window = acquireWindow( record.getId(),
            OperationType.WRITE );
        try
        {
            updateRecord( record, window, false );
        }
        finally
        {
            releaseWindow( window );
        }
    }

    /**
     * Loads a record if it exists and is in use, otherwise returns
     * {@code null} (both for ids beyond the store and for unused records).
     */
    public NodeRecord loadLightNode( long id )
    {
        PersistenceWindow window;
        try
        {
            window = acquireWindow( id, OperationType.READ );
        }
        catch ( InvalidRecordException e )
        {
            // ok id to high
            return null;
        }

        try
        {
            return getRecord( id, window, RecordLoad.CHECK );
        }
        finally
        {
            releaseWindow( window );
        }
    }

    private NodeRecord getRecord( long id, PersistenceWindow window,
        RecordLoad load )
    {
        Buffer buffer = window.getOffsettedBuffer( id );

        // [    ,   x] in use bit
        // [    ,xxx ] higher bits for rel id
        // [xxxx,    ] higher bits for prop id
        long inUseByte = buffer.get();

        boolean inUse = (inUseByte & 0x1) == Record.IN_USE.intValue();
        if ( !inUse )
        {
            switch ( load )
            {
            case NORMAL:
                throw new InvalidRecordException( "NodeRecord[" + id + "] not in use" );
            case CHECK:
                return null;
            case FORCE:
                break;
            }
        }

        long nextRel = buffer.getUnsignedInt();
        long nextProp = buffer.getUnsignedInt();

        // Reassemble the 35-bit relationship and 36-bit property pointers from
        // their low 32 bits plus the high bits packed into the in-use byte.
        long relModifier = (inUseByte & 0xEL) << 31;
        long propModifier = (inUseByte & 0xF0L) << 28;

        long lsbLabels = buffer.getUnsignedInt();
        long hsbLabels = buffer.get() & 0xFF; // so that a negative byte won't fill the "extended" bits with ones.
        long labels = lsbLabels | (hsbLabels << 32);

        NodeRecord nodeRecord = new NodeRecord( id, longFromIntAndMod( nextRel, relModifier ),
            longFromIntAndMod( nextProp, propModifier ) );
        nodeRecord.setInUse( inUse );
        nodeRecord.setLabelField( labels, Collections.<DynamicRecord>emptyList() );

        return nodeRecord;
    }

    private void updateRecord( NodeRecord record, PersistenceWindow window, boolean force )
    {
        long id = record.getId();
        registerIdFromUpdateRecord( id );
        Buffer buffer = window.getOffsettedBuffer( id );
        if ( record.inUse() || force )
        {
            long nextRel = record.getNextRel();
            long nextProp = record.getNextProp();

            // Pack the high bits of the pointers into the in-use byte; the
            // NO_NEXT sentinels (-1) must not contribute any modifier bits.
            short relModifier = nextRel == Record.NO_NEXT_RELATIONSHIP.intValue() ? 0 : (short)((nextRel & 0x700000000L) >> 31);
            short propModifier = nextProp == Record.NO_NEXT_PROPERTY.intValue() ? 0 : (short)((nextProp & 0xF00000000L) >> 28);

            // [    ,   x] in use bit
            // [    ,xxx ] higher bits for rel id
            // [xxxx,    ] higher bits for prop id
            short inUseUnsignedByte = ( record.inUse() ? Record.IN_USE : Record.NOT_IN_USE ).byteValue();
            inUseUnsignedByte = (short) ( inUseUnsignedByte | relModifier | propModifier );
            buffer.put( (byte) inUseUnsignedByte ).putInt( (int) nextRel ).putInt( (int) nextProp );

            // lsb of labels
            long labelField = record.getLabelField();
            buffer.putInt( (int) labelField );
            // msb of labels
            buffer.put( (byte) ((labelField&0xFF00000000L) >> 32) );
        }
        else
        {
            buffer.put( Record.NOT_IN_USE.byteValue() );
            if ( !isInRecoveryMode() )
            {
                freeId( id );
            }
        }
    }

    @Override
    public List<WindowPoolStats> getAllWindowPoolStats()
    {
        List<WindowPoolStats> list = new ArrayList<>();
        list.add( getWindowPoolStats() );
        return list;
    }

    public DynamicArrayStore getDynamicLabelStore()
    {
        return dynamicLabelStore;
    }

    @Override
    protected void closeStorage()
    {
        if ( dynamicLabelStore != null )
        {
            dynamicLabelStore.close();
            dynamicLabelStore = null;
        }
    }

    /**
     * Allocates dynamic records for storing {@code labels}, prepending the
     * owning node id so ownership can be recovered from the records alone.
     */
    public Collection<DynamicRecord> allocateRecordsForDynamicLabels( long nodeId, long[] labels )
    {
        return allocateRecordsForDynamicLabels( nodeId, labels, Collections.<DynamicRecord>emptyList().iterator() );
    }

    /**
     * Like {@link #allocateRecordsForDynamicLabels(long, long[])}, reusing the
     * records from {@code useFirst} before allocating fresh ones.
     */
    public Collection<DynamicRecord> allocateRecordsForDynamicLabels( long nodeId, long[] labels,
                                                                      Iterator<DynamicRecord> useFirst )
    {
        long[] storedLongs = LabelIdArray.prependNodeId( nodeId, labels );
        return dynamicLabelStore.allocateRecords( storedLongs, useFirst );
    }

    /** Reads label ids back out of a dynamic record chain, dropping the owner id. */
    public long[] getDynamicLabelsArray( Iterable<DynamicRecord> records )
    {
        long[] storedLongs = (long[])
            DynamicArrayStore.getRightArray( dynamicLabelStore.readFullByteArray( records, PropertyType.ARRAY ) );
        return LabelIdArray.stripNodeId( storedLongs );
    }

    /** Like {@link #getDynamicLabelsArray(Iterable)}, but for already-loaded (heavy) records. */
    public static long[] getDynamicLabelsArrayFromHeavyRecords( Iterable<DynamicRecord> records )
    {
        long[] storedLongs = (long[])
            DynamicArrayStore.getRightArray( readFullByteArrayFromHeavyRecords( records, PropertyType.ARRAY ) );
        return LabelIdArray.stripNodeId( storedLongs );
    }

    /** Reads both the owning node id (first stored long) and the label ids. */
    public Pair<Long, long[]> getDynamicLabelsArrayAndOwner( Iterable<DynamicRecord> records )
    {
        long[] storedLongs = (long[])
            DynamicArrayStore.getRightArray( dynamicLabelStore.readFullByteArray( records, PropertyType.ARRAY ) );
        return Pair.of(storedLongs[0], LabelIdArray.stripNodeId( storedLongs ));
    }

    public void updateDynamicLabelRecords( Iterable<DynamicRecord> dynamicLabelRecords )
    {
        for ( DynamicRecord record : dynamicLabelRecords )
        {
            dynamicLabelStore.updateRecord( record );
        }
    }

    // The lifecycle methods below must keep the dynamic label store in
    // lock-step with this store.

    @Override
    protected void setRecovered()
    {
        dynamicLabelStore.setRecovered();
        super.setRecovered();
    }

    @Override
    protected void unsetRecovered()
    {
        dynamicLabelStore.unsetRecovered();
        super.unsetRecovered();
    }

    @Override
    public void makeStoreOk()
    {
        dynamicLabelStore.makeStoreOk();
        super.makeStoreOk();
    }

    @Override
    public void rebuildIdGenerators()
    {
        dynamicLabelStore.rebuildIdGenerators();
        super.rebuildIdGenerators();
    }

    protected void updateIdGenerators()
    {
        dynamicLabelStore.updateHighId();
        super.updateHighId();
    }

    @Override
    public void flushAll()
    {
        dynamicLabelStore.flushAll();
        super.flushAll();
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_store_NodeStore.java
|
1,218
|
public class NodeRecordTest
{
    @Test
    public void cloneShouldProduceExactCopy() throws Exception
    {
        // Given a fully populated, in-use node record with dynamic label records
        long relId = 1337l;
        long propId = 1338l;
        long inlinedLabels = 12l;
        NodeRecord node = new NodeRecord( 1l, relId, propId );
        node.setLabelField( inlinedLabels, asList( new DynamicRecord( 1l ), new DynamicRecord( 2l ) ) );
        node.setInUse( true );

        // When
        NodeRecord clone = node.clone();

        // Then every field, including the attached dynamic records, matches
        assertEquals(node.inUse(), clone.inUse());
        assertEquals(node.getLabelField(), clone.getLabelField());
        assertEquals(node.getNextProp(), clone.getNextProp());
        assertEquals(node.getNextRel(), clone.getNextRel());

        assertThat( clone.getDynamicLabelRecords(), equalTo(node.getDynamicLabelRecords()) );
    }

    @Test
    public void shouldListLabelRecordsInUse() throws Exception
    {
        // Given a node with three dynamic label records, one of which is
        // subsequently marked as no longer in use
        NodeRecord node = new NodeRecord( 1, -1, -1 );
        long inlinedLabels = 12l;
        DynamicRecord dynamic1 = dynamicRecord( 1l, true );
        DynamicRecord dynamic2 = dynamicRecord( 2l, true );
        DynamicRecord dynamic3 = dynamicRecord( 3l, true );

        node.setLabelField( inlinedLabels, asList( dynamic1, dynamic2, dynamic3 ) );

        dynamic3.setInUse( false );

        // When
        Iterable<DynamicRecord> usedRecords = node.getUsedDynamicLabelRecords();

        // Then only the in-use records are returned
        assertThat(toList(usedRecords), equalTo(asList( dynamic1, dynamic2 )));
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_nioneo_store_NodeRecordTest.java
|
1,219
|
{
@Override
public boolean accept( DynamicRecord item )
{
return item.inUse();
}
};
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_store_NodeRecord.java
|
1,220
|
/**
 * In-memory representation of a node store record: pointers to the first
 * relationship and first property, plus a 5-byte label field that either
 * in-lines label ids or points at a chain of dynamic label records.
 */
public class NodeRecord extends PrimitiveRecord
{
    // Relationship pointer as it was when loaded, used for change tracking.
    private final long committedNextRel;
    private long nextRel;
    private long labels;
    private Collection<DynamicRecord> dynamicLabelRecords = emptyList();
    // "Light" means any dynamic label records have not been loaded yet.
    private boolean isLight = true;

    public NodeRecord( long id, long nextRel, long nextProp )
    {
        super( id, nextProp );
        this.committedNextRel = this.nextRel = nextRel;
    }

    public long getNextRel()
    {
        return nextRel;
    }

    public void setNextRel( long nextRel )
    {
        this.nextRel = nextRel;
    }

    /**
     * Returns the relationship pointer as last committed, or the NO_NEXT
     * sentinel for records created in the current transaction.
     */
    public long getCommittedNextRel()
    {
        return isCreated() ? Record.NO_NEXT_RELATIONSHIP.intValue() : committedNextRel;
    }

    /**
     * Sets the label field to a pointer to the first changed dynamic record. All changed
     * dynamic records by doing this are supplied here.
     *
     * @param labels this will be either in-lined labels, or an id where to get the labels
     * @param dynamicRecords all changed dynamic records by doing this.
     */
    public void setLabelField( long labels, Collection<DynamicRecord> dynamicRecords )
    {
        this.labels = labels;
        this.dynamicLabelRecords = dynamicRecords;

        // Only mark it as heavy if there are dynamic records, since there's a possibility that we just
        // loaded a light version of the node record where this method was called for setting the label field.
        // Keeping it as light in this case would make it possible to load it fully later on.
        this.isLight = dynamicRecords.isEmpty();
    }

    public long getLabelField()
    {
        return this.labels;
    }

    public boolean isLight()
    {
        return isLight;
    }

    public Collection<DynamicRecord> getDynamicLabelRecords()
    {
        return this.dynamicLabelRecords;
    }

    /** Returns only those dynamic label records still marked as in use. */
    public Iterable<DynamicRecord> getUsedDynamicLabelRecords()
    {
        return filter( RECORD_IN_USE, dynamicLabelRecords );
    }

    @Override
    public String toString()
    {
        StringBuilder builder = new StringBuilder( "Node[" ).append( getId() )
            .append( ",used=" ).append( inUse() )
            .append( ",rel=" ).append( nextRel )
            .append( ",prop=" ).append( getNextProp() )
            .append( ",labels=" ).append( parseLabelsField( this ) )
            .append( "," ).append( isLight ? "light" : "heavy" );
        if ( !isLight && !dynamicLabelRecords.isEmpty() )
        {
            builder.append( ",dynlabels=" ).append( dynamicLabelRecords );
        }
        return builder.append( "]" ).toString();
    }

    @Override
    public void setIdTo( PropertyRecord property )
    {
        property.setNodeId( getId() );
    }

    /**
     * Produces a deep copy: the dynamic label records are cloned as well, so
     * mutating the clone cannot affect this record.
     */
    @Override
    public NodeRecord clone()
    {
        NodeRecord clone = new NodeRecord( getId(), getCommittedNextRel(), getCommittedNextProp() );
        clone.setNextProp( getNextProp() );
        clone.nextRel = nextRel;
        clone.labels = labels;
        clone.isLight = isLight;
        clone.setInUse( inUse() );
        if( dynamicLabelRecords.size() > 0 )
        {
            List<DynamicRecord> clonedLabelRecords = new ArrayList<>(dynamicLabelRecords.size());
            for ( DynamicRecord labelRecord : dynamicLabelRecords )
            {
                clonedLabelRecords.add( labelRecord.clone() );
            }
            clone.dynamicLabelRecords = clonedLabelRecords;
        }
        return clone;
    }

    /** Accepts only dynamic records whose in-use flag is set. */
    public static final Predicate<DynamicRecord> RECORD_IN_USE = new Predicate<DynamicRecord>()
    {
        @Override
        public boolean accept( DynamicRecord item )
        {
            return item.inUse();
        }
    };
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_store_NodeRecord.java
|
1,221
|
/**
 * Record representation of the single neostore meta-record. It has no real id
 * (always -1), is always in use, and only carries the graph-wide property
 * chain pointer inherited from {@link PrimitiveRecord}.
 */
public class NeoStoreRecord extends PrimitiveRecord
{
    public NeoStoreRecord()
    {
        super( -1, Record.NO_NEXT_PROPERTY.intValue() );
        setInUse( true );
    }

    @Override
    public String toString()
    {
        StringBuilder result = new StringBuilder( getClass().getSimpleName() );
        result.append( "[" )
              .append( "used=" ).append( inUse() )
              .append( ",nextProp=" ).append( getNextProp() )
              .append( "]" );
        return result.toString();
    }

    @Override
    public void setIdTo( PropertyRecord property )
    {
        // This record has no real id, so there is no owner to propagate.
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_store_NeoStoreRecord.java
|
1,222
|
/**
 * Configuration settings for the neostore meta-store. Adds the relationship
 * grab size (how many relationships to load per batch) on top of the common
 * store settings.
 */
public static abstract class Configuration
    extends AbstractStore.Configuration
{
    public static final Setting<Integer> relationship_grab_size = GraphDatabaseSettings.relationship_grab_size;
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_store_NeoStore.java
|
1,223
|
/**
 * The neostore meta-store. Holds a fixed set of {@link #RECORD_SIZE}-byte
 * records (in_use byte + long value) at well-known positions — creation time,
 * random store id, version, last committed tx, store version, graph property
 * chain and latest constraint-introducing tx — and owns all the sub-stores
 * (nodes, relationships, properties, tokens, schema), delegating lifecycle
 * operations to them.
 */
public class NeoStore extends AbstractStore
{
    public static abstract class Configuration
        extends AbstractStore.Configuration
    {
        public static final Setting<Integer> relationship_grab_size = GraphDatabaseSettings.relationship_grab_size;
    }

    public static final String TYPE_DESCRIPTOR = "NeoStore";

    /*
     * 7 longs in header (long + in use), time | random | version | txid | store version | graph next prop | latest constraint tx
     */
    public static final int RECORD_SIZE = 9;
    public static final String DEFAULT_NAME = "neostore";

    // Positions of meta-data records. Always use these instead of raw literals
    // so that readers and writers cannot drift apart.
    private static final int TIME_POSITION = 0;
    private static final int RANDOM_POSITION = 1;
    private static final int VERSION_POSITION = 2;
    private static final int LATEST_TX_POSITION = 3;
    private static final int STORE_VERSION_POSITION = 4;
    private static final int NEXT_GRAPH_PROP_POSITION = 5;
    private static final int LATEST_CONSTRAINT_TX_POSITION = 6;

    public static boolean isStorePresent( FileSystemAbstraction fs, Config config )
    {
        File neoStore = config.get( Configuration.neo_store );
        return fs.fileExists( neoStore );
    }

    private NodeStore nodeStore;
    private PropertyStore propStore;
    private RelationshipStore relStore;
    private RelationshipTypeTokenStore relTypeStore;
    private LabelTokenStore labelTokenStore;
    private SchemaStore schemaStore;
    private final RemoteTxHook txHook;
    // Cached copies of the corresponding records; -1 means "not read yet".
    private long lastCommittedTx = -1;
    private long latestConstraintIntroducingTx = -1;

    private final int REL_GRAB_SIZE;

    public NeoStore( File fileName, Config conf,
                     IdGeneratorFactory idGeneratorFactory, WindowPoolFactory windowPoolFactory,
                     FileSystemAbstraction fileSystemAbstraction,
                     StringLogger stringLogger, RemoteTxHook txHook,
                     RelationshipTypeTokenStore relTypeStore, LabelTokenStore labelTokenStore,
                     PropertyStore propStore, RelationshipStore relStore,
                     NodeStore nodeStore, SchemaStore schemaStore )
    {
        super( fileName, conf, IdType.NEOSTORE_BLOCK, idGeneratorFactory, windowPoolFactory,
               fileSystemAbstraction, stringLogger);
        this.relTypeStore = relTypeStore;
        this.labelTokenStore = labelTokenStore;
        this.propStore = propStore;
        this.relStore = relStore;
        this.nodeStore = nodeStore;
        this.schemaStore = schemaStore;
        REL_GRAB_SIZE = conf.get( Configuration.relationship_grab_size );
        this.txHook = txHook;

        /* [MP:2012-01-03] Fix for the problem in 1.5.M02 where store version got upgraded but
         * corresponding store version record was not added. That record was added in the release
         * thereafter so this missing record doesn't trigger an upgrade of the neostore file and so any
         * unclean shutdown on such a db with 1.5.M02 < neo4j version <= 1.6.M02 would make that
         * db unable to start for that version with a "Mismatching store version found" exception.
         *
         * This will make a cleanly shut down 1.5.M02, then started and cleanly shut down with 1.6.M03 (or higher)
         * successfully add the missing record.
         */
        setRecovered();
        try
        {
            if ( getCreationTime() != 0 /*Store that wasn't just now created*/ &&
                 getStoreVersion() == 0 /*Store is missing the store version record*/ )
            {
                setStoreVersion( versionStringToLong( CommonAbstractStore.ALL_STORES_VERSION ) );
                updateHighId();
            }
        }
        finally
        {
            unsetRecovered();
        }
    }

    @Override
    protected void checkVersion()
    {
        try
        {
            verifyCorrectTypeDescriptorAndVersion();
            /*
             * If the trailing version string check returns normally, either
             * the store is not ok and needs recovery or everything is fine. The
             * latter is boring. The first case however is interesting. If we
             * need recovery we have no idea what the store version is - we erase
             * that information on startup and write it back out on clean shutdown.
             * So, if the above passes and the store is not ok, we check the
             * version field in our store vs the expected one. If it is the same,
             * we can recover and proceed, otherwise we are allowed to die a horrible death.
             */
            if ( !getStoreOk() )
            {
                /*
                 * Could we check that before? Well, yes. But. When we would read in the store version
                 * field it could very well overshoot and read in the version descriptor if the
                 * store is cleanly shutdown. If we are here though the store is not ok, so no
                 * version descriptor so the file is actually smaller than expected so we won't read
                 * in garbage.
                 * Yes, this has to be fixed to be prettier.
                 */
                String foundVersion = versionLongToString( getStoreVersion(fileSystemAbstraction, configuration.get( Configuration.neo_store) ));
                if ( !CommonAbstractStore.ALL_STORES_VERSION.equals( foundVersion ) )
                {
                    throw new IllegalStateException( format(
                            "Mismatching store version found (%s while expecting %s). The store cannot be automatically upgraded since it isn't cleanly shutdown." +
                            " Recover by starting the database using the previous Neo4j version, followed by a clean shutdown. Then start with this version again.",
                            foundVersion, CommonAbstractStore.ALL_STORES_VERSION ) );
                }
            }
        }
        catch ( IOException e )
        {
            throw new UnderlyingStorageException( "Unable to check version "
                                                  + getStorageFileName(), e );
        }
    }

    @Override
    protected void verifyFileSizeAndTruncate() throws IOException
    {
        super.verifyFileSizeAndTruncate();

        /* MP: 2011-11-23
         * A little silent upgrade for the "next prop" record. It adds one record last to the neostore file.
         * It's backwards compatible, that's why it can be a silent and automatic upgrade.
         */
        if ( getFileChannel().size() == RECORD_SIZE*NEXT_GRAPH_PROP_POSITION )
        {
            insertRecord( NEXT_GRAPH_PROP_POSITION, -1 );
            registerIdFromUpdateRecord( NEXT_GRAPH_PROP_POSITION );
        }

        /* Silent upgrade for latest constraint introducing tx
         */
        if ( getFileChannel().size() == RECORD_SIZE*LATEST_CONSTRAINT_TX_POSITION )
        {
            insertRecord( LATEST_CONSTRAINT_TX_POSITION, 0 );
            registerIdFromUpdateRecord( LATEST_CONSTRAINT_TX_POSITION );
        }
    }

    /**
     * Inserts a new record at {@code recordPosition}, shifting any trailing
     * bytes (e.g. the version descriptor) after it.
     * Note: all IOExceptions are wrapped as RuntimeException here, hence no
     * checked exception is declared.
     */
    private void insertRecord( int recordPosition, long value )
    {
        try
        {
            StoreChannel channel = getFileChannel();
            long previousPosition = channel.position();
            channel.position( RECORD_SIZE*recordPosition );
            int trail = (int) (channel.size()-channel.position());
            ByteBuffer trailBuffer = null;
            if ( trail > 0 )
            {
                trailBuffer = ByteBuffer.allocate( trail );
                channel.read( trailBuffer );
                trailBuffer.flip();
            }
            ByteBuffer buffer = ByteBuffer.allocate( RECORD_SIZE );
            buffer.put( Record.IN_USE.byteValue() );
            buffer.putLong( value );
            buffer.flip();
            channel.position( RECORD_SIZE*recordPosition );
            channel.write( buffer );
            if ( trail > 0 )
            {
                channel.write( trailBuffer );
            }
            channel.position( previousPosition );
        }
        catch ( IOException e )
        {
            throw new RuntimeException( e );
        }
    }

    /**
     * Closes the node,relationship,property and relationship type stores.
     */
    @Override
    protected void closeStorage()
    {
        if ( relTypeStore != null )
        {
            relTypeStore.close();
            relTypeStore = null;
        }
        if ( labelTokenStore != null )
        {
            labelTokenStore.close();
            labelTokenStore = null;
        }
        if ( propStore != null )
        {
            propStore.close();
            propStore = null;
        }
        if ( relStore != null )
        {
            relStore.close();
            relStore = null;
        }
        if ( nodeStore != null )
        {
            nodeStore.close();
            nodeStore = null;
        }
        if ( schemaStore != null )
        {
            schemaStore.close();
            schemaStore = null;
        }
    }

    @Override
    public void flushAll()
    {
        // If any sub-store is already closed there is nothing sensible to flush.
        if ( relTypeStore == null || labelTokenStore == null || propStore == null || relStore == null ||
             nodeStore == null || schemaStore == null )
        {
            return;
        }
        super.flushAll();
        relTypeStore.flushAll();
        labelTokenStore.flushAll();
        propStore.flushAll();
        relStore.flushAll();
        nodeStore.flushAll();
        schemaStore.flushAll();
    }

    @Override
    public String getTypeDescriptor()
    {
        return TYPE_DESCRIPTOR;
    }

    @Override
    public int getRecordSize()
    {
        return RECORD_SIZE;
    }

    public boolean freeIdsDuringRollback()
    {
        return txHook.freeIdsDuringRollback();
    }

    /**
     * Sets the version for the given neostore file in {@code storeDir}.
     * @param storeDir the store dir to locate the neostore file in.
     * @param version the version to set.
     * @return the previous version before writing.
     */
    public static long setVersion( FileSystemAbstraction fileSystem, File storeDir, long version )
    {
        StoreChannel channel = null;
        try
        {
            channel = fileSystem.open( new File( storeDir, NeoStore.DEFAULT_NAME ), "rw" );
            channel.position( RECORD_SIZE*VERSION_POSITION+1/*inUse*/ );
            ByteBuffer buffer = ByteBuffer.allocate( 8 );
            channel.read( buffer );
            buffer.flip();
            long previous = buffer.getLong();
            channel.position( RECORD_SIZE*VERSION_POSITION+1/*inUse*/ );
            buffer.clear();
            buffer.putLong( version ).flip();
            channel.write( buffer );
            return previous;
        }
        catch ( IOException e )
        {
            throw new RuntimeException( e );
        }
        finally
        {
            try
            {
                if ( channel != null ) channel.close();
            }
            catch ( IOException e )
            {
                throw new RuntimeException( e );
            }
        }
    }

    /** Reads the store version record directly from a (possibly closed) neostore file. */
    public static long getStoreVersion( FileSystemAbstraction fs, File neoStore )
    {
        return getRecord( fs, neoStore, STORE_VERSION_POSITION );
    }

    /** Reads the last committed tx id record directly from a neostore file. */
    public static long getTxId( FileSystemAbstraction fs, File neoStore )
    {
        return getRecord( fs, neoStore, LATEST_TX_POSITION );
    }

    private static long getRecord( FileSystemAbstraction fs, File neoStore, long recordPosition )
    {
        StoreChannel channel = null;
        try
        {
            channel = fs.open( neoStore, "r" );
            /*
             * We have to check size, because the store version
             * field was introduced with 1.5, so if there is a non-clean
             * shutdown we may have a buffer underflow.
             */
            if ( recordPosition > LATEST_TX_POSITION && channel.size() < RECORD_SIZE * 5 )
            {
                return -1;
            }
            channel.position( RECORD_SIZE * recordPosition + 1/*inUse*/);
            ByteBuffer buffer = ByteBuffer.allocate( 8 );
            channel.read( buffer );
            buffer.flip();
            return buffer.getLong();
        }
        catch ( IOException e )
        {
            throw new RuntimeException( e );
        }
        finally
        {
            try
            {
                if ( channel != null )
                    channel.close();
            }
            catch ( IOException e )
            {
                throw new RuntimeException( e );
            }
        }
    }

    public StoreId getStoreId()
    {
        return new StoreId( getCreationTime(), getRandomNumber(),
                            getStoreVersion() );
    }

    public long getCreationTime()
    {
        return getRecord( TIME_POSITION );
    }

    public void setCreationTime( long time )
    {
        setRecord( TIME_POSITION, time );
    }

    public long getRandomNumber()
    {
        return getRecord( RANDOM_POSITION );
    }

    public void setRandomNumber( long nr )
    {
        setRecord( RANDOM_POSITION, nr );
    }

    /** Toggles recovery mode on this store and all sub-stores together. */
    public void setRecoveredStatus( boolean status )
    {
        if ( status )
        {
            setRecovered();
            nodeStore.setRecovered();
            propStore.setRecovered();
            relStore.setRecovered();
            relTypeStore.setRecovered();
            labelTokenStore.setRecovered();
            schemaStore.setRecovered();
        }
        else
        {
            unsetRecovered();
            nodeStore.unsetRecovered();
            propStore.unsetRecovered();
            relStore.unsetRecovered();
            relTypeStore.unsetRecovered();
            labelTokenStore.unsetRecovered();
            schemaStore.unsetRecovered();
        }
    }

    public long getVersion()
    {
        return getRecord( VERSION_POSITION );
    }

    public void setVersion( long version )
    {
        setRecord( VERSION_POSITION, version );
    }

    /**
     * Records {@code txId} as the last committed transaction. Outside of
     * recovery, tx ids must be applied strictly sequentially.
     *
     * @throws InvalidRecordException if {@code txId} is not current+1 and the
     *         store is not in recovery mode.
     */
    public synchronized void setLastCommittedTx( long txId )
    {
        long current = getLastCommittedTx();
        if ( (current + 1) != txId && !isInRecoveryMode() )
        {
            throw new InvalidRecordException( "Could not set tx commit id[" +
                                              txId + "] since the current one is[" + current + "]" );
        }
        setRecord( LATEST_TX_POSITION, txId );
        lastCommittedTx = txId;
    }

    public synchronized long getLastCommittedTx()
    {
        if ( lastCommittedTx == -1 )
        {
            lastCommittedTx = getRecord( LATEST_TX_POSITION );
        }
        return lastCommittedTx;
    }

    public long getLatestConstraintIntroducingTx()
    {
        if(latestConstraintIntroducingTx == -1)
        {
            latestConstraintIntroducingTx = getRecord( LATEST_CONSTRAINT_TX_POSITION );
        }
        return latestConstraintIntroducingTx;
    }

    public void setLatestConstraintIntroducingTx( long latestConstraintIntroducingTx )
    {
        setRecord( LATEST_CONSTRAINT_TX_POSITION, latestConstraintIntroducingTx );
        this.latestConstraintIntroducingTx = latestConstraintIntroducingTx;
    }

    /** Bumps the store version by one and returns the pre-increment value. */
    public long incrementVersion()
    {
        long current = getVersion();
        setVersion( current + 1 );
        return current;
    }

    private long getRecord( long id )
    {
        PersistenceWindow window = acquireWindow( id, OperationType.READ );
        try
        {
            Buffer buffer = window.getOffsettedBuffer( id );
            // Skip the in-use byte, then read the value.
            buffer.get();
            return buffer.getLong();
        }
        finally
        {
            releaseWindow( window );
        }
    }

    private void setRecord( long id, long value )
    {
        PersistenceWindow window = acquireWindow( id, OperationType.WRITE );
        try
        {
            Buffer buffer = window.getOffsettedBuffer( id );
            buffer.put( Record.IN_USE.byteValue() ).putLong( value );
            registerIdFromUpdateRecord( id );
        }
        finally
        {
            releaseWindow( window );
        }
    }

    public long getStoreVersion()
    {
        return getRecord( STORE_VERSION_POSITION );
    }

    public void setStoreVersion( long version )
    {
        setRecord( STORE_VERSION_POSITION, version );
    }

    public long getGraphNextProp()
    {
        return getRecord( NEXT_GRAPH_PROP_POSITION );
    }

    public void setGraphNextProp( long propId )
    {
        setRecord( NEXT_GRAPH_PROP_POSITION, propId );
    }

    /**
     * Returns the node store.
     *
     * @return The node store
     */
    public NodeStore getNodeStore()
    {
        return nodeStore;
    }

    /**
     * @return the schema store.
     */
    public SchemaStore getSchemaStore()
    {
        return schemaStore;
    }

    /**
     * The relationship store.
     *
     * @return The relationship store
     */
    public RelationshipStore getRelationshipStore()
    {
        return relStore;
    }

    /**
     * Returns the relationship type store.
     *
     * @return The relationship type store
     */
    public RelationshipTypeTokenStore getRelationshipTypeStore()
    {
        return relTypeStore;
    }

    /**
     * Returns the label store.
     *
     * @return The label store
     */
    public LabelTokenStore getLabelTokenStore()
    {
        return labelTokenStore;
    }

    /**
     * Returns the property store.
     *
     * @return The property store
     */
    public PropertyStore getPropertyStore()
    {
        return propStore;
    }

    @Override
    public void makeStoreOk()
    {
        relTypeStore.makeStoreOk();
        labelTokenStore.makeStoreOk();
        propStore.makeStoreOk();
        relStore.makeStoreOk();
        nodeStore.makeStoreOk();
        schemaStore.makeStoreOk();
        super.makeStoreOk();
    }

    @Override
    public void rebuildIdGenerators()
    {
        relTypeStore.rebuildIdGenerators();
        labelTokenStore.rebuildIdGenerators();
        propStore.rebuildIdGenerators();
        relStore.rebuildIdGenerators();
        nodeStore.rebuildIdGenerators();
        schemaStore.rebuildIdGenerators();
        super.rebuildIdGenerators();
    }

    public void updateIdGenerators()
    {
        this.updateHighId();
        relTypeStore.updateIdGenerators();
        labelTokenStore.updateIdGenerators();
        propStore.updateIdGenerators();
        relStore.updateHighId();
        nodeStore.updateIdGenerators();
        schemaStore.updateHighId();
    }

    public int getRelationshipGrabSize()
    {
        return REL_GRAB_SIZE;
    }

    @Override
    public List<WindowPoolStats> getAllWindowPoolStats()
    {
        // Reverse order from everything else
        List<WindowPoolStats> list = new ArrayList<WindowPoolStats>();
        // TODO no stats for schema store?
        list.addAll( nodeStore.getAllWindowPoolStats() );
        list.addAll( propStore.getAllWindowPoolStats() );
        list.addAll( relStore.getAllWindowPoolStats() );
        list.addAll( relTypeStore.getAllWindowPoolStats() );
        list.addAll( labelTokenStore.getAllWindowPoolStats() );
        return list;
    }

    @Override
    public void logAllWindowPoolStats( StringLogger.LineLogger logger )
    {
        super.logAllWindowPoolStats( logger );
        // TODO no stats for schema store?
        nodeStore.logAllWindowPoolStats( logger );
        relStore.logAllWindowPoolStats( logger );
        relTypeStore.logAllWindowPoolStats( logger );
        labelTokenStore.logAllWindowPoolStats( logger );
        propStore.logAllWindowPoolStats( logger );
    }

    public boolean isStoreOk()
    {
        return getStoreOk() && relTypeStore.getStoreOk() && labelTokenStore.getStoreOk() &&
               propStore.getStoreOk() && relStore.getStoreOk() && nodeStore.getStoreOk() && schemaStore.getStoreOk();
    }

    @Override
    public void logVersions( StringLogger.LineLogger msgLog)
    {
        msgLog.logLine( "Store versions:" );
        super.logVersions( msgLog );
        schemaStore.logVersions( msgLog );
        nodeStore.logVersions( msgLog );
        relStore.logVersions( msgLog );
        relTypeStore.logVersions( msgLog );
        labelTokenStore.logVersions( msgLog );
        propStore.logVersions( msgLog );
        stringLogger.flush();
    }

    @Override
    public void logIdUsage( StringLogger.LineLogger msgLog )
    {
        msgLog.logLine( "Id usage:" );
        schemaStore.logIdUsage( msgLog );
        nodeStore.logIdUsage( msgLog );
        relStore.logIdUsage( msgLog );
        relTypeStore.logIdUsage( msgLog );
        labelTokenStore.logIdUsage( msgLog );
        propStore.logIdUsage( msgLog );
        stringLogger.flush();
    }

    /** Exposes the neostore meta-record (graph property chain pointer) as a record. */
    public NeoStoreRecord asRecord()
    {
        NeoStoreRecord result = new NeoStoreRecord();
        result.setNextProp( getRecord( NEXT_GRAPH_PROP_POSITION ) );
        return result;
    }

    /*
     * The following two methods encode and decode a string that is presumably
     * the store version into a long via Latin1 encoding. This leaves room for
     * 7 characters and 1 byte for the length. Current string is
     * 0.A.0 which is 5 chars, so we have room for expansion. When that
     * becomes a problem we will be in a yacht, sipping alcoholic
     * beverages of our choice. Or taking turns crashing golden
     * helicopters. Anyway, it should suffice for some time and by then
     * it should have become SEP.
     */
    public static long versionStringToLong( String storeVersion )
    {
        if ( CommonAbstractStore.UNKNOWN_VERSION.equals( storeVersion ) )
        {
            return -1;
        }
        Bits bits = Bits.bits(8);
        int length = storeVersion.length();
        if ( length == 0 || length > 7 )
        {
            throw new IllegalArgumentException(
                    String.format(
                            "The given string %s is not of proper size for a store version string",
                            storeVersion ) );
        }
        bits.put( length, 8 );
        for ( int i = 0; i < length; i++ )
        {
            char c = storeVersion.charAt( i );
            // char is unsigned, so only the upper bound needs checking.
            if ( c >= 256 )
                throw new IllegalArgumentException(
                        String.format(
                                "Store version strings should be encode-able as Latin1 - %s is not",
                                storeVersion ) );
            bits.put( c, 8 ); // Just the lower byte
        }
        return bits.getLong();
    }

    public static String versionLongToString( long storeVersion )
    {
        if ( storeVersion == -1 )
        {
            return CommonAbstractStore.UNKNOWN_VERSION;
        }
        Bits bits = Bits.bitsFromLongs(new long[]{storeVersion});
        int length = bits.getShort( 8 );
        if ( length == 0 || length > 7 )
        {
            throw new IllegalArgumentException( String.format(
                    "The read in version string length %d is not proper.",
                    length ) );
        }
        char[] result = new char[length];
        for ( int i = 0; i < length; i++ )
        {
            result[i] = (char) bits.getShort( 8 );
        }
        return new String( result );
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_store_NeoStore.java
|
1,224
|
/**
 * Aggregates several {@link UnderlyingStorageException}s — one per index —
 * raised while closing (flushing) index updaters. Each individual failure
 * is attached as a suppressed exception, and the full set is exposed as an
 * unmodifiable field.
 */
public class MultipleUnderlyingStorageExceptions extends UnderlyingStorageException
{
    /** The (index, failure) pairs behind this exception; unmodifiable. */
    public final Set<Pair<IndexDescriptor, UnderlyingStorageException>> exceptions;

    public MultipleUnderlyingStorageExceptions( Set<Pair<IndexDescriptor, UnderlyingStorageException>> exceptions )
    {
        super( buildMessage( exceptions ) );
        this.exceptions = Collections.unmodifiableSet( exceptions );
        for ( Pair<IndexDescriptor, UnderlyingStorageException> failure : exceptions )
        {
            // Keep every individual stack trace reachable from this one.
            addSuppressed( failure.other() );
        }
    }

    /** Builds the combined message, one "(index) message" entry per failure. */
    private static String buildMessage( Set<Pair<IndexDescriptor, UnderlyingStorageException>> exceptions )
    {
        StringBuilder message = new StringBuilder( "Errors when closing (flushing) index updaters:" );
        for ( Pair<IndexDescriptor, UnderlyingStorageException> failure : exceptions )
        {
            message.append( format( " (%s) %s", failure.first().toString(), failure.other().getMessage() ) );
        }
        return message.toString();
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_store_MultipleUnderlyingStorageExceptions.java
|
1,225
|
/**
 * Thrown when a store is opened whose {@link StoreId} does not match the
 * one that was expected; carries both ids for inspection by the caller.
 */
public class MismatchingStoreIdException extends StoreFailureException
{
    // The store id the caller expected to find.
    private final StoreId expected;
    // The store id actually read from the store.
    private final StoreId encountered;

    public MismatchingStoreIdException( StoreId expected, StoreId encountered )
    {
        super( "Expected:" + expected + ", encountered:" + encountered );
        this.expected = expected;
        this.encountered = encountered;
    }

    /** @return the store id the caller expected. */
    public StoreId getExpected()
    {
        return expected;
    }

    /** @return the store id actually found. */
    public StoreId getEncountered()
    {
        return encountered;
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_store_MismatchingStoreIdException.java
|
1,226
|
{
@Override
public Void doWork( Void state )
{
window.lock( OperationType.WRITE );
return null;
}
} );
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_nioneo_store_MappedPersistenceWindowTest.java
|
1,227
|
/**
 * In-memory representation of a relationship record: the two node ids it
 * connects, its type id, the previous/next pointers of both endpoints'
 * relationship chains, and (via {@link PrimitiveRecord}) the id of the
 * first property record.
 */
public class RelationshipRecord extends PrimitiveRecord
{
    // Endpoint node ids and the relationship type id.
    private long firstNode;
    private long secondNode;
    private int type;
    // Doubly-linked chain pointers for the first node's relationship chain.
    private long firstPrevRel = Record.NO_PREV_RELATIONSHIP.intValue();
    private long firstNextRel = Record.NO_NEXT_RELATIONSHIP.intValue();
    // Doubly-linked chain pointers for the second node's relationship chain.
    private long secondPrevRel = Record.NO_PREV_RELATIONSHIP.intValue();
    private long secondNextRel = Record.NO_NEXT_RELATIONSHIP.intValue();

    public RelationshipRecord( long id, long firstNode, long secondNode, int type )
    {
        // TODO take firstProp in here
        this( id );
        this.firstNode = firstNode;
        this.secondNode = secondNode;
        this.type = type;
    }

    public RelationshipRecord( long id )
    {
        // Starts with no property chain.
        super( id, Record.NO_NEXT_PROPERTY.intValue() );
    }

    /** Sets endpoints and type in one call. */
    public void setLinks( long firstNode, long secondNode, int type )
    {
        this.firstNode = firstNode;
        this.secondNode = secondNode;
        this.type = type;
    }

    public long getFirstNode()
    {
        return firstNode;
    }

    public long getSecondNode()
    {
        return secondNode;
    }

    public int getType()
    {
        return type;
    }

    public long getFirstPrevRel()
    {
        return firstPrevRel;
    }

    public void setFirstPrevRel( long firstPrevRel )
    {
        this.firstPrevRel = firstPrevRel;
    }

    public long getFirstNextRel()
    {
        return firstNextRel;
    }

    public void setFirstNextRel( long firstNextRel )
    {
        this.firstNextRel = firstNextRel;
    }

    public long getSecondPrevRel()
    {
        return secondPrevRel;
    }

    public void setSecondPrevRel( long secondPrevRel )
    {
        this.secondPrevRel = secondPrevRel;
    }

    public long getSecondNextRel()
    {
        return secondNextRel;
    }

    public void setSecondNextRel( long secondNextRel )
    {
        this.secondNextRel = secondNextRel;
    }

    @Override
    public String toString()
    {
        return new StringBuilder( "Relationship[" ).append( getId() ).append( ",used=" ).append( inUse() ).append(
                ",source=" ).append( firstNode ).append( ",target=" ).append( secondNode ).append( ",type=" ).append(
                type ).append( ",sPrev=" ).append( firstPrevRel ).append( ",sNext=" ).append( firstNextRel ).append(
                ",tPrev=" ).append( secondPrevRel ).append( ",tNext=" ).append( secondNextRel ).append( ",prop=" ).append(
                getNextProp() ).append( "]" ).toString();
    }

    @Override
    public void setIdTo( PropertyRecord property )
    {
        // Tags the property record as owned by this relationship.
        property.setRelId( getId() );
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_store_RelationshipRecord.java
|
1,228
|
/**
 * Store implementation for relationship records. Each record is
 * {@link #RECORD_SIZE} bytes: one header byte followed by eight ints
 * (node ids, type, four chain pointers, first property id). The high-order
 * bits of the long ids are packed into the header byte and the type int;
 * see the bit-layout comments in {@code updateRecord} and
 * {@code getRecord}, which must stay in sync with each other.
 */
public class RelationshipStore extends AbstractRecordStore<RelationshipRecord> implements Store
{
    /** Configuration keys; adds nothing beyond {@link AbstractStore.Configuration}. */
    public static abstract class Configuration
            extends AbstractStore.Configuration
    {
    }

    public static final String TYPE_DESCRIPTOR = "RelationshipStore";

    // record header size
    // directed|in_use(byte)+first_node(int)+second_node(int)+rel_type(int)+
    // first_prev_rel_id(int)+first_next_rel_id+second_prev_rel_id(int)+
    // second_next_rel_id+next_prop_id(int)
    public static final int RECORD_SIZE = 33;

    public RelationshipStore(File fileName, Config configuration, IdGeneratorFactory idGeneratorFactory,
            WindowPoolFactory windowPoolFactory, FileSystemAbstraction fileSystemAbstraction, StringLogger stringLogger)
    {
        super(fileName, configuration, IdType.RELATIONSHIP, idGeneratorFactory,
                windowPoolFactory, fileSystemAbstraction, stringLogger);
    }

    /** Visitor hook: dispatches this record to the processor. */
    @Override
    public <FAILURE extends Exception> void accept( Processor<FAILURE> processor, RelationshipRecord record ) throws FAILURE
    {
        processor.processRelationship( this, record );
    }

    @Override
    public String getTypeDescriptor()
    {
        return TYPE_DESCRIPTOR;
    }

    @Override
    public int getRecordSize()
    {
        return RECORD_SIZE;
    }

    @Override
    public int getRecordHeaderSize()
    {
        return getRecordSize();
    }

    /**
     * Reads the record with the given id.
     *
     * @throws InvalidRecordException if the record is not in use
     */
    @Override
    public RelationshipRecord getRecord( long id )
    {
        PersistenceWindow window = acquireWindow( id, OperationType.READ );
        try
        {
            return getRecord( id, window, RecordLoad.NORMAL );
        }
        finally
        {
            releaseWindow( window );
        }
    }

    /**
     * Reads the record regardless of its in-use flag; returns a stub record
     * with -1 endpoints/type if the id lies beyond the store's windows.
     */
    @Override
    public RelationshipRecord forceGetRecord( long id )
    {
        PersistenceWindow window;
        try
        {
            window = acquireWindow( id, OperationType.READ );
        }
        catch ( InvalidRecordException e )
        {
            // Id outside the store: synthesize an empty placeholder record.
            return new RelationshipRecord( id, -1, -1, -1 );
        }
        try
        {
            return getRecord( id, window, RecordLoad.FORCE );
        }
        finally
        {
            releaseWindow( window );
        }
    }

    @Override
    public RelationshipRecord forceGetRaw( RelationshipRecord record )
    {
        return record;
    }

    @Override
    public RelationshipRecord forceGetRaw( long id )
    {
        return forceGetRecord( id );
    }

    /**
     * Reads the record with CHECK semantics: returns null instead of
     * throwing when the record is not in use or the id is too high.
     */
    public RelationshipRecord getLightRel( long id )
    {
        PersistenceWindow window;
        try
        {
            window = acquireWindow( id, OperationType.READ );
        }
        catch ( InvalidRecordException e )
        {
            // ok to high id
            return null;
        }
        try
        {
            return getRecord( id, window, RecordLoad.CHECK );
        }
        finally
        {
            releaseWindow( window );
        }
    }

    /** Writes the record back; frees the id when the record is deleted. */
    @Override
    public void updateRecord( RelationshipRecord record )
    {
        PersistenceWindow window = acquireWindow( record.getId(),
                OperationType.WRITE );
        try
        {
            updateRecord( record, window, false );
        }
        finally
        {
            releaseWindow( window );
        }
    }

    /** Writes the record back even when it is marked not-in-use. */
    @Override
    public void forceUpdateRecord( RelationshipRecord record )
    {
        PersistenceWindow window = acquireWindow( record.getId(),
                OperationType.WRITE );
        try
        {
            updateRecord( record, window, true );
        }
        finally
        {
            releaseWindow( window );
        }
    }

    /**
     * Serializes the record into the window buffer. The shift amounts below
     * are the exact inverses of the ones in {@code getRecord}; keep both in
     * sync when touching either.
     */
    private void updateRecord( RelationshipRecord record,
            PersistenceWindow window, boolean force )
    {
        long id = record.getId();
        registerIdFromUpdateRecord( id );
        Buffer buffer = window.getOffsettedBuffer( id );
        if ( record.inUse() || force )
        {
            // Extract bits 32-34 of each id so they can be packed into the
            // header byte and the type int; NO_NEXT markers stay unpacked.
            long firstNode = record.getFirstNode();
            short firstNodeMod = (short)((firstNode & 0x700000000L) >> 31);
            long secondNode = record.getSecondNode();
            long secondNodeMod = (secondNode & 0x700000000L) >> 4;
            long firstPrevRel = record.getFirstPrevRel();
            long firstPrevRelMod = firstPrevRel == Record.NO_NEXT_RELATIONSHIP.intValue() ? 0 : (firstPrevRel & 0x700000000L) >> 7;
            long firstNextRel = record.getFirstNextRel();
            long firstNextRelMod = firstNextRel == Record.NO_NEXT_RELATIONSHIP.intValue() ? 0 : (firstNextRel & 0x700000000L) >> 10;
            long secondPrevRel = record.getSecondPrevRel();
            long secondPrevRelMod = secondPrevRel == Record.NO_NEXT_RELATIONSHIP.intValue() ? 0 : (secondPrevRel & 0x700000000L) >> 13;
            long secondNextRel = record.getSecondNextRel();
            long secondNextRelMod = secondNextRel == Record.NO_NEXT_RELATIONSHIP.intValue() ? 0 : (secondNextRel & 0x700000000L) >> 16;
            long nextProp = record.getNextProp();
            long nextPropMod = nextProp == Record.NO_NEXT_PROPERTY.intValue() ? 0 : (nextProp & 0xF00000000L) >> 28;

            // [    ,   x] in use flag
            // [    ,xxx ] first node high order bits
            // [xxxx,    ] next prop high order bits
            short inUseUnsignedByte = (short)((record.inUse() ? Record.IN_USE : Record.NOT_IN_USE).byteValue() | firstNodeMod | nextPropMod);

            // [ xxx,    ][    ,    ][    ,    ][    ,    ] second node high order bits,     0x70000000
            // [    ,xxx ][    ,    ][    ,    ][    ,    ] first prev rel high order bits,  0xE000000
            // [    ,   x][xx  ,    ][    ,    ][    ,    ] first next rel high order bits,  0x1C00000
            // [    ,    ][  xx,x   ][    ,    ][    ,    ] second prev rel high order bits, 0x380000
            // [    ,    ][    , xxx][    ,    ][    ,    ] second next rel high order bits, 0x70000
            // [    ,    ][    ,    ][xxxx,xxxx][xxxx,xxxx] type
            int typeInt = (int)(record.getType() | secondNodeMod | firstPrevRelMod | firstNextRelMod | secondPrevRelMod | secondNextRelMod);

            buffer.put( (byte)inUseUnsignedByte ).putInt( (int) firstNode ).putInt( (int) secondNode )
                    .putInt( typeInt ).putInt( (int) firstPrevRel ).putInt( (int) firstNextRel )
                    .putInt( (int) secondPrevRel ).putInt( (int) secondNextRel ).putInt( (int) nextProp );
        }
        else
        {
            buffer.put( Record.NOT_IN_USE.byteValue() );
            if ( !isInRecoveryMode() )
            {
                // Only return the id to the free list during normal operation.
                freeId( id );
            }
        }
    }

    /**
     * Deserializes a record from the window buffer. With NORMAL load an
     * unused record throws; with CHECK it yields null; with FORCE the bytes
     * are decoded regardless of the in-use flag.
     */
    private RelationshipRecord getRecord( long id, PersistenceWindow window,
            RecordLoad load )
    {
        Buffer buffer = window.getOffsettedBuffer( id );

        // [    ,   x] in use flag
        // [    ,xxx ] first node high order bits
        // [xxxx,    ] next prop high order bits
        long inUseByte = buffer.get();

        boolean inUse = (inUseByte & 0x1) == Record.IN_USE.intValue();
        if ( !inUse )
        {
            switch ( load )
            {
            case NORMAL:
                throw new InvalidRecordException( "RelationshipRecord[" + id + "] not in use" );
            case CHECK:
                return null;
            }
        }

        long firstNode = buffer.getUnsignedInt();
        long firstNodeMod = (inUseByte & 0xEL) << 31;

        long secondNode = buffer.getUnsignedInt();

        // [ xxx,    ][    ,    ][    ,    ][    ,    ] second node high order bits,     0x70000000
        // [    ,xxx ][    ,    ][    ,    ][    ,    ] first prev rel high order bits,  0xE000000
        // [    ,   x][xx  ,    ][    ,    ][    ,    ] first next rel high order bits,  0x1C00000
        // [    ,    ][  xx,x   ][    ,    ][    ,    ] second prev rel high order bits, 0x380000
        // [    ,    ][    , xxx][    ,    ][    ,    ] second next rel high order bits, 0x70000
        // [    ,    ][    ,    ][xxxx,xxxx][xxxx,xxxx] type
        long typeInt = buffer.getInt();
        long secondNodeMod = (typeInt & 0x70000000L) << 4;
        int type = (int)(typeInt & 0xFFFF);

        RelationshipRecord record = new RelationshipRecord( id,
                longFromIntAndMod( firstNode, firstNodeMod ),
                longFromIntAndMod( secondNode, secondNodeMod ), type );
        record.setInUse( inUse );

        long firstPrevRel = buffer.getUnsignedInt();
        long firstPrevRelMod = (typeInt & 0xE000000L) << 7;
        record.setFirstPrevRel( longFromIntAndMod( firstPrevRel, firstPrevRelMod ) );

        long firstNextRel = buffer.getUnsignedInt();
        long firstNextRelMod = (typeInt & 0x1C00000L) << 10;
        record.setFirstNextRel( longFromIntAndMod( firstNextRel, firstNextRelMod ) );

        long secondPrevRel = buffer.getUnsignedInt();
        long secondPrevRelMod = (typeInt & 0x380000L) << 13;
        record.setSecondPrevRel( longFromIntAndMod( secondPrevRel, secondPrevRelMod ) );

        long secondNextRel = buffer.getUnsignedInt();
        long secondNextRelMod = (typeInt & 0x70000L) << 16;
        record.setSecondNextRel( longFromIntAndMod( secondNextRel, secondNextRelMod ) );

        long nextProp = buffer.getUnsignedInt();
        long nextPropMod = (inUseByte & 0xF0L) << 28;
        record.setNextProp( longFromIntAndMod( nextProp, nextPropMod ) );
        return record;
    }

//    private RelationshipRecord getFullRecord( long id, PersistenceWindow window )
//    {
//        Buffer buffer = window.getOffsettedBuffer( id );
//        byte inUse = buffer.get();
//        boolean inUseFlag = ((inUse & Record.IN_USE.byteValue()) ==
//            Record.IN_USE.byteValue());
//        RelationshipRecord record = new RelationshipRecord( id,
//            buffer.getInt(), buffer.getInt(), buffer.getInt() );
//        record.setInUse( inUseFlag );
//        record.setFirstPrevRel( buffer.getInt() );
//        record.setFirstNextRel( buffer.getInt() );
//        record.setSecondPrevRel( buffer.getInt() );
//        record.setSecondNextRel( buffer.getInt() );
//        record.setNextProp( buffer.getInt() );
//        return record;
//    }

    /**
     * Reads a record for chain traversal: null when the id is beyond the
     * store, otherwise NORMAL load semantics (throws if not in use).
     */
    public RelationshipRecord getChainRecord( long relId )
    {
        PersistenceWindow window;
        try
        {
            window = acquireWindow( relId, OperationType.READ );
        }
        catch ( InvalidRecordException e )
        {
            // ok to high id
            return null;
        }
        try
        {
//            return getFullRecord( relId, window );
            return getRecord( relId, window, RecordLoad.NORMAL );
        }
        finally
        {
            releaseWindow( window );
        }
    }

    @Override
    public List<WindowPoolStats> getAllWindowPoolStats()
    {
        List<WindowPoolStats> list = new ArrayList<>();
        list.add( getWindowPoolStats() );
        return list;
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_store_RelationshipStore.java
|
1,229
|
/**
 * Configuration keys for this store; inherits everything from
 * {@link AbstractStore.Configuration} and adds nothing of its own.
 */
public static abstract class Configuration
        extends AbstractStore.Configuration
{
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_store_RelationshipStore.java
|
1,230
|
INT
{
    // Decimal strings of random 32-bit ints; note maxLen is ignored here.
    @Override
    String randomString( int maxLen )
    {
        return Long.toString( random.nextInt() );
    }
},
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_nioneo_store_TestShortString.java
|
1,231
|
UNIFORM_LATIN
{
    // Random chars from 0x20..0xDF, remapping anything above 0x7F up by
    // 0x20 so the result stays within printable Latin-1.
    @Override
    String randomString( int maxLen )
    {
        char[] chars = new char[random.nextInt( maxLen + 1 )];
        for ( int i = 0; i < chars.length; i++ )
        {
            chars[i] = (char) ( 0x20 + random.nextInt( 0xC0 ) );
            if ( chars[i] > 0x7f ) chars[i] += 0x20;
        }
        return new String( chars );
    }
},
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_nioneo_store_TestShortString.java
|
1,232
|
SYMBOLS
{
    // Random strings drawn from SYMBOL_CHARS (underscore, digits, A-Z, a-z).
    @Override
    String randomString( int maxLen )
    {
        char[] chars = new char[random.nextInt( maxLen + 1 )];
        for ( int i = 0; i < chars.length; i++ )
        {
            chars[i] = SYMBOL_CHARS[random.nextInt( SYMBOL_CHARS.length )];
        }
        return new String( chars );
    }
},
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_nioneo_store_TestShortString.java
|
1,233
|
UNIFORM_ASCII
{
    // Random printable ASCII (0x20..0x7D), length uniform in [0, maxLen].
    @Override
    String randomString( int maxLen )
    {
        char[] chars = new char[random.nextInt( maxLen + 1 )];
        for ( int i = 0; i < chars.length; i++ )
        {
            chars[i] = (char) ( 0x20 + random.nextInt( 94 ) );
        }
        return new String( chars );
    }
},
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_nioneo_store_TestShortString.java
|
1,234
|
/**
 * Abstract test suite for short-string encoding: subclasses implement
 * {@link #assertCanEncode(String)} against a concrete encoder. The class
 * is {@code @Ignore}d, and the micro-benchmark's {@link #roundtrip(String)}
 * stub always returns null, so every benchmark attempt counts as a
 * "non-convertable" failure — presumably intentional for a retired test.
 */
@Ignore( "Not used anymore" )
public abstract class TestShortString
{
    @Test
    public void canEncodeEmptyString() throws Exception
    {
        assertCanEncode( "" );
    }

    @Test
    public void canEncodeReallyLongString() throws Exception
    {
        assertCanEncode( " " ); // 20 spaces
        assertCanEncode( " " ); // 16 spaces
    }

    @Test
    public void canEncodeFifteenSpaces() throws Exception
    {
        assertCanEncode( " " );
    }

    @Test
    public void canEncodeNumericalString() throws Exception
    {
        assertCanEncode( "0123456789+,'.-" );
        assertCanEncode( " ,'.-0123456789" );
        assertCanEncode( "+ '.0123456789-" );
        assertCanEncode( "+, 0123456789.-" );
        assertCanEncode( "+,0123456789' -" );
        assertCanEncode( "+0123456789,'. " );
        // IP(v4) numbers
        assertCanEncode( "192.168.0.1" );
        assertCanEncode( "127.0.0.1" );
        assertCanEncode( "255.255.255.255" );
    }

    @Test
    public void canEncodeTooLongStringsWithCharsInDifferentTables()
            throws Exception
    {
        assertCanEncode( "____________+" );
        assertCanEncode( "_____+_____" );
        assertCanEncode( "____+____" );
        assertCanEncode( "HELLO world" );
        assertCanEncode( "Hello_World" );
    }

    @Test
    public void canEncodeUpToNineEuropeanChars() throws Exception
    {
        // Shorter than 10 chars
        assertCanEncode( "fågel" ); // "bird" in Swedish
        assertCanEncode( "påfågel" ); // "peacock" in Swedish
        assertCanEncode( "påfågelö" ); // "peacock island" in Swedish
        assertCanEncode( "påfågelön" ); // "the peacock island" in Swedish
        // 10 chars
        assertCanEncode( "påfågelöar" ); // "peacock islands" in Swedish
    }

    @Test
    public void canEncodeEuropeanCharsWithPunctuation() throws Exception
    {
        assertCanEncode( "qHm7 pp3" );
        assertCanEncode( "UKKY3t.gk" );
    }

    @Test
    public void canEncodeAlphanumerical() throws Exception
    {
        assertCanEncode( "1234567890" ); // Just a sanity check
        assertCanEncodeInBothCasings( "HelloWor1d" ); // There is a number there
        assertCanEncode( " " ); // Alphanum is the first that can encode 10 spaces
        assertCanEncode( "_ _ _ _ _ " ); // The only available punctuation
        assertCanEncode( "H3Lo_ or1D" ); // Mixed case + punctuation
        assertCanEncode( "q1w2e3r4t+" ); // + is not in the charset
    }

    @Test
    public void canEncodeHighUnicode() throws Exception
    {
        assertCanEncode( "\u02FF" );
        assertCanEncode( "hello\u02FF" );
    }

    @Test
    public void canEncodeLatin1SpecialChars() throws Exception
    {
        assertCanEncode( "#$#$#$#" );
        assertCanEncode( "$hello#" );
    }

    @Test
    public void canEncodeTooLongLatin1String() throws Exception
    {
        assertCanEncode( "#$#$#$#$" );
    }

    @Test
    public void canEncodeLowercaseAndUppercaseStringsUpTo12Chars() throws Exception
    {
        assertCanEncodeInBothCasings( "hello world" );
        assertCanEncode( "hello_world" );
        assertCanEncode( "_hello_world" );
        assertCanEncode( "hello::world" );
        assertCanEncode( "hello//world" );
        assertCanEncode( "hello world" );
        assertCanEncode( "http://ok" );
        assertCanEncode( "::::::::" );
        assertCanEncode( " _.-:/ _.-:/" );
    }

    // === test utils ===

    /** Asserts the string encodes in both lower and upper case. */
    private void assertCanEncodeInBothCasings( String string )
    {
        assertCanEncode( string.toLowerCase() );
        assertCanEncode( string.toUpperCase() );
    }

    /** Implemented by subclasses against a concrete short-string encoder. */
    abstract protected void assertCanEncode( String string );

    // === Micro benchmarking === [includes random tests]

    public static void main( String[] args )
    {
        microbench( 10, TimeUnit.SECONDS, Charset.UNIFORM_ASCII );
        microbench( 10, TimeUnit.SECONDS, Charset.SYMBOLS );
        microbench( 10, TimeUnit.SECONDS, Charset.LONG );
        microbench( 10, TimeUnit.SECONDS, Charset.INT );
        microbench( 10, TimeUnit.SECONDS, Charset.UNIFORM_LATIN );
        microbench( 10, TimeUnit.SECONDS, Charset.UNICODE );
    }

    /**
     * Runs roundtrip conversions on batches of 1000 random strings until
     * the time budget is spent, then prints throughput and success rates.
     */
    @SuppressWarnings( "boxing" )
    private static void microbench( long time, TimeUnit unit, Charset charset )
    {
        long successes = 0, failures = 0, errors = 0;
        long remaining = time = unit.toMillis( time );
        while ( remaining > 0 )
        {
            List<String> strings = randomStrings( 1000, charset, 15 );
            long start = System.currentTimeMillis();
            for ( String string : strings )
            {
                String result = roundtrip( string );
                if ( result != null )
                {
                    if ( string.equals( result ) )
                    {
                        successes++;
                    }
                    else
                    {
                        errors++;
                        System.out.printf( "Expected: %s, got: %s%n", string, result );
                    }
                }
                else
                {
                    failures++;
                }
            }
            remaining -= System.currentTimeMillis() - start;
        }
        time -= remaining;
        System.out.printf( "=== %s ===%n", charset.name() );
        System.out.printf( "%s successful, %s non-convertable, %s misconverted%n", successes, failures, errors );
        long total = successes + failures + errors;
        System.out.printf( "%.3f conversions per ms%n", total / (double) time );
        System.out.printf( "%.2f%% success rate%n", 100 * ( successes / ( (double) ( total ) ) ) );
    }

    // Stub: always reports "not convertable". A real encoder roundtrip was
    // presumably wired in here before the class was retired.
    private static String roundtrip( @SuppressWarnings("UnusedParameters") String string )
    {
        return null;
    }

    /** Generates {@code count} random strings from the given charset. */
    public static List<String> randomStrings( int count, Charset charset, int maxLen )
    {
        List<String> result = new ArrayList<String>( count );
        for ( int i = 0; i < count; i++ )
        {
            result.add( charset.randomString( maxLen ) );
        }
        return result;
    }

    private static Random random = new Random();

    /** Families of random test strings, one generator per character set. */
    public static enum Charset
    {
        UNIFORM_ASCII
        {
            // Random printable ASCII (0x20..0x7D).
            @Override
            String randomString( int maxLen )
            {
                char[] chars = new char[random.nextInt( maxLen + 1 )];
                for ( int i = 0; i < chars.length; i++ )
                {
                    chars[i] = (char) ( 0x20 + random.nextInt( 94 ) );
                }
                return new String( chars );
            }
        },
        SYMBOLS
        {
            // Drawn from SYMBOL_CHARS: underscore, digits, A-Z, a-z.
            @Override
            String randomString( int maxLen )
            {
                char[] chars = new char[random.nextInt( maxLen + 1 )];
                for ( int i = 0; i < chars.length; i++ )
                {
                    chars[i] = SYMBOL_CHARS[random.nextInt( SYMBOL_CHARS.length )];
                }
                return new String( chars );
            }
        },
        UNIFORM_LATIN
        {
            // 0x20..0xDF, shifted above 0x7F to stay in printable Latin-1.
            @Override
            String randomString( int maxLen )
            {
                char[] chars = new char[random.nextInt( maxLen + 1 )];
                for ( int i = 0; i < chars.length; i++ )
                {
                    chars[i] = (char) ( 0x20 + random.nextInt( 0xC0 ) );
                    if ( chars[i] > 0x7f ) chars[i] += 0x20;
                }
                return new String( chars );
            }
        },
        LONG
        {
            // Decimal longs bounded to maxLen digits (sign may add a char).
            @Override
            String randomString( int maxLen )
            {
                return Long.toString( random.nextLong() % ( (long) Math.pow( 10, maxLen ) ) );
            }
        },
        INT
        {
            // Decimal 32-bit ints; maxLen is ignored.
            @Override
            String randomString( int maxLen )
            {
                return Long.toString( random.nextInt() );
            }
        },
        UNICODE
        {
            // Code points 0x1..0xD7FE, below the surrogate range.
            @Override
            String randomString( int maxLen )
            {
                char[] chars = new char[random.nextInt( maxLen + 1 )];
                for ( int i = 0; i < chars.length; i++ )
                {
                    chars[i] = (char) ( 1 + random.nextInt( 0xD7FE ) );
                }
                return new String( chars );
            }
        },
        ;

        // '_' + '0'-'9' + 'A'-'Z' + 'a'-'z' = 63 characters.
        static char[] SYMBOL_CHARS = new char[26 + 26 + 10 + 1];
        static
        {
            SYMBOL_CHARS[0] = '_';
            int i = 1;
            for ( char c = '0'; c <= '9'; c++ )
            {
                SYMBOL_CHARS[i++] = c;
            }
            for ( char c = 'A'; c <= 'Z'; c++ )
            {
                SYMBOL_CHARS[i++] = c;
            }
            for ( char c = 'a'; c <= 'z'; c++ )
            {
                SYMBOL_CHARS[i++] = c;
            }
        }

        abstract String randomString( int maxLen );
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_nioneo_store_TestShortString.java
|
1,235
|
/**
 * Tests for {@code ShortArray} encoding: arrays that fit a property
 * block's payload must encode and decode back to equal values; arrays
 * that do not fit must be rejected by {@code ShortArray.encode}.
 */
public class TestShortArray
{
    private static final int DEFAULT_PAYLOAD_SIZE = PropertyType.getPayloadSize();

    @Test
    public void canEncodeSomeSampleArraysWithDefaultPayloadSize() throws Exception
    {
        assertCanEncodeAndDecodeToSameValue( new boolean[] { true, false, true,
                true, true, true, true, true, true, true, false, true } );
        assertCanEncodeAndDecodeToSameValue( new byte[] { -1, -10, 43, 127, 0, 4, 2, 3, 56, 47, 67, 43 } );
        assertCanEncodeAndDecodeToSameValue( new short[] { 1,2,3,45,5,6,7 } );
        assertCanEncodeAndDecodeToSameValue( new int[] { 1,2,3,4,5,6,7 } );
        assertCanEncodeAndDecodeToSameValue( new long[] { 1,2,3,4,5,6,7 } );
        assertCanEncodeAndDecodeToSameValue( new float[] { 0.34f, 0.21f } );
        // NOTE(review): 1 << 63 is an *int* shift (63 & 31 == 31), so these
        // elements are Integer.MIN_VALUE, not Long.MIN_VALUE; 1L << 63 was
        // presumably intended — confirm before changing the test data.
        assertCanEncodeAndDecodeToSameValue( new long[] { 1 << 63, 1 << 63 } );
        assertCanEncodeAndDecodeToSameValue( new long[] { 1 << 63, 1 << 63,
                1 << 63 } );
        assertCanEncodeAndDecodeToSameValue( new byte[] { 0, 0, 0, 0, 0, 0, 0,
                0, 0, 0, 0, 0, 0 } );
        assertCanEncodeAndDecodeToSameValue( new long[] { 0, 0, 0, 0, 0, 0, 0,
                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                0, 0, 0 } );
    }

    @Test
    public void testCannotEncodeMarginal() throws Exception
    {
        // 14 elements where one needs 16 bits should just exceed the payload.
        assertCanNotEncode( new long[] { 1l << 15, 1, 1, 1, 1, 1, 1, 1, 1,
                1, 1, 1, 1, 1 } );
    }

    @Test
    public void canEncodeBiggerArraysWithBiggerPayloadSize() throws Exception
    {
        int[] intArray = intArray( 10, 2600 );
        assertCanEncodeAndDecodeToSameValue( intArray, 32 );
    }

    private void assertCanNotEncode( Object intArray )
    {
        assertCanNotEncode( intArray, DEFAULT_PAYLOAD_SIZE );
    }

    private void assertCanNotEncode( Object intArray, int payloadSize )
    {
        assertFalse( ShortArray.encode( 0, intArray, new PropertyBlock(),
                payloadSize ) );
    }

    /** Builds {@code count} ints spaced {@code stride} apart, starting at 0. */
    private int[] intArray( int count, int stride )
    {
        int[] result = new int[count];
        for ( int i = 0; i < count; i++ )
        {
            result[i] = i*stride;
        }
        return result;
    }

    private void assertCanEncodeAndDecodeToSameValue( Object value )
    {
        assertCanEncodeAndDecodeToSameValue( value, PropertyType.getPayloadSize() );
    }

    private void assertCanEncodeAndDecodeToSameValue( Object value, int payloadSize )
    {
        PropertyBlock target = new PropertyBlock();
        boolean encoded = ShortArray.encode( 0, value, target, payloadSize );
        assertTrue( encoded );
        assertArraysEquals( value, ShortArray.decode( target ) );
    }

    /** Reflective element-wise equality for primitive arrays of any type. */
    private void assertArraysEquals( Object value1, Object value2 )
    {
        assertEquals( value1.getClass().getComponentType(), value2.getClass().getComponentType() );
        int length1 = Array.getLength( value1 );
        int length2 = Array.getLength( value2 );
        assertEquals( length1, length2 );
        for ( int i = 0; i < length1; i++ )
        {
            Object item1 = Array.get( value1, i );
            Object item2 = Array.get( value2, i );
            assertEquals( item1, item2 );
        }
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_nioneo_store_TestShortArray.java
|
1,236
|
/**
 * Verifies that a node's property keys — more of them than the initial
 * property index load threshold — created through the batch inserter are
 * all readable from within a subsequent write transaction.
 */
public class TestPropertyKey
{
    @Rule
    public EphemeralFileSystemRule fs = new EphemeralFileSystemRule();

    @Test
    public void lazyLoadWithinWriteTransaction() throws Exception
    {
        // Seed a store via the batch inserter with one heavily-propertied node.
        File dir = new File( "dir" );
        BatchInserter inserter = BatchInserters.inserter( dir.getPath(), fs.get() );
        int count = 3000;
        long nodeId = inserter.createNode( mapWithManyProperties( count /* larger than initial property index load threshold */ ) );
        inserter.shutdown();

        // Reopen normally and read the keys back inside a write transaction.
        GraphDatabaseService db = new TestGraphDatabaseFactory().setFileSystem( fs.get() ).newImpermanentDatabase( dir.getPath() );
        Transaction tx = db.beginTx();
        try
        {
            db.createNode();
            Node node = db.getNodeById( nodeId );
            assertEquals( count, IteratorUtil.count( node.getPropertyKeys() ) );
            tx.success();
        }
        finally
        {
            tx.finish();
            db.shutdown();
        }
    }

    /** Builds a map of {@code count} distinct string properties. */
    private Map<String, Object> mapWithManyProperties( int count )
    {
        Map<String, Object> properties = new HashMap<String, Object>();
        for ( int i = 0; i < count; i++ )
        {
            properties.put( "key:" + i, "value" );
        }
        return properties;
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_nioneo_store_TestPropertyKey.java
|
1,237
|
public class TestPropertyBlocks extends AbstractNeo4jTestCase
{
/**
 * Forces a fresh database for every test so property-record-in-use
 * counts are not influenced by earlier tests.
 */
@Override
protected boolean restartGraphDbBetweenTests()
{
    return true;
}
/**
 * One payload's worth of int properties (one block each) should all fit
 * in a single property record, and removing them all should free it.
 */
@Test
public void simpleAddIntegers()
{
    long inUseBefore = propertyRecordsInUse();
    Node node = getGraphDb().createNode();
    for ( int i = 0; i < PropertyType.getPayloadSizeLongs(); i++ )
    {
        node.setProperty( "prop" + i, i );
        // Still just one extra record, no matter how many ints so far.
        assertEquals( inUseBefore + 1, propertyRecordsInUse() );
        assertEquals( i, node.getProperty( "prop" + i ) );
    }
    newTransaction();
    clearCache();
    assertEquals( inUseBefore + 1, propertyRecordsInUse() );
    for ( int i = 0; i < PropertyType.getPayloadSizeLongs(); i++ )
    {
        assertEquals( i, node.getProperty( "prop" + i ) );
    }
    for ( int i = 0; i < PropertyType.getPayloadSizeLongs(); i++ )
    {
        assertEquals( i, node.removeProperty( "prop" + i ) );
        assertFalse( node.hasProperty( "prop" + i ) );
    }
    commit();
    // Record count back to the starting point once everything is removed.
    assertEquals( inUseBefore, propertyRecordsInUse() );
}
/**
 * Same as simpleAddIntegers but with doubles, which take two blocks
 * each, so only half as many fit in one property record.
 */
@Test
public void simpleAddDoubles()
{
    long inUseBefore = propertyRecordsInUse();
    Node node = getGraphDb().createNode();
    for ( int i = 0; i < PropertyType.getPayloadSizeLongs() / 2; i++ )
    {
        node.setProperty( "prop" + i, i * -1.0 );
        assertEquals( inUseBefore + 1, propertyRecordsInUse() );
        assertEquals( i * -1.0, node.getProperty( "prop" + i ) );
    }
    newTransaction();
    clearCache();
    assertEquals( inUseBefore + 1, propertyRecordsInUse() );
    for ( int i = 0; i < PropertyType.getPayloadSizeLongs() / 2; i++ )
    {
        assertEquals( i * -1.0, node.getProperty( "prop" + i ) );
    }
    for ( int i = 0; i < PropertyType.getPayloadSizeLongs() / 2; i++ )
    {
        assertEquals( i * -1.0, node.removeProperty( "prop" + i ) );
        assertFalse( node.hasProperty( "prop" + i ) );
    }
    commit();
    assertEquals( inUseBefore, propertyRecordsInUse() );
}
/**
 * Fills three property records with short strings, then empties the
 * middle record and checks it is released while the outer two survive
 * with their values intact.
 */
@Test
public void deleteEverythingInMiddleRecord()
{
    long inUseBefore = propertyRecordsInUse();
    Node node = getGraphDb().createNode();
    for ( int i = 0; i < 3 * PropertyType.getPayloadSizeLongs(); i++ )
    {
        node.setProperty( "shortString" + i, String.valueOf( i ) );
    }
    assertEquals( inUseBefore + 3, propertyRecordsInUse() );
    newTransaction();
    clearCache();

    // Remove exactly the middle record's worth of properties.
    for ( int i = PropertyType.getPayloadSizeLongs(); i < 2 * PropertyType.getPayloadSizeLongs(); i++ )
    {
        assertEquals( String.valueOf( i ), node.removeProperty( "shortString" + i ) );
    }
    newTransaction();
    clearCache();
    assertEquals( inUseBefore + 2, propertyRecordsInUse() );

    // First record's values still there and removable.
    for ( int i = 0; i < PropertyType.getPayloadSizeLongs(); i++ )
    {
        assertEquals( String.valueOf( i ), node.removeProperty( "shortString" + i ) );
    }
    // Middle record's properties are gone.
    for ( int i = PropertyType.getPayloadSizeLongs(); i < 2 * PropertyType.getPayloadSizeLongs(); i++ )
    {
        assertFalse( node.hasProperty( "shortString" + i ) );
    }
    // Last record's values still there and removable.
    for ( int i = 2 * PropertyType.getPayloadSizeLongs(); i < 3 * PropertyType.getPayloadSizeLongs(); i++ )
    {
        assertEquals( String.valueOf( i ), node.removeProperty( "shortString" + i ) );
    }
}
/**
 * Smoke test: repeatedly setting and removing the same property many
 * times within one transaction must still commit cleanly.
 */
@Test
public void largeTx() throws IOException
{
    Node node = getGraphDb().createNode();

    node.setProperty( "anchor", "hi" );
    for ( int i = 0; i < 255; i++ )
    {
        node.setProperty( "foo", 1 );
        node.removeProperty( "foo" );
    }
    commit();
}
/*
* Creates a PropertyRecord, fills it up, removes something and
* adds something that should fit.
*/
/**
 * Creates a full property record, removes all but one property and adds
 * a new one; the addition should reuse the freed space in the record.
 */
@Test
public void deleteAndAddToFullPropertyRecord()
{
    // Fill it up, each integer is one block
    Node node = getGraphDb().createNode();
    for ( int i = 0; i < PropertyType.getPayloadSizeLongs(); i++ )
    {
        node.setProperty( "prop" + i, i );
    }

    newTransaction();
    clearCache();

    // Remove all but one and add one
    for ( int i = 0; i < PropertyType.getPayloadSizeLongs() - 1; i++ )
    {
        assertEquals( i, node.removeProperty( "prop" + i ) );
    }
    node.setProperty( "profit", 5 );

    newTransaction();
    clearCache();

    // Verify
    int remainingProperty = PropertyType.getPayloadSizeLongs() - 1;
    assertEquals( remainingProperty, node.getProperty( "prop" + remainingProperty ) );
    assertEquals( 5, node.getProperty( "profit" ) );
}
/**
 * Exercises block packing: same-size replacement reuses the slot, two
 * one-block removals make room for one two-block value, and only a true
 * overflow allocates a second property record.
 */
@Test
public void checkPacking()
{
    long inUseBefore = propertyRecordsInUse();

    // Fill it up, each integer is one block
    Node node = getGraphDb().createNode();
    node.setProperty( "prop0", 0 );
    newTransaction();
    clearCache();

    // One record must have been added
    assertEquals( inUseBefore + 1, propertyRecordsInUse() );

    // Since integers take up one block, adding the remaining should not
    // create a new record.
    for ( int i = 1; i < PropertyType.getPayloadSizeLongs(); i++ )
    {
        node.setProperty( "prop" + i, i );
    }
    newTransaction();
    clearCache();

    assertEquals( inUseBefore + 1, propertyRecordsInUse() );

    // Removing one and adding one of the same size should not create a new
    // record.
    assertEquals( 0, node.removeProperty( "prop0" ) );
    node.setProperty( "prop-1", -1 );
    newTransaction();
    clearCache();

    assertEquals( inUseBefore + 1, propertyRecordsInUse() );

    // Removing two that take up 1 block and adding one that takes up 2
    // should not create a new record.
    assertEquals( -1, node.removeProperty( "prop-1" ) );
    // Hopefully prop1 exists, meaning payload is at least 16
    assertEquals( 1, node.removeProperty( "prop1" ) );
    // A double value should do the trick
    node.setProperty( "propDouble", 1.0 );
    newTransaction();
    clearCache();

    assertEquals( inUseBefore + 1, propertyRecordsInUse() );

    // Adding just one now should create a new property record.
    node.setProperty( "prop-2", -2 );
    newTransaction();
    clearCache();
    assertEquals( inUseBefore + 2, propertyRecordsInUse() );
}
/**
 * Removing one two-block double should leave room for exactly two
 * one-block ints in the same property record; a third int then forces
 * a new record.
 */
@Test
public void substituteOneLargeWithManySmallPropBlocks()
{
    Node node = getGraphDb().createNode();
    long inUseBefore = propertyRecordsInUse();
    /*
     * Fill up with doubles and the rest with ints - we assume
     * the former take up two blocks, the latter 1.
     */
    for ( int i = 0; i < PropertyType.getPayloadSizeLongs() / 2; i++ )
    {
        node.setProperty( "double" + i, i * 1.0 );
    }
    /*
     * I know this is stupid in that it is executed 0 or 1 times but it
     * is easier to maintain and change for different payload sizes.
     */
    for ( int i = 0; i < PropertyType.getPayloadSizeLongs() % 2; i++ )
    {
        node.setProperty( "int" + i, i );
    }
    newTransaction();
    clearCache();

    // Just checking that the assumptions above is correct
    assertEquals( inUseBefore + 1, propertyRecordsInUse() );

    // We assume at least one double has been added
    node.removeProperty( "double0" );
    newTransaction();
    clearCache();
    assertEquals( inUseBefore + 1, propertyRecordsInUse() );

    // Do the actual substitution, check that no record is created
    node.setProperty( "int-1", -1 );
    node.setProperty( "int-2", -2 );
    newTransaction();
    clearCache();
    assertEquals( inUseBefore + 1, propertyRecordsInUse() );

    // Finally, make sure we actually are with a full prop record
    node.setProperty( "int-3", -3 );
    newTransaction();
    clearCache();
    assertEquals( inUseBefore + 2, propertyRecordsInUse() );
}
/*
 * Adds at least 3 1-block properties and removes the first and third.
 * Adds a 2-block property and checks if it is added in the same record,
 * i.e. that the two non-adjacent freed blocks are reused.
 */
@Test
public void testBlockDefragmentationWithTwoSpaces()
{
    // Setup only makes sense when the payload holds more than two blocks.
    Assume.assumeTrue( PropertyType.getPayloadSizeLongs() > 2 );
    Node node = getGraphDb().createNode();
    long inUseBefore = propertyRecordsInUse();
    int stuffedIntegers = 0;
    for ( ; stuffedIntegers < PropertyType.getPayloadSizeLongs(); stuffedIntegers++ )
    {
        node.setProperty( "int" + stuffedIntegers, stuffedIntegers );
    }
    // Basic check that integers take up one (8 byte) block.
    assertEquals( stuffedIntegers, PropertyType.getPayloadSizeLongs() );
    newTransaction();
    clearCache();
    assertEquals( inUseBefore + 1, propertyRecordsInUse() );
    // Remove first and third, leaving two non-adjacent one-block holes.
    node.removeProperty( "int0" );
    node.removeProperty( "int2" );
    newTransaction();
    clearCache();
    // Add the two block thing.
    node.setProperty( "theDouble", 1.0 );
    newTransaction();
    clearCache();
    // Let's make sure everything is in one record and with proper values.
    assertEquals( inUseBefore + 1, propertyRecordsInUse() );
    assertNull( node.getProperty( "int0", null ) );
    assertEquals( 1, node.getProperty( "int1" ) );
    assertNull( node.getProperty( "int2", null ) );
    for ( int i = 3; i < stuffedIntegers; i++ )
    {
        assertEquals( i, node.getProperty( "int" + i ) );
    }
    assertEquals( 1.0, node.getProperty( "theDouble" ) );
}
/*
 * Fills one record with booleans plus one overflow property, removes all the
 * booleans, and checks that the emptied record is freed while the record
 * holding the extra property survives.
 */
@Test
public void checkDeletesRemoveRecordsWhenProper()
{
    Node node = getGraphDb().createNode();
    long recordsInUseAtStart = propertyRecordsInUse();
    int stuffedBooleans = 0;
    for ( ; stuffedBooleans < PropertyType.getPayloadSizeLongs(); stuffedBooleans++ )
    {
        node.setProperty( "boolean" + stuffedBooleans, stuffedBooleans % 2 == 0 );
    }
    newTransaction();
    clearCache();
    assertEquals( recordsInUseAtStart + 1, propertyRecordsInUse() );
    // First record is full, so this spills into a second record.
    node.setProperty( "theExraOne", true );
    newTransaction();
    clearCache();
    assertEquals( recordsInUseAtStart + 2, propertyRecordsInUse() );
    for ( int i = 0; i < stuffedBooleans; i++ )
    {
        assertEquals( Boolean.valueOf( i % 2 == 0 ), node.removeProperty( "boolean" + i ) );
    }
    newTransaction();
    clearCache();
    // The record that held only booleans must have been freed.
    assertEquals( recordsInUseAtStart + 1, propertyRecordsInUse() );
    for ( int i = 0; i < stuffedBooleans; i++ )
    {
        assertFalse( node.hasProperty( "boolean" + i ) );
    }
    assertEquals( Boolean.TRUE, node.getProperty( "theExraOne" ) );
}
/*
 * Creates 3 records and deletes stuff from the middle one. Assumes that a 2 character
 * string that is a number fits in one block.
 */
@Test
public void testMessWithMiddleRecordDeletes()
{
    Node node = getGraphDb().createNode();
    long recordsInUseAtStart = propertyRecordsInUse();
    int stuffedShortStrings = 0;
    // Three records' worth of one-block short strings.
    for ( ; stuffedShortStrings < 3 * PropertyType.getPayloadSizeLongs(); stuffedShortStrings++ )
    {
        node.setProperty( "shortString" + stuffedShortStrings, String.valueOf( stuffedShortStrings ) );
    }
    newTransaction();
    clearCache();
    assertEquals( recordsInUseAtStart + 3, propertyRecordsInUse() );
    // Punch two holes in the middle record.
    int secondBlockInSecondRecord = PropertyType.getPayloadSizeLongs() + 1;
    int thirdBlockInSecondRecord = PropertyType.getPayloadSizeLongs() + 2;
    assertEquals( String.valueOf( secondBlockInSecondRecord ),
            node.removeProperty( "shortString" + secondBlockInSecondRecord ) );
    assertEquals( String.valueOf( thirdBlockInSecondRecord ),
            node.removeProperty( "shortString" + thirdBlockInSecondRecord ) );
    newTransaction();
    clearCache();
    // Partially-empty middle record stays allocated.
    assertEquals( recordsInUseAtStart + 3, propertyRecordsInUse() );
    for ( int i = 0; i < stuffedShortStrings; i++ )
    {
        if ( i == secondBlockInSecondRecord )
        {
            assertFalse( node.hasProperty( "shortString" + i ) );
        }
        else if ( i == thirdBlockInSecondRecord )
        {
            assertFalse( node.hasProperty( "shortString" + i ) );
        }
        else
        {
            assertEquals( String.valueOf( i ), node.getProperty( "shortString" + i ) );
        }
    }
    // Start deleting stuff. First, all the middle property blocks
    int deletedProps = 0;
    for ( int i = PropertyType.getPayloadSizeLongs(); i < PropertyType.getPayloadSizeLongs() * 2; i++ )
    {
        if ( node.hasProperty( "shortString" + i ) )
        {
            deletedProps++;
            node.removeProperty( "shortString" + i );
        }
    }
    // Two were already removed above.
    assertEquals( PropertyType.getPayloadSizeLongs() - 2, deletedProps );
    newTransaction();
    clearCache();
    // The now-empty middle record must have been freed.
    assertEquals( recordsInUseAtStart + 2, propertyRecordsInUse() );
    for ( int i = 0; i < PropertyType.getPayloadSizeLongs(); i++ )
    {
        assertEquals( String.valueOf( i ), node.removeProperty( "shortString" + i ) );
    }
    for ( int i = PropertyType.getPayloadSizeLongs(); i < PropertyType.getPayloadSizeLongs() * 2; i++ )
    {
        assertFalse( node.hasProperty( "shortString" + i ) );
    }
    for ( int i = PropertyType.getPayloadSizeLongs() * 2; i < PropertyType.getPayloadSizeLongs() * 3; i++ )
    {
        assertEquals( String.valueOf( i ), node.removeProperty( "shortString" + i ) );
    }
}
/**
 * Removes two short strings from a full record and replaces them with a
 * two-block double, checking the freed blocks are packed into the same
 * record and all remaining values are intact.
 */
@Test
public void mixAndPackDifferentTypes()
{
    Node node = getGraphDb().createNode();
    long recordsBefore = propertyRecordsInUse();
    int stored = 0;
    while ( stored < PropertyType.getPayloadSizeLongs() )
    {
        node.setProperty( "shortString" + stored, String.valueOf( stored ) );
        stored++;
    }
    newTransaction();
    clearCache();
    assertEquals( recordsBefore + 1, propertyRecordsInUse() );
    // Free two one-block slots, then fill them with a two-block double.
    node.removeProperty( "shortString0" );
    node.removeProperty( "shortString2" );
    node.setProperty( "theDoubleOne", -1.0 );
    newTransaction();
    clearCache();
    assertEquals( recordsBefore + 1, propertyRecordsInUse() );
    for ( int i = 0; i < stored; i++ )
    {
        if ( i == 0 || i == 2 )
        {
            assertFalse( node.hasProperty( "shortString" + i ) );
        }
        else
        {
            assertEquals( String.valueOf( i ), node.getProperty( "shortString" + i ) );
        }
    }
    assertEquals( -1.0, node.getProperty( "theDoubleOne" ) );
}
/*
 * Removes a two-block value from the first record and checks that a later
 * two-block addition reuses that space instead of allocating a new record.
 */
@Test
public void testAdditionsHappenAtTheFirstRecordIfFits1()
{
    Node node = getGraphDb().createNode();
    long recordsInUseAtStart = propertyRecordsInUse();
    node.setProperty( "int1", 1 );
    node.setProperty( "double1", 1.0 );
    node.setProperty( "int2", 2 );
    newTransaction();
    clearCache();
    assertEquals( recordsInUseAtStart + 1, propertyRecordsInUse() );
    // Frees the two blocks the double occupied.
    node.removeProperty( "double1" );
    newTransaction();
    clearCache();
    // Another double fits in the freed space - no new record.
    node.setProperty( "double2", 1.0 );
    newTransaction();
    clearCache();
    assertEquals( recordsInUseAtStart + 1, propertyRecordsInUse() );
    // The record is now full again; one more property needs a new record.
    node.setProperty( "paddingBoolean", false );
    newTransaction();
    clearCache();
    assertEquals( recordsInUseAtStart + 2, propertyRecordsInUse() );
}
/*
 * Checks that additions land in whichever record of the chain has space.
 * Ignored: the implementation only looks at the first record, not the whole
 * chain (see the @Ignore message).
 */
@Test
@Ignore( "Assumes space to put a block is searched along the whole chain - that is not the case currently" )
public void testAdditionsHappenAtTheFirstRecordWhenFits()
{
    Node node = getGraphDb().createNode();
    long recordsInUseAtStart = propertyRecordsInUse();
    node.setProperty( "int1", 1 );
    node.setProperty( "double1", 1.0 );
    node.setProperty( "int2", 2 );
    newTransaction();
    clearCache();
    assertEquals( recordsInUseAtStart + 1, propertyRecordsInUse() );
    // Free one block; a two-block double then needs a second record.
    node.removeProperty( "int1" );
    newTransaction();
    clearCache();
    node.setProperty( "double2", 1.0 );
    newTransaction();
    clearCache();
    assertEquals( recordsInUseAtStart + 2, propertyRecordsInUse() );
    // Free another single block, expect the next double to reuse chain space.
    node.removeProperty( "int2" );
    newTransaction();
    clearCache();
    node.setProperty( "double3", 1.0 );
    newTransaction();
    clearCache();
    assertEquals( recordsInUseAtStart + 2, propertyRecordsInUse() );
    node.setProperty( "paddingBoolean", false );
    newTransaction();
    clearCache();
    assertEquals( recordsInUseAtStart + 2, propertyRecordsInUse() );
}
/*
 * With a two-record chain, frees a block in the second record and checks
 * that a new one-block property fits there without growing the chain.
 */
@Test
public void testAdditionHappensInTheMiddleIfItFits()
{
    Node node = getGraphDb().createNode();
    long recordsInUseAtStart = propertyRecordsInUse();
    node.setProperty( "int1", 1 );
    node.setProperty( "double1", 1.0 );
    node.setProperty( "int2", 2 );
    int stuffedShortStrings = 0;
    for ( ; stuffedShortStrings < PropertyType.getPayloadSizeLongs(); stuffedShortStrings++ )
    {
        node.setProperty( "shortString" + stuffedShortStrings, String.valueOf( stuffedShortStrings ) );
    }
    newTransaction();
    clearCache();
    assertEquals( recordsInUseAtStart + 2, propertyRecordsInUse() );
    // Free one block, then add a one-block int - record count must not grow.
    node.removeProperty( "shortString" + 1 );
    node.setProperty( "int3", 3 );
    newTransaction();
    clearCache();
    assertEquals( recordsInUseAtStart + 2, propertyRecordsInUse() );
}
/**
 * Fills one record with short strings and then overwrites one of them with
 * a double - a type change inside a full record - and commits.
 */
@Test
public void testChangePropertyType()
{
    Node node = getGraphDb().createNode();
    long recordsBefore = propertyRecordsInUse();
    int count = PropertyType.getPayloadSizeLongs();
    for ( int i = 0; i < count; i++ )
    {
        node.setProperty( "shortString" + i, String.valueOf( i ) );
    }
    newTransaction();
    clearCache();
    assertEquals( recordsBefore + 1, propertyRecordsInUse() );
    // Replace a one-block string with a two-block double.
    node.setProperty( "shortString1", 1.0 );
    commit();
}
/**
 * Fills records with short arrays, grows a value sitting in a middle record
 * and verifies packing/overflow behaviour of the property record chain.
 * Ignored for the reason in the @Ignore message.
 */
@Test
@Ignore( "Assumes space to put a block is searched along the whole chain - that is not the case currently" )
public void testPackingAndOverflowingValueChangeInMiddleRecord()
{
    Node node = getGraphDb().createNode();
    long recordsInUseAtStart = propertyRecordsInUse();
    long valueRecordsInUseAtStart = dynamicArrayRecordsInUse();
    int shortArrays = 0;
    for ( ; shortArrays < PropertyType.getPayloadSizeLongs() - 1; shortArrays++ )
    {
        node.setProperty( "shortArray" + shortArrays, new long[] { 1, 2, 3, 4 } );
    }
    newTransaction();
    clearCache();
    assertEquals( recordsInUseAtStart + 1, propertyRecordsInUse() );
    // Takes up two blocks
    node.setProperty( "theDoubleThatBecomesAnArray", 1.0 );
    newTransaction();
    clearCache();
    assertEquals( recordsInUseAtStart + 2, propertyRecordsInUse() );
    // This takes up three blocks.
    // BUG FIX: the original wrote "1 << 63" - an int shift, whose distance is
    // reduced mod 32 by the JLS, yielding 1 << 31 rather than the intended
    // Long.MIN_VALUE. Use a long literal so the shift happens in 64 bits.
    node.setProperty( "theLargeArray", new long[] { 1L << 63, 1L << 63 } );
    newTransaction();
    clearCache();
    assertTrue( Arrays.equals( new long[] { 1L << 63, 1L << 63 }, (long[]) node.getProperty( "theLargeArray" ) ) );
    assertEquals( recordsInUseAtStart + 3, propertyRecordsInUse() );
    node.setProperty( "fillerByte1", (byte) 3 );
    newTransaction();
    clearCache();
    assertEquals( recordsInUseAtStart + 3, propertyRecordsInUse() );
    node.setProperty( "fillerByte2", (byte) -4 );
    assertEquals( recordsInUseAtStart + 3, propertyRecordsInUse() );
    // Make it take up 3 blocks instead of 2
    node.setProperty( "theDoubleThatBecomesAnArray", new long[] { 1L << 63, 1L << 63, 1L << 63 } );
    assertEquals( valueRecordsInUseAtStart, dynamicArrayRecordsInUse() );
    assertEquals( recordsInUseAtStart + 4, propertyRecordsInUse() );
    newTransaction();
    clearCache();
    assertEquals( recordsInUseAtStart + 4, propertyRecordsInUse() );
    while ( shortArrays-- > 0 )
    {
        assertTrue( Arrays.equals( new long[] { 1, 2, 3, 4 },
                (long[]) node.getProperty( "shortArray" + shortArrays ) ) );
    }
    assertEquals( (byte) 3, node.getProperty( "fillerByte1" ) );
    assertEquals( (byte) -4, node.getProperty( "fillerByte2" ) );
    assertTrue( Arrays.equals( new long[] { 1L << 63, 1L << 63 }, (long[]) node.getProperty( "theLargeArray" ) ) );
    assertTrue( Arrays.equals( new long[] { 1L << 63, 1L << 63, 1L << 63 },
            (long[]) node.getProperty( "theDoubleThatBecomesAnArray" ) ) );
}
/**
 * Grows a double property into a three-block array (overflowing into a new
 * record) and then shrinks it back, verifying the values survive a commit.
 */
@Test
public void testRevertOverflowingChange()
{
    Relationship rel = getGraphDb().createNode()
            .createRelationshipTo( getGraphDb().createNode(),
                    DynamicRelationshipType.withName( "INVALIDATES" ) );
    long recordsInUseAtStart = propertyRecordsInUse();
    long valueRecordsInUseAtStart = dynamicArrayRecordsInUse();
    rel.setProperty( "theByte", (byte) -8 );
    rel.setProperty( "theDoubleThatGrows", Math.PI );
    rel.setProperty( "theInteger", -444345 );
    assertEquals( recordsInUseAtStart + 1, propertyRecordsInUse() );
    // BUG FIX: "1 << 63" is an int shift (distance taken mod 32, i.e. 1 << 31),
    // not the intended Long.MIN_VALUE; use a long literal.
    rel.setProperty( "theDoubleThatGrows", new long[] { 1L << 63, 1L << 63, 1L << 63 } );
    assertEquals( recordsInUseAtStart + 2, propertyRecordsInUse() );
    assertEquals( valueRecordsInUseAtStart, dynamicArrayRecordsInUse() );
    // Shrink back to a two-block double.
    rel.setProperty( "theDoubleThatGrows", Math.E );
    assertEquals( recordsInUseAtStart + 2, propertyRecordsInUse() );
    newTransaction();
    clearCache();
    /*
     * The following line should pass if we have packing on property block
     * size shrinking.
     */
    // assertEquals( recordsInUseAtStart + 1, propertyRecordsInUse() );
    assertEquals( (byte) -8, rel.getProperty( "theByte" ) );
    assertEquals( -444345, rel.getProperty( "theInteger" ) );
    assertEquals( Math.E, rel.getProperty( "theDoubleThatGrows" ) );
}
// Yoyo-grows an array property within a single transaction.
@Test
public void testYoYoArrayPropertyWithinTx()
{
    testYoyoArrayBase( false );
}
// Yoyo-grows an array property, committing between each growth step.
@Test
public void testYoYoArrayPropertyOverTxs()
{
    testYoyoArrayBase( true );
}
/**
 * Grows a Long[] property one element at a time and checks it stays inline
 * in the property record until it no longer fits, at which point exactly one
 * dynamic array record is allocated.
 *
 * @param withNewTx whether to commit and clear the cache between steps
 */
private void testYoyoArrayBase( boolean withNewTx )
{
    Relationship rel = getGraphDb().createNode().createRelationshipTo( getGraphDb().createNode(),
            DynamicRelationshipType.withName( "LOCKS" ) );
    long recordsInUseAtStart = propertyRecordsInUse();
    long valueRecordsInUseAtStart = dynamicArrayRecordsInUse();
    List<Long> theYoyoData = new ArrayList<Long>();
    for ( int i = 0; i < PropertyType.getPayloadSizeLongs() - 1; i++ )
    {
        // Uppercase long literal: the original's "1l" is easily misread as "11".
        theYoyoData.add( 1L << 63 );
        Long[] value = theYoyoData.toArray( new Long[0] );
        rel.setProperty( "yoyo", value );
        // Still fits inline - no extra property or dynamic records.
        assertEquals( recordsInUseAtStart + 1, propertyRecordsInUse() );
        assertEquals( valueRecordsInUseAtStart, dynamicArrayRecordsInUse() );
        if ( withNewTx )
        {
            newTransaction();
            clearCache();
        }
    }
    // One element more than fits inline - the array spills to dynamic storage.
    theYoyoData.add( 1L << 63 );
    Long[] value = theYoyoData.toArray( new Long[0] );
    rel.setProperty( "yoyo", value );
    assertEquals( recordsInUseAtStart + 1, propertyRecordsInUse() );
    assertEquals( valueRecordsInUseAtStart + 1, dynamicArrayRecordsInUse() );
    newTransaction();
    clearCache();
    // BUG FIX: was "1 << 63", an int shift (== 1 << 31); use long literals.
    rel.setProperty( "filler", new long[] { 1L << 63, 1L << 63, 1L << 63 } );
    assertEquals( recordsInUseAtStart + 2, propertyRecordsInUse() );
}
/*
 * Fills three property records with ints, then removes block i from every
 * record in a zig-zag pattern, asserting on record counts as blocks go away,
 * and finally checks all properties are gone.
 */
@Test
public void testRemoveZigZag()
{
    Relationship rel = getGraphDb().createNode().createRelationshipTo( getGraphDb().createNode(),
            DynamicRelationshipType.withName( "LOCKS" ) );
    long recordsInUseAtStart = propertyRecordsInUse();
    int propRecCount = 1;
    for ( ; propRecCount <= 3; propRecCount++ )
    {
        for ( int i = 1; i <= PropertyType.getPayloadSizeLongs(); i++ )
        {
            rel.setProperty( "int" + ( propRecCount * 10 + i ), ( propRecCount * 10 + i ) );
            assertEquals( recordsInUseAtStart + propRecCount, propertyRecordsInUse() );
        }
    }
    newTransaction();
    clearCache();
    for ( int i = 1; i <= PropertyType.getPayloadSizeLongs(); i++ )
    {
        for ( int j = 1; j < propRecCount; j++ )
        {
            assertEquals( j * 10 + i, rel.removeProperty( "int" + ( j * 10 + i ) ) );
            // NOTE(review): i is a block index bounded by getPayloadSizeLongs(),
            // but it is compared against getPayloadSize() - 1, which by its name
            // looks like a byte count - these branches may be unreachable and the
            // else always taken; confirm the intended condition.
            if ( i == PropertyType.getPayloadSize() - 1 && j != propRecCount - 1 )
            {
                assertEquals( recordsInUseAtStart + ( propRecCount - j ), propertyRecordsInUse() );
            }
            else if ( i == PropertyType.getPayloadSize() - 1 && j == propRecCount - 1 )
            {
                assertEquals( recordsInUseAtStart, propertyRecordsInUse() );
            }
            else
            {
                assertEquals( recordsInUseAtStart + 3, propertyRecordsInUse() );
            }
        }
    }
    for ( int i = 1; i <= PropertyType.getPayloadSizeLongs(); i++ )
    {
        for ( int j = 1; j < propRecCount; j++ )
        {
            assertFalse( rel.hasProperty( "int" + ( j * 10 + i ) ) );
        }
    }
    newTransaction();
    // Same check after commit: removals are durable and records are freed.
    for ( int i = 1; i <= PropertyType.getPayloadSizeLongs(); i++ )
    {
        for ( int j = 1; j < propRecCount; j++ )
        {
            assertFalse( rel.hasProperty( "int" + ( j * 10 + i ) ) );
        }
    }
    assertEquals( recordsInUseAtStart, propertyRecordsInUse() );
}
/*
 * Overwrites a short-string property with another value of equal length and
 * checks the update is visible.
 * NOTE(review): despite the test name, the second value differs from the
 * first in its last characters ("...67ff" vs "...67ef") - confirm intent.
 */
@Test
public void testSetWithSameValue()
{
    Node node = getGraphDb().createNode();
    node.setProperty( "rev_pos", "40000633e7ad67ff" );
    assertEquals( "40000633e7ad67ff", node.getProperty( "rev_pos" ) );
    newTransaction();
    clearCache();
    node.setProperty( "rev_pos", "40000633e7ad67ef" );
    assertEquals( "40000633e7ad67ef", node.getProperty( "rev_pos" ) );
}
/*
 * Grows a string property one character at a time until it spills into a
 * dynamic string record, then shrinks it so it fits inline again and checks
 * the dynamic record is freed while the property record is reused.
 */
private void testStringYoYoBase( boolean withNewTx )
{
    Node node = getGraphDb().createNode();
    long recordsInUseAtStart = propertyRecordsInUse();
    long valueRecordsInUseAtStart = dynamicStringRecordsInUse();
    String data = "0";
    int counter = 1;
    // Keep growing until the value no longer fits inline.
    while ( dynamicStringRecordsInUse() == valueRecordsInUseAtStart )
    {
        data += counter++;
        node.setProperty( "yoyo", data );
        assertEquals( recordsInUseAtStart + 1, propertyRecordsInUse() );
        if ( withNewTx )
        {
            newTransaction();
            clearCache();
        }
    }
    assertEquals( valueRecordsInUseAtStart + 1, dynamicStringRecordsInUse() );
    // Shrink by two characters - should fit inline again.
    data = data.substring( 0, data.length() - 2 );
    node.setProperty( "yoyo", data );
    newTransaction();
    assertEquals( valueRecordsInUseAtStart, dynamicStringRecordsInUse() );
    assertEquals( recordsInUseAtStart + 1, propertyRecordsInUse() );
    // The record is full, so another property forces a second record.
    node.setProperty( "fillerBoolean", true );
    assertEquals( recordsInUseAtStart + 2, propertyRecordsInUse() );
}
// String yoyo with a commit + cache clear between each growth step.
@Test
public void testStringYoYoWithTx()
{
    testStringYoYoBase( true );
}
// String yoyo entirely within one transaction.
@Test
public void testStringYoYoWithoutTx()
{
    testStringYoYoBase( false );
}
/**
 * Mixes sets and type-changing overwrites across two property records and
 * checks the final values survive a commit and cache clear.
 */
@Test
public void testRemoveFirstOfTwo()
{
    Node node = getGraphDb().createNode();
    long recordsInUseAtStart = propertyRecordsInUse();
    node.setProperty( "Double1", 1.0 );
    node.setProperty( "Int1", 1 );
    node.setProperty( "Int2", 2 );
    // Deliberate type change and change back on the same key.
    node.setProperty( "Int2", 1.2 );
    node.setProperty( "Int2", 2 );
    node.setProperty( "Double3", 3.0 );
    assertEquals( recordsInUseAtStart + 2, propertyRecordsInUse() );
    newTransaction();
    clearCache();
    // Use valueOf instead of the deprecated boxing constructors
    // (new Double/new Integer are deprecated since Java 9).
    assertEquals( Double.valueOf( 1.0 ), node.getProperty( "Double1" ) );
    assertEquals( Integer.valueOf( 1 ), node.getProperty( "Int1" ) );
    assertEquals( Integer.valueOf( 2 ), node.getProperty( "Int2" ) );
    assertEquals( Double.valueOf( 3.0 ), node.getProperty( "Double3" ) );
}
/**
 * Deleting a node in the same transaction that allocated it an extra
 * property record must free all its records on commit.
 */
@Test
public void deleteNodeWithNewPropertyRecordShouldFreeTheNewRecord() throws Exception
{
    final long propcount = getNodeManager().getNumberOfIdsInUse( PropertyStore.class );
    Node node = getGraphDb().createNode();
    node.setProperty( "one", 1 );
    node.setProperty( "two", 2 );
    node.setProperty( "three", 3 );
    node.setProperty( "four", 4 );
    assertEquals( "Invalid assumption: property record count", propcount + 1,
            getNodeManager().getNumberOfIdsInUse( PropertyStore.class ) );
    newTransaction();
    assertEquals( "Invalid assumption: property record count", propcount + 1,
            getNodeManager().getNumberOfIdsInUse( PropertyStore.class ) );
    // Does not fit in the existing record - allocates a second one.
    node.setProperty( "final", 666 );
    assertEquals( "Invalid assumption: property record count", propcount + 2,
            getNodeManager().getNumberOfIdsInUse( PropertyStore.class ) );
    node.delete();
    commit();
    assertEquals( "All property records should be freed", propcount,
            getNodeManager().getNumberOfIdsInUse( PropertyStore.class ) );
}
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_nioneo_store_TestPropertyBlocks.java
|
1,238
|
/**
 * Load-generating thread: after a warm-up phase, hammers the window pool
 * with a random mix of reads and writes until {@link #halted} is set.
 */
private class Worker extends Thread
{
    private volatile boolean halted;
    private final Random random = new Random();
    // Number of operations performed after warm-up; read via waitForEnd().
    private long count;

    @Override
    public void run()
    {
        warmItUp();
        while ( !halted )
        {
            // 60% reads, 40% writes.
            OperationType type = randomOperationTypeButFavoringReads(0.6f);
            long id = randomPosition();
            PersistenceWindow window = pool.acquire( id, type );
            try
            {
                switch ( type )
                {
                case READ:
                    readStuff( window, id );
                    break;
                case WRITE:
                    writeStuff( window, id );
                    break;
                }
            }
            finally
            {
                // Always release, even if an assertion inside the try throws.
                pool.release( window );
            }
            count++;
        }
    }

    // Performs a burst of reads so measurement starts on a warmed pool.
    private void warmItUp()
    {
        for ( int i = 0; i < 100000; i++ )
        {
            long id = randomPosition();
            PersistenceWindow window = pool.acquire( id, OperationType.READ );
            try
            {
                readStuff( window, id );
            }
            finally
            {
                pool.release( window );
            }
        }
    }

    /** Joins the thread and returns how many operations it performed. */
    private synchronized long waitForEnd() throws InterruptedException
    {
        join();
        return count;
    }

    // Reads a long at the record position and, if a write for this id has been
    // recorded, verifies the stored value matches.
    private void readStuff( PersistenceWindow window, long id )
    {
        Buffer buffer = window.getOffsettedBuffer( id );
        long read = buffer.getLong();
        // Just having this Map lookup affects the test too much.
        Long existingValue = values.get( id );
        if ( existingValue != null )
        {
            Assert.assertEquals( existingValue.longValue(), read );
        }
    }

    // Writes a random long and remembers it so later reads can verify it.
    private void writeStuff( PersistenceWindow window, long id )
    {
        Buffer buffer = window.getOffsettedBuffer( id );
        long value = random.nextLong();
        buffer.putLong( value );
        // Just having this Map lookup affects the test too much.
        values.put( id, value );
    }

    // Random record id within the current file size.
    private long randomPosition()
    {
        return random.nextInt( (int)(fileSize/recordSize) );
    }

    private OperationType randomOperationTypeButFavoringReads( float percentageReads )
    {
        return random.nextFloat() <= percentageReads ? OperationType.READ : OperationType.WRITE;
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_nioneo_store_TestPersistenceWindowPoolContention.java
|
1,239
|
/**
 * Contention measurement for the persistence window pool: several worker
 * threads hammer the pool with random reads and writes while the main thread
 * periodically grows the backing file. Not a correctness test (hence @Ignore).
 */
@Ignore( "Not a proper test really, merely a contention measurement" )
public class TestPersistenceWindowPoolContention
{
    private static final int recordSize = 30;
    private static final long mappingSize = giga( 1 );
    // FIX: read concurrently by the workers (randomPosition) while the main
    // thread grows it - volatile guarantees the workers see the updates.
    private volatile long fileSize = mega( 800 );
    private StoreChannel channel;
    private PersistenceWindowPool pool;
    // Shared read/write log; ConcurrentHashMap keeps worker access thread-safe.
    private final Map<Long, Long> values = new ConcurrentHashMap<Long, Long>();

    @Before
    public void before() throws Exception
    {
        File file = new File( "target/bigfile" );
        // FIX: File.delete() returns false when the file does not exist (e.g.
        // on a first run), which used to fail setup spuriously. Only assert
        // the deletion when there is something to delete.
        if ( file.exists() )
        {
            assertTrue( "delete " + file, file.delete() );
        }
        channel = new StoreFileChannel( new RandomAccessFile( file, "rw" ).getChannel() );
        write( channel, fileSize );
        pool = new PersistenceWindowPool( new File("contention test"), recordSize, channel, mappingSize,
                true, false, new ConcurrentHashMap<Long, PersistenceRow>(), BrickElementFactory.DEFAULT,
                StringLogger.DEV_NULL );
    }

    /** Extends the file to {@code bytes} by writing a single byte at that offset. */
    private void write( StoreChannel channel, long bytes ) throws IOException
    {
        channel.position( bytes );
        channel.write( ByteBuffer.wrap( new byte[1] ) );
        channel.position( 0 );
        channel.force( true );
    }

    @After
    public void after() throws Exception
    {
        // close() is package-access, so a little good ol' reflection
        Method closeMethod = pool.getClass().getDeclaredMethod( "close" );
        closeMethod.setAccessible( true );
        closeMethod.invoke( pool );
        channel.close();
    }

    private static long kilo( long i )
    {
        return i*1024;
    }

    private static long mega( long i )
    {
        return kilo( kilo( i ) );
    }

    private static long giga( long i )
    {
        return mega( kilo( i ) );
    }

    /**
     * Runs 8 workers for ~3 minutes, printing pool statistics and growing the
     * backing file every couple of seconds, then reports total throughput.
     */
    @Test
    public void triggerContentionAmongstPersistenceWindows() throws Exception
    {
        List<Worker> workers = new ArrayList<Worker>();
        for ( int i = 0; i < 8; i++ )
        {
            Worker worker = new Worker();
            workers.add( worker );
            worker.start();
        }
        long endTime = System.currentTimeMillis() + SECONDS.toMillis( 60*3 );
        int tick = 2;
        while ( System.currentTimeMillis() < endTime )
        {
            Thread.sleep( SECONDS.toMillis( tick ) );
            System.out.println( getPoolStats() );
            fileSize += mega( tick );
        }
        for ( Worker worker : workers )
        {
            worker.halted = true;
        }
        long total = 0;
        for ( Worker putter : workers )
        {
            total += putter.waitForEnd();
        }
        System.out.println( "total:" + total );
    }

    // getStats() is package-access too - reflection again.
    private String getPoolStats() throws Exception
    {
        Method method = pool.getClass().getDeclaredMethod( "getStats" );
        method.setAccessible( true );
        return method.invoke( pool ).toString();
    }

    /**
     * Load-generating thread: after a warm-up phase, hammers the pool with a
     * random mix of reads and writes until {@link #halted} is set.
     */
    private class Worker extends Thread
    {
        private volatile boolean halted;
        private final Random random = new Random();
        // Operations performed after warm-up; read via waitForEnd().
        private long count;

        @Override
        public void run()
        {
            warmItUp();
            while ( !halted )
            {
                // 60% reads, 40% writes.
                OperationType type = randomOperationTypeButFavoringReads(0.6f);
                long id = randomPosition();
                PersistenceWindow window = pool.acquire( id, type );
                try
                {
                    switch ( type )
                    {
                    case READ:
                        readStuff( window, id );
                        break;
                    case WRITE:
                        writeStuff( window, id );
                        break;
                    }
                }
                finally
                {
                    pool.release( window );
                }
                count++;
            }
        }

        // A burst of reads so measurement starts on a warmed pool.
        private void warmItUp()
        {
            for ( int i = 0; i < 100000; i++ )
            {
                long id = randomPosition();
                PersistenceWindow window = pool.acquire( id, OperationType.READ );
                try
                {
                    readStuff( window, id );
                }
                finally
                {
                    pool.release( window );
                }
            }
        }

        /** Joins the thread and returns how many operations it performed. */
        private synchronized long waitForEnd() throws InterruptedException
        {
            join();
            return count;
        }

        private void readStuff( PersistenceWindow window, long id )
        {
            Buffer buffer = window.getOffsettedBuffer( id );
            long read = buffer.getLong();
            // Just having this Map lookup affects the test too much.
            Long existingValue = values.get( id );
            if ( existingValue != null )
            {
                Assert.assertEquals( existingValue.longValue(), read );
            }
        }

        private void writeStuff( PersistenceWindow window, long id )
        {
            Buffer buffer = window.getOffsettedBuffer( id );
            long value = random.nextLong();
            buffer.putLong( value );
            // Just having this Map lookup affects the test too much.
            values.put( id, value );
        }

        // Random record id within the current file size.
        private long randomPosition()
        {
            return random.nextInt( (int)(fileSize/recordSize) );
        }

        private OperationType randomOperationTypeButFavoringReads( float percentageReads )
        {
            return random.nextFloat() <= percentageReads ? OperationType.READ : OperationType.WRITE;
        }
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_nioneo_store_TestPersistenceWindowPoolContention.java
|
1,240
|
/**
 * Windows-specific checks of store-lock file behaviour. Both tests are
 * skipped on other platforms via assumeTrue.
 */
public class TestOsSpecificLocks
{
    private File path;

    @Rule
    public TestName name = new TestName();

    @Before
    public void doBefore()
    {
        path = TargetDirectory.forTest( getClass() ).cleanDirectory( name.getMethodName() );
    }

    /**
     * Locks the store_lock file, verifies a second lock attempt fails, that
     * other files still hand out (placebo) locks, and that release+relock works.
     */
    @Test
    public void sanityCheck() throws Exception
    {
        assumeTrue( Settings.osIsWindows() );
        FileSystemAbstraction fs = new DefaultFileSystemAbstraction();
        // Must grab locks only on store_lock file
        File fileName = new File( path, StoreLocker.STORE_LOCK_FILENAME );
        StoreChannel channel = fs.open( fileName, "rw" );
        try
        {
            // Lock this sucker!
            FileLock lock = fs.tryLock( fileName, channel );
            assertTrue( new File( path, "lock" ).exists() );
            try
            {
                fs.tryLock( fileName, channel );
                fail( "Should have thrown IOException" );
            }
            catch ( IOException e )
            { // Good, expected
            }
            // But the rest of the files should return non null (placebos,
            // actually)
            StoreChannel tempChannel = fs.open( new File( fileName.getPath() + "1" ), "rw" );
            try
            {
                FileLock tempLock = fs.tryLock( new File( fileName.getPath() + "1"), tempChannel );
                assertNotNull( tempLock );
                tempLock.release();
            }
            finally
            {
                // FIX: close even when an assertion above fails.
                tempChannel.close();
            }
            // Release and retry, should succeed
            lock.release();
            assertFalse( new File( path, "lock" ).exists() );
            fs.tryLock( fileName, channel ).release(); // NPE on fail here
            assertFalse( new File( path, "lock" ).exists() );
        }
        finally
        {
            // FIX: the channel was previously never closed (resource leak).
            channel.close();
        }
    }

    /**
     * Starts a database and verifies a second instance cannot start in the
     * same directory while the first holds the lock.
     */
    @Test
    public void testDatabaseLocking()
    {
        assumeTrue( Settings.osIsWindows() );
        GraphDatabaseService db = new GraphDatabaseFactory().newEmbeddedDatabase( path.getPath() );
        try
        {
            Transaction tx = db.beginTx();
            db.createNode();
            tx.success();
            tx.finish();
            assertTrue( new File( path, "lock" ).exists() );
            try
            {
                new GraphDatabaseFactory().newEmbeddedDatabase( path.getPath() );
                fail("Should not be able to start up another db in the same dir");
            }
            catch ( Exception e )
            {
                // Good
            }
        }
        finally
        {
            // FIX: shut down even when an assertion above fails, so the lock
            // is released for subsequent tests.
            db.shutdown();
        }
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_nioneo_store_TestOsSpecificLocks.java
|
1,241
|
/**
 * Test double for a property key token, registered in two static lookup
 * maps (by name and by id) via {@link #add(MyPropertyKeyToken)}.
 */
private static class MyPropertyKeyToken extends Token
{
    // Only ever mutated through add(); declare final (immutable references).
    private static final Map<String, Token> stringToIndex = new HashMap<>();
    private static final Map<Integer, Token> intToIndex = new HashMap<>();

    protected MyPropertyKeyToken( String key, int keyId )
    {
        super( key, keyId );
    }

    /**
     * @return a one-element list holding the token registered for {@code key},
     *         or an empty list when none is registered.
     */
    public static Iterable<Token> index( String key )
    {
        // Single lookup instead of containsKey + get.
        Token token = stringToIndex.get( key );
        return token != null ? Collections.singletonList( token ) : Collections.<Token>emptyList();
    }

    public static Token getIndexFor( int index )
    {
        return intToIndex.get( index );
    }

    /** Registers the token under both its name and its id. */
    public static void add( MyPropertyKeyToken index )
    {
        stringToIndex.put( index.name(), index );
        intToIndex.put( index.id(), index );
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_nioneo_store_TestNeoStore.java
|
1,242
|
/**
 * PropertyReceiver that only counts how many properties it is handed,
 * ignoring their values.
 */
private static class CountingPropertyReceiver implements PropertyReceiver
{
    // Number of properties received so far.
    private int count;

    @Override
    public void receive( DefinedProperty property, long propertyRecordId )
    {
        count++;
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_nioneo_store_TestNeoStore.java
|
1,243
|
{
    @Override
    public void receive( DefinedProperty property, long propertyRecordId )
    {
        // Collect each property keyed by its property-key id, paired with
        // the id of the property record it was read from.
        props.put( property.propertyKeyId(), Pair.of( property, propertyRecordId ) );
    }
};
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_nioneo_store_TestNeoStore.java
|
1,244
|
{
    @Override
    public <T> T resolveDependency( Class<T> type, SelectionStrategy selector )
    {
        // Test resolver: every dependency request is answered with the Config.
        return type.cast( config );
    }
};
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_nioneo_store_TestNeoStore.java
|
1,245
|
{
    // Provider backed by an in-memory label scan store for the test.
    private final LabelScanStoreProvider labelScanStoreProvider =
            new LabelScanStoreProvider( new InMemoryLabelScanStore(), 10 );

    @Override
    public <T> T resolveDependency( Class<T> type, SelectionStrategy selector ) throws IllegalArgumentException
    {
        // Hand out only the dependencies this test needs; any other request
        // is a programming error.
        if ( SchemaIndexProvider.class.isAssignableFrom( type ) )
        {
            return type.cast( SchemaIndexProvider.NO_INDEX_PROVIDER );
        }
        else if ( NodeManager.class.isAssignableFrom( type ) )
        {
            return type.cast( nodeManager );
        }
        else if ( LabelScanStoreProvider.class.isAssignableFrom( type ) )
        {
            return type.cast( labelScanStoreProvider );
        }
        throw new IllegalArgumentException( type.toString() );
    }
};
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_nioneo_store_TestNeoStore.java
|
1,246
|
public class TestNeoStore
{
private PropertyStore pStore;
private RelationshipTypeTokenStore rtStore;
private NeoStoreXaDataSource ds;
private NeoStoreXaConnection xaCon;
private TargetDirectory targetDirectory;
private File path;
@Rule public EphemeralFileSystemRule fs = new EphemeralFileSystemRule();
@Rule public TargetDirectory.TestDirectory testDir = TargetDirectory.testDirForTest( getClass() );
/** Resolves {@code name} against the test's store directory. */
private File file( String name )
{
    return new File( path, name);
}
/**
 * Creates a fresh, empty neostore in a clean test directory before each test.
 */
@Before
public void setUpNeoStore() throws Exception
{
    targetDirectory = TargetDirectory.forTest( fs.get(), getClass() );
    path = targetDirectory.cleanDirectory( "dir" );
    Config config = new Config( new HashMap<String, String>(), GraphDatabaseSettings.class );
    StoreFactory sf = new StoreFactory( config, new DefaultIdGeneratorFactory(), new DefaultWindowPoolFactory(),
            fs.get(), StringLogger.DEV_NULL, null );
    // Create the store files on disk, then close; tests re-open them via
    // initializeStores().
    sf.createNeoStore( file( NeoStore.DEFAULT_NAME ) ).close();
}
/**
 * Test double for a property key token, registered in two static lookup
 * maps (by name and by id) via {@link #add(MyPropertyKeyToken)}.
 */
private static class MyPropertyKeyToken extends Token
{
    // Only ever mutated through add(); declare final (immutable references).
    private static final Map<String, Token> stringToIndex = new HashMap<>();
    private static final Map<Integer, Token> intToIndex = new HashMap<>();

    protected MyPropertyKeyToken( String key, int keyId )
    {
        super( key, keyId );
    }

    /**
     * @return a one-element list holding the token registered for {@code key},
     *         or an empty list when none is registered.
     */
    public static Iterable<Token> index( String key )
    {
        // Single lookup instead of containsKey + get.
        Token token = stringToIndex.get( key );
        return token != null ? Collections.singletonList( token ) : Collections.<Token>emptyList();
    }

    public static Token getIndexFor( int index )
    {
        return intToIndex.get( index );
    }

    /** Registers the token under both its name and its id. */
    public static void add( MyPropertyKeyToken index )
    {
        stringToIndex.put( index.name(), index );
        intToIndex.put( index.id(), index );
    }
}
/** Creates a property key token, registers it in the lookup maps and returns it. */
private Token createDummyIndex( int id, String key )
{
    MyPropertyKeyToken index = new MyPropertyKeyToken( key, id );
    MyPropertyKeyToken.add( index );
    return index;
}
/**
 * Opens the data source over the store created in setUpNeoStore(), wiring
 * mocked collaborators, and grabs the property and relationship-type stores.
 */
private void initializeStores() throws IOException
{
    LockManager lockManager = new LockManagerImpl( new RagManager() );
    final Config config = new Config( MapUtil.stringMap(
            InternalAbstractGraphDatabase.Configuration.store_dir.name(), path.getPath(),
            InternalAbstractGraphDatabase.Configuration.neo_store.name(), "neo",
            InternalAbstractGraphDatabase.Configuration.logical_log.name(), file( "nioneo_logical.log" ).getPath() ),
            GraphDatabaseSettings.class );
    StoreFactory sf = new StoreFactory( config, new DefaultIdGeneratorFactory(), new DefaultWindowPoolFactory(),
            fs.get(), StringLogger.DEV_NULL, null );
    KernelHealth kernelHealth = mock( KernelHealth.class );
    NodeManager nodeManager = mock(NodeManager.class);
    // The data source iterates the node manager's caches; hand it two mocks.
    @SuppressWarnings( "rawtypes" )
    List caches = Arrays.asList(
            (Cache) mock( AutoLoadingCache.class ),
            (Cache) mock( AutoLoadingCache.class ) );
    when( nodeManager.caches() ).thenReturn( caches );
    // Everything not under test is mocked or a no-op implementation.
    ds = new NeoStoreXaDataSource(config, sf, StringLogger.DEV_NULL,
            new XaFactory( config, TxIdGenerator.DEFAULT, new PlaceboTm( lockManager, TxIdGenerator.DEFAULT ),
                    fs.get(), new Monitors(), new DevNullLoggingService(), RecoveryVerifier.ALWAYS_VALID,
                    LogPruneStrategies.NO_PRUNING, kernelHealth ), noStateFactory( new DevNullLoggingService() ),
            new TransactionInterceptorProviders( Collections.<TransactionInterceptorProvider>emptyList(),
                    dependencyResolverForConfig( config ) ), null, new SingleLoggingService( DEV_NULL ),
            new KernelSchemaStateStore(),
            mock(TokenNameLookup.class),
            dependencyResolverForNoIndexProvider( nodeManager ), mock( AbstractTransactionManager.class),
            mock( PropertyKeyTokenHolder.class ), mock(LabelTokenHolder.class),
            mock( RelationshipTypeTokenHolder.class), mock(PersistenceManager.class), mock(LockManager.class),
            mock( SchemaWriteGuard.class), IndexingService.NO_MONITOR );
    ds.init();
    ds.start();
    xaCon = ds.getXaConnection();
    pStore = xaCon.getPropertyStore();
    rtStore = xaCon.getRelationshipTypeStore();
}
/**
 * A resolver that serves the no-op index provider, the given node manager
 * and an in-memory label scan store - and rejects everything else.
 */
private DependencyResolver dependencyResolverForNoIndexProvider( final NodeManager nodeManager )
{
    return new DependencyResolver.Adapter()
    {
        private final LabelScanStoreProvider labelScanStoreProvider =
                new LabelScanStoreProvider( new InMemoryLabelScanStore(), 10 );

        @Override
        public <T> T resolveDependency( Class<T> type, SelectionStrategy selector ) throws IllegalArgumentException
        {
            if ( SchemaIndexProvider.class.isAssignableFrom( type ) )
            {
                return type.cast( SchemaIndexProvider.NO_INDEX_PROVIDER );
            }
            else if ( NodeManager.class.isAssignableFrom( type ) )
            {
                return type.cast( nodeManager );
            }
            else if ( LabelScanStoreProvider.class.isAssignableFrom( type ) )
            {
                return type.cast( labelScanStoreProvider );
            }
            // Anything else being requested is a programming error in the test.
            throw new IllegalArgumentException( type.toString() );
        }
    };
}
private Adapter dependencyResolverForConfig( final Config config )
{
return new DependencyResolver.Adapter()
{
@Override
public <T> T resolveDependency( Class<T> type, SelectionStrategy selector )
{
return type.cast( config );
}
};
}
    // Xid of the transaction most recently started via startTx().
    private Xid dummyXid;
    // Counter used to build a distinct Xid per started transaction.
    private byte txCount = (byte) 0;
    // XAResource of the current connection; (re)assigned by startTx().
    XAResource xaResource;
private void startTx() throws XAException
{
dummyXid = new XidImpl( new byte[txCount], new byte[txCount] );
txCount++;
xaResource = xaCon.getXaResource();
xaResource.start( dummyXid, XAResource.TMNOFLAGS );
}
private void commitTx() throws XAException
{
xaResource.end( dummyXid, XAResource.TMSUCCESS );
xaResource.commit( dummyXid, true );
// xaCon.clearAllTransactions();
}
@After
public void tearDownNeoStore()
{
for ( String file : new String[] {
"neo",
"neo.nodestore.db",
"neo.nodestore.db.labels",
"neo.propertystore.db",
"neo.propertystore.db.index",
"neo.propertystore.db.index.keys",
"neo.propertystore.db.strings",
"neo.propertystore.db.arrays",
"neo.relationshipstore.db",
"neo.relationshiptypestore.db",
"neo.relationshiptypestore.db.names",
"neo.schemastore.db",
} )
{
fs.get().deleteFile( file( file ) );
fs.get().deleteFile( file( file + ".id" ) );
}
File file = new File( "." );
for ( File nioFile : fs.get().listFiles( file ) )
{
if ( nioFile.getName().startsWith( "nioneo_logical.log" ) )
{
fs.get().deleteFile( nioFile );
}
}
}
private int index( String key )
{
Iterator<Token> itr = MyPropertyKeyToken.index( key ).iterator();
if ( !itr.hasNext() )
{
int id = (int) ds.nextId( PropertyKeyTokenRecord.class );
createDummyIndex( id, key );
xaCon.getTransaction().createPropertyKeyToken( key, id );
return id;
}
return itr.next().id();
}
    /**
     * End-to-end lifecycle test of the neo store: creates two nodes and two
     * relationships with three properties each, restarts the data source,
     * validates (and mutates) the data, restarts again, deletes everything,
     * and finally verifies that nothing can be loaded any more.
     */
    @Test
    public void testCreateNeoStore() throws Exception
    {
        initializeStores();
        startTx();
        // setup test population
        long node1 = ds.nextId( Node.class );
        xaCon.getTransaction().nodeCreate( node1 );
        long node2 = ds.nextId( Node.class );
        xaCon.getTransaction().nodeCreate( node2 );
        DefinedProperty n1prop1 = xaCon.getTransaction().nodeAddProperty(
                node1, index( "prop1" ), "string1" );
        DefinedProperty n1prop2 = xaCon.getTransaction().nodeAddProperty(
                node1, index( "prop2" ), 1 );
        DefinedProperty n1prop3 = xaCon.getTransaction().nodeAddProperty(
                node1, index( "prop3" ), true );
        DefinedProperty n2prop1 = xaCon.getTransaction().nodeAddProperty(
                node2, index( "prop1" ), "string2" );
        DefinedProperty n2prop2 = xaCon.getTransaction().nodeAddProperty(
                node2, index( "prop2" ), 2 );
        DefinedProperty n2prop3 = xaCon.getTransaction().nodeAddProperty(
                node2, index( "prop3" ), false );
        int relType1 = (int) ds.nextId( RelationshipType.class );
        xaCon.getTransaction().createRelationshipTypeToken( relType1, "relationshiptype1" );
        int relType2 = (int) ds.nextId( RelationshipType.class );
        xaCon.getTransaction().createRelationshipTypeToken( relType2, "relationshiptype2" );
        long rel1 = ds.nextId( Relationship.class );
        xaCon.getTransaction().relationshipCreate( rel1, relType1, node1, node2 );
        long rel2 = ds.nextId( Relationship.class );
        xaCon.getTransaction().relationshipCreate( rel2, relType2, node2, node1 );
        DefinedProperty r1prop1 = xaCon.getTransaction().relAddProperty(
                rel1, index( "prop1" ), "string1" );
        DefinedProperty r1prop2 = xaCon.getTransaction().relAddProperty(
                rel1, index( "prop2" ), 1 );
        DefinedProperty r1prop3 = xaCon.getTransaction().relAddProperty(
                rel1, index( "prop3" ), true );
        DefinedProperty r2prop1 = xaCon.getTransaction().relAddProperty(
                rel2, index( "prop1" ), "string2" );
        DefinedProperty r2prop2 = xaCon.getTransaction().relAddProperty(
                rel2, index( "prop2" ), 2 );
        DefinedProperty r2prop3 = xaCon.getTransaction().relAddProperty(
                rel2, index( "prop3" ), false );
        commitTx();
        ds.stop();
        // restart to verify the committed data survives a full stop/start cycle
        initializeStores();
        startTx();
        // validate node
        validateNodeRel1( node1, n1prop1, n1prop2, n1prop3, rel1, rel2,
                relType1, relType2 );
        validateNodeRel2( node2, n2prop1, n2prop2, n2prop3, rel1, rel2,
                relType1, relType2 );
        // validate rels
        validateRel1( rel1, r1prop1, r1prop2, r1prop3, node1, node2, relType1 );
        validateRel2( rel2, r2prop1, r2prop2, r2prop3, node2, node1, relType2 );
        validateRelTypes( relType1, relType2 );
        // validate reltypes
        // NOTE(review): validateRelTypes is invoked twice in a row here —
        // looks like a copy-paste leftover; the second call is redundant.
        validateRelTypes( relType1, relType2 );
        commitTx();
        ds.stop();
        initializeStores();
        startTx();
        // validate and delete rels
        deleteRel1( rel1, r1prop1, r1prop2, r1prop3, node1, node2, relType1 );
        deleteRel2( rel2, r2prop1, r2prop2, r2prop3, node2, node1, relType2 );
        // validate and delete nodes
        deleteNode1( node1, n1prop1, n1prop2, n1prop3 );
        deleteNode2( node2, n2prop1, n2prop2, n2prop3 );
        commitTx();
        ds.stop();
        initializeStores();
        startTx();
        // after deletion neither node should load any more
        assertNull( xaCon.getTransaction().nodeLoadLight( node1 ) );
        assertNull( xaCon.getTransaction().nodeLoadLight( node2 ) );
        testGetRels( new long[]{rel1, rel2} );
        // testGetProps( neoStore, new int[] {
        // n1prop1, n1prop2, n1prop3, n2prop1, n2prop2, n2prop3,
        // r1prop1, r1prop2, r1prop3, r2prop1, r2prop2, r2prop3
        // } );
        // finally create and tear down a small extra population in one session
        long nodeIds[] = new long[10];
        for ( int i = 0; i < 3; i++ )
        {
            nodeIds[i] = ds.nextId( Node.class );
            xaCon.getTransaction().nodeCreate( nodeIds[i] );
            xaCon.getTransaction().nodeAddProperty( nodeIds[i],
                    index( "nisse" ), new Integer( 10 - i ) );
        }
        for ( int i = 0; i < 2; i++ )
        {
            long id = ds.nextId( Relationship.class );
            xaCon.getTransaction().relationshipCreate( id, relType1, nodeIds[i], nodeIds[i + 1] );
            xaCon.getTransaction().relDelete( id );
        }
        for ( int i = 0; i < 3; i++ )
        {
            AtomicLong pos = getPosition( xaCon, nodeIds[i] );
            for ( RelationshipRecord rel : getMore( xaCon, nodeIds[i], pos ) )
            {
                xaCon.getTransaction().relDelete( rel.getId() );
            }
            xaCon.getTransaction().nodeDelete( nodeIds[i] );
        }
        commitTx();
        ds.stop();
    }
private AtomicLong getPosition( NeoStoreXaConnection xaCon, long node )
{
return new AtomicLong( xaCon.getTransaction().getRelationshipChainPosition( node ) );
}
private Iterable<RelationshipRecord> getMore( NeoStoreXaConnection xaCon, long node, AtomicLong pos )
{
Pair<Map<DirectionWrapper, Iterable<RelationshipRecord>>, Long> rels =
xaCon.getTransaction().getMoreRelationships( node, pos.get() );
pos.set( rels.other() );
List<Iterable<RelationshipRecord>> list = new ArrayList<>();
for ( Map.Entry<DirectionWrapper, Iterable<RelationshipRecord>> entry : rels.first().entrySet() )
{
list.add( entry.getValue() );
}
return new CombiningIterable<>( list );
}
    /**
     * Verifies node1's three properties and its two relationships as created
     * in {@link #testCreateNeoStore()}, and changes each property to a new
     * value that the later delete phase asserts on.
     */
    private void validateNodeRel1( long node, DefinedProperty prop1,
            DefinedProperty prop2, DefinedProperty prop3, long rel1, long rel2,
            int relType1, int relType2 ) throws IOException
    {
        NodeRecord nodeRecord = xaCon.getTransaction().nodeLoadLight( node );
        assertTrue( nodeRecord != null );
        ArrayMap<Integer, Pair<DefinedProperty,Long>> props = new ArrayMap<>();
        PropertyReceiver receiver = newPropertyReceiver( props );
        xaCon.getTransaction().nodeLoadProperties( node, false, receiver );
        int count = 0;
        // Re-read each property block straight from the property store and
        // check it matches what was written; any unknown key is a failure.
        for ( int keyId : props.keySet() )
        {
            long id = props.get( keyId ).other();
            PropertyRecord record = pStore.getRecord( id );
            PropertyBlock block = record.getPropertyBlock( props.get( keyId ).first().propertyKeyId() );
            DefinedProperty data = block.newPropertyData( pStore );
            if ( data.propertyKeyId() == prop1.propertyKeyId() )
            {
                assertEquals( "prop1", MyPropertyKeyToken.getIndexFor(
                        keyId ).name() );
                assertEquals( "string1", data.value() );
                xaCon.getTransaction().nodeChangeProperty( node, prop1.propertyKeyId(), "-string1" );
            }
            else if ( data.propertyKeyId() == prop2.propertyKeyId() )
            {
                assertEquals( "prop2", MyPropertyKeyToken.getIndexFor(
                        keyId ).name() );
                assertEquals( 1, data.value() );
                xaCon.getTransaction().nodeChangeProperty( node, prop2.propertyKeyId(), new Integer( -1 ) );
            }
            else if ( data.propertyKeyId() == prop3.propertyKeyId() )
            {
                assertEquals( "prop3", MyPropertyKeyToken.getIndexFor(
                        keyId ).name() );
                assertEquals( true, data.value() );
                xaCon.getTransaction().nodeChangeProperty( node, prop3.propertyKeyId(), false );
            }
            else
            {
                throw new IOException();
            }
            count++;
        }
        assertEquals( 3, count );
        count = 0;
        // Walk the relationship chain and check both relationships hang off
        // this node with the expected direction and type.
        AtomicLong pos = getPosition( xaCon, node );
        while ( true )
        {
            Iterable<RelationshipRecord> relData = getMore( xaCon, node, pos );
            if ( !relData.iterator().hasNext() )
            {
                break;
            }
            for ( RelationshipRecord rel : relData )
            {
                if ( rel.getId() == rel1 )
                {
                    assertEquals( node, rel.getFirstNode() );
                    assertEquals( relType1, rel.getType() );
                }
                else if ( rel.getId() == rel2 )
                {
                    assertEquals( node, rel.getSecondNode() );
                    assertEquals( relType2, rel.getType() );
                }
                else
                {
                    throw new IOException();
                }
                count++;
            }
        }
        assertEquals( 2, count );
    }
private PropertyReceiver newPropertyReceiver( final ArrayMap<Integer, Pair<DefinedProperty, Long>> props )
{
return new PropertyReceiver()
{
@Override
public void receive( DefinedProperty property, long propertyRecordId )
{
props.put( property.propertyKeyId(), Pair.of( property, propertyRecordId ) );
}
};
}
    /**
     * Verifies node2's three properties and its two relationships as created
     * in {@link #testCreateNeoStore()}, and changes each property to a new
     * value that the later delete phase asserts on. Mirror of
     * {@link #validateNodeRel1} with node2's expected values and directions.
     */
    private void validateNodeRel2( long node, DefinedProperty prop1,
            DefinedProperty prop2, DefinedProperty prop3,
            long rel1, long rel2, int relType1, int relType2 ) throws IOException
    {
        NodeRecord nodeRecord = xaCon.getTransaction().nodeLoadLight( node );
        assertTrue( nodeRecord != null );
        ArrayMap<Integer, Pair<DefinedProperty,Long>> props = new ArrayMap<>();
        xaCon.getTransaction().nodeLoadProperties( node, false, newPropertyReceiver( props ) );
        int count = 0;
        for ( int keyId : props.keySet() )
        {
            long id = props.get( keyId ).other();
            PropertyRecord record = pStore.getRecord( id );
            PropertyBlock block = record.getPropertyBlock( props.get( keyId ).first().propertyKeyId() );
            DefinedProperty data = block.newPropertyData( pStore );
            if ( data.propertyKeyId() == prop1.propertyKeyId() )
            {
                assertEquals( "prop1", MyPropertyKeyToken.getIndexFor(
                        keyId ).name() );
                assertEquals( "string2", data.value() );
                xaCon.getTransaction().nodeChangeProperty( node, prop1.propertyKeyId(), "-string2" );
            }
            else if ( data.propertyKeyId() == prop2.propertyKeyId() )
            {
                assertEquals( "prop2", MyPropertyKeyToken.getIndexFor(
                        keyId ).name() );
                assertEquals( 2, data.value() );
                xaCon.getTransaction().nodeChangeProperty( node, prop2.propertyKeyId(), new Integer( -2 ) );
            }
            else if ( data.propertyKeyId() == prop3.propertyKeyId() )
            {
                assertEquals( "prop3", MyPropertyKeyToken.getIndexFor(
                        keyId ).name() );
                assertEquals( false, data.value() );
                xaCon.getTransaction().nodeChangeProperty( node, prop3.propertyKeyId(), true );
            }
            else
            {
                throw new IOException();
            }
            count++;
        }
        assertEquals( 3, count );
        count = 0;
        // node2 is the second node of rel1 and the first node of rel2.
        AtomicLong pos = getPosition( xaCon, node );
        while ( true )
        {
            Iterable<RelationshipRecord> relData = getMore( xaCon, node, pos );
            if ( !relData.iterator().hasNext() )
            {
                break;
            }
            for ( RelationshipRecord rel : relData )
            {
                if ( rel.getId() == rel1 )
                {
                    assertEquals( node, rel.getSecondNode() );
                    assertEquals( relType1, rel.getType() );
                }
                else if ( rel.getId() == rel2 )
                {
                    assertEquals( node, rel.getFirstNode() );
                    assertEquals( relType2, rel.getType() );
                }
                else
                {
                    throw new IOException();
                }
                count++;
            }
        }
        assertEquals( 2, count );
    }
    /**
     * Verifies rel1's three properties and its endpoints/type, and changes
     * each property to a new value that {@link #deleteRel1} later asserts on.
     */
    private void validateRel1( long rel, DefinedProperty prop1,
            DefinedProperty prop2, DefinedProperty prop3,
            long firstNode, long secondNode, int relType ) throws IOException
    {
        ArrayMap<Integer, Pair<DefinedProperty,Long>> props = new ArrayMap<>();
        xaCon.getTransaction().relLoadProperties( rel, false, newPropertyReceiver( props ) );
        int count = 0;
        for ( int keyId : props.keySet() )
        {
            long id = props.get( keyId ).other();
            PropertyRecord record = pStore.getRecord( id );
            PropertyBlock block = record.getPropertyBlock( props.get( keyId ).first().propertyKeyId() );
            DefinedProperty data = block.newPropertyData( pStore );
            if ( data.propertyKeyId() == prop1.propertyKeyId() )
            {
                assertEquals( "prop1", MyPropertyKeyToken.getIndexFor(
                        keyId ).name() );
                assertEquals( "string1", data.value() );
                xaCon.getTransaction().relChangeProperty( rel, prop1.propertyKeyId(), "-string1" );
            }
            else if ( data.propertyKeyId() == prop2.propertyKeyId() )
            {
                assertEquals( "prop2", MyPropertyKeyToken.getIndexFor(
                        keyId ).name() );
                assertEquals( 1, data.value() );
                xaCon.getTransaction().relChangeProperty( rel, prop2.propertyKeyId(), new Integer( -1 ) );
            }
            else if ( data.propertyKeyId() == prop3.propertyKeyId() )
            {
                assertEquals( "prop3", MyPropertyKeyToken.getIndexFor(
                        keyId ).name() );
                assertEquals( true, data.value() );
                xaCon.getTransaction().relChangeProperty( rel, prop3.propertyKeyId(), false );
            }
            else
            {
                throw new IOException();
            }
            count++;
        }
        assertEquals( 3, count );
        // Endpoints and type must match what was created.
        RelationshipRecord relData = xaCon.getTransaction().relLoadLight( rel );
        assertEquals( firstNode, relData.getFirstNode() );
        assertEquals( secondNode, relData.getSecondNode() );
        assertEquals( relType, relData.getType() );
    }
    /**
     * Verifies rel2's three properties and its endpoints/type, and changes
     * each property to a new value that {@link #deleteRel2} later asserts on.
     * Mirror of {@link #validateRel1} with rel2's expected values.
     */
    private void validateRel2( long rel, DefinedProperty prop1,
            DefinedProperty prop2, DefinedProperty prop3,
            long firstNode, long secondNode, int relType ) throws IOException
    {
        ArrayMap<Integer, Pair<DefinedProperty,Long>> props = new ArrayMap<>();
        xaCon.getTransaction().relLoadProperties( rel, false, newPropertyReceiver( props ) );
        int count = 0;
        for ( int keyId : props.keySet() )
        {
            long id = props.get( keyId ).other();
            PropertyRecord record = pStore.getRecord( id );
            PropertyBlock block = record.getPropertyBlock( props.get( keyId ).first().propertyKeyId() );
            DefinedProperty data = block.newPropertyData( pStore );
            if ( data.propertyKeyId() == prop1.propertyKeyId() )
            {
                assertEquals( "prop1", MyPropertyKeyToken.getIndexFor(
                        keyId ).name() );
                assertEquals( "string2", data.value() );
                xaCon.getTransaction().relChangeProperty( rel, prop1.propertyKeyId(), "-string2" );
            }
            else if ( data.propertyKeyId() == prop2.propertyKeyId() )
            {
                assertEquals( "prop2", MyPropertyKeyToken.getIndexFor(
                        keyId ).name() );
                assertEquals( 2, data.value() );
                xaCon.getTransaction().relChangeProperty( rel, prop2.propertyKeyId(), new Integer( -2 ) );
            }
            else if ( data.propertyKeyId() == prop3.propertyKeyId() )
            {
                assertEquals( "prop3", MyPropertyKeyToken.getIndexFor(
                        keyId ).name() );
                assertEquals( false, data.value() );
                xaCon.getTransaction().relChangeProperty( rel, prop3.propertyKeyId(), true );
            }
            else
            {
                throw new IOException();
            }
            count++;
        }
        assertEquals( 3, count );
        RelationshipRecord relData = xaCon.getTransaction().relLoadLight( rel );
        assertEquals( firstNode, relData.getFirstNode() );
        assertEquals( secondNode, relData.getSecondNode() );
        assertEquals( relType, relData.getType() );
    }
private void validateRelTypes( int relType1, int relType2 )
throws IOException
{
Token data = rtStore.getToken( relType1 );
assertEquals( relType1, data.id() );
assertEquals( "relationshiptype1", data.name() );
data = rtStore.getToken( relType2 );
assertEquals( relType2, data.id() );
assertEquals( "relationshiptype2", data.name() );
Token allData[] = rtStore.getTokens( Integer.MAX_VALUE );
assertEquals( 2, allData.length );
for ( int i = 0; i < 2; i++ )
{
if ( allData[i].id() == relType1 )
{
assertEquals( relType1, allData[i].id() );
assertEquals( "relationshiptype1", allData[i].name() );
}
else if ( allData[i].id() == relType2 )
{
assertEquals( relType2, allData[i].id() );
assertEquals( "relationshiptype2", allData[i].name() );
}
else
{
throw new IOException();
}
}
}
    /**
     * Verifies the values written by {@link #validateRel1}, removes prop3,
     * deletes rel1, and checks that both endpoint nodes still have another
     * relationship left in their chains.
     */
    private void deleteRel1( long rel, DefinedProperty prop1, DefinedProperty prop2,
            DefinedProperty prop3, long firstNode, long secondNode, int relType ) throws IOException
    {
        ArrayMap<Integer, Pair<DefinedProperty,Long>> props = new ArrayMap<>();
        xaCon.getTransaction().relLoadProperties( rel, false, newPropertyReceiver( props ) );
        int count = 0;
        for ( int keyId : props.keySet() )
        {
            long id = props.get( keyId ).other();
            PropertyRecord record = pStore.getRecord( id );
            PropertyBlock block = record.getPropertyBlock( props.get( keyId ).first().propertyKeyId() );
            DefinedProperty data = block.newPropertyData( pStore );
            if ( data.propertyKeyId() == prop1.propertyKeyId() )
            {
                assertEquals( "prop1", MyPropertyKeyToken.getIndexFor(
                        keyId ).name() );
                assertEquals( "-string1", data.value() );
            }
            else if ( data.propertyKeyId() == prop2.propertyKeyId() )
            {
                assertEquals( "prop2", MyPropertyKeyToken.getIndexFor(
                        keyId ).name() );
                assertEquals( -1, data.value() );
            }
            else if ( data.propertyKeyId() == prop3.propertyKeyId() )
            {
                assertEquals( "prop3", MyPropertyKeyToken.getIndexFor(
                        keyId ).name() );
                assertEquals( false, data.value() );
                xaCon.getTransaction().relRemoveProperty( rel, prop3.propertyKeyId() );
            }
            else
            {
                throw new IOException();
            }
            count++;
        }
        assertEquals( 3, count );
        CountingPropertyReceiver propertyCounter = new CountingPropertyReceiver();
        xaCon.getTransaction().relLoadProperties( rel, false, propertyCounter );
        // NOTE(review): still expects 3 even after removing prop3 — presumably
        // this load reflects committed state only; confirm against the tx impl.
        assertEquals( 3, propertyCounter.count );
        RelationshipRecord relData = xaCon.getTransaction().relLoadLight( rel );
        assertEquals( firstNode, relData.getFirstNode() );
        assertEquals( secondNode, relData.getSecondNode() );
        assertEquals( relType, relData.getType() );
        xaCon.getTransaction().relDelete( rel );
        // Each endpoint should still have at least one more relationship.
        AtomicLong firstPos = getPosition( xaCon, firstNode );
        Iterator<RelationshipRecord> first = getMore( xaCon, firstNode, firstPos ).iterator();
        first.next();
        AtomicLong secondPos = getPosition( xaCon, secondNode );
        Iterator<RelationshipRecord> second = getMore( xaCon, secondNode, secondPos ).iterator();
        second.next();
        assertTrue( first.hasNext() );
        assertTrue( second.hasNext() );
    }
    /**
     * PropertyReceiver that only counts how many properties were delivered.
     * The count is read directly via the package-visible {@link #count} field.
     */
    private static class CountingPropertyReceiver implements PropertyReceiver
    {
        private int count;
        @Override
        public void receive( DefinedProperty property, long propertyRecordId )
        {
            count++;
        }
    }
    /**
     * Verifies the values written by {@link #validateRel2}, removes prop3,
     * deletes rel2, and checks that both endpoint nodes still have a
     * relationship left. Mirror of {@link #deleteRel1} for the second rel.
     */
    private void deleteRel2( long rel, DefinedProperty prop1, DefinedProperty prop2,
            DefinedProperty prop3, long firstNode, long secondNode, int relType ) throws IOException
    {
        ArrayMap<Integer, Pair<DefinedProperty,Long>> props = new ArrayMap<>();
        xaCon.getTransaction().relLoadProperties( rel, false, newPropertyReceiver( props ) );
        int count = 0;
        for ( int keyId : props.keySet() )
        {
            long id = props.get( keyId ).other();
            PropertyRecord record = pStore.getRecord( id );
            PropertyBlock block = record.getPropertyBlock( props.get( keyId ).first().propertyKeyId() );
            DefinedProperty data = block.newPropertyData( pStore );
            if ( data.propertyKeyId() == prop1.propertyKeyId() )
            {
                assertEquals( "prop1", MyPropertyKeyToken.getIndexFor(
                        keyId ).name() );
                assertEquals( "-string2", data.value() );
            }
            else if ( data.propertyKeyId() == prop2.propertyKeyId() )
            {
                assertEquals( "prop2", MyPropertyKeyToken.getIndexFor(
                        keyId ).name() );
                assertEquals( -2, data.value() );
            }
            else if ( data.propertyKeyId() == prop3.propertyKeyId() )
            {
                assertEquals( "prop3", MyPropertyKeyToken.getIndexFor(
                        keyId ).name() );
                assertEquals( true, data.value() );
                xaCon.getTransaction().relRemoveProperty( rel, prop3.propertyKeyId() );
            }
            else
            {
                throw new IOException();
            }
            count++;
        }
        assertEquals( 3, count );
        CountingPropertyReceiver propertyCounter = new CountingPropertyReceiver();
        xaCon.getTransaction().relLoadProperties( rel, false, propertyCounter );
        assertEquals( 3, propertyCounter.count );
        RelationshipRecord relData = xaCon.getTransaction().relLoadLight( rel );
        assertEquals( firstNode, relData.getFirstNode() );
        assertEquals( secondNode, relData.getSecondNode() );
        assertEquals( relType, relData.getType() );
        xaCon.getTransaction().relDelete( rel );
        AtomicLong firstPos = getPosition( xaCon, firstNode );
        Iterator<RelationshipRecord> first = getMore( xaCon, firstNode, firstPos ).iterator();
        AtomicLong secondPos = getPosition( xaCon, secondNode );
        Iterator<RelationshipRecord> second = getMore( xaCon, secondNode, secondPos ).iterator();
        assertTrue( first.hasNext() );
        assertTrue( second.hasNext() );
    }
    /**
     * Verifies node1's property values as changed by {@link #validateNodeRel1},
     * removes prop3, checks the node still has a relationship, then deletes it.
     */
    private void deleteNode1( long node, DefinedProperty prop1,
            DefinedProperty prop2, DefinedProperty prop3 )
            throws IOException
    {
        ArrayMap<Integer, Pair<DefinedProperty,Long>> props = new ArrayMap<>();
        xaCon.getTransaction().nodeLoadProperties( node, false, newPropertyReceiver( props ) );
        int count = 0;
        for ( int keyId : props.keySet() )
        {
            long id = props.get( keyId ).other();
            PropertyRecord record = pStore.getRecord( id );
            PropertyBlock block = record.getPropertyBlock( props.get( keyId ).first().propertyKeyId() );
            DefinedProperty data = block.newPropertyData( pStore );
            if ( data.propertyKeyId() == prop1.propertyKeyId() )
            {
                assertEquals( "prop1", MyPropertyKeyToken.getIndexFor(
                        keyId ).name() );
                assertEquals( "-string1", data.value() );
            }
            else if ( data.propertyKeyId() == prop2.propertyKeyId() )
            {
                assertEquals( "prop2", MyPropertyKeyToken.getIndexFor(
                        keyId ).name() );
                assertEquals( -1, data.value() );
            }
            else if ( data.propertyKeyId() == prop3.propertyKeyId() )
            {
                assertEquals( "prop3", MyPropertyKeyToken.getIndexFor(
                        keyId ).name() );
                assertEquals( false, data.value() );
                xaCon.getTransaction().nodeRemoveProperty( node, prop3.propertyKeyId() );
            }
            else
            {
                throw new IOException();
            }
            count++;
        }
        assertEquals( 3, count );
        CountingPropertyReceiver propertyCounter = new CountingPropertyReceiver();
        xaCon.getTransaction().nodeLoadProperties( node, false, propertyCounter );
        assertEquals( 3, propertyCounter.count );
        AtomicLong pos = getPosition( xaCon, node );
        Iterator<RelationshipRecord> rels = getMore( xaCon, node, pos ).iterator();
        assertTrue( rels.hasNext() );
        xaCon.getTransaction().nodeDelete( node );
    }
    /**
     * Verifies node2's property values as changed by {@link #validateNodeRel2},
     * removes prop3, checks the node still has a relationship, then deletes it.
     * Mirror of {@link #deleteNode1} for the second node.
     */
    private void deleteNode2( long node, DefinedProperty prop1,
            DefinedProperty prop2, DefinedProperty prop3 )
            throws IOException
    {
        ArrayMap<Integer, Pair<DefinedProperty,Long>> props = new ArrayMap<>();
        xaCon.getTransaction().nodeLoadProperties( node, false, newPropertyReceiver( props ) );
        int count = 0;
        for ( int keyId : props.keySet() )
        {
            long id = props.get( keyId ).other();
            PropertyRecord record = pStore.getRecord( id );
            PropertyBlock block = record.getPropertyBlock( props.get( keyId ).first().propertyKeyId() );
            DefinedProperty data = block.newPropertyData( pStore );
            if ( data.propertyKeyId() == prop1.propertyKeyId() )
            {
                assertEquals( "prop1", MyPropertyKeyToken.getIndexFor(
                        keyId ).name() );
                assertEquals( "-string2", data.value() );
            }
            else if ( data.propertyKeyId() == prop2.propertyKeyId() )
            {
                assertEquals( "prop2", MyPropertyKeyToken.getIndexFor(
                        keyId ).name() );
                assertEquals( -2, data.value() );
            }
            else if ( data.propertyKeyId() == prop3.propertyKeyId() )
            {
                assertEquals( "prop3", MyPropertyKeyToken.getIndexFor(
                        keyId ).name() );
                assertEquals( true, data.value() );
                xaCon.getTransaction().nodeRemoveProperty( node, prop3.propertyKeyId() );
            }
            else
            {
                throw new IOException();
            }
            count++;
        }
        assertEquals( 3, count );
        CountingPropertyReceiver propertyCounter = new CountingPropertyReceiver();
        xaCon.getTransaction().nodeLoadProperties( node, false, propertyCounter );
        assertEquals( 3, propertyCounter.count );
        AtomicLong pos = getPosition( xaCon, node );
        Iterator<RelationshipRecord> rels = getMore( xaCon, node, pos ).iterator();
        assertTrue( rels.hasNext() );
        xaCon.getTransaction().nodeDelete( node );
    }
private void testGetRels( long relIds[] )
{
for ( long relId : relIds )
{
assertEquals( null, xaCon.getTransaction().relLoadLight( relId ) );
}
}
    /**
     * Creates a 3-node chain connected by two relationships, commits, then in
     * a second transaction deletes the relationships and the two outer nodes.
     */
    @Test
    public void testRels1() throws Exception
    {
        initializeStores();
        startTx();
        int relType1 = (int) ds.nextId( RelationshipType.class );
        xaCon.getTransaction().createRelationshipTypeToken( relType1, "relationshiptype1" );
        long nodeIds[] = new long[3];
        for ( int i = 0; i < 3; i++ )
        {
            nodeIds[i] = ds.nextId( Node.class );
            xaCon.getTransaction().nodeCreate( nodeIds[i] );
            xaCon.getTransaction().nodeAddProperty( nodeIds[i],
                    index( "nisse" ), new Integer( 10 - i ) );
        }
        for ( int i = 0; i < 2; i++ )
        {
            xaCon.getTransaction().relationshipCreate( ds.nextId( Relationship.class ),
                    relType1, nodeIds[i], nodeIds[i + 1] );
        }
        commitTx();
        startTx();
        // delete every relationship reachable from nodes 0 and 2, then the nodes
        for ( int i = 0; i < 3; i += 2 )
        {
            AtomicLong pos = getPosition( xaCon, nodeIds[i] );
            for ( RelationshipRecord rel : getMore( xaCon, nodeIds[i], pos ) )
            {
                xaCon.getTransaction().relDelete( rel.getId() );
            }
            xaCon.getTransaction().nodeDelete( nodeIds[i] );
        }
        commitTx();
        ds.stop();
    }
    /**
     * Like {@link #testRels1()} but with an extra relationship closing the
     * triangle (node0 -> node2) and all three nodes deleted afterwards.
     * Currently disabled via {@code @Ignore}.
     */
    @Test
    @Ignore
    public void testRels2() throws Exception
    {
        initializeStores();
        startTx();
        int relType1 = (int) ds.nextId( RelationshipType.class );
        xaCon.getTransaction().createRelationshipTypeToken( relType1, "relationshiptype1" );
        long nodeIds[] = new long[3];
        for ( int i = 0; i < 3; i++ )
        {
            nodeIds[i] = ds.nextId( Node.class );
            xaCon.getTransaction().nodeCreate( nodeIds[i] );
            xaCon.getTransaction().nodeAddProperty( nodeIds[i],
                    index( "nisse" ), new Integer( 10 - i ) );
        }
        for ( int i = 0; i < 2; i++ )
        {
            xaCon.getTransaction().relationshipCreate( ds.nextId( Relationship.class ),
                    relType1, nodeIds[i], nodeIds[i + 1] );
        }
        xaCon.getTransaction().relationshipCreate( ds.nextId( Relationship.class ),
                relType1, nodeIds[0], nodeIds[2] );
        commitTx();
        startTx();
        for ( int i = 0; i < 3; i++ )
        {
            AtomicLong pos = getPosition( xaCon, nodeIds[i] );
            for ( RelationshipRecord rel : getMore( xaCon, nodeIds[i], pos ) )
            {
                xaCon.getTransaction().relDelete( rel.getId() );
            }
            xaCon.getTransaction().nodeDelete( nodeIds[i] );
        }
        commitTx();
        ds.stop();
    }
    /**
     * Exercises the relationship-chain linked-list bookkeeping by creating
     * several relationships fanning out from nodes 0 and 1 and then deleting
     * them in a different order than they were created.
     */
    @Test
    public void testRels3() throws Exception
    {
        // test linked list stuff during relationship delete
        initializeStores();
        startTx();
        int relType1 = (int) ds.nextId( RelationshipType.class );
        xaCon.getTransaction().createRelationshipTypeToken( relType1, "relationshiptype1" );
        long nodeIds[] = new long[8];
        for ( int i = 0; i < nodeIds.length; i++ )
        {
            nodeIds[i] = ds.nextId( Node.class );
            xaCon.getTransaction().nodeCreate( nodeIds[i] );
        }
        for ( int i = 0; i < nodeIds.length / 2; i++ )
        {
            xaCon.getTransaction().relationshipCreate( ds.nextId( Relationship.class ),
                    relType1, nodeIds[i], nodeIds[i * 2] );
        }
        long rel5 = ds.nextId( Relationship.class );
        xaCon.getTransaction().relationshipCreate( rel5, relType1, nodeIds[0], nodeIds[5] );
        long rel2 = ds.nextId( Relationship.class );
        xaCon.getTransaction().relationshipCreate( rel2, relType1, nodeIds[1], nodeIds[2] );
        long rel3 = ds.nextId( Relationship.class );
        xaCon.getTransaction().relationshipCreate( rel3, relType1, nodeIds[1], nodeIds[3] );
        long rel6 = ds.nextId( Relationship.class );
        xaCon.getTransaction().relationshipCreate( rel6, relType1, nodeIds[1], nodeIds[6] );
        long rel1 = ds.nextId( Relationship.class );
        xaCon.getTransaction().relationshipCreate( rel1, relType1, nodeIds[0], nodeIds[1] );
        long rel4 = ds.nextId( Relationship.class );
        xaCon.getTransaction().relationshipCreate( rel4, relType1, nodeIds[0], nodeIds[4] );
        long rel7 = ds.nextId( Relationship.class );
        xaCon.getTransaction().relationshipCreate( rel7, relType1, nodeIds[0], nodeIds[7] );
        commitTx();
        startTx();
        // delete in (roughly) reverse creation order to unlink mid-chain records
        xaCon.getTransaction().relDelete( rel7 );
        xaCon.getTransaction().relDelete( rel4 );
        xaCon.getTransaction().relDelete( rel1 );
        xaCon.getTransaction().relDelete( rel6 );
        xaCon.getTransaction().relDelete( rel3 );
        xaCon.getTransaction().relDelete( rel2 );
        xaCon.getTransaction().relDelete( rel5 );
        commitTx();
        ds.stop();
    }
    /**
     * Adds a node property, restarts the data source, then changes, removes
     * it and deletes the node — all across the restart boundary.
     */
    @Test
    public void testProps1() throws Exception
    {
        initializeStores();
        startTx();
        long nodeId = ds.nextId( Node.class );
        xaCon.getTransaction().nodeCreate( nodeId );
        // burn one property id so the added property doesn't get id 0
        pStore.nextId();
        DefinedProperty prop = xaCon.getTransaction().nodeAddProperty(
                nodeId, index( "nisse" ),
                new Integer( 10 ) );
        commitTx();
        ds.stop();
        initializeStores();
        startTx();
        xaCon.getTransaction().nodeChangeProperty( nodeId, prop.propertyKeyId(), new Integer( 5 ) );
        xaCon.getTransaction().nodeRemoveProperty( nodeId, prop.propertyKeyId() );
        xaCon.getTransaction().nodeDelete( nodeId );
        commitTx();
        ds.stop();
    }
    /**
     * Creates a store with custom string/array dynamic-block sizes and checks
     * the property store reports them back (plus the block header overhead).
     */
    @Test
    public void testSetBlockSize() throws Exception
    {
        targetDirectory.cleanup();
        Config config = new Config( MapUtil.stringMap( "string_block_size", "62", "array_block_size", "302" ),
                GraphDatabaseSettings.class );
        StoreFactory sf = new StoreFactory( config, new DefaultIdGeneratorFactory(), new DefaultWindowPoolFactory(),
                fs.get(), StringLogger.DEV_NULL, null );
        sf.createNeoStore( file( "neo" ) ).close();
        initializeStores();
        // configured size is payload only; the store adds the block header
        assertEquals( 62 + AbstractDynamicStore.BLOCK_HEADER_SIZE,
                pStore.getStringBlockSize() );
        assertEquals( 302 + AbstractDynamicStore.BLOCK_HEADER_SIZE,
                pStore.getArrayBlockSize() );
        ds.stop();
    }
    /**
     * Checks that NeoStore.setVersion stores the new version and returns the
     * previous one, and that the value is readable through a reopened store.
     */
    @Test
    public void setVersion() throws Exception
    {
        String storeDir = "target/test-data/set-version";
        new TestGraphDatabaseFactory().setFileSystem( fs.get() ).newImpermanentDatabase( storeDir ).shutdown();
        // setVersion returns the previous version
        assertEquals( 1, NeoStore.setVersion( fs.get(), new File( storeDir ), 10 ) );
        assertEquals( 10, NeoStore.setVersion( fs.get(), new File( storeDir ), 12 ) );
        StoreFactory sf = new StoreFactory( new Config( new HashMap<String, String>(), GraphDatabaseSettings.class ),
                new DefaultIdGeneratorFactory(), new DefaultWindowPoolFactory(), fs.get(), StringLogger.DEV_NULL, null );
        NeoStore neoStore = sf.newNeoStore( new File( storeDir, NeoStore.DEFAULT_NAME ) );
        assertEquals( 12, neoStore.getVersion() );
        neoStore.close();
    }
    /**
     * Checks the default value of the latest-constraint-introducing-tx field,
     * that it can be set, and that the value persists across flush and reopen.
     */
    @Test
    public void testSetLatestConstraintTx() throws Exception
    {
        // given
        new GraphDatabaseFactory().newEmbeddedDatabase( testDir.absolutePath() ).shutdown();
        StoreFactory sf = new StoreFactory( new Config( new HashMap<String, String>(), GraphDatabaseSettings.class ),
                new DefaultIdGeneratorFactory(), new DefaultWindowPoolFactory(), new DefaultFileSystemAbstraction(),
                StringLogger.DEV_NULL, null );
        // when
        NeoStore neoStore = sf.newNeoStore( new File( testDir.absolutePath(), NeoStore.DEFAULT_NAME ) );
        // then the default is 0
        assertEquals( 0l, neoStore.getLatestConstraintIntroducingTx() );
        // when
        neoStore.setLatestConstraintIntroducingTx( 10l );
        // then
        assertEquals( 10l, neoStore.getLatestConstraintIntroducingTx() );
        // when
        neoStore.flushAll();
        neoStore.close();
        neoStore = sf.newNeoStore( new File( testDir.absolutePath(), NeoStore.DEFAULT_NAME ) );
        // then the value should have been stored
        assertEquals( 10l, neoStore.getLatestConstraintIntroducingTx() );
        neoStore.close();
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_nioneo_store_TestNeoStore.java
|
1,247
|
public class TestLongerShortString
{
@Test
public void testMasks() throws Exception {
assertEquals(0,1 & LongerShortString.invertedBitMask(LongerShortString.NUMERICAL));
assertEquals(0,2 & LongerShortString.invertedBitMask(LongerShortString.DATE));
assertEquals(LongerShortString.NUMERICAL.bitMask(),3 & LongerShortString.invertedBitMask(LongerShortString.DATE));
assertEquals(0, (LongerShortString.NUMERICAL.bitMask()|LongerShortString.NUMERICAL.bitMask()) & LongerShortString.invertedBitMask(LongerShortString.NUMERICAL, LongerShortString.DATE));
}
    /** The empty string must round-trip through encode/decode. */
    @Test
    public void canEncodeEmptyString()
    {
        assertCanEncodeAndDecodeToSame( "" );
    }
    /** Digit strings (with numeric punctuation) must round-trip. */
    @Test
    public void canEncodeNumerical()
    {
        assertCanEncodeAndDecodeToSame( "12345678901234567890" );
        assertCanEncodeAndDecodeToSame( "12345678901234567890 +-.,' 321,3" );
    }
    /** Date/time-looking strings with either '-' or '/' separators must round-trip. */
    @Test
    public void canEncodeDate() throws Exception
    {
        assertCanEncodeAndDecodeToSame( "2011-10-10 12:45:22+0200" );
        assertCanEncodeAndDecodeToSame( "2011/10/10 12:45:22+0200" );
    }
@Test
public void testRandomStrings() throws Exception
{
for ( int i = 0; i < 1000; i++ )
{
for ( Charset charset : Charset.values() )
{
List<String> list = TestShortString.randomStrings( 100, charset, 30 );
for ( String string : list )
{
PropertyBlock record = new PropertyBlock();
if ( LongerShortString.encode( 10, string, record, PropertyStore.DEFAULT_PAYLOAD_SIZE ) )
{
assertEquals( string, LongerShortString.decode( record ) );
}
}
}
}
}
    /** Typical email addresses and URIs must round-trip. */
    @Test
    public void canEncodeEmailAndUri() throws Exception
    {
        assertCanEncodeAndDecodeToSame( "mattias@neotechnology.com" );
        assertCanEncodeAndDecodeToSame( "http://domain:7474/" );
    }
    /**
     * Lower-case path-like strings fit up to a length limit; one character
     * past it the encoding must be refused.
     */
    @Test
    public void canEncodeLower() throws Exception
    {
        assertCanEncodeAndDecodeToSame( "folder/generators/templates/controller.ext" );
        assertCanEncodeAndDecodeToSame( "folder/generators/templates/controller.extr" );
        assertCannotEncode( "folder/generators/templates/controller.extra" );
    }
@Test
public void canEncodeLowerHex() throws Exception
{
assertCanEncodeAndDecodeToSame( "da39a3ee5e6b4b0d3255bfef95601890afd80709" ); // sha1hex('') len=40
assertCanEncodeAndDecodeToSame( "0123456789" + "abcdefabcd" + "0a0b0c0d0e" + "1a1b1c1d1e" + "f9e8d7c6b5" + "a4f3" ); // len=54
assertCannotEncode( "da39a3ee5e6b4b0d3255bfef95601890afd80709" + "0123456789" + "abcde" ); // len=55
// test not failing on long illegal hex
assertCannotEncode( "aaaaaaaaaa" + "bbbbbbbbbb" + "cccccccccc" + "dddddddddd" + "eeeeeeeeee" + "x");
}
@Test
public void canEncodeUpperHex() throws Exception
{
assertCanEncodeAndDecodeToSame( "DA39A3EE5E6B4B0D3255BFEF95601890AFD80709" ); // sha1HEX('') len=40
assertCanEncodeAndDecodeToSame( "0123456789" + "ABCDEFABCD" + "0A0B0C0D0E" + "1A1B1C1D1E" + "F9E8D7C6B5" + "A4F3" ); // len=54
assertCannotEncode( "DA39A3EE5E6B4B0D3255BFEF95601890AFD80709" + "0123456789" + "ABCDE" ); // len=55
// test not failing on long illegal HEX
assertCannotEncode( "AAAAAAAAAA" + "BBBBBBBBBB" + "CCCCCCCCCC" + "DDDDDDDDDD" + "EEEEEEEEEE" + "X");
}
@Test
public void checkMarginalFit() throws Exception
{
assertCanEncodeAndDecodeToSame( "^aaaaaaaaaaaaaaaaaaaaaaaaaa" );
assertCannotEncode( "^aaaaaaaaaaaaaaaaaaaaaaaaaaa" );
}
@Test
public void canEncodeUUIDString() throws Exception
{
assertCanEncodeAndDecodeToSame( "81fe144f-484b-4a34-8e36-17a021540318" );
}
private void assertCanEncodeAndDecodeToSame( String string )
{
assertCanEncodeAndDecodeToSame( string, PropertyStore.DEFAULT_PAYLOAD_SIZE );
}
private void assertCanEncodeAndDecodeToSame( String string, int payloadSize )
{
PropertyBlock target = new PropertyBlock();
assertTrue( LongerShortString.encode( 0, string, target, payloadSize ) );
assertEquals( string, LongerShortString.decode( target ) );
}
private void assertCannotEncode( String string )
{
assertCannotEncode( string, PropertyStore.DEFAULT_PAYLOAD_SIZE );
}
private void assertCannotEncode( String string, int payloadSize )
{
assertFalse( LongerShortString.encode( 0, string, new PropertyBlock(),
payloadSize ) );
}
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_nioneo_store_TestLongerShortString.java
|
1,248
|
/**
 * Tests for {@code IdGeneratorImpl}: id allocation, reuse of freed ids across
 * close/reopen cycles, capacity limits per {@code IdType}, and skipping of the
 * reserved integer value -1. Runs against an ephemeral (in-memory) file system.
 */
public class TestIdGenerator
{
    @Rule public EphemeralFileSystemRule fsRule = new EphemeralFileSystemRule();
    private EphemeralFileSystemAbstraction fs;

    @Before
    public void doBefore()
    {
        fs = fsRule.get();
    }

    private void deleteIdGeneratorFile()
    {
        fs.deleteFile( idGeneratorFile() );
    }

    // Test store directory (created on the ephemeral file system on demand).
    private File path()
    {
        String path = AbstractNeo4jTestCase.getStorePath( "xatest" );
        File file = new File( path );
        fs.mkdirs( file );
        return file;
    }

    private File file( String name )
    {
        return new File( path(), name );
    }

    private File idGeneratorFile()
    {
        return file( "testIdGenerator.id" );
    }

    // Validates constructor/createGenerator argument checking and that a freshly
    // created generator file has the expected 9-byte header (sticky byte + high id).
    @Test
    public void testCreateIdGenerator() throws IOException
    {
        try
        {
            IdGeneratorImpl.createGenerator( fs, null );
            fail( "Null filename should throw exception" );
        }
        catch ( IllegalArgumentException e )
        {
        } // good
        try
        {
            IdGeneratorImpl.createGenerator( null, idGeneratorFile() );
            fail( "Null filesystem should throw exception" );
        }
        catch ( IllegalArgumentException e )
        {
        } // good
        try
        {
            IdGeneratorImpl.createGenerator( fs, idGeneratorFile() );
            new IdGeneratorImpl( fs, idGeneratorFile(), 0, 100, false, 0 ).close();
            fail( "Zero grab size should throw exception" );
        }
        catch ( IllegalArgumentException e )
        {
        } // good
        try
        {
            new IdGeneratorImpl( fs, new File( "testIdGenerator.id" ), -1, 100, false, 0 ).close();
            fail( "Negative grab size should throw exception" );
        }
        catch ( IllegalArgumentException e )
        {
        } // good
        try
        {
            IdGenerator idGenerator = new IdGeneratorImpl( fs, idGeneratorFile(), 1008, 1000, false, 0 );
            try
            {
                IdGeneratorImpl.createGenerator( fs, idGeneratorFile() );
                fail( "Creating a id generator with existing file name " + "should throw exception" );
            }
            catch ( IllegalStateException e )
            {
            } // good
            closeIdGenerator( idGenerator );
            // verify that id generator is ok
            StoreChannel fileChannel = fs.open( idGeneratorFile(), "rw" );
            ByteBuffer buffer = ByteBuffer.allocate( 9 );
            assertEquals( 9, fileChannel.read( buffer ) );
            buffer.flip();
            assertEquals( (byte) 0, buffer.get() );
            assertEquals( 0l, buffer.getLong() );
            buffer.flip();
            int readCount = fileChannel.read( buffer );
            if ( readCount != -1 && readCount != 0 )
            {
                fail( "Id generator header not ok read 9 + " + readCount + " bytes from file" );
            }
            fileChannel.close();
        }
        finally
        {
            File file = idGeneratorFile();
            if ( file.exists() )
            {
                assertTrue( file.delete() );
            }
        }
    }

    private void closeIdGenerator( IdGenerator idGenerator )
    {
        idGenerator.close();
    }

    // Opening a generator whose file was not cleanly closed ("sticky") must fail.
    @Test
    public void testStickyGenerator()
    {
        try
        {
            IdGeneratorImpl.createGenerator( fs, idGeneratorFile() );
            IdGenerator idGen = new IdGeneratorImpl( fs, idGeneratorFile(), 3, 1000, false, 0 );
            try
            {
                new IdGeneratorImpl( fs, idGeneratorFile(), 3, 1000, false, 0 );
                fail( "Opening sticky id generator should throw exception" );
            }
            catch ( StoreFailureException e )
            { // good
            }
            closeIdGenerator( idGen );
        }
        finally
        {
            File file = idGeneratorFile();
            if ( file.exists() )
            {
                assertTrue( file.delete() );
            }
        }
    }

    // Freed ids must be handed out again (in defrag order) after close/reopen;
    // fresh ids continue from the persisted high id.
    @Test
    public void testNextId()
    {
        try
        {
            IdGeneratorImpl.createGenerator( fs, idGeneratorFile() );
            IdGenerator idGenerator = new IdGeneratorImpl( fs, idGeneratorFile(), 3, 1000, false, 0 );
            for ( long i = 0; i < 7; i++ )
            {
                assertEquals( i, idGenerator.nextId() );
            }
            idGenerator.freeId( 1 );
            idGenerator.freeId( 3 );
            idGenerator.freeId( 5 );
            assertEquals( 7l, idGenerator.nextId() );
            idGenerator.freeId( 6 );
            closeIdGenerator( idGenerator );
            idGenerator = new IdGeneratorImpl( fs, idGeneratorFile(), 5, 1000, false, 0 );
            idGenerator.freeId( 2 );
            idGenerator.freeId( 4 );
            assertEquals( 1l, idGenerator.nextId() );
            idGenerator.freeId( 1 );
            assertEquals( 3l, idGenerator.nextId() );
            idGenerator.freeId( 3 );
            assertEquals( 5l, idGenerator.nextId() );
            idGenerator.freeId( 5 );
            assertEquals( 6l, idGenerator.nextId() );
            idGenerator.freeId( 6 );
            assertEquals( 8l, idGenerator.nextId() );
            idGenerator.freeId( 8 );
            assertEquals( 9l, idGenerator.nextId() );
            idGenerator.freeId( 9 );
            closeIdGenerator( idGenerator );
            idGenerator = new IdGeneratorImpl( fs, idGeneratorFile(), 3, 1000, false, 0 );
            assertEquals( 2l, idGenerator.nextId() );
            assertEquals( 4l, idGenerator.nextId() );
            assertEquals( 1l, idGenerator.nextId() );
            assertEquals( 3l, idGenerator.nextId() );
            assertEquals( 5l, idGenerator.nextId() );
            assertEquals( 6l, idGenerator.nextId() );
            assertEquals( 8l, idGenerator.nextId() );
            assertEquals( 9l, idGenerator.nextId() );
            assertEquals( 10l, idGenerator.nextId() );
            assertEquals( 11l, idGenerator.nextId() );
            closeIdGenerator( idGenerator );
        }
        finally
        {
            File file = idGeneratorFile();
            if ( file.exists() )
            {
                assertTrue( file.delete() );
            }
        }
    }

    // freeId argument validation plus reuse of all freed ids after reopen.
    @Test
    public void testFreeId()
    {
        try
        {
            IdGeneratorImpl.createGenerator( fs, idGeneratorFile() );
            IdGenerator idGenerator = new IdGeneratorImpl( fs, idGeneratorFile(), 3, 1000, false, 0 );
            for ( long i = 0; i < 7; i++ )
            {
                assertEquals( i, idGenerator.nextId() );
            }
            try
            {
                idGenerator.freeId( -1 );
                fail( "Negative id should throw exception" );
            }
            catch ( IllegalArgumentException e )
            { // good
            }
            try
            {
                idGenerator.freeId( 7 );
                fail( "Greater id than ever returned should throw exception" );
            }
            catch ( IllegalArgumentException e )
            { // good
            }
            for ( int i = 0; i < 7; i++ )
            {
                idGenerator.freeId( i );
            }
            closeIdGenerator( idGenerator );
            idGenerator = new IdGeneratorImpl( fs, idGeneratorFile(), 2, 1000, false, 0 );
            assertEquals( 0l, idGenerator.nextId() );
            assertEquals( 1l, idGenerator.nextId() );
            assertEquals( 2l, idGenerator.nextId() );
            closeIdGenerator( idGenerator );
            idGenerator = new IdGeneratorImpl( fs, idGeneratorFile(), 30, 1000, false, 0 );
            // Since idGenerator.nextId() (which returns 2) will read ids 2 and
            // 3, then
            // 3 will be written at the end during the next close. And hence
            // will be returned
            // after 6.
            assertEquals( 4l, idGenerator.nextId() );
            assertEquals( 5l, idGenerator.nextId() );
            assertEquals( 6l, idGenerator.nextId() );
            assertEquals( 3l, idGenerator.nextId() );
            closeIdGenerator( idGenerator );
        }
        finally
        {
            File file = idGeneratorFile();
            if ( file.exists() )
            {
                assertTrue( file.delete() );
            }
        }
    }

    // nextId()/freeId() must throw IllegalStateException after close().
    @Test
    public void testClose()
    {
        try
        {
            IdGeneratorImpl.createGenerator( fs, idGeneratorFile() );
            IdGenerator idGenerator = new IdGeneratorImpl( fs, idGeneratorFile(), 2, 1000, false, 0 );
            closeIdGenerator( idGenerator );
            try
            {
                idGenerator.nextId();
                fail( "nextId after close should throw exception" );
            }
            catch ( IllegalStateException e )
            { // good
            }
            try
            {
                idGenerator.freeId( 0 );
                fail( "freeId after close should throw exception" );
            }
            catch ( IllegalStateException e )
            { // good
            }
            idGenerator = new IdGeneratorImpl( fs, idGeneratorFile(), 2, 1000, false, 0 );
            assertEquals( 0l, idGenerator.nextId() );
            assertEquals( 1l, idGenerator.nextId() );
            assertEquals( 2l, idGenerator.nextId() );
            closeIdGenerator( idGenerator );
            try
            {
                idGenerator.nextId();
                fail( "nextId after close should throw exception" );
            }
            catch ( IllegalStateException e )
            { // good
            }
            try
            {
                idGenerator.freeId( 0 );
                fail( "freeId after close should throw exception" );
            }
            catch ( IllegalStateException e )
            { // good
            }
        }
        finally
        {
            File file = idGeneratorFile();
            if ( file.exists() )
            {
                assertTrue( file.delete() );
            }
        }
    }

    // Stress test: free every odd id (then every even id) and verify each freed id
    // is returned exactly once after reopen.
    @Test
    public void testOddAndEvenWorstCase()
    {
        int capacity = 1024 * 8 + 1;
        try
        {
            IdGeneratorImpl.createGenerator( fs, idGeneratorFile() );
            IdGenerator idGenerator = new IdGeneratorImpl( fs, idGeneratorFile(), 128, capacity * 2, false, 0 );
            for ( int i = 0; i < capacity; i++ )
            {
                idGenerator.nextId();
            }
            Map<Long, Object> freedIds = new HashMap<Long, Object>();
            for ( long i = 1; i < capacity; i += 2 )
            {
                idGenerator.freeId( i );
                freedIds.put( i, this );
            }
            closeIdGenerator( idGenerator );
            idGenerator = new IdGeneratorImpl( fs, idGeneratorFile(), 2000, capacity * 2, false, 0 );
            long oldId = -1;
            for ( int i = 0; i < capacity - 1; i += 2 )
            {
                long id = idGenerator.nextId();
                if ( freedIds.remove( id ) == null )
                {
                    throw new RuntimeException( "Id=" + id + " prevId=" + oldId + " list.size()=" + freedIds.size() );
                }
                oldId = id;
            }
            assertTrue( freedIds.values().size() == 0 );
            closeIdGenerator( idGenerator );
        }
        finally
        {
            File file = idGeneratorFile();
            if ( fs.fileExists( file ) )
            {
                assertTrue( fs.deleteFile( file ) );
            }
        }
        try
        {
            IdGeneratorImpl.createGenerator( fs, idGeneratorFile() );
            IdGenerator idGenerator = new IdGeneratorImpl( fs, idGeneratorFile(), 128, capacity * 2, false, 0 );
            for ( int i = 0; i < capacity; i++ )
            {
                idGenerator.nextId();
            }
            Map<Long, Object> freedIds = new HashMap<Long, Object>();
            for ( long i = 0; i < capacity; i += 2 )
            {
                idGenerator.freeId( i );
                freedIds.put( i, this );
            }
            closeIdGenerator( idGenerator );
            idGenerator = new IdGeneratorImpl( fs, idGeneratorFile(), 2000, capacity * 2, false, 0 );
            for ( int i = 0; i < capacity; i += 2 )
            {
                assertEquals( this, freedIds.remove( idGenerator.nextId() ) );
            }
            assertEquals( 0, freedIds.values().size() );
            closeIdGenerator( idGenerator );
        }
        finally
        {
            File file = idGeneratorFile();
            if ( file.exists() )
            {
                assertTrue( file.delete() );
            }
        }
    }

    // Randomized interleaving of allocate/free/close-reopen with random grab sizes.
    @Test
    public void testRandomTest()
    {
        int numberOfCloses = 0;
        java.util.Random random = new java.util.Random( System.currentTimeMillis() );
        int capacity = random.nextInt( 1024 ) + 1024;
        int grabSize = random.nextInt( 128 ) + 128;
        IdGeneratorImpl.createGenerator( fs, idGeneratorFile() );
        IdGenerator idGenerator = new IdGeneratorImpl( fs, idGeneratorFile(), grabSize, capacity * 2, false, 0 );
        List<Long> idsTaken = new ArrayList<Long>();
        float releaseIndex = 0.25f;
        float closeIndex = 0.05f;
        int currentIdCount = 0;
        try
        {
            while ( currentIdCount < capacity )
            {
                float rIndex = random.nextFloat();
                if ( rIndex < releaseIndex && currentIdCount > 0 )
                {
                    idGenerator.freeId( idsTaken.remove( random.nextInt( currentIdCount ) ).intValue() );
                    currentIdCount--;
                }
                else
                {
                    idsTaken.add( idGenerator.nextId() );
                    currentIdCount++;
                }
                if ( rIndex > (1.0f - closeIndex) || rIndex < closeIndex )
                {
                    closeIdGenerator( idGenerator );
                    grabSize = random.nextInt( 128 ) + 128;
                    idGenerator = new IdGeneratorImpl( fs, idGeneratorFile(), grabSize, capacity * 2, false, 0 );
                    numberOfCloses++;
                }
            }
            closeIdGenerator( idGenerator );
        }
        finally
        {
            File file = idGeneratorFile();
            if ( file.exists() )
            {
                assertTrue( file.delete() );
            }
        }
    }

    // Behavior around an id type's maximum value: the last id is allocatable,
    // going beyond the capacity fails.
    @Test
    public void testUnsignedId()
    {
        try
        {
            IdGeneratorImpl.createGenerator( fs, idGeneratorFile() );
            IdGenerator idGenerator = new IdGeneratorImpl( fs, idGeneratorFile(), 1,
                    IdType.PROPERTY_KEY_TOKEN.getMaxValue(), false, 0 );
            idGenerator.setHighId( IdType.PROPERTY_KEY_TOKEN.getMaxValue() - 1 );
            long id = idGenerator.nextId();
            assertEquals( IdType.PROPERTY_KEY_TOKEN.getMaxValue() - 1, id );
            idGenerator.freeId( id );
            try
            {
                idGenerator.nextId();
                fail( "Shouldn't be able to get next ID" );
            }
            catch ( StoreFailureException e )
            { // good, capacity exceeded
            }
            closeIdGenerator( idGenerator );
            idGenerator = new IdGeneratorImpl( fs, idGeneratorFile(), 1, IdType.PROPERTY_KEY_TOKEN.getMaxValue(), false, 0 );
            assertEquals( IdType.PROPERTY_KEY_TOKEN.getMaxValue() + 1, idGenerator.getHighId() );
            id = idGenerator.nextId();
            assertEquals( IdType.PROPERTY_KEY_TOKEN.getMaxValue() - 1, id );
            try
            {
                idGenerator.nextId();
            }
            catch ( StoreFailureException e )
            { // good, capacity exceeded
            }
            closeIdGenerator( idGenerator );
        }
        finally
        {
            File file = idGeneratorFile();
            if ( file.exists() )
            {
                assertTrue( file.delete() );
            }
        }
    }

    @Test
    public void makeSureIdCapacityCannotBeExceeded() throws Exception
    {
        for ( IdType type : IdType.values() )
        {
            makeSureIdCapacityCannotBeExceeded( type );
        }
    }

    // Allocates up to the given type's max value and verifies the next allocation fails.
    private void makeSureIdCapacityCannotBeExceeded( IdType type )
    {
        deleteIdGeneratorFile();
        IdGeneratorImpl.createGenerator( fs, idGeneratorFile() );
        long maxValue = type.getMaxValue();
        IdGenerator idGenerator = new IdGeneratorImpl( fs, idGeneratorFile(), 1, maxValue, false, 0 );
        long id = maxValue - 2;
        idGenerator.setHighId( id );
        assertEquals( id, idGenerator.nextId() );
        assertEquals( id + 1, idGenerator.nextId() );
        if ( maxValue != (long) Math.pow( 2, 32 ) - 1 )
        {
            // This is for the special -1 value
            assertEquals( id + 2, idGenerator.nextId() );
        }
        try
        {
            idGenerator.nextId();
            fail( "Id capacity shouldn't be able to be exceeded for " + type );
        }
        catch ( StoreFailureException e )
        { // Good
        }
        closeIdGenerator( idGenerator );
    }

    @Test
    public void makeSureMagicMinusOneIsntReturnedFromNodeIdGenerator() throws Exception
    {
        makeSureMagicMinusOneIsSkipped( IdType.NODE );
        makeSureMagicMinusOneIsSkipped( IdType.RELATIONSHIP );
        makeSureMagicMinusOneIsSkipped( IdType.PROPERTY );
    }

    // The value 2^32-1 (integer -1) is reserved and must never be handed out.
    private void makeSureMagicMinusOneIsSkipped( IdType type )
    {
        deleteIdGeneratorFile();
        IdGeneratorImpl.createGenerator( fs, idGeneratorFile() );
        IdGenerator idGenerator = new IdGeneratorImpl( fs, idGeneratorFile(), 1, type.getMaxValue(), false, 0 );
        long id = (long) Math.pow( 2, 32 ) - 3;
        idGenerator.setHighId( id );
        assertEquals( id, idGenerator.nextId() );
        assertEquals( id + 1, idGenerator.nextId() );
        // Here we make sure that id+2 (integer -1) is skipped
        assertEquals( id + 3, idGenerator.nextId() );
        assertEquals( id + 4, idGenerator.nextId() );
        assertEquals( id + 5, idGenerator.nextId() );
        closeIdGenerator( idGenerator );
    }

    @Test
    public void makeSureMagicMinusOneCannotBeReturnedEvenIfFreed() throws Exception
    {
        IdGeneratorImpl.createGenerator( fs, idGeneratorFile() );
        IdGenerator idGenerator = new IdGeneratorImpl( fs, idGeneratorFile(), 1, IdType.NODE.getMaxValue(), false, 0 );
        long magicMinusOne = (long) Math.pow( 2, 32 ) - 1;
        idGenerator.setHighId( magicMinusOne );
        assertEquals( magicMinusOne + 1, idGenerator.nextId() );
        idGenerator.freeId( magicMinusOne - 1 );
        idGenerator.freeId( magicMinusOne );
        closeIdGenerator( idGenerator );
        idGenerator = new IdGeneratorImpl( fs, idGeneratorFile(), 1, IdType.NODE.getMaxValue(), false, 0 );
        assertEquals( magicMinusOne - 1, idGenerator.nextId() );
        assertEquals( magicMinusOne + 2, idGenerator.nextId() );
        closeIdGenerator( idGenerator );
    }

    // End-to-end regression: deleting nodes/relationships in the same tx that created
    // them must not add their ids to the defrag list twice (no duplicate ids later).
    @Test
    public void commandsGetWrittenOnceSoThatFreedIdsGetsAddedOnlyOnce() throws Exception
    {
        String storeDir = "target/var/free-id-once";
        deleteRecursively( new File( storeDir ) );
        GraphDatabaseService db = new TestGraphDatabaseFactory().setFileSystem( fs ).newImpermanentDatabase( storeDir );
        RelationshipType type = withName( "SOME_TYPE" );
        // This transaction will, if some commands may be executed more than
        // once,
        // add the freed ids to the defrag list more than once - making the id
        // generator
        // return the same id more than once during the next session.
        Set<Long> createdNodeIds = new HashSet<Long>();
        Set<Long> createdRelationshipIds = new HashSet<Long>();
        Transaction tx = db.beginTx();
        Node commonNode = db.createNode();
        for ( int i = 0; i < 20; i++ )
        {
            Node otherNode = db.createNode();
            Relationship relationship = commonNode.createRelationshipTo( otherNode, type );
            if ( i % 5 == 0 )
            {
                otherNode.delete();
                relationship.delete();
            }
            else
            {
                createdNodeIds.add( otherNode.getId() );
                createdRelationshipIds.add( relationship.getId() );
            }
        }
        tx.success();
        tx.finish();
        db.shutdown();
        // After a clean shutdown, create new nodes and relationships and see so
        // that
        // all ids are unique.
        db = new TestGraphDatabaseFactory().setFileSystem( fs ).newImpermanentDatabase( storeDir );
        tx = db.beginTx();
        commonNode = db.getNodeById( commonNode.getId() );
        for ( int i = 0; i < 100; i++ )
        {
            Node otherNode = db.createNode();
            if ( !createdNodeIds.add( otherNode.getId() ) )
            {
                fail( "Managed to create a node with an id that was already in use" );
            }
            Relationship relationship = commonNode.createRelationshipTo( otherNode, type );
            if ( !createdRelationshipIds.add( relationship.getId() ) )
            {
                fail( "Managed to create a relationship with an id that was already in use" );
            }
        }
        tx.success();
        tx.finish();
        // Verify by loading everything from scratch
        ((GraphDatabaseAPI) db).getDependencyResolver().resolveDependency( NodeManager.class ).clearCache();
        tx = db.beginTx();
        for ( Node node : GlobalGraphOperations.at( db ).getAllNodes() )
        {
            lastOrNull( node.getRelationships() );
        }
        tx.finish();
        db.shutdown();
    }

    // delete() removes the backing file; a recreated generator starts over from id 0.
    @Test
    public void delete() throws Exception
    {
        IdGeneratorImpl.createGenerator( fs, idGeneratorFile() );
        IdGeneratorImpl idGenerator = new IdGeneratorImpl( fs, idGeneratorFile(), 10, 1000, false, 0 );
        long id = idGenerator.nextId();
        idGenerator.nextId();
        idGenerator.freeId( id );
        idGenerator.close();
        idGenerator.delete();
        assertFalse( idGeneratorFile().exists() );
        IdGeneratorImpl.createGenerator( fs, idGeneratorFile() );
        idGenerator = new IdGeneratorImpl( fs, idGeneratorFile(), 10, 1000, false, 0 );
        assertEquals( id, idGenerator.nextId() );
        idGenerator.close();
    }

    // With aggressive reuse enabled, repeatedly allocating and freeing a grab-sized
    // batch must keep recycling the same ids instead of growing the high id.
    @Test
    public void testChurnIdBatchAtGrabsize()
    {
        IdGenerator idGenerator = null;
        try
        {
            IdGeneratorImpl.createGenerator( fs, idGeneratorFile() );
            final int grabSize = 10, rounds = 10;
            idGenerator = new IdGeneratorImpl( fs, idGeneratorFile(), grabSize, 1000, true, 0 );
            for ( int i = 0; i < rounds; i++ )
            {
                Set<Long> ids = new HashSet<Long>();
                for ( int j = 0; j < grabSize; j++ )
                    ids.add( idGenerator.nextId() );
                for ( Long id : ids )
                    idGenerator.freeId( id );
            }
            long newId = idGenerator.nextId();
            assertTrue( "Expected IDs to be reused (" + grabSize + " at a time). high ID was: " + newId,
                    newId < grabSize * rounds );
        }
        finally
        {
            if ( idGenerator != null )
                closeIdGenerator( idGenerator );
            File file = idGeneratorFile();
            if ( file.exists() )
                assertTrue( file.delete() );
        }
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_nioneo_store_TestIdGenerator.java
|
1,249
|
/**
 * Verifies that a memory-mapped node store keeps mapping new regions as the
 * store file grows past the initially configured mapped-memory size: writing
 * twice the number of records the mapping was sized for must not cause any
 * window-pool misses.
 */
public class TestGrowingFileMemoryMapping
{
    private static final int MEGA = 1024 * 1024;

    @Test
    public void shouldGrowAFileWhileContinuingToMemoryMapNewRegions() throws Exception
    {
        // don't run on windows because memory mapping doesn't work properly there
        assumeTrue( !osIsWindows() );
        // given
        int NUMBER_OF_RECORDS = 1000000;
        File storeDir = TargetDirectory.forTest( getClass() ).makeGraphDbDir();
        Config config = new Config( stringMap(
                nodestore_mapped_memory_size.name(), mmapSize( NUMBER_OF_RECORDS, NodeStore.RECORD_SIZE ),
                NodeStore.Configuration.use_memory_mapped_buffers.name(), "true",
                NodeStore.Configuration.store_dir.name(), storeDir.getPath() ), NodeStore.Configuration.class );
        DefaultIdGeneratorFactory idGeneratorFactory = new DefaultIdGeneratorFactory();
        StoreFactory storeFactory = new StoreFactory( config, idGeneratorFactory,
                new DefaultWindowPoolFactory(), new DefaultFileSystemAbstraction(), StringLogger.DEV_NULL,
                new DefaultTxHook() );
        File fileName = new File( storeDir, NeoStore.DEFAULT_NAME + ".nodestore.db" );
        storeFactory.createEmptyStore( fileName, storeFactory.buildTypeDescriptorAndVersion(
                NodeStore.TYPE_DESCRIPTOR ) );
        NodeStore nodeStore = new NodeStore( fileName, config, idGeneratorFactory, new DefaultWindowPoolFactory(),
                new DefaultFileSystemAbstraction(), StringLogger.DEV_NULL, null );
        // when: write twice as many records as the mapped memory was sized for
        for ( int i = 0; i < 2 * NUMBER_OF_RECORDS; i++ )
        {
            NodeRecord record = new NodeRecord( nodeStore.nextId(), 0, 0 );
            record.setInUse( true );
            nodeStore.updateRecord( record );
        }
        // then: every record access must have hit a mapped window
        WindowPoolStats stats = nodeStore.getWindowPoolStats();
        nodeStore.close();
        assertEquals( stats.toString(), 0, stats.getMissCount() );
    }

    /**
     * Formats a mapped-memory size setting ("<megabytes>M") large enough to hold
     * the given number of records.
     *
     * @throws IllegalArgumentException if the total size is below one megabyte.
     */
    private String mmapSize( int numberOfRecords, int recordSize )
    {
        // Multiply in long arithmetic: int overflow would silently produce a
        // wrong (possibly negative) size for large record counts.
        long bytes = (long) numberOfRecords * recordSize;
        if ( bytes < MEGA )
        {
            throw new IllegalArgumentException( "too few records: " + numberOfRecords );
        }
        return bytes / MEGA + "M";
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_nioneo_store_TestGrowingFileMemoryMapping.java
|
1,250
|
// Generates random decimal-number strings with at most maxLen digits.
LONG
{
    @Override
    String randomString( int maxLen )
    {
        // NOTE(review): nextLong() % 10^maxLen can be negative, in which case the
        // result carries a leading '-' (and so is up to maxLen + 1 chars) —
        // presumably intentional for exercising signed numerics; confirm.
        return Long.toString( random.nextLong() % ( (long) Math.pow( 10, maxLen ) ) );
    }
},
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_nioneo_store_TestShortString.java
|
1,251
|
// Generates random strings of arbitrary BMP characters below the surrogate range.
UNICODE
{
    @Override
    String randomString( int maxLen )
    {
        char[] chars = new char[random.nextInt( maxLen + 1 )];
        for ( int i = 0; i < chars.length; i++ )
        {
            // Range [1, 0xD7FE]: skips NUL and stays below the UTF-16
            // surrogate area (0xD800+), so every char is a valid code point.
            chars[i] = (char) ( 1 + random.nextInt( 0xD7FE ) );
        }
        return new String( chars );
    }
},
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_nioneo_store_TestShortString.java
|
1,252
|
/**
 * Store record for a relationship type token. All state and behavior live in
 * {@link TokenRecord}; this subclass only supplies the display name used by
 * the base class's string representation.
 */
public class RelationshipTypeTokenRecord extends TokenRecord
{
    public RelationshipTypeTokenRecord( int id )
    {
        super( id );
    }

    @Override
    protected String simpleName()
    {
        return "RelationshipType";
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_store_RelationshipTypeTokenRecord.java
|
1,253
|
/**
 * Meta-test for the {@link TargetDirectory} test utility: verifies that the
 * {@code TestDirectory} rule provides a non-null directory to each test.
 */
public class TargetDirectoryTest
{
    private static final TargetDirectory target = TargetDirectory.forTest( TargetDirectoryTest.class );

    // Rule creating a fresh per-test directory under the shared target.
    public @Rule TargetDirectory.TestDirectory dir = target.testDirectory();

    @Test
    public void hasDir() throws Exception
    {
        assertNotNull( dir );
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_metatest_TargetDirectoryTest.java
|
1,254
|
/**
 * Static utilities for manipulating sorted arrays of label ids, plus helpers
 * for the node-id-prefixed layout used when label ids are stored in dynamic
 * records (first element is the owning node id, the rest are label ids).
 */
public class LabelIdArray
{
    private LabelIdArray()
    {
        // utility class — no instances
    }

    /**
     * Returns a new sorted array holding all ids of {@code existing} plus
     * {@code additional}. Precondition: {@code existing} is sorted (the
     * duplicate check uses binary search).
     *
     * @throws IllegalStateException if {@code additional} is already present.
     */
    static long[] concatAndSort( long[] existing, long additional )
    {
        assertNotContains( existing, additional );
        long[] result = Arrays.copyOf( existing, existing.length + 1 );
        result[existing.length] = additional;
        Arrays.sort( result );
        return result;
    }

    // Precondition: existingLabels is sorted, as required by binarySearch.
    private static void assertNotContains( long[] existingLabels, long labelId )
    {
        if ( Arrays.binarySearch( existingLabels, labelId ) >= 0 )
        {
            throw new IllegalStateException( "Label " + labelId + " already exists." );
        }
    }

    /**
     * Returns a new array with one occurrence of {@code excludeId} removed.
     *
     * @throws IllegalStateException if {@code excludeId} is not present.
     */
    static long[] filter( long[] ids, long excludeId )
    {
        boolean found = false;
        for ( int i = 0; i < ids.length; i++ )
        {
            if ( ids[i] == excludeId )
            {
                found = true;
                break;
            }
        }
        if ( !found )
        {
            throw new IllegalStateException( "Label " + excludeId + " not found." );
        }
        long[] result = new long[ids.length - 1];
        int writerIndex = 0;
        for ( int i = 0; i < ids.length; i++ )
        {
            if ( ids[i] != excludeId )
            {
                result[writerIndex++] = ids[i];
            }
        }
        return result;
    }

    /**
     * Builds the stored layout: {@code nodeId} followed by all label ids.
     */
    public static long[] prependNodeId( long nodeId, long[] labelIds )
    {
        long[] result = new long[ labelIds.length + 1 ];
        arraycopy( labelIds, 0, result, 1, labelIds.length );
        result[0] = nodeId;
        return result;
    }

    /**
     * Inverse of {@link #prependNodeId(long, long[])}: drops the leading node id.
     */
    public static long[] stripNodeId( long[] storedLongs )
    {
        return Arrays.copyOfRange( storedLongs, 1, storedLongs.length );
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_store_labels_LabelIdArray.java
|
1,255
|
/**
 * {@link NodeLabels} implementation for labels stored inline in the node
 * record's label field. The field packs a label count into bits 36-39 and up
 * to 7 label ids bit-packed into the lower 36 bits ({@link #LABEL_BITS}).
 * Falls back to {@link DynamicNodeLabels} when the ids don't fit.
 */
public class InlineNodeLabels implements NodeLabels
{
    private static final long[] NO_LABELS = new long[0];
    // Number of low bits of the label field available for packed label ids.
    private static final int LABEL_BITS = 36;
    private final long labelField;
    private final NodeRecord node;

    public InlineNodeLabels( long labelField, NodeRecord node )
    {
        this.labelField = labelField;
        this.node = node;
    }

    @Override
    public long[] get( NodeStore nodeStore )
    {
        // Inlined labels never need the store — everything is in the field.
        return getIfLoaded();
    }

    @Override
    public long[] getIfLoaded()
    {
        return parseInlined( labelField );
    }

    @Override
    public Collection<DynamicRecord> put( long[] labelIds, NodeStore nodeStore )
    {
        if ( tryInlineInNodeRecord( labelIds, node.getDynamicLabelRecords() ) )
        {
            return Collections.emptyList();
        }
        else
        {
            // Too many/too large ids to inline — switch to dynamic-record storage.
            return new DynamicNodeLabels( 0, node ).put( labelIds, nodeStore );
        }
    }

    @Override
    public Collection<DynamicRecord> add( long labelId, NodeStore nodeStore )
    {
        long[] augmentedLabelIds = labelCount( labelField ) == 0 ? new long[]{labelId} :
                concatAndSort( parseInlined( labelField ), labelId );
        return put( augmentedLabelIds, nodeStore );
    }

    @Override
    public Collection<DynamicRecord> remove( long labelId, NodeStore nodeStore )
    {
        long[] newLabelIds = filter( parseInlined( labelField ), labelId );
        // A subset of already-inlined ids always fits back inline.
        boolean inlined = tryInlineInNodeRecord( newLabelIds, node.getDynamicLabelRecords() );
        assert inlined;
        return Collections.emptyList();
    }

    @Override
    public void ensureHeavy( NodeStore nodeStore )
    {
        // no dynamic records
    }

    /**
     * Attempts to pack the given ids into the node record's label field.
     * Returns {@code false} (leaving the record untouched) if there are more
     * than 7 ids or any id doesn't fit its share of the 36 bits.
     */
    boolean tryInlineInNodeRecord( long[] ids, Collection<DynamicRecord> changedDynamicRecords )
    {
        // We reserve the high header bit for future extensions of the format of the in-lined label bits
        // i.e. the 0-valued high header bit can allow for 0-7 in-lined labels in the bit-packed format.
        if ( ids.length > 7 )
        {
            return false;
        }
        byte bitsPerLabel = (byte) (ids.length > 0 ? (LABEL_BITS / ids.length) : LABEL_BITS);
        long limit = 1L << bitsPerLabel;
        // 5 bytes = 40 bits of scratch space, enough for the 36-bit body.
        Bits bits = bits( 5 );
        for ( long id : ids )
        {
            if ( highestOneBit( id ) < limit )
            {
                bits.put( id, bitsPerLabel );
            }
            else
            {
                // This id needs more than bitsPerLabel bits — cannot inline.
                return false;
            }
        }
        node.setLabelField( combineLabelCountAndLabelStorage( (byte) ids.length, bits.getLongs()[0] ),
                changedDynamicRecords );
        return true;
    }

    /**
     * Decodes the bit-packed label ids out of an inline label field.
     */
    public static long[] parseInlined( long labelField )
    {
        byte numberOfLabels = labelCount( labelField );
        if ( numberOfLabels == 0 )
        {
            return NO_LABELS;
        }
        long existingLabelsField = parseLabelsBody( labelField );
        byte bitsPerLabel = (byte) (LABEL_BITS / numberOfLabels);
        Bits bits = bitsFromLongs( new long[]{existingLabelsField} );
        long[] result = new long[numberOfLabels];
        for ( int i = 0; i < result.length; i++ )
        {
            result[i] = bits.getLong( bitsPerLabel );
        }
        return result;
    }

    // Layout: count in bits 36-39, packed ids in bits 0-35.
    private static long combineLabelCountAndLabelStorage( byte labelCount, long labelBits )
    {
        return ((((long)labelCount) << 36) | labelBits);
    }

    // Extracts the 4-bit label count from bits 36-39.
    private static byte labelCount( long labelField )
    {
        return (byte) ((labelField & 0xF000000000L) >>> 36);
    }

    @Override
    public boolean isInlined()
    {
        return true;
    }

    @Override
    public String toString()
    {
        return format( "Inline(0x%x:%s)", node.getLabelField(), Arrays.toString( getIfLoaded(/*it is*/ ) ) );
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_store_labels_InlineNodeLabels.java
|
1,256
|
/**
 * {@link NodeLabels} implementation for labels stored in a chain of dynamic
 * records. The node's label field holds a pointer (marker bit 39 set, see
 * {@link #dynamicPointer(Collection)}) to the first dynamic record; the chain
 * stores the node id followed by the label ids (see {@code LabelIdArray}).
 */
public class DynamicNodeLabels implements NodeLabels
{
    private final long labelField;
    private final NodeRecord node;

    public DynamicNodeLabels( long labelField, NodeRecord node )
    {
        this.labelField = labelField;
        this.node = node;
    }

    @Override
    public long[] get( NodeStore nodeStore )
    {
        // Load the dynamic record chain before reading the label array from it.
        nodeStore.ensureHeavy( node, getFirstDynamicRecordId() );
        return nodeStore.getDynamicLabelsArray( node.getUsedDynamicLabelRecords() );
    }

    @Override
    public long[] getIfLoaded()
    {
        // Return null unless the node and every dynamic record is already loaded.
        if ( node.isLight() )
        {
            return null;
        }
        for ( DynamicRecord dynamic : node.getUsedDynamicLabelRecords() )
        {
            if ( dynamic.isLight() )
            {
                return null;
            }
        }
        return stripNodeId( (long[]) getRightArray( readFullByteArrayFromHeavyRecords(
                node.getUsedDynamicLabelRecords(), ARRAY ) ) );
    }

    @Override
    public Collection<DynamicRecord> put( long[] labelIds, NodeStore nodeStore )
    {
        long existingLabelsField = node.getLabelField();
        long existingLabelsBits = parseLabelsBody( existingLabelsField );
        Collection<DynamicRecord> changedDynamicRecords = node.getDynamicLabelRecords();
        if ( labelField != 0 )
        {
            // There are existing dynamic label records, get them
            nodeStore.ensureHeavy( node, existingLabelsBits );
            changedDynamicRecords = node.getDynamicLabelRecords();
            // Tentatively retire the old chain; records get reused below if needed.
            setNotInUse( changedDynamicRecords );
        }
        if ( !new InlineNodeLabels( labelField, node ).tryInlineInNodeRecord( labelIds, changedDynamicRecords ) )
        {
            // Didn't fit inline: (re)allocate dynamic records, reusing old ones first.
            Set<DynamicRecord> allRecords = new HashSet<>( changedDynamicRecords );
            Collection<DynamicRecord> allocatedRecords =
                    nodeStore.allocateRecordsForDynamicLabels( node.getId(), labelIds,
                            changedDynamicRecords.iterator() );
            allRecords.addAll( allocatedRecords );
            node.setLabelField( dynamicPointer( allocatedRecords ), allocatedRecords );
            changedDynamicRecords = allRecords;
        }
        return changedDynamicRecords;
    }

    @Override
    public Collection<DynamicRecord> add( long labelId, NodeStore nodeStore )
    {
        nodeStore.ensureHeavy( node, parseLabelsBody( labelField ) );
        Collection<DynamicRecord> existingRecords = node.getDynamicLabelRecords();
        long[] existingLabelIds = nodeStore.getDynamicLabelsArray( existingRecords );
        long[] newLabelIds = LabelIdArray.concatAndSort( existingLabelIds, labelId );
        Collection<DynamicRecord> changedDynamicRecords =
                nodeStore.allocateRecordsForDynamicLabels( node.getId(), newLabelIds, existingRecords.iterator() );
        node.setLabelField( dynamicPointer( changedDynamicRecords ), changedDynamicRecords );
        return changedDynamicRecords;
    }

    @Override
    public Collection<DynamicRecord> remove( long labelId, NodeStore nodeStore )
    {
        nodeStore.ensureHeavy( node, parseLabelsBody( labelField ) );
        Collection<DynamicRecord> existingRecords = node.getDynamicLabelRecords();
        long[] existingLabelIds = nodeStore.getDynamicLabelsArray( existingRecords );
        long[] newLabelIds = filter( existingLabelIds, labelId );
        if ( new InlineNodeLabels( labelField, node ).tryInlineInNodeRecord( newLabelIds, existingRecords ) )
        {
            // Remaining ids now fit inline — retire the whole dynamic chain.
            setNotInUse( existingRecords );
        }
        else
        {
            Collection<DynamicRecord> newRecords =
                    nodeStore.allocateRecordsForDynamicLabels( node.getId(), newLabelIds, existingRecords.iterator() );
            node.setLabelField( dynamicPointer( newRecords ), existingRecords );
            if ( !newRecords.equals( existingRecords ) )
            { // One less dynamic record, mark that one as not in use
                for ( DynamicRecord record : existingRecords )
                {
                    if ( !newRecords.contains( record ) )
                    {
                        record.setInUse( false );
                        record.setLength( 0 ); // so that it will not be made heavy again...
                    }
                }
            }
        }
        return existingRecords;
    }

    @Override
    public void ensureHeavy( NodeStore nodeStore )
    {
        nodeStore.ensureHeavy( node, getFirstDynamicRecordId() );
    }

    /**
     * Encodes a label field that points at the first dynamic record: marker
     * bit 39 (0x8000000000L) set, low bits carry the record id.
     */
    public static long dynamicPointer( Collection<DynamicRecord> newRecords )
    {
        return 0x8000000000L | first( newRecords ).getId();
    }

    public long getFirstDynamicRecordId()
    {
        return parseLabelsBody( labelField );
    }

    private void setNotInUse( Collection<DynamicRecord> changedDynamicRecords )
    {
        for ( DynamicRecord record : changedDynamicRecords )
        {
            record.setInUse( false );
        }
    }

    @Override
    public boolean isInlined()
    {
        return false;
    }

    @Override
    public String toString()
    {
        if ( node.isLight() )
        {
            return format( "Dynamic(id:%d)", getFirstDynamicRecordId() );
        }
        return format( "Dynamic(id:%d,[%s])", getFirstDynamicRecordId(),
                Arrays.toString( NodeStore.getDynamicLabelsArrayFromHeavyRecords( node.getDynamicLabelRecords() ) ) );
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_store_labels_DynamicNodeLabels.java
|
1,257
|
/**
 * Immutable snapshot of a window pool's statistics: memory usage, window
 * geometry and hit/miss/refresh counters. Captured once at construction time;
 * instances never change afterwards.
 */
public class WindowPoolStats
{
    private final String name;               // backing file's simple name
    private final long memAvail;             // memory available to the pool
    private final long memUsed;              // memory currently in use
    private final int windowCount;
    private final int windowSize;
    private final int hitCount;
    private final int missCount;
    private final int oomCount;
    private final int switchCount;
    private final int avgRefreshTime;
    private final int refreshCount;
    private final int avertedRefreshCount;

    /**
     * @param file backing store file; only its simple name is retained.
     */
    public WindowPoolStats( File file, long memAvail, long memUsed, int windowCount,
            int windowSize, int hitCount, int missCount, int oomCount, int switchCount, int avgRefreshTime,
            int refreshCount, int avertedRefreshCount )
    {
        this.name = file.getName();
        this.memAvail = memAvail;
        this.memUsed = memUsed;
        this.windowCount = windowCount;
        this.windowSize = windowSize;
        this.hitCount = hitCount;
        this.missCount = missCount;
        this.oomCount = oomCount;
        this.switchCount = switchCount;
        this.avgRefreshTime = avgRefreshTime;
        this.refreshCount = refreshCount;
        this.avertedRefreshCount = avertedRefreshCount;
    }

    public String getName() { return name; }

    public long getMemAvail() { return memAvail; }

    public long getMemUsed() { return memUsed; }

    public int getWindowCount() { return windowCount; }

    public int getWindowSize() { return windowSize; }

    public int getHitCount() { return hitCount; }

    public int getMissCount() { return missCount; }

    public int getOomCount() { return oomCount; }

    public int getSwitchCount() { return switchCount; }

    public int getAvgRefreshTime() { return avgRefreshTime; }

    public int getRefreshCount() { return refreshCount; }

    public int getAvertedRefreshCount() { return avertedRefreshCount; }

    @Override
    public String toString()
    {
        StringBuilder result = new StringBuilder( "WindowPoolStats['" );
        result.append( name ).append( "', " );
        result.append( "memAvail:" ).append( memAvail ).append( ", " );
        result.append( "memUsed:" ).append( memUsed ).append( ", " );
        result.append( "windowCount:" ).append( windowCount ).append( ", " );
        result.append( "windowSize:" ).append( windowSize ).append( ", " );
        result.append( "hitCount:" ).append( hitCount ).append( ", " );
        result.append( "missCount:" ).append( missCount ).append( ", " );
        result.append( "oomCount:" ).append( oomCount ).append( ", " );
        result.append( "switchCount:" ).append( switchCount ).append( ", " );
        result.append( "avgRefreshTime:" ).append( avgRefreshTime ).append( ", " );
        result.append( "refreshCount:" ).append( refreshCount ).append( ", " );
        result.append( "avertedRefreshCount:" ).append( avertedRefreshCount );
        result.append( "]" );
        return result.toString();
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_store_WindowPoolStats.java
|
1,258
|
/**
 * Test double that reports the current store version descriptor only on the
 * first call and an older, hard-coded one on every subsequent call — tricking
 * the store into validating as current on load, then writing the old version
 * on close.
 */
private static class RelationshipTypeTokenStoreWithOneOlderVersion extends RelationshipTypeTokenStore
{
    private boolean versionCalled;

    public RelationshipTypeTokenStoreWithOneOlderVersion( File fileName, DynamicStringStore stringStore )
    {
        super( fileName, new Config( stringMap() ), new NoLimitIdGeneratorFactory(), new DefaultWindowPoolFactory(),
                new DefaultFileSystemAbstraction(), StringLogger.DEV_NULL, stringStore );
    }

    @Override
    public String getTypeDescriptor()
    {
        if ( versionCalled )
        {
            // TODO This shouldn't be hard coded like this, boring to keep in sync
            // when version changes
            return "RelationshipTypeStore v0.9.5";
        }
        versionCalled = true;
        return super.getTypeDescriptor();
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_nioneo_store_UpgradeStoreIT.java
|
1,259
|
/**
 * IdGeneratorFactory test double whose generators have no upper id limit
 * (Long.MAX_VALUE), remembering each opened generator per id type.
 */
private static class NoLimitIdGeneratorFactory implements IdGeneratorFactory
{
    private final Map<IdType, IdGenerator> generators = new HashMap<>();

    @Override
    public IdGenerator open( FileSystemAbstraction fs, File fileName, int grabSize, IdType idType, long highId )
    {
        IdGenerator opened = new IdGeneratorImpl( fs, fileName, grabSize, Long.MAX_VALUE, false, highId );
        generators.put( idType, opened );
        return opened;
    }

    @Override
    public IdGenerator get( IdType idType )
    {
        return generators.get( idType );
    }

    @Override
    public void create( FileSystemAbstraction fs, File fileName, long highId )
    {
        IdGeneratorImpl.createGenerator( fs, fileName, highId );
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_nioneo_store_UpgradeStoreIT.java
|
1,260
|
/**
 * Integration tests for store upgrade behavior: stores that exceed format
 * limits (too many relationship types, too-big dynamic block sizes) must be
 * rejected, upgrades require explicit opt-in, and old logical logs are moved
 * aside during upgrade. Currently {@code @Ignore}d.
 */
@Ignore
public class UpgradeStoreIT
{
    private static final String PATH = "target/var/upgrade";

    @Before
    public void doBefore()
    {
        deleteFileOrDirectory( PATH );
    }

    // One store directory per test, numbered to keep them isolated.
    private File path( int i )
    {
        return new File( PATH, "" + i );
    }

    @Test
    public void makeSureStoreWithTooManyRelationshipTypesCannotBeUpgraded() throws Exception
    {
        File path = path( 0 );
        new GraphDatabaseFactory().newEmbeddedDatabase( path.getPath() ).shutdown();
        // 0x10000 types is one more than fits in the old format's 16-bit type id space.
        createManyRelationshipTypes( path, 0x10000 );
        assertCannotStart( path, "Shouldn't be able to upgrade with that many types set" );
    }

    @Test
    public void makeSureStoreWithDecentAmountOfRelationshipTypesCanBeUpgraded() throws Exception
    {
        File path = path( 1 );
        new GraphDatabaseFactory().newEmbeddedDatabase( path.getPath() ).shutdown();
        createManyRelationshipTypes( path, 0xFFFF );
        assertCanStart( path );
    }

    @Test( expected=TransactionFailureException.class )
    public void makeSureStoreWithTooBigStringBlockSizeCannotBeCreated() throws Exception
    {
        new GraphDatabaseFactory().newEmbeddedDatabaseBuilder( path( 2 ).getPath()).setConfig( GraphDatabaseSettings.string_block_size, "" + (0x10000) ).newGraphDatabase().shutdown();
    }

    @Test
    public void makeSureStoreWithDecentStringBlockSizeCanBeCreated() throws Exception
    {
        new GraphDatabaseFactory().newEmbeddedDatabaseBuilder( path( 3 ).getPath()).setConfig(GraphDatabaseSettings.string_block_size, "" + (0xFFFF) ).newGraphDatabase().shutdown();
    }

    @Test( expected=TransactionFailureException.class )
    public void makeSureStoreWithTooBigArrayBlockSizeCannotBeCreated() throws Exception
    {
        new GraphDatabaseFactory().newEmbeddedDatabaseBuilder( path( 4 ).getPath()).setConfig( GraphDatabaseSettings.array_block_size, "" + (0x10000) ).newGraphDatabase().shutdown();
    }

    @Test
    public void makeSureStoreWithDecentArrayBlockSizeCanBeCreated() throws Exception
    {
        new GraphDatabaseFactory().newEmbeddedDatabaseBuilder( path( 5 ).getPath()).setConfig( GraphDatabaseSettings.array_block_size, "" + (0xFFFF) ).newGraphDatabase().shutdown();
    }

    @Test
    public void makeSureStoreWithTooBigStringBlockSizeCannotBeUpgraded() throws Exception
    {
        File path = path( 6 );
        new GraphDatabaseFactory().newEmbeddedDatabase( path.getPath() ).shutdown();
        setBlockSize( new File( path, "neostore.propertystore.db.strings" ), 0x10000, "StringPropertyStore v0.9.5" );
        assertCannotStart( path, "Shouldn't be able to upgrade with block size that big" );
    }

    @Test
    public void makeSureStoreWithDecentStringBlockSizeCanBeUpgraded() throws Exception
    {
        File path = path( 7 );
        new GraphDatabaseFactory().newEmbeddedDatabase( path.getPath() ).shutdown();
        setBlockSize( new File( path, "neostore.propertystore.db.strings" ), 0xFFFF, "StringPropertyStore v0.9.5" );
        assertCanStart( path );
    }

    @Test
    public void makeSureStoreWithTooBigArrayBlockSizeCannotBeUpgraded() throws Exception
    {
        File path = path( 8 );
        new GraphDatabaseFactory().newEmbeddedDatabase( path.getPath() ).shutdown();
        setBlockSize( new File( path, "neostore.propertystore.db.arrays" ), 0x10000, "ArrayPropertyStore v0.9.5" );
        assertCannotStart( path, "Shouldn't be able to upgrade with block size that big" );
    }

    @Test
    public void makeSureStoreWithDecentArrayBlockSizeCanBeUpgraded() throws Exception
    {
        File path = path( 9 );
        new GraphDatabaseFactory().newEmbeddedDatabase( path.getPath() ).shutdown();
        setBlockSize( new File( path, "neostore.propertystore.db.arrays" ), 0xFFFF, "ArrayPropertyStore v0.9.5" );
        assertCanStart( path );
    }

    @Test
    public void makeSureLogsAreMovedWhenUpgrading() throws Exception
    {
        // Generate some logical logs
        File path = path( 10 );
        for ( int i = 0; i < 3; i++ )
        {
            new GraphDatabaseFactory().newEmbeddedDatabaseBuilder( path.getPath()).setConfig( GraphDatabaseSettings.keep_logical_logs, Settings.TRUE ).newGraphDatabase().shutdown();
        }
        setOlderNeoStoreVersion( path );
        new GraphDatabaseFactory().newEmbeddedDatabaseBuilder( path.getPath()).setConfig( GraphDatabaseSettings.allow_store_upgrade, Settings.TRUE ).newGraphDatabase().shutdown();
        // After upgrade the old logs must have been moved into a "1.2-logs" directory.
        File oldLogDir = new File( path, "1.2-logs" );
        assertTrue( oldLogDir.exists() );
        assertTrue( new File( oldLogDir, "nioneo_logical.log.v0" ).exists() );
        assertTrue( new File( oldLogDir, "nioneo_logical.log.v1" ).exists() );
        assertTrue( new File( oldLogDir, "nioneo_logical.log.v2" ).exists() );
        assertFalse( new File( path, "nioneo_logical.log.v0" ).exists() );
        assertFalse( new File( path, "nioneo_logical.log.v1" ).exists() );
        assertFalse( new File( path, "nioneo_logical.log.v2" ).exists() );
    }

    @Test
    public void makeSureStoreCantBeUpgradedIfNotExplicitlyToldTo() throws Exception
    {
        File path = path( 11 );
        new GraphDatabaseFactory().newEmbeddedDatabase( path.getPath() ).shutdown();
        setOlderNeoStoreVersion( path );
        try
        {
            new GraphDatabaseFactory().newEmbeddedDatabase( path.getPath() );
            fail( "Shouldn't be able to upgrade if not told to" );
        }
        catch ( TransactionFailureException e )
        {
            // Only a NotCurrentStoreVersionException cause counts as the expected rejection.
            if ( !( e.getCause() instanceof NotCurrentStoreVersionException) )
            {
                throw e;
            }
        }
    }

    @Test
    public void makeSureStoreCantBeUpgradedIfNotExplicitlyToldTo2() throws Exception
    {
        File path = path( 12 );
        new GraphDatabaseFactory().newEmbeddedDatabase( path.getPath() ).shutdown();
        setOlderNeoStoreVersion( path );
        try
        {
            new GraphDatabaseFactory().newEmbeddedDatabaseBuilder( path.getPath()).setConfig( GraphDatabaseSettings.allow_store_upgrade, Settings.TRUE ).newGraphDatabase().shutdown();
            fail( "Shouldn't be able to upgrade if not told to" );
        }
        catch ( TransactionFailureException e )
        {
            if ( !( e.getCause() instanceof NotCurrentStoreVersionException) )
            {
                throw e;
            }
        }
    }

    @Test
    public void makeSureStoreCanBeUpgradedIfExplicitlyToldTo() throws Exception
    {
        File path = path( 13 );
        new GraphDatabaseFactory().newEmbeddedDatabase( path.getPath() ).shutdown();
        setOlderNeoStoreVersion( path );
        new GraphDatabaseFactory().newEmbeddedDatabaseBuilder( path.getPath()).setConfig( GraphDatabaseSettings.allow_store_upgrade, Settings.TRUE ).newGraphDatabase().shutdown();
    }

    @Test
    public void makeSureStoreCantBeUpgradedByBatchInserterEvenIfExplicitlyToldTo() throws Exception
    {
        File path = path( 14 );
        new GraphDatabaseFactory().newEmbeddedDatabase( path.getPath() ).shutdown();
        setOlderNeoStoreVersion( path );
        try
        {
            BatchInserters.inserter( path.getPath(), stringMap( GraphDatabaseSettings.allow_store_upgrade.name(), Settings.TRUE ) );
            fail( "Shouldn't be able to upgrade with batch inserter" );
        }
        catch ( IllegalArgumentException e )
        { // Good
        }
    }

    // Asserts that starting a database on the given path fails with a
    // NotCurrentStoreVersionException-caused TransactionFailureException.
    private void assertCannotStart( File path, String failMessage )
    {
        GraphDatabaseService db = null;
        try
        {
            db = new GraphDatabaseFactory().newEmbeddedDatabase( path.getPath() );
            fail( failMessage );
        }
        catch ( TransactionFailureException e )
        {
            if ( !( e.getCause() instanceof NotCurrentStoreVersionException) )
            {
                throw e;
            }
            // Good
        }
        finally
        {
            if ( db != null )
            {
                db.shutdown();
            }
        }
    }

    // Asserts that a database on the given path starts cleanly; always shuts it down.
    private void assertCanStart( File path )
    {
        GraphDatabaseService db = null;
        try
        {
            db = new GraphDatabaseFactory().newEmbeddedDatabase( path.getPath() );
        }
        finally
        {
            if ( db != null )
            {
                db.shutdown();
            }
        }
    }

    // Overwrites the trailing version string of the neostore file with an old version.
    private void setOlderNeoStoreVersion( File path ) throws IOException
    {
        String oldVersion = "NeoStore v0.9.6";
        FileChannel channel = new RandomAccessFile( new File( path, NeoStore.DEFAULT_NAME ), "rw" ).getChannel();
        channel.position( channel.size() - UTF8.encode( oldVersion ).length );
        ByteBuffer buffer = ByteBuffer.wrap( UTF8.encode( oldVersion ) );
        channel.write( buffer );
        channel.close();
    }

    // Rewrites a dynamic store's block-size header and its trailing version string.
    private void setBlockSize( File file, int blockSize, String oldVersionToSet ) throws IOException
    {
        FileChannel channel = new RandomAccessFile( file, "rw" ).getChannel();
        ByteBuffer buffer = ByteBuffer.wrap( new byte[4] );
        buffer.putInt( blockSize + AbstractDynamicStore.BLOCK_HEADER_SIZE );
        buffer.flip();
        channel.write( buffer );
        // It's the same length as the current version
        channel.position( channel.size() - UTF8.encode( oldVersionToSet ).length );
        buffer = ByteBuffer.wrap( UTF8.encode( oldVersionToSet ) );
        channel.write( buffer );
        channel.close();
    }

    // Writes the requested number of relationship type records directly into the
    // type store, bypassing the database, using a store that claims an old version on close.
    private void createManyRelationshipTypes( File path, int numberOfTypes )
    {
        File fileName = new File( path, "neostore.relationshiptypestore.db" );
        DynamicStringStore stringStore = new DynamicStringStore( new File( fileName.getPath() + ".names"), null, IdType.RELATIONSHIP_TYPE_TOKEN_NAME,
                new DefaultIdGeneratorFactory(), new DefaultWindowPoolFactory(), new DefaultFileSystemAbstraction(), StringLogger.DEV_NULL );
        RelationshipTypeTokenStore store = new RelationshipTypeTokenStoreWithOneOlderVersion( fileName, stringStore );
        for ( int i = 0; i < numberOfTypes; i++ )
        {
            String name = "type" + i;
            RelationshipTypeTokenRecord record = new RelationshipTypeTokenRecord( i );
            record.setCreated();
            record.setInUse( true );
            Collection<DynamicRecord> typeRecords = store.allocateNameRecords( PropertyStore.encodeString( name ) );
            record.setNameId( (int) first( typeRecords ).getId() );
            record.addNameRecords( typeRecords );
            store.setHighId( store.getHighId()+1 );
            store.updateRecord( record );
        }
        store.close();
    }

    /**
     * Test double that reports the current version descriptor on the first call
     * (so the store validates on load) and an older, hard-coded one afterwards
     * (so the old version gets written on close).
     */
    private static class RelationshipTypeTokenStoreWithOneOlderVersion extends RelationshipTypeTokenStore
    {
        private boolean versionCalled;
        public RelationshipTypeTokenStoreWithOneOlderVersion( File fileName, DynamicStringStore stringStore )
        {
            super( fileName, new Config( stringMap() ), new NoLimitIdGeneratorFactory(), new DefaultWindowPoolFactory(),
                    new DefaultFileSystemAbstraction(), StringLogger.DEV_NULL, stringStore );
        }
        @Override
        public String getTypeDescriptor()
        {
            // This funky method will trick the store, telling it that it's the new version
            // when it loads (so that it validates OK). Then when closing it and writing
            // the version it will write the older version.
            if ( !versionCalled )
            {
                versionCalled = true;
                return super.getTypeDescriptor();
            }
            else
            {
                // TODO This shouldn't be hard coded like this, boring to keep in sync
                // when version changes
                return "RelationshipTypeStore v0.9.5";
            }
        }
    }

    /** IdGeneratorFactory test double whose generators have no upper id limit. */
    private static class NoLimitIdGeneratorFactory implements IdGeneratorFactory
    {
        private final Map<IdType, IdGenerator> generators = new HashMap<IdType, IdGenerator>();
        @Override
        public IdGenerator open( FileSystemAbstraction fs, File fileName, int grabSize, IdType idType, long highId )
        {
            IdGenerator generator = new IdGeneratorImpl( fs, fileName, grabSize, Long.MAX_VALUE, false, highId );
            generators.put( idType, generator );
            return generator;
        }
        @Override
        public IdGenerator get( IdType idType )
        {
            return generators.get( idType );
        }
        @Override
        public void create( FileSystemAbstraction fs, File fileName, long highId )
        {
            IdGeneratorImpl.createGenerator( fs, fileName, highId );
        }
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_nioneo_store_UpgradeStoreIT.java
|
1,261
|
/**
 * Schema rule describing a uniqueness constraint on a label/property-key pair,
 * together with a reference to the index rule that backs the constraint.
 * Serialized layout (after the base rule): one byte property-key count,
 * then 8 bytes per key, then 8 bytes for the owned index rule id.
 */
public class UniquenessConstraintRule extends AbstractSchemaRule
{
    private final int[] propertyKeyIds;
    private final long ownedIndexRule;

    /** We currently only support uniqueness constraints on a single property. */
    public static UniquenessConstraintRule uniquenessConstraintRule( long id, int labelId, int propertyKeyId,
                                                                     long ownedIndexRule )
    {
        return new UniquenessConstraintRule( id, labelId, new int[] {propertyKeyId}, ownedIndexRule );
    }

    /** Deserializes a rule from the given buffer; the id and label are read by the caller. */
    public static UniquenessConstraintRule readUniquenessConstraintRule( long id, int labelId, ByteBuffer buffer )
    {
        return new UniquenessConstraintRule( id, labelId, readPropertyKeys( buffer ), readOwnedIndexRule( buffer ) );
    }

    private UniquenessConstraintRule( long id, int labelId, int[] propertyKeyIds, long ownedIndexRule )
    {
        super( id, labelId, Kind.UNIQUENESS_CONSTRAINT );
        this.ownedIndexRule = ownedIndexRule;
        assert propertyKeyIds.length == 1; // Only uniqueness of a single property supported for now
        this.propertyKeyIds = propertyKeyIds;
    }

    @Override
    public int hashCode()
    {
        // NOTE(review): bitwise OR is a weak hash combiner; confirm this is intentional
        // before changing, since persisted behavior elsewhere may not care.
        return super.hashCode() | Arrays.hashCode( propertyKeyIds );
    }

    @Override
    public boolean equals( Object obj )
    {
        return super.equals( obj ) && Arrays.equals( propertyKeyIds, ((UniquenessConstraintRule) obj).propertyKeyIds );
    }

    @Override
    protected String innerToString()
    {
        return ", propertyKeys=" + Arrays.toString( propertyKeyIds );
    }

    @Override
    public int length()
    {
        return super.length() +
               1 + /* the number of properties that form a unique tuple */
               8 * propertyKeyIds.length + /* the property keys themselves */
               8; /* owned index rule */
    }

    @Override
    public void serialize( ByteBuffer target )
    {
        super.serialize( target );
        target.put( (byte) propertyKeyIds.length );
        for ( int propertyKeyId : propertyKeyIds )
        {
            target.putLong( propertyKeyId );
        }
        target.putLong( ownedIndexRule );
    }

    private static int[] readPropertyKeys( ByteBuffer buffer )
    {
        // Count byte first, then one long per key (narrowed back to int).
        int[] keys = new int[buffer.get()];
        for ( int i = 0; i < keys.length; i++ )
        {
            keys[i] = safeCastLongToInt( buffer.getLong() );
        }
        return keys;
    }

    private static long readOwnedIndexRule( ByteBuffer buffer )
    {
        return buffer.getLong();
    }

    /** @return whether the given property key participates in this constraint. */
    public boolean containsPropertyKeyId( int propertyKeyId )
    {
        for ( int keyId : propertyKeyIds )
        {
            if ( keyId == propertyKeyId )
            {
                return true;
            }
        }
        return false;
    }

    // This method exists as long as only single property keys are supported
    public int getPropertyKey()
    {
        // Property key "singleness" is checked elsewhere, in the constructor and when deserializing.
        return propertyKeyIds[0];
    }

    /** @return the id of the index rule backing this constraint. */
    public long getOwnedIndex()
    {
        return ownedIndexRule;
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_store_UniquenessConstraintRule.java
|
1,262
|
/**
 * Exception type for failures originating in the underlying storage layer,
 * wrapping the low-level cause where one exists.
 */
public class UnderlyingStorageException extends StoreFailureException
{
    public UnderlyingStorageException( String msg )
    {
        super( msg );
    }

    public UnderlyingStorageException( Throwable cause )
    {
        super( cause );
    }

    public UnderlyingStorageException( String msg, Throwable cause )
    {
        super( msg, cause );
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_store_UnderlyingStorageException.java
|
1,263
|
// Configuration holder for this store type; currently adds nothing beyond
// the settings inherited from AbstractStore.Configuration.
public static abstract class Configuration
        extends AbstractStore.Configuration
{
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_store_TokenStore.java
|
1,264
|
/**
 * Base store for token records (labels, property keys, relationship types).
 * Each token record holds a pointer into a companion {@link DynamicStringStore}
 * where the token's name is stored as a chain of dynamic records.
 */
public abstract class TokenStore<T extends TokenRecord> extends AbstractRecordStore<T> implements Store
{
    public static abstract class Configuration
            extends AbstractStore.Configuration
    {
    }

    // Companion store holding token names; nulled out on close.
    private DynamicStringStore nameStore;
    public static final int NAME_STORE_BLOCK_SIZE = 30;

    public TokenStore( File fileName, Config configuration, IdType idType,
                       IdGeneratorFactory idGeneratorFactory, WindowPoolFactory windowPoolFactory,
                       FileSystemAbstraction fileSystemAbstraction, StringLogger stringLogger,
                       DynamicStringStore nameStore )
    {
        super( fileName, configuration, idType, idGeneratorFactory, windowPoolFactory,
                fileSystemAbstraction, stringLogger );
        this.nameStore = nameStore;
    }

    public DynamicStringStore getNameStore()
    {
        return nameStore;
    }

    @Override
    public int getRecordHeaderSize()
    {
        return getRecordSize();
    }

    // Recovery mode is propagated to the name store so both stores agree.
    @Override
    protected void setRecovered()
    {
        super.setRecovered();
        nameStore.setRecovered();
    }

    @Override
    protected void unsetRecovered()
    {
        super.unsetRecovered();
        nameStore.unsetRecovered();
    }

    @Override
    public void makeStoreOk()
    {
        nameStore.makeStoreOk();
        super.makeStoreOk();
    }

    @Override
    public void rebuildIdGenerators()
    {
        nameStore.rebuildIdGenerators();
        super.rebuildIdGenerators();
    }

    public void updateIdGenerators()
    {
        nameStore.updateHighId();
        this.updateHighId();
    }

    // NOTE(review): frees the id in the NAME store, not this store — confirm callers
    // pass a name-record id here.
    public void freeId( int id )
    {
        nameStore.freeId( id );
    }

    @Override
    protected void closeStorage()
    {
        if ( nameStore != null )
        {
            nameStore.close();
            nameStore = null;
        }
    }

    @Override
    public void flushAll()
    {
        nameStore.flushAll();
        super.flushAll();
    }

    /**
     * Scans the store and returns up to maxCount tokens (skipping records that
     * fail to load and reserved name slots), each with its name resolved.
     */
    public Token[] getTokens( int maxCount )
    {
        LinkedList<Token> recordList = new LinkedList<>();
        long maxIdInUse = getHighestPossibleIdInUse();
        int found = 0;
        for ( int i = 0; i <= maxIdInUse && found < maxCount; i++ )
        {
            T record;
            try
            {
                record = getRecord( i );
            }
            catch ( InvalidRecordException t )
            {
                // Not-in-use or unreadable record; skip it.
                continue;
            }
            found++;
            if ( record != null && record.getNameId() != Record.RESERVED.intValue() )
            {
                String name = getStringFor( record );
                recordList.add( new Token( name, i ) );
            }
        }
        return recordList.toArray( new Token[recordList.size()] );
    }

    public Token getToken( int id )
    {
        T record = getRecord( id );
        return new Token( getStringFor( record ), record.getId() );
    }

    // Variant used during recovery; temporarily flips both stores into recovered mode.
    public Token getToken( int id, boolean recovered )
    {
        assert recovered;
        try
        {
            setRecovered();
            T record = getRecord( id );
            return new Token( getStringFor( record ), record.getId() );
        }
        finally
        {
            unsetRecovered();
        }
    }

    // Reads the record plus the light (unloaded) name-record chain headers.
    public T getRecord( int id )
    {
        T record;
        PersistenceWindow window = acquireWindow( id, OperationType.READ );
        try
        {
            record = getRecord( id, window, false );
        }
        finally
        {
            releaseWindow( window );
        }
        record.addNameRecords( nameStore.getLightRecords( record.getNameId() ) );
        return record;
    }

    @Override
    public T getRecord( long id )
    {
        return getRecord( (int) id );
    }

    // "Force" read: never throws for not-in-use records, returning an empty record instead.
    @Override
    public T forceGetRecord( long id )
    {
        PersistenceWindow window;
        try
        {
            window = acquireWindow( id, OperationType.READ );
        }
        catch ( InvalidRecordException e )
        {
            return newRecord( (int) id );
        }
        try
        {
            return getRecord( (int) id, window, true );
        }
        finally
        {
            releaseWindow( window );
        }
    }

    @Override
    public T forceGetRaw( T record )
    {
        return record;
    }

    @Override
    public T forceGetRaw( long id )
    {
        return forceGetRecord( id );
    }

    /** Allocates dynamic name records for the given encoded token name. */
    public Collection<DynamicRecord> allocateNameRecords( byte[] chars )
    {
        return nameStore.allocateRecordsFromBytes( chars );
    }

    // Reads the record without loading its name records, marking it "light".
    public T getLightRecord( int id )
    {
        PersistenceWindow window = acquireWindow( id, OperationType.READ );
        try
        {
            T record = getRecord( id, window, false );
            record.setIsLight( true );
            return record;
        }
        finally
        {
            releaseWindow( window );
        }
    }

    // Writes the record, and its name records too unless the record is light.
    @Override
    public void updateRecord( T record )
    {
        PersistenceWindow window = acquireWindow( record.getId(),
                OperationType.WRITE );
        try
        {
            updateRecord( record, window );
        }
        finally
        {
            releaseWindow( window );
        }
        if ( !record.isLight() )
        {
            for ( DynamicRecord keyRecord : record.getNameRecords() )
            {
                nameStore.updateRecord( keyRecord );
            }
        }
    }

    // Force variant: writes only this store's record, never the name records.
    @Override
    public void forceUpdateRecord( T record )
    {
        PersistenceWindow window = acquireWindow( record.getId(),
                OperationType.WRITE );
        try
        {
            updateRecord( record, window );
        }
        finally
        {
            releaseWindow( window );
        }
    }

    public int nextNameId()
    {
        return (int) nameStore.nextId();
    }

    /** Creates an empty record instance of this store's concrete record type. */
    protected abstract T newRecord( int id );

    // Deserializes a record from the window buffer; with force=true, a not-in-use
    // record is returned instead of throwing.
    protected T getRecord( int id, PersistenceWindow window, boolean force )
    {
        Buffer buffer = window.getOffsettedBuffer( id );
        byte inUseByte = buffer.get();
        boolean inUse = (inUseByte == Record.IN_USE.byteValue());
        if ( !inUse && !force )
        {
            throw new InvalidRecordException( getClass().getSimpleName() + " Record[" + id + "] not in use" );
        }
        if ( inUseByte != Record.IN_USE.byteValue() && inUseByte != Record.NOT_IN_USE.byteValue() )
        {
            throw new InvalidRecordException( getClass().getSimpleName() + " Record[" + id + "] unknown in use flag[" + inUse + "]" );
        }
        T record = newRecord( id );
        record.setInUse( inUse );
        readRecord( record, buffer );
        return record;
    }

    protected void readRecord( T record, Buffer buffer )
    {
        record.setNameId( buffer.getInt() );
    }

    protected void updateRecord( T record, PersistenceWindow window )
    {
        int id = record.getId();
        registerIdFromUpdateRecord( id );
        Buffer buffer = window.getOffsettedBuffer( id );
        if ( record.inUse() )
        {
            buffer.put( Record.IN_USE.byteValue() );
            writeRecord( record, buffer );
        }
        else
        {
            buffer.put( Record.NOT_IN_USE.byteValue() );
            if ( !isInRecoveryMode() )
            {
                freeId( id );
            }
        }
    }

    protected void writeRecord( T record, Buffer buffer )
    {
        buffer.putInt( record.getNameId() );
    }

    /** Loads the record's dynamic name records if they are not already loaded. */
    public void ensureHeavy( T record )
    {
        if (!record.isLight())
            return;
        record.setIsLight( false );
        record.addNameRecords( nameStore.getRecords( record.getNameId() ) );
    }

    /**
     * Resolves the token name by following the dynamic-record chain starting at
     * the record's name id. The inner iterator is restarted after each hit,
     * making this quadratic in the chain length (see TODO below).
     */
    public String getStringFor( T nameRecord )
    {
        int recordToFind = nameRecord.getNameId();
        Iterator<DynamicRecord> records = nameRecord.getNameRecords().iterator();
        Collection<DynamicRecord> relevantRecords = new ArrayList<>();
        while ( recordToFind != Record.NO_NEXT_BLOCK.intValue() && records.hasNext() )
        {
            DynamicRecord record = records.next();
            if ( record.inUse() && record.getId() == recordToFind )
            {
                recordToFind = (int) record.getNextBlock();
                // // TODO: optimize here, high chance next is right one
                relevantRecords.add( record );
                records = nameRecord.getNameRecords().iterator();
            }
        }
        return decodeString( nameStore.readFullByteArray( relevantRecords, PropertyType.STRING ).other() );
    }

    @Override
    public List<WindowPoolStats> getAllWindowPoolStats()
    {
        List<WindowPoolStats> list = new ArrayList<WindowPoolStats>();
        list.add( nameStore.getWindowPoolStats() );
        list.add( getWindowPoolStats() );
        return list;
    }

    @Override
    public void logAllWindowPoolStats( StringLogger.LineLogger logger )
    {
        super.logAllWindowPoolStats( logger );
        logger.logLine( nameStore.getWindowPoolStats().toString() );
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_store_TokenStore.java
|
1,265
|
/**
 * Base record for token stores (labels, property keys, relationship types).
 * Holds the id of the first dynamic record of the token's name chain, plus —
 * once loaded — the dynamic name records themselves. A record is "light" while
 * its name records have not been loaded.
 */
public abstract class TokenRecord extends AbstractRecord
{
    // Id of the first dynamic name record; NO_NEXT_BLOCK means none set yet.
    private int nameId = Record.NO_NEXT_BLOCK.intValue();
    // Dynamic name records attached to this token; empty while the record is light.
    private final List<DynamicRecord> nameRecords = new ArrayList<>();
    // True when the dynamic name records have not been loaded from the name store.
    private boolean isLight;

    TokenRecord( int id )
    {
        super( id );
    }

    void setIsLight( boolean status )
    {
        isLight = status;
    }

    public boolean isLight()
    {
        return isLight;
    }

    public int getNameId()
    {
        return nameId;
    }

    public void setNameId( int blockId )
    {
        this.nameId = blockId;
    }

    public Collection<DynamicRecord> getNameRecords()
    {
        return nameRecords;
    }

    public void addNameRecord( DynamicRecord record )
    {
        nameRecords.add( record );
    }

    public void addNameRecords( Iterable<DynamicRecord> records )
    {
        for ( DynamicRecord record : records )
        {
            addNameRecord( record );
        }
    }

    @Override
    public String toString()
    {
        StringBuilder buf = new StringBuilder( simpleName() + "[" );
        buf.append( getId() ).append( "," ).append( inUse() ? "in" : "no" ).append( " use" );
        buf.append( ",nameId=" ).append( nameId );
        additionalToString( buf );
        // Name records are only printed when they have actually been loaded.
        if ( !isLight )
        {
            for ( DynamicRecord dyn : nameRecords )
            {
                buf.append( ',' ).append( dyn );
            }
        }
        return buf.append( ']' ).toString();
    }

    /** Short type name used as the toString prefix, e.g. "Label". */
    protected abstract String simpleName();

    /** Hook for subclasses to append extra fields to {@link #toString()}. */
    protected void additionalToString( StringBuilder buf )
    {
        // default: nothing additional
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_store_TokenRecord.java
|
1,266
|
{
    @Override
    public void receive( DefinedProperty property, long propertyRecordId )
    {
        // Capture the property matching the key under test; presumably asserts it
        // is delivered at most once per load — confirm against the enclosing test.
        if ( propertyKeyId == property.propertyKeyId() )
        {
            DefinedProperty previous = foundProperty.getAndSet( property );
            assertNull( previous );
        }
    }
};
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_nioneo_store_TestXa.java
|
1,267
|
{
    // Minimal dependency resolver: hands back the enclosing test's config
    // instance for any requested type (callers are expected to ask only for Config).
    @Override
    public <T> T resolveDependency( Class<T> type, SelectionStrategy selector )
    {
        return type.cast( config );
    }
};
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_nioneo_store_TestXa.java
|
1,268
|
{
    // In-memory label scan store stand-in for the test.
    private final LabelScanStoreProvider labelScanStoreProvider =
            new LabelScanStoreProvider( new InMemoryLabelScanStore(), 10 );

    // Resolves only the three dependency types the data source needs in this
    // test; anything else is a programming error.
    @Override
    public <T> T resolveDependency( Class<T> type, SelectionStrategy selector ) throws IllegalArgumentException
    {
        if ( SchemaIndexProvider.class.isAssignableFrom( type ) )
        {
            return type.cast( SchemaIndexProvider.NO_INDEX_PROVIDER );
        }
        else if ( NodeManager.class.isAssignableFrom( type ) )
        {
            return type.cast( nodeManager );
        }
        else if ( LabelScanStoreProvider.class.isAssignableFrom( type ) )
        {
            return type.cast( labelScanStoreProvider );
        }
        throw new IllegalArgumentException( type.toString() );
    }
};
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_nioneo_store_TestXa.java
|
1,269
|
{
    // No-op receiver: used when properties are loaded purely for their side
    // effects and the values themselves are irrelevant.
    @Override
    public void receive( DefinedProperty property, long propertyRecordId )
    { // Hand it over to the void
    }
};
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_nioneo_store_TestXa.java
|
1,270
|
public class TestXa
{
private final EphemeralFileSystemAbstraction fileSystem = new EphemeralFileSystemAbstraction();
private NeoStoreXaDataSource ds;
private File logBaseFileName;
private NeoStoreXaConnection xaCon;
private Logger log;
private Level level;
private Map<String, Token> propertyKeyTokens;
/**
 * Returns the test's working directory ("xatest"), creating it on the
 * in-memory file system if needed.
 */
private File path()
{
    File dir = new File( "xatest" );
    fileSystem.mkdirs( dir );
    return dir;
}
// Resolves a file name inside the test's working directory.
private File file( String name )
{
    return new File( path(), name);
}
/**
 * Creates a fresh neo store on the ephemeral file system, opens the data
 * source and an XA connection, and silences the loggers whose levels are
 * restored in {@link #tearDownNeoStore()}.
 */
@Before
public void setUpNeoStore() throws Exception
{
    log = Logger
            .getLogger( "org.neo4j.kernel.impl.transaction.xaframework.XaLogicalLog/"
                    + "nioneo_logical.log" );
    level = log.getLevel();
    log.setLevel( Level.OFF );
    log = Logger
            .getLogger( "org.neo4j.kernel.impl.nioneo.xa.NeoStoreXaDataSource" );
    log.setLevel( Level.OFF );
    propertyKeyTokens = new HashMap<>();
    StoreFactory sf = new StoreFactory( new Config( Collections.<String, String>emptyMap(),
            GraphDatabaseSettings.class ), new DefaultIdGeneratorFactory(),
            new DefaultWindowPoolFactory(), fileSystem, StringLogger.DEV_NULL, null );
    sf.createNeoStore( file( "neo" ) ).close();
    ds = newNeoStore();
    xaCon = ds.getXaConnection();
    logBaseFileName = ds.getXaContainer().getLogicalLog().getBaseFileName();
}
/**
 * Stops the data source, restores the logger levels muted in setup, and
 * deletes every store file, id file and logical log left behind by the test.
 */
@After
public void tearDownNeoStore()
{
    ds.stop();
    log.setLevel( level );
    log = Logger
            .getLogger( "org.neo4j.kernel.impl.transaction.xaframework.XaLogicalLog/"
                    + "nioneo_logical.log" );
    log.setLevel( level );
    log = Logger
            .getLogger( "org.neo4j.kernel.impl.nioneo.xa.NeoStoreXaDataSource" );
    log.setLevel( level );
    // Remove every store file plus its companion ".id" file.
    for ( String file : new String[] {
            "neo",
            "neo.nodestore.db",
            "neo.nodestore.db.labels",
            "neo.propertystore.db",
            "neo.propertystore.db.index",
            "neo.propertystore.db.index.keys",
            "neo.propertystore.db.strings",
            "neo.propertystore.db.arrays",
            "neo.relationshipstore.db",
            "neo.relationshiptypestore.db",
            "neo.relationshiptypestore.db.names",
            "neo.schemastore.db",
    } )
    {
        fileSystem.deleteFile( file( file ) );
        fileSystem.deleteFile( file( file + ".id" ) );
    }
    // Logical logs end up in the current directory; sweep those too.
    File file = new File( "." );
    for ( File nioFile : fileSystem.listFiles( file ) )
    {
        if ( nioFile.getName().startsWith( "nioneo_logical.log" ) )
        {
            assertTrue( "Couldn't delete '" + nioFile.getPath() + "'", fileSystem.deleteFile( nioFile ) );
        }
    }
}
/**
 * Deletes the logical log files: the ".1" and ".2" logs if present, the
 * ".active" marker (must exist), and the highest-numbered ".v" archive file.
 */
private void deleteLogicalLogIfExist()
{
    File file = new File( logBaseFileName.getPath() + ".1" );
    if ( fileSystem.fileExists( file ) )
    {
        assertTrue( fileSystem.deleteFile( file ) );
    }
    file = new File( logBaseFileName.getPath() + ".2" );
    if ( fileSystem.fileExists( file ) )
    {
        assertTrue( fileSystem.deleteFile( file ) );
    }
    file = new File( logBaseFileName.getPath() + ".active" );
    assertTrue( fileSystem.deleteFile( file ) );
    // Delete the last .v file
    for ( int i = 5; i >= 0; i-- )
    {
        if ( fileSystem.deleteFile( new File( logBaseFileName.getPath() + ".v" + i ) ) )
        {
            break;
        }
    }
}
/**
 * Restores logical log backups made by {@link #copyLogicalLog}: for each
 * (original, backup) pair the original is deleted and the backup renamed
 * into its place.
 */
public static void renameCopiedLogicalLog( FileSystemAbstraction fileSystem,
        Pair<Pair<File, File>, Pair<File, File>> files ) throws IOException
{
    fileSystem.deleteFile( files.first().first() );
    fileSystem.renameFile( files.first().other(), files.first().first() );
    fileSystem.deleteFile( files.other().first() );
    fileSystem.renameFile( files.other().other(), files.other().first() );
}
/**
 * Forces the currently-active logical log to exactly {@code size} bytes:
 * truncating if it is longer, otherwise writing a zero byte at that position.
 * The active log is identified by the character stored in the ".active" file.
 */
private void truncateLogicalLog( int size ) throws IOException
{
    StoreChannel af = fileSystem.open( new File( logBaseFileName.getPath() + ".active" ), "r" );
    ByteBuffer buffer = ByteBuffer.allocate( 1024 );
    af.read( buffer );
    af.close();
    buffer.flip();
    char active = buffer.asCharBuffer().get();
    buffer.clear();
    StoreChannel fileChannel = fileSystem.open( new File( logBaseFileName.getPath() + "." + active ), "rw" );
    if ( fileChannel.size() > size )
    {
        fileChannel.truncate( size );
    }
    else
    {
        fileChannel.position( size );
        ByteBuffer buf = ByteBuffer.allocate( 1 );
        buf.put( (byte) 0 ).flip();
        fileChannel.write( buf );
    }
    fileChannel.force( false );
    fileChannel.close();
}
/**
 * Backs up the logical log: copies the ".active" marker file and the log it
 * points at into ".bak.*" files. Returns ((activeLog, activeBackup),
 * (currentLog, currentBackup)) for later restore via renameCopiedLogicalLog.
 */
public static Pair<Pair<File, File>, Pair<File, File>> copyLogicalLog( FileSystemAbstraction fileSystem,
        File logBaseFileName ) throws IOException
{
    File activeLog = new File( logBaseFileName.getPath() + ".active" );
    StoreChannel af = fileSystem.open( activeLog, "r" );
    ByteBuffer buffer = ByteBuffer.allocate( 1024 );
    af.read( buffer );
    buffer.flip();
    File activeLogBackup = new File( logBaseFileName.getPath() + ".bak.active" );
    StoreChannel activeCopy = fileSystem.open( activeLogBackup, "rw" );
    activeCopy.write( buffer );
    activeCopy.close();
    af.close();
    buffer.flip();
    // First character of the .active file names the current log suffix.
    char active = buffer.asCharBuffer().get();
    buffer.clear();
    File currentLog = new File( logBaseFileName.getPath() + "." + active );
    StoreChannel source = fileSystem.open( currentLog, "r" );
    File currentLogBackup = new File( logBaseFileName.getPath() + ".bak." + active );
    StoreChannel dest = fileSystem.open( currentLogBackup, "rw" );
    int read;
    do
    {
        read = source.read( buffer );
        buffer.flip();
        dest.write( buffer );
        buffer.clear();
    }
    while ( read == 1024 );
    source.close();
    dest.close();
    return Pair.of( Pair.of( activeLog, activeLogBackup ), Pair.of( currentLog, currentLogBackup ) );
}
private int index( String key )
{
Token result = propertyKeyTokens.get( key );
if ( result != null )
{
return result.id();
}
int id = (int) ds.nextId( PropertyKeyTokenRecord.class );
Token index = new Token( key, id );
propertyKeyTokens.put( key, index );
xaCon.getTransaction().createPropertyKeyToken( key, id );
return id;
}
@Test
public void testLogicalLog() throws Exception
{
Xid xid = new XidImpl( new byte[1], new byte[1] );
XAResource xaRes = xaCon.getXaResource();
xaRes.start( xid, XAResource.TMNOFLAGS );
long node1 = ds.nextId( Node.class );
xaCon.getTransaction().nodeCreate( node1 );
long node2 = ds.nextId( Node.class );
xaCon.getTransaction().nodeCreate( node2 );
DefinedProperty n1prop1 = xaCon.getTransaction().nodeAddProperty(
node1, index( "prop1" ), "string1" );
xaCon.getTransaction().nodeLoadProperties( node1, false, VOID );
int relType1 = (int) ds.nextId( RelationshipType.class );
xaCon.getTransaction().createRelationshipTypeToken( relType1,
"relationshiptype1" );
long rel1 = ds.nextId( Relationship.class );
xaCon.getTransaction().relationshipCreate( rel1, relType1, node1, node2 );
DefinedProperty r1prop1 = xaCon.getTransaction().relAddProperty(
rel1, index( "prop1" ), "string1" );
n1prop1 = xaCon.getTransaction().nodeChangeProperty( node1,
n1prop1.propertyKeyId(), "string2" );
r1prop1 = xaCon.getTransaction().relChangeProperty( rel1, r1prop1.propertyKeyId(),
"string2" );
xaCon.getTransaction().nodeRemoveProperty( node1, n1prop1.propertyKeyId() );
xaCon.getTransaction().relRemoveProperty( rel1, r1prop1.propertyKeyId() );
xaCon.getTransaction().relDelete( rel1 );
xaCon.getTransaction().nodeDelete( node1 );
xaCon.getTransaction().nodeDelete( node2 );
xaRes.end( xid, XAResource.TMSUCCESS );
xaRes.commit( xid, true );
Pair<Pair<File, File>, Pair<File, File>> copies = copyLogicalLog( fileSystem, logBaseFileName );
xaCon.clearAllTransactions();
ds.stop();
deleteLogicalLogIfExist();
renameCopiedLogicalLog( fileSystem, copies );
ds = newNeoStore();
xaCon = ds.getXaConnection();
xaRes = xaCon.getXaResource();
assertEquals( 0, xaRes.recover( XAResource.TMNOFLAGS ).length );
xaCon.clearAllTransactions();
}
private static final PropertyReceiver VOID = new PropertyReceiver()
{
@Override
public void receive( DefinedProperty property, long propertyRecordId )
{ // Hand it over to the void
}
};
private NeoStoreXaDataSource newNeoStore() throws IOException
{
final Config config = new Config( MapUtil.stringMap(
InternalAbstractGraphDatabase.Configuration.store_dir.name(), path().getPath(),
InternalAbstractGraphDatabase.Configuration.neo_store.name(), file( "neo" ).getPath(),
InternalAbstractGraphDatabase.Configuration.logical_log.name(),
file( LOGICAL_LOG_DEFAULT_NAME ).getPath() ), GraphDatabaseSettings.class );
StoreFactory sf = new StoreFactory( config, new DefaultIdGeneratorFactory(), new DefaultWindowPoolFactory(),
fileSystem, StringLogger.DEV_NULL, null );
PlaceboTm txManager = new PlaceboTm( null, TxIdGenerator.DEFAULT );
// Since these tests fiddle with copying logical logs and such themselves
// make sure all history logs are removed before opening the store
for ( File file : fileSystem.listFiles( path() ) )
{
if ( file.isFile() && file.getName().startsWith( LOGICAL_LOG_DEFAULT_NAME + ".v" ) )
{
fileSystem.deleteFile( file );
}
}
NodeManager nodeManager = mock(NodeManager.class);
@SuppressWarnings( "rawtypes" )
List caches = Arrays.asList(
(Cache) mock( AutoLoadingCache.class ),
(Cache) mock( AutoLoadingCache.class ) );
when( nodeManager.caches() ).thenReturn( caches );
KernelHealth kernelHealth = mock( KernelHealth.class );
NeoStoreXaDataSource neoStoreXaDataSource = new NeoStoreXaDataSource( config, sf,
StringLogger.DEV_NULL,
new XaFactory( config, TxIdGenerator.DEFAULT, txManager,
fileSystem, new Monitors(), new DevNullLoggingService(), RecoveryVerifier.ALWAYS_VALID,
LogPruneStrategies.NO_PRUNING, kernelHealth ), TransactionStateFactory.noStateFactory( new DevNullLoggingService() ),
new TransactionInterceptorProviders(
Collections.<TransactionInterceptorProvider>emptyList(), dependencyResolverForConfig( config ) ), null,
new SingleLoggingService( DEV_NULL ),
new KernelSchemaStateStore(),
mock(TokenNameLookup.class),
dependencyResolverForNoIndexProvider( nodeManager ), txManager,
mock( PropertyKeyTokenHolder.class ), mock(LabelTokenHolder.class),
mock( RelationshipTypeTokenHolder.class), mock(PersistenceManager.class), mock(LockManager.class),
mock( SchemaWriteGuard.class), IndexingService.NO_MONITOR);
neoStoreXaDataSource.init();
neoStoreXaDataSource.start();
return neoStoreXaDataSource;
}
private DependencyResolver dependencyResolverForNoIndexProvider( final NodeManager nodeManager )
{
return new DependencyResolver.Adapter()
{
private final LabelScanStoreProvider labelScanStoreProvider =
new LabelScanStoreProvider( new InMemoryLabelScanStore(), 10 );
@Override
public <T> T resolveDependency( Class<T> type, SelectionStrategy selector ) throws IllegalArgumentException
{
if ( SchemaIndexProvider.class.isAssignableFrom( type ) )
{
return type.cast( SchemaIndexProvider.NO_INDEX_PROVIDER );
}
else if ( NodeManager.class.isAssignableFrom( type ) )
{
return type.cast( nodeManager );
}
else if ( LabelScanStoreProvider.class.isAssignableFrom( type ) )
{
return type.cast( labelScanStoreProvider );
}
throw new IllegalArgumentException( type.toString() );
}
};
}
private Adapter dependencyResolverForConfig( final Config config )
{
return new DependencyResolver.Adapter()
{
@Override
public <T> T resolveDependency( Class<T> type, SelectionStrategy selector )
{
return type.cast( config );
}
};
}
@Test
public void testLogicalLogPrepared() throws Exception
{
Xid xid = new XidImpl( new byte[2], new byte[2] );
XAResource xaRes = xaCon.getXaResource();
xaRes.start( xid, XAResource.TMNOFLAGS );
long node1 = ds.nextId( Node.class );
xaCon.getTransaction().nodeCreate( node1 );
long node2 = ds.nextId( Node.class );
xaCon.getTransaction().nodeCreate( node2 );
DefinedProperty n1prop1 = xaCon.getTransaction().nodeAddProperty(
node1, index( "prop1" ), "string1" );
int relType1 = (int) ds.nextId( RelationshipType.class );
xaCon.getTransaction().createRelationshipTypeToken( relType1,
"relationshiptype1" );
long rel1 = ds.nextId( Relationship.class );
xaCon.getTransaction().relationshipCreate( rel1, relType1, node1, node2 );
DefinedProperty r1prop1 = xaCon.getTransaction().relAddProperty(
rel1, index( "prop1" ), "string1" );
xaCon.getTransaction().nodeChangeProperty( node1, n1prop1.propertyKeyId(), "string2" );
xaCon.getTransaction().relChangeProperty( rel1, r1prop1.propertyKeyId(),
"string2" );
xaRes.end( xid, XAResource.TMSUCCESS );
xaRes.prepare( xid );
ds.rotateLogicalLog();
Pair<Pair<File, File>, Pair<File, File>> copies = copyLogicalLog( fileSystem, logBaseFileName );
xaCon.clearAllTransactions();
ds.stop();
deleteLogicalLogIfExist();
renameCopiedLogicalLog( fileSystem, copies );
ds = newNeoStore();
xaCon = ds.getXaConnection();
xaRes = xaCon.getXaResource();
assertEquals( 1, xaRes.recover( XAResource.TMNOFLAGS ).length );
xaRes.commit( xid, true );
xaCon.clearAllTransactions();
}
@Test
public void testLogicalLogPreparedPropertyBlocks() throws Exception
{
Xid xid = new XidImpl( new byte[2], new byte[2] );
XAResource xaRes = xaCon.getXaResource();
xaRes.start( xid, XAResource.TMNOFLAGS );
long node1 = ds.nextId( Node.class );
xaCon.getTransaction().nodeCreate( node1 );
DefinedProperty n1prop1 = xaCon.getTransaction().nodeAddProperty(
node1, index( "prop1" ),
new long[]{1 << 23, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1} );
xaCon.getTransaction().nodeAddProperty(
node1,
index( "prop2" ),
new long[]{1 << 23, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1} );
xaRes.end( xid, XAResource.TMSUCCESS );
xaRes.prepare( xid );
ds.rotateLogicalLog();
copyClearRename();
ds = newNeoStore();
xaCon = ds.getXaConnection();
xaRes = xaCon.getXaResource();
assertEquals( 1, xaRes.recover( XAResource.TMNOFLAGS ).length );
xaRes.commit( xid, true );
xaCon.clearAllTransactions();
xid = new XidImpl( new byte[2], new byte[2] );
xaRes = xaCon.getXaResource();
xaRes.start( xid, XAResource.TMNOFLAGS );
xaCon.getTransaction().nodeRemoveProperty( node1, n1prop1.propertyKeyId() );
xaCon.getTransaction().nodeAddProperty(
node1, index( "prop3" ), new long[]{1 << 23, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1} );
xaRes.end( xid, XAResource.TMSUCCESS );
xaRes.prepare( xid );
ds.rotateLogicalLog();
copyClearRename();
ds = newNeoStore();
xaCon = ds.getXaConnection();
xaRes = xaCon.getXaResource();
assertEquals( 1, xaRes.recover( XAResource.TMNOFLAGS ).length );
xaRes.commit( xid, true );
xaCon.clearAllTransactions();
}
private void copyClearRename() throws IOException
{
copyClearRename( true );
}
private void copyClearRename( boolean clearTransactions ) throws IOException
{
Pair<Pair<File, File>, Pair<File, File>> copies = copyLogicalLog( fileSystem, logBaseFileName );
if ( clearTransactions )
{
xaCon.clearAllTransactions();
}
ds.stop();
deleteLogicalLogIfExist();
renameCopiedLogicalLog( fileSystem, copies );
}
@Test
public void makeSureRecordsAreCreated() throws Exception
{
Xid xid = new XidImpl( new byte[2], new byte[2] );
XAResource xaRes = xaCon.getXaResource();
xaRes.start( xid, XAResource.TMNOFLAGS );
long node1 = ds.nextId( Node.class );
xaCon.getTransaction().nodeCreate( node1 );
xaCon.getTransaction().nodeAddProperty( node1, index( "prop1" ),
new long[]{1l << 63, 1, 1} );
xaCon.getTransaction().nodeAddProperty( node1, index( "prop2" ),
new long[]{1l << 63, 1, 1} );
DefinedProperty toRead = xaCon.getTransaction().nodeAddProperty(
node1, index( "prop3" ),
new long[]{1l << 63, 1, 1} );
DefinedProperty toDelete = xaCon.getTransaction().nodeAddProperty(
node1, index( "prop4" ),
new long[]{1l << 63, 1, 1} );
xaRes.end( xid, XAResource.TMSUCCESS );
// xaRes.prepare( xid );
xaRes.commit( xid, true );
ds.rotateLogicalLog();
copyClearRename();
ds = newNeoStore();
xaCon = ds.getXaConnection();
xaRes = xaCon.getXaResource();
xid = new XidImpl( new byte[2], new byte[2] );
xaRes = xaCon.getXaResource();
xaRes.start( xid, XAResource.TMNOFLAGS );
xaCon.getTransaction().nodeRemoveProperty( node1, toDelete.propertyKeyId() );
xaRes.end( xid, XAResource.TMSUCCESS );
// xaRes.prepare( xid );
xaRes.commit( xid, true );
ds.rotateLogicalLog();
copyClearRename();
ds = newNeoStore();
xaCon = ds.getXaConnection();
xaRes = xaCon.getXaResource();
xid = new XidImpl( new byte[2], new byte[2] );
xaRes = xaCon.getXaResource();
xaRes.start( xid, XAResource.TMNOFLAGS );
assertTrue( Arrays.equals(
(long[]) toRead.value(),
(long[]) loadNodeProperty( xaCon.getTransaction(), node1, toRead.propertyKeyId() ) ) );
xaRes.end( xid, XAResource.TMSUCCESS );
xaRes.prepare( xid );
xaCon.clearAllTransactions();
ds.stop();
deleteLogicalLogIfExist();
}
private Object loadNodeProperty( NeoStoreTransaction writeTransaction, long node, final long propertyKeyId )
{
final AtomicReference<DefinedProperty> foundProperty = new AtomicReference<>();
PropertyReceiver receiver = new PropertyReceiver()
{
@Override
public void receive( DefinedProperty property, long propertyRecordId )
{
if ( propertyKeyId == property.propertyKeyId() )
{
DefinedProperty previous = foundProperty.getAndSet( property );
assertNull( previous );
}
}
};
writeTransaction.nodeLoadProperties( node, false, receiver );
assertNotNull( foundProperty.get() );
return foundProperty.get().value();
}
@Test
public void testDynamicRecordsInLog() throws Exception
{
Xid xid = new XidImpl( new byte[2], new byte[2] );
XAResource xaRes = xaCon.getXaResource();
xaRes.start( xid, XAResource.TMNOFLAGS );
long node1 = ds.nextId( Node.class );
xaCon.getTransaction().nodeCreate( node1 );
DefinedProperty toChange = xaCon.getTransaction().nodeAddProperty(
node1, index( "prop1" ), "hi" );
DefinedProperty toRead = xaCon.getTransaction().nodeAddProperty(
node1,
index( "prop2" ),
new long[]{1 << 23, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1} );
xaRes.end( xid, XAResource.TMSUCCESS );
xaRes.prepare( xid );
ds.rotateLogicalLog();
copyClearRename();
ds = newNeoStore();
xaCon = ds.getXaConnection();
xaRes = xaCon.getXaResource();
assertEquals( 1, xaRes.recover( XAResource.TMNOFLAGS ).length );
xaRes.commit( xid, true );
xaCon.clearAllTransactions();
xid = new XidImpl( new byte[2], new byte[2] );
xaRes = xaCon.getXaResource();
xaRes.start( xid, XAResource.TMNOFLAGS );
xaCon.getTransaction().nodeChangeProperty( node1, toChange.propertyKeyId(), "hI" );
xaRes.end( xid, XAResource.TMSUCCESS );
xaRes.prepare( xid );
ds.rotateLogicalLog();
copyClearRename();
ds = newNeoStore();
xaCon = ds.getXaConnection();
xaRes = xaCon.getXaResource();
assertEquals( 1, xaRes.recover( XAResource.TMNOFLAGS ).length );
xaRes.commit( xid, true );
xaCon.clearAllTransactions();
assertTrue(
Arrays.equals( new long[]{1 << 23, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1},
(long[]) loadNodeProperty( xaCon.getTransaction(), node1, toRead.propertyKeyId() ) ) );
}
@Test
public void testLogicalLogPrePrepared() throws Exception
{
Xid xid = new XidImpl( new byte[3], new byte[3] );
XAResource xaRes = xaCon.getXaResource();
xaRes.start( xid, XAResource.TMNOFLAGS );
long node1 = ds.nextId( Node.class );
xaCon.getTransaction().nodeCreate( node1 );
long node2 = ds.nextId( Node.class );
xaCon.getTransaction().nodeCreate( node2 );
DefinedProperty n1prop1 = xaCon.getTransaction().nodeAddProperty(
node1, index( "prop1" ), "string1" );
int relType1 = (int) ds.nextId( RelationshipType.class );
xaCon.getTransaction().createRelationshipTypeToken( relType1,
"relationshiptype1" );
long rel1 = ds.nextId( Relationship.class );
xaCon.getTransaction().relationshipCreate( rel1, relType1, node1, node2 );
DefinedProperty r1prop1 = xaCon.getTransaction().relAddProperty(
rel1, index( "prop1" ), "string1" );
xaCon.getTransaction().nodeChangeProperty( node1, n1prop1.propertyKeyId(), "string2" );
xaCon.getTransaction().relChangeProperty( rel1, r1prop1.propertyKeyId(),
"string2" );
xaRes.end( xid, XAResource.TMSUCCESS );
copyClearRename();
ds = newNeoStore();
xaCon = ds.getXaConnection();
xaRes = xaCon.getXaResource();
assertEquals( 0, xaRes.recover( XAResource.TMNOFLAGS ).length );
}
@Test
public void testBrokenNodeCommand() throws Exception
{
Xid xid = new XidImpl( new byte[4], new byte[4] );
XAResource xaRes = xaCon.getXaResource();
xaRes.start( xid, XAResource.TMNOFLAGS );
long node1 = ds.nextId( Node.class );
xaCon.getTransaction().nodeCreate( node1 );
xaRes.end( xid, XAResource.TMSUCCESS );
xaRes.prepare( xid );
xaCon.clearAllTransactions();
copyClearRename();
truncateLogicalLog( 102 );
ds = newNeoStore();
xaCon = ds.getXaConnection();
xaRes = xaCon.getXaResource();
assertEquals( 0, xaRes.recover( XAResource.TMNOFLAGS ).length );
xaCon.clearAllTransactions();
}
@Test
public void testBrokenCommand() throws Exception
{
// Given
Xid xid = new XidImpl( new byte[4], new byte[4] );
XAResource xaRes = xaCon.getXaResource();
xaRes.start( xid, XAResource.TMNOFLAGS );
long node1 = ds.nextId( Node.class );
xaCon.getTransaction().nodeCreate( node1 );
xaRes.end( xid, XAResource.TMSUCCESS );
xaRes.prepare( xid );
copyClearRename();
truncateLogicalLog( 102 );
// When
ds = newNeoStore();
// Then
xaCon = ds.getXaConnection();
xaRes = xaCon.getXaResource();
assertEquals( 0, xaRes.recover( XAResource.TMNOFLAGS ).length );
xaCon.clearAllTransactions();
}
@Test
public void testBrokenPrepare() throws Exception
{
Xid xid = new XidImpl( new byte[4], new byte[4] );
XAResource xaRes = xaCon.getXaResource();
xaRes.start( xid, XAResource.TMNOFLAGS );
long node1 = ds.nextId( Node.class );
xaCon.getTransaction().nodeCreate( node1 );
long node2 = ds.nextId( Node.class );
xaCon.getTransaction().nodeCreate( node2 );
/*PropertyData n1prop1 = */
xaCon.getTransaction().nodeAddProperty(
node1, index( "prop1" ), "string value 1" );
xaRes.end( xid, XAResource.TMSUCCESS );
xaRes.prepare( xid );
copyClearRename();
truncateLogicalLog( 243 );
ds = newNeoStore();
xaCon = ds.getXaConnection();
xaRes = xaCon.getXaResource();
assertEquals( 0, xaRes.recover( XAResource.TMNOFLAGS ).length );
xaCon.clearAllTransactions();
}
@Test
public void testBrokenDone() throws Exception
{
Xid xid = new XidImpl( new byte[4], new byte[4] );
XAResource xaRes = xaCon.getXaResource();
xaRes.start( xid, XAResource.TMNOFLAGS );
long node1 = ds.nextId( Node.class );
xaCon.getTransaction().nodeCreate( node1 );
long node2 = ds.nextId( Node.class );
xaCon.getTransaction().nodeCreate( node2 );
/*PropertyData n1prop1 = */
xaCon.getTransaction().nodeAddProperty(
node1, index( "prop1" ), "string value 1" );
xaRes.end( xid, XAResource.TMSUCCESS );
xaRes.prepare( xid );
xaRes.commit( xid, false );
copyClearRename( false );
truncateLogicalLog( 318 );
ds = newNeoStore();
xaCon = ds.getXaConnection();
xaRes = xaCon.getXaResource();
assertEquals( 1, xaRes.recover( XAResource.TMNOFLAGS ).length );
xaCon.clearAllTransactions();
}
@Test
public void testLogVersion()
{
long creationTime = ds.getCreationTime();
long randomIdentifier = ds.getRandomIdentifier();
long currentVersion = ds.getCurrentLogVersion();
assertEquals( currentVersion, ds.incrementAndGetLogVersion() );
assertEquals( currentVersion + 1, ds.incrementAndGetLogVersion() );
assertEquals( creationTime, ds.getCreationTime() );
assertEquals( randomIdentifier, ds.getRandomIdentifier() );
}
@Test
public void testLogicalLogRotation() throws Exception
{
// TODO fix somehow
// ds.keepLogicalLogs( true );
Xid xid = new XidImpl( new byte[1], new byte[1] );
XAResource xaRes = xaCon.getXaResource();
xaRes.start( xid, XAResource.TMNOFLAGS );
long node1 = ds.nextId( Node.class );
xaCon.getTransaction().nodeCreate( node1 );
long node2 = ds.nextId( Node.class );
xaCon.getTransaction().nodeCreate( node2 );
DefinedProperty n1prop1 = xaCon.getTransaction().nodeAddProperty(
node1, index( "prop1" ), "string1" );
xaCon.getTransaction().nodeLoadProperties( node1, false, VOID );
int relType1 = (int) ds.nextId( RelationshipType.class );
xaCon.getTransaction().createRelationshipTypeToken( relType1, "relationshiptype1" );
long rel1 = ds.nextId( Relationship.class );
xaCon.getTransaction().relationshipCreate( rel1, relType1, node1, node2 );
DefinedProperty r1prop1 = xaCon.getTransaction().relAddProperty(
rel1, index( "prop1" ), "string1" );
n1prop1 = xaCon.getTransaction().nodeChangeProperty( node1,
n1prop1.propertyKeyId(), "string2" );
r1prop1 = xaCon.getTransaction().relChangeProperty( rel1, r1prop1.propertyKeyId(),
"string2" );
xaCon.getTransaction().nodeRemoveProperty( node1, n1prop1.propertyKeyId() );
xaCon.getTransaction().relRemoveProperty( rel1, r1prop1.propertyKeyId() );
xaCon.getTransaction().relDelete( rel1 );
xaCon.getTransaction().nodeDelete( node1 );
xaCon.getTransaction().nodeDelete( node2 );
xaRes.end( xid, XAResource.TMSUCCESS );
xaRes.commit( xid, true );
long currentVersion = ds.getCurrentLogVersion();
ds.rotateLogicalLog();
assertTrue( logicalLogExists( currentVersion ) );
ds.rotateLogicalLog();
assertTrue( logicalLogExists( currentVersion ) );
assertTrue( logicalLogExists( currentVersion + 1 ) );
}
private boolean logicalLogExists( long version ) throws IOException
{
ReadableByteChannel log = ds.getLogicalLog( version );
if ( log != null )
{
log.close();
return true;
}
return false;
}
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_nioneo_store_TestXa.java
|
1,271
|
public class TestStoreAccess
{
@Test
public void openingThroughStoreAccessShouldNotTriggerRecovery() throws Exception
{
EphemeralFileSystemAbstraction snapshot = produceUncleanStore();
assertTrue( "Store should be unclean", isUnclean( snapshot ) );
File messages = new File( storeDir, "messages.log" );
snapshot.deleteFile( messages );
new StoreAccess( snapshot, storeDir.getPath(), stringMap() ).close();
assertTrue( "Store should be unclean", isUnclean( snapshot ) );
}
@Rule public EphemeralFileSystemRule fs = new EphemeralFileSystemRule();
private final File storeDir = new File( "dir" );
private EphemeralFileSystemAbstraction produceUncleanStore()
{
GraphDatabaseService db = new TestGraphDatabaseFactory().setFileSystem( fs.get() )
.newImpermanentDatabase( storeDir.getPath() );
EphemeralFileSystemAbstraction snapshot = fs.get().snapshot();
db.shutdown();
return snapshot;
}
private boolean isUnclean( FileSystemAbstraction fileSystem ) throws IOException
{
char chr = activeLog( fileSystem, storeDir );
return chr == '1' || chr == '2';
}
private char activeLog( FileSystemAbstraction fileSystem, File directory ) throws IOException
{
StoreChannel file = fileSystem.open( new File( directory, "nioneo_logical.log.active" ), "r" );
try
{
ByteBuffer buffer = ByteBuffer.wrap( new byte[2] );
file.read( buffer );
buffer.flip();
return buffer.getChar();
}
finally
{
file.close();
}
}
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_nioneo_store_TestStoreAccess.java
|
1,272
|
private static class Store extends AbstractStore
{
public static final String TYPE_DESCRIPTOR = "TestVersion";
private static final int RECORD_SIZE = 1;
public Store( File fileName ) throws IOException
{
super( fileName, new Config( MapUtil.stringMap( "store_dir", "target/var/teststore" ),
GraphDatabaseSettings.class ),
IdType.NODE, ID_GENERATOR_FACTORY, WINDOW_POOL_FACTORY, FILE_SYSTEM, StringLogger.DEV_NULL );
}
public int getRecordSize()
{
return RECORD_SIZE;
}
public String getTypeDescriptor()
{
return TYPE_DESCRIPTOR;
}
public static Store createStore( File fileName ) throws IOException
{
new StoreFactory( new Config( Collections.<String, String>emptyMap(), GraphDatabaseSettings.class ),
ID_GENERATOR_FACTORY, new DefaultWindowPoolFactory(),
FILE_SYSTEM, StringLogger.DEV_NULL, null ).
createEmptyStore( fileName, buildTypeDescriptorAndVersion( TYPE_DESCRIPTOR ) );
return new Store( fileName );
}
protected void rebuildIdGenerator()
{
}
@Override
public List<WindowPoolStats> getAllWindowPoolStats()
{
// TODO Auto-generated method stub
return null;
}
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_nioneo_store_TestStore.java
|
1,273
|
{
@Override
public Void doWork( State state )
{
state.properties.setProperty( key, value );
return null;
}
} );
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_nioneo_store_TestGraphProperties.java
|
1,274
|
{
@Override
public Void doWork( State state )
{
state.tx = state.db.beginTx();
return null;
}
} );
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_nioneo_store_TestGraphProperties.java
|
1,275
|
{
@Override
public Void doWork( State state )
{
state.tx.success();
state.tx.finish();
return null;
}
} );
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_nioneo_store_TestGraphProperties.java
|
1,276
|
{
@Override
public Boolean doWork( State state )
{
return state.properties.hasProperty( key );
}
} );
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_nioneo_store_TestGraphProperties.java
|
1,277
|
BYTE( PropertyType.BYTE, 8, Byte.class, byte.class )
{
int getRequiredBits( byte value )
{
long mask = 1L << maxBits - 1;
for ( int i = maxBits; i > 0; i--, mask >>= 1 )
{
if ( (mask & value) != 0 )
{
return i;
}
}
return 1;
}
@Override
int getRequiredBits( Object array, int arrayLength )
{
int highest = 1;
if ( isPrimitive( array ) )
{
for ( byte value : (byte[]) array )
{
highest = Math.max( getRequiredBits( value ), highest );
}
} else
{
for ( byte value : (Byte[]) array )
{
highest = Math.max( getRequiredBits( value ), highest );
}
}
return highest;
}
@Override
void writeAll( Object array, int length, int requiredBits, Bits result )
{
if ( isPrimitive( array ) )
{
for ( byte b : (byte[]) array )
{
result.put( b, requiredBits );
}
} else
{
for ( byte b : (Byte[]) array )
{
result.put( b, requiredBits );
}
}
}
@Override
Object createArray( int length, Bits bits, int requiredBits )
{
if ( length == 0 )
{
return EMPTY_BYTE_ARRAY;
}
final byte[] result = new byte[length];
for ( int i = 0; i < length; i++ )
{
result[i] = bits.getByte( requiredBits );
}
return result;
}
@Override
public Object createEmptyArray()
{
return EMPTY_BYTE_ARRAY;
}
},
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_store_ShortArray.java
|
1,278
|
BOOLEAN( PropertyType.BOOL, 1, Boolean.class, boolean.class )
{
@Override
int getRequiredBits( Object array, int arrayLength )
{
return 1;
}
@Override
void writeAll( Object array, int length, int requiredBits, Bits result )
{
if ( isPrimitive( array ) )
{
for ( boolean value : (boolean[]) array )
{
result.put( value ? 1 : 0, 1 );
}
} else
{
for ( boolean value : (Boolean[]) array )
{
result.put( value ? 1 : 0, 1 );
}
}
}
@Override
Object createArray( int length, Bits bits, int requiredBits )
{
if ( length == 0 )
{
return EMPTY_BOOLEAN_ARRAY;
}
final boolean[] result = new boolean[length];
for ( int i = 0; i < length; i++ )
{
result[i] = bits.getByte( requiredBits ) != 0;
}
return result;
}
@Override
public Object createEmptyArray()
{
return EMPTY_BOOLEAN_ARRAY;
}
},
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_store_ShortArray.java
|
1,279
|
public class SchemaStoreTest
{
@Test
public void serializationAndDeserialization() throws Exception
{
// GIVEN
int propertyKey = 4;
int labelId = 1;
IndexRule indexRule = IndexRule.indexRule( store.nextId(), labelId, propertyKey, PROVIDER_DESCRIPTOR );
// WHEN
byte[] serialized = new RecordSerializer().append( indexRule ).serialize();
IndexRule readIndexRule = (IndexRule) SchemaRule.Kind.deserialize( indexRule.getId(), wrap( serialized ) );
// THEN
assertEquals( indexRule.getId(), readIndexRule.getId() );
assertEquals( indexRule.getKind(), readIndexRule.getKind() );
assertEquals( indexRule.getLabel(), readIndexRule.getLabel() );
assertEquals( indexRule.getPropertyKey(), readIndexRule.getPropertyKey() );
assertEquals( indexRule.getProviderDescriptor(), readIndexRule.getProviderDescriptor() );
}
@Test
public void storeAndLoadAllShortRules() throws Exception
{
// GIVEN
Collection<SchemaRule> rules = Arrays.<SchemaRule>asList(
IndexRule.indexRule( store.nextId(), 0, 5, PROVIDER_DESCRIPTOR ),
IndexRule.indexRule( store.nextId(), 1, 6, PROVIDER_DESCRIPTOR ),
IndexRule.indexRule( store.nextId(), 1, 7, PROVIDER_DESCRIPTOR ) );
for ( SchemaRule rule : rules )
{
storeRule( rule );
}
// WHEN
Collection<SchemaRule> readRules = asCollection( store.loadAllSchemaRules() );
// THEN
assertEquals( rules, readRules );
}
// ENABLE WHEN MULTIPLE PROPERTY KEYS PER INDEX RULE IS SUPPORTED
// @Test
// public void storeAndLoadSingleLongRule() throws Exception
// {
// // GIVEN
//
// Collection<SchemaRule> rules = Arrays.<SchemaRule>asList( createLongIndexRule( 0, 50 ) );
// for ( SchemaRule rule : rules )
// storeRule( rule );
//
// // WHEN
// Collection<SchemaRule> readRules = asCollection( store.loadAll() );
//
// // THEN
// assertEquals( rules, readRules );
// }
//
// @Test
// public void storeAndLoadAllLongRules() throws Exception
// {
// // GIVEN
// Collection<SchemaRule> rules = Arrays.<SchemaRule>asList(
// createLongIndexRule( 0, 100 ), createLongIndexRule( 1, 6 ), createLongIndexRule( 2, 50 ) );
// for ( SchemaRule rule : rules )
// storeRule( rule );
//
// // WHEN
// Collection<SchemaRule> readRules = asCollection( store.loadAll() );
//
// // THEN
// assertEquals( rules, readRules );
// }
//
// private IndexRule createLongIndexRule( long label, int numberOfPropertyKeys )
// {
// long[] propertyKeys = new long[numberOfPropertyKeys];
// for ( int i = 0; i < propertyKeys.length; i++ )
// propertyKeys[i] = i;
// return new IndexRule( store.nextId(), label, POPULATING, propertyKeys );
// }
private long storeRule( SchemaRule rule )
{
Collection<DynamicRecord> records = store.allocateFrom( rule );
for ( DynamicRecord record : records )
{
store.updateRecord( record );
}
return first( records ).getId();
}
private Config config;
private SchemaStore store;
@Rule public EphemeralFileSystemRule fs = new EphemeralFileSystemRule();
private StoreFactory storeFactory;
@Before
public void before() throws Exception
{
config = new Config( stringMap() );
DefaultIdGeneratorFactory idGeneratorFactory = new DefaultIdGeneratorFactory();
DefaultWindowPoolFactory windowPoolFactory = new DefaultWindowPoolFactory();
storeFactory = new StoreFactory( config, idGeneratorFactory, windowPoolFactory, fs.get(), DEV_NULL,
new DefaultTxHook() );
File file = new File( "schema-store" );
storeFactory.createSchemaStore( file );
store = storeFactory.newSchemaStore( file );
}
@After
public void after() throws Exception
{
store.close();
}
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_nioneo_store_SchemaStoreTest.java
|
1,280
|
public class SchemaStore extends AbstractDynamicStore implements Iterable<SchemaRule>
{
// store version, each store ends with this string (byte encoded)
public static final String TYPE_DESCRIPTOR = "SchemaStore";
public static final String VERSION = buildTypeDescriptorAndVersion( TYPE_DESCRIPTOR );
public static final int BLOCK_SIZE = 56; // + BLOCK_HEADER_SIZE == 64
@SuppressWarnings("deprecation")
public SchemaStore( File fileName, Config conf, IdType idType, IdGeneratorFactory idGeneratorFactory,
WindowPoolFactory windowPoolFactory, FileSystemAbstraction fileSystemAbstraction,
StringLogger stringLogger )
{
super( fileName, conf, idType, idGeneratorFactory, windowPoolFactory, fileSystemAbstraction, stringLogger );
}
@Override
public <FAILURE extends Exception> void accept( Processor<FAILURE> processor, DynamicRecord record ) throws FAILURE
{
processor.processSchema( this, record );
}
@Override
public String getTypeDescriptor()
{
return TYPE_DESCRIPTOR;
}
public Collection<DynamicRecord> allocateFrom( SchemaRule rule )
{
RecordSerializer serializer = new RecordSerializer();
serializer = serializer.append( rule );
return allocateRecordsFromBytes( serializer.serialize(), asList( forceGetRecord( rule.getId() ) ).iterator(),
recordAllocator );
}
public Iterator<SchemaRule> loadAllSchemaRules()
{
return new SchemaStorage( this ).loadAllSchemaRules();
}
@Override
public Iterator<SchemaRule> iterator()
{
return loadAllSchemaRules();
}
public static SchemaRule readSchemaRule( long id, Collection<DynamicRecord> records )
throws MalformedSchemaRuleException
{
return readSchemaRule( id, records, new byte[ BLOCK_SIZE * 4 ] );
}
static SchemaRule readSchemaRule( long id, Collection<DynamicRecord> records, byte[] buffer )
throws MalformedSchemaRuleException
{
ByteBuffer scratchBuffer = concatData( records, buffer );
return deserialize( id, scratchBuffer );
}
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_store_SchemaStore.java
|
1,281
|
ALL
{
@Override
public boolean isOfKind( IndexRule rule )
{
return true;
}
};
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_store_SchemaStorage.java
|
1,282
|
CONSTRAINT
{
@Override
public boolean isOfKind( IndexRule rule )
{
return rule.isConstraintIndex();
}
},
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_store_SchemaStorage.java
|
1,283
|
INDEX
{
@Override
public boolean isOfKind( IndexRule rule )
{
return !rule.isConstraintIndex();
}
},
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_store_SchemaStorage.java
|
1,284
|
{
@Override
public boolean accept( UniquenessConstraintRule item )
{
return item.containsPropertyKeyId( propertyKeyId );
}
} );
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_store_SchemaStorage.java
|
1,285
|
{
private final long highestId = schemaStore.getHighestPossibleIdInUse();
private long currentId = 1; /*record 0 contains the block size*/
private final byte[] scratchData = newRecordBuffer();
@Override
protected SchemaRule fetchNextOrNull()
{
while ( currentId <= highestId )
{
long id = currentId++;
DynamicRecord record = schemaStore.forceGetRecord( id );
if ( record.inUse() && record.isStartRecord() )
{
try
{
return getSchemaRule( id, scratchData );
}
catch ( MalformedSchemaRuleException e )
{
// TODO remove this and throw this further up
throw new RuntimeException( e );
}
}
}
return null;
}
};
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_store_SchemaStorage.java
|
1,286
|
{
@Override
public boolean accept( SchemaRule rule )
{
return ruleClass.isInstance( rule );
}
}, loadAllSchemaRules() );
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_store_SchemaStorage.java
|
1,287
|
{
@SuppressWarnings("unchecked")
@Override
public boolean accept( SchemaRule rule )
{
return rule.getKind() == kind &&
predicate.accept( (R) rule );
}
}, loadAllSchemaRules() ) );
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_store_SchemaStorage.java
|
1,288
|
{
@SuppressWarnings("unchecked")
@Override
public boolean accept( SchemaRule rule )
{
return rule.getLabel() == labelId &&
rule.getKind().getRuleClass() == ruleType &&
predicate.accept( (R) rule );
}
}, loadAllSchemaRules() ) );
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_store_SchemaStorage.java
|
1,289
|
{
@Override
public boolean accept( IndexRule item )
{
return item.getPropertyKey() == propertyKeyId;
}
} );
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_store_SchemaStorage.java
|
1,290
|
/**
 * High-level access to {@link SchemaRule}s on top of the raw schema store:
 * finds index rules and uniqueness constraint rules by label and property key,
 * and iterates over all stored rules.
 */
public class SchemaStorage implements SchemaRuleAccess
{
    /**
     * Discriminates between the kinds of {@link IndexRule} a lookup may match:
     * plain indexes, indexes owned by a uniqueness constraint, or either.
     */
    public static enum IndexRuleKind
    {
        INDEX
        {
            @Override
            public boolean isOfKind( IndexRule rule )
            {
                return !rule.isConstraintIndex();
            }
        },
        CONSTRAINT
        {
            @Override
            public boolean isOfKind( IndexRule rule )
            {
                return rule.isConstraintIndex();
            }
        },
        ALL
        {
            @Override
            public boolean isOfKind( IndexRule rule )
            {
                return true;
            }
        };

        public abstract boolean isOfKind( IndexRule rule );
    }

    private final RecordStore<DynamicRecord> schemaStore;

    public SchemaStorage( RecordStore<DynamicRecord> schemaStore )
    {
        this.schemaStore = schemaStore;
    }

    /**
     * Find the IndexRule, of any kind, for the given label and property key.
     *
     * @throws SchemaRuleNotFoundException if there is not exactly one matching candidate rule.
     */
    public IndexRule indexRule( int labelId, int propertyKeyId ) throws SchemaRuleNotFoundException
    {
        return indexRule( labelId, propertyKeyId, IndexRuleKind.ALL );
    }

    /**
     * Find an IndexRule of the given kind, for the given label and property.
     *
     * @throws SchemaRuleNotFoundException if there is not exactly one matching candidate rule.
     */
    public IndexRule indexRule( int labelId, final int propertyKeyId, IndexRuleKind kind ) throws SchemaRuleNotFoundException
    {
        Iterator<IndexRule> rules = schemaRules(
                IndexRule.class, labelId,
                new Predicate<IndexRule>()
                {
                    @Override
                    public boolean accept( IndexRule item )
                    {
                        return item.getPropertyKey() == propertyKeyId;
                    }
                } );
        IndexRule foundRule = null;
        while ( rules.hasNext() )
        {
            IndexRule candidate = rules.next();
            if ( kind.isOfKind( candidate ) )
            {
                // More than one match is ambiguous: fail rather than pick one arbitrarily.
                if ( foundRule != null )
                {
                    throw new SchemaRuleNotFoundException( labelId, propertyKeyId, String.format("found more than one matching index rule, %s and %s", foundRule, candidate) );
                }
                foundRule = candidate;
            }
        }
        if ( foundRule == null )
        {
            throw new SchemaRuleNotFoundException( labelId, propertyKeyId, "not found" );
        }
        return foundRule;
    }

    /**
     * @return all index rules (of any kind) in the store.
     */
    public Iterator<IndexRule> allIndexRules()
    {
        return schemaRules( IndexRule.class );
    }

    /**
     * @return all rules of the given class for the given label that pass the predicate.
     */
    public <T extends SchemaRule> Iterator<T> schemaRules( final Class<T> type, int labelId, Predicate<T> predicate )
    {
        return schemaRules( Functions.cast( type ), type, labelId, predicate );
    }

    /**
     * @return the conversion of all rules of the given class for the given label
     *         that pass the predicate.
     */
    public <R extends SchemaRule, T> Iterator<T> schemaRules(
            Function<? super R, T> conversion, final Class<R> ruleType,
            final int labelId, final Predicate<R> predicate )
    {
        @SuppressWarnings("unchecked"/*the predicate ensures that this is safe*/)
        Function<SchemaRule, T> ruleConversion = (Function) conversion;
        return map( ruleConversion, filter( new Predicate<SchemaRule>()
        {
            @SuppressWarnings("unchecked")
            @Override
            public boolean accept( SchemaRule rule )
            {
                return rule.getLabel() == labelId &&
                       rule.getKind().getRuleClass() == ruleType &&
                       predicate.accept( (R) rule );
            }
        }, loadAllSchemaRules() ) );
    }

    /**
     * @return the conversion of all rules of the given kind that pass the predicate.
     */
    public <R extends SchemaRule, T> Iterator<T> schemaRules(
            Function<? super R, T> conversion, final SchemaRule.Kind kind,
            final Predicate<R> predicate )
    {
        @SuppressWarnings("unchecked"/*the predicate ensures that this is safe*/)
        Function<SchemaRule, T> ruleConversion = (Function) conversion;
        return map( ruleConversion, filter( new Predicate<SchemaRule>()
        {
            @SuppressWarnings("unchecked")
            @Override
            public boolean accept( SchemaRule rule )
            {
                return rule.getKind() == kind &&
                       predicate.accept( (R) rule );
            }
        }, loadAllSchemaRules() ) );
    }

    /**
     * @return all rules that are instances of the given class.
     */
    public <R extends SchemaRule> Iterator<R> schemaRules( final Class<R> ruleClass )
    {
        @SuppressWarnings({"UnnecessaryLocalVariable", "unchecked"/*the predicate ensures that this cast is safe*/})
        Iterator<R> result = (Iterator)filter( new Predicate<SchemaRule>()
        {
            @Override
            public boolean accept( SchemaRule rule )
            {
                return ruleClass.isInstance( rule );
            }
        }, loadAllSchemaRules() );
        return result;
    }

    /**
     * Lazily walks the schema store and deserializes every rule whose record
    * chain starts at an in-use start record.
     */
    public Iterator<SchemaRule> loadAllSchemaRules()
    {
        return new PrefetchingIterator<SchemaRule>()
        {
            private final long highestId = schemaStore.getHighestPossibleIdInUse();
            private long currentId = 1; /*record 0 contains the block size*/
            private final byte[] scratchData = newRecordBuffer();

            @Override
            protected SchemaRule fetchNextOrNull()
            {
                while ( currentId <= highestId )
                {
                    long id = currentId++;
                    DynamicRecord record = schemaStore.forceGetRecord( id );
                    // Only chains beginning at a start record represent a rule;
                    // continuation records are picked up via getRecords().
                    if ( record.inUse() && record.isStartRecord() )
                    {
                        try
                        {
                            return getSchemaRule( id, scratchData );
                        }
                        catch ( MalformedSchemaRuleException e )
                        {
                            // TODO remove this and throw this further up
                            throw new RuntimeException( e );
                        }
                    }
                }
                return null;
            }
        };
    }

    @Override
    public SchemaRule loadSingleSchemaRule( long ruleId ) throws MalformedSchemaRuleException
    {
        return getSchemaRule( ruleId, newRecordBuffer() );
    }

    // Scratch space sized to hold a few records' worth of serialized rule data.
    private byte[] newRecordBuffer()
    {
        return new byte[schemaStore.getRecordSize()*4];
    }

    private SchemaRule getSchemaRule( long id, byte[] buffer ) throws MalformedSchemaRuleException
    {
        Collection<DynamicRecord> records;
        try
        {
            records = schemaStore.getRecords( id );
        }
        catch ( Exception e )
        {
            // Wrap any failure while reading the record chain as a malformed rule.
            throw new MalformedSchemaRuleException( e.getMessage(), e );
        }
        return SchemaStore.readSchemaRule( id, records, buffer );
    }

    /**
     * @return a fresh rule id from the underlying store's id generator.
     */
    public long newRuleId()
    {
        return schemaStore.nextId();
    }

    /**
     * Find the uniqueness constraint rule for the given label and property key.
     *
     * @throws SchemaRuleNotFoundException if there is not exactly one matching rule.
     */
    public UniquenessConstraintRule uniquenessConstraint( int labelId, final int propertyKeyId )
            throws SchemaRuleNotFoundException
    {
        Iterator<UniquenessConstraintRule> rules = schemaRules(
                UniquenessConstraintRule.class, labelId,
                new Predicate<UniquenessConstraintRule>()
                {
                    @Override
                    public boolean accept( UniquenessConstraintRule item )
                    {
                        return item.containsPropertyKeyId( propertyKeyId );
                    }
                } );
        if ( !rules.hasNext() )
        {
            throw new SchemaRuleNotFoundException( labelId, propertyKeyId, "not found" );
        }
        UniquenessConstraintRule rule = rules.next();
        if ( rules.hasNext() )
        {
            // Fixed message: this method matches constraints, not indexes.
            throw new SchemaRuleNotFoundException( labelId, propertyKeyId, "found more than one matching constraint" );
        }
        return rule;
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_store_SchemaStorage.java
|
1,291
|
UNIQUENESS_CONSTRAINT( 3, UniquenessConstraintRule.class )
{
@Override
protected SchemaRule newRule( long id, int labelId, ByteBuffer buffer )
{
return UniquenessConstraintRule.readUniquenessConstraintRule( id, labelId, buffer );
}
};
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_store_SchemaRule.java
|
1,292
|
CONSTRAINT_INDEX_RULE( 2, IndexRule.class )
{
@Override
protected SchemaRule newRule( long id, int labelId, ByteBuffer buffer )
{
return IndexRule.readIndexRule( id, true, labelId, buffer );
}
},
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_store_SchemaRule.java
|
1,293
|
INDEX_RULE( 1, IndexRule.class )
{
@Override
protected SchemaRule newRule( long id, int labelId, ByteBuffer buffer )
{
return IndexRule.readIndexRule( id, false, labelId, buffer );
}
},
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_store_SchemaRule.java
|
1,294
|
public static abstract class Configuration
extends TokenStore.Configuration
{
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_store_RelationshipTypeTokenStore.java
|
1,295
|
/**
 * Store for relationship type tokens. Each record is a fixed 5-byte entry:
 * one in-use byte followed by a 4-byte id pointing into the dynamic name store.
 */
public class RelationshipTypeTokenStore extends TokenStore<RelationshipTypeTokenRecord>
{
    public static abstract class Configuration
        extends TokenStore.Configuration
    {
    }
    public static final String TYPE_DESCRIPTOR = "RelationshipTypeStore";
    private static final int RECORD_SIZE = 1/*inUse*/ + 4/*nameId*/;
    public RelationshipTypeTokenStore( File fileName, Config config,
            IdGeneratorFactory idGeneratorFactory, WindowPoolFactory windowPoolFactory,
            FileSystemAbstraction fileSystemAbstraction, StringLogger stringLogger,
            DynamicStringStore nameStore )
    {
        super(fileName, config, IdType.RELATIONSHIP_TYPE_TOKEN, idGeneratorFactory, windowPoolFactory,
                fileSystemAbstraction, stringLogger, nameStore);
    }
    @Override
    public <FAILURE extends Exception> void accept( Processor<FAILURE> processor, RelationshipTypeTokenRecord record ) throws FAILURE
    {
        processor.processRelationshipTypeToken( this, record );
    }
    @Override
    // TODO: Remove this method?
    // Rebuilds the id generator by scanning the store file. Unlike the generic
    // rebuild, holes (records not in use) are overwritten as RESERVED rather
    // than returned to the free list.
    protected void rebuildIdGenerator()
    {
        stringLogger.debug( "Rebuilding id generator for[" + getStorageFileName()
            + "] ..." );
        closeIdGenerator();
        // Start from a fresh .id file: delete any existing one and recreate it.
        if ( fileSystemAbstraction.fileExists( new File( getStorageFileName().getPath() + ".id" )) )
        {
            boolean success = fileSystemAbstraction.deleteFile( new File( getStorageFileName().getPath() + ".id" ));
            assert success;
        }
        createIdGenerator( new File( getStorageFileName().getPath() + ".id" ));
        openIdGenerator();
        StoreChannel fileChannel = getFileChannel();
        long highId = -1;
        int recordSize = getRecordSize();
        try
        {
            long fileSize = fileChannel.size();
            ByteBuffer byteBuffer = ByteBuffer.wrap( new byte[recordSize] );
            for ( int i = 0; i * recordSize < fileSize; i++ )
            {
                // NOTE(review): the return value of read() is not checked, and
                // after the second flip() below the buffer limit shrinks to 1,
                // so later iterations only read the in-use byte. That happens
                // to be the only byte inspected here, but the buffer handling
                // is order-dependent — do not reorder these calls.
                fileChannel.read( byteBuffer, i * recordSize );
                byteBuffer.flip();
                byte inUse = byteBuffer.get();
                byteBuffer.flip();
                if ( inUse != Record.IN_USE.byteValue() )
                {
                    // hole found, marking as reserved
                    byteBuffer.clear();
                    byteBuffer.put( Record.IN_USE.byteValue() ).putInt(
                        Record.RESERVED.intValue() );
                    byteBuffer.flip();
                    fileChannel.write( byteBuffer, i * recordSize );
                    byteBuffer.clear();
                }
                else
                {
                    highId = i;
                }
                // nextId();
            }
            // Truncate past the last record that was actually in use.
            highId++;
            fileChannel.truncate( highId * recordSize );
        }
        catch ( IOException e )
        {
            throw new UnderlyingStorageException(
                "Unable to rebuild id generator " + getStorageFileName(), e );
        }
        setHighId( highId );
        stringLogger.debug( "[" + getStorageFileName() + "] high id=" + getHighId() );
        // Reopen so the generator picks up the high id that was just set.
        closeIdGenerator();
        openIdGenerator();
    }
    @Override
    protected RelationshipTypeTokenRecord newRecord( int id )
    {
        return new RelationshipTypeTokenRecord( id );
    }
    @Override
    public int getRecordSize()
    {
        return RECORD_SIZE;
    }
    @Override
    public String getTypeDescriptor()
    {
        return TYPE_DESCRIPTOR;
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_store_RelationshipTypeTokenStore.java
|
1,296
|
SHORT( PropertyType.SHORT, 16, Short.class, short.class )
{
int getRequiredBits( short value )
{
long mask = 1L << maxBits - 1;
for ( int i = maxBits; i > 0; i--, mask >>= 1 )
{
if ( (mask & value) != 0 )
{
return i;
}
}
return 1;
}
@Override
int getRequiredBits( Object array, int arrayLength )
{
int highest = 1;
if ( isPrimitive( array ) )
{
for ( short value : (short[]) array )
{
highest = Math.max( getRequiredBits( value ), highest );
}
} else
{
for ( short value : (Short[]) array )
{
highest = Math.max( getRequiredBits( value ), highest );
}
}
return highest;
}
@Override
void writeAll( Object array, int length, int requiredBits, Bits result )
{
if ( isPrimitive( array ) )
{
for ( short value : (short[]) array )
{
result.put( value, requiredBits );
}
} else
{
for ( short value : (Short[]) array )
{
result.put( value, requiredBits );
}
}
}
@Override
Object createArray( int length, Bits bits, int requiredBits )
{
if ( length == 0 )
{
return EMPTY_SHORT_ARRAY;
}
final short[] result = new short[length];
for ( int i = 0; i < length; i++ )
{
result[i] = bits.getShort( requiredBits );
}
return result;
}
@Override
public Object createEmptyArray()
{
return EMPTY_SHORT_ARRAY;
}
},
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_store_ShortArray.java
|
1,297
|
CHAR( PropertyType.CHAR, 16, Character.class , char.class)
{
int getRequiredBits( char value )
{
long mask = 1L << maxBits - 1;
for ( int i = maxBits; i > 0; i--, mask >>= 1 )
{
if ( (mask & value) != 0 )
{
return i;
}
}
return 1;
}
@Override
int getRequiredBits( Object array, int arrayLength )
{
int highest = 1;
if ( isPrimitive( array ) )
{
for ( char value : (char[]) array )
{
highest = Math.max( getRequiredBits( value ), highest );
}
} else
{
for ( char value : (Character[]) array )
{
highest = Math.max( getRequiredBits( value ), highest );
}
}
return highest;
}
@Override
void writeAll( Object array, int length, int requiredBits, Bits result )
{
if ( isPrimitive( array ) )
{
for ( char value : (char[]) array )
{
result.put( value, requiredBits );
}
} else
{
for ( char value : (Character[]) array )
{
result.put( value, requiredBits );
}
}
}
@Override
Object createArray( int length, Bits bits, int requiredBits )
{
if ( length == 0 )
{
return EMPTY_CHAR_ARRAY;
}
final char[] result = new char[length];
for ( int i = 0; i < length; i++ )
{
result[i] = (char) bits.getShort( requiredBits );
}
return result;
}
@Override
public Object createEmptyArray()
{
return EMPTY_CHAR_ARRAY;
}
},
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_store_ShortArray.java
|
1,298
|
INT( PropertyType.INT, 32, Integer.class , int.class)
{
int getRequiredBits( int value )
{
long mask = 1L << maxBits - 1;
for ( int i = maxBits; i > 0; i--, mask >>= 1 )
{
if ( (mask & value) != 0 )
{
return i;
}
}
return 1;
}
@Override
int getRequiredBits( Object array, int arrayLength )
{
int highest = 1;
if ( isPrimitive( array ) )
{
for ( int value : (int[]) array )
{
highest = Math.max( getRequiredBits( value ), highest );
}
} else
{
for ( int value : (Integer[]) array )
{
highest = Math.max( getRequiredBits( value ), highest );
}
}
return highest;
}
@Override
void writeAll( Object array, int length, int requiredBits, Bits result )
{
if ( isPrimitive( array ) )
{
for ( int value : (int[]) array )
{
result.put( value, requiredBits );
}
} else
{
for ( int value : (Integer[]) array )
{
result.put( value, requiredBits );
}
}
}
@Override
Object createArray( int length, Bits bits, int requiredBits )
{
if ( length == 0 )
{
return EMPTY_INT_ARRAY;
}
final int[] result = new int[length];
for ( int i = 0; i < length; i++ )
{
result[i] = bits.getInt( requiredBits );
}
return result;
}
@Override
public Object createEmptyArray()
{
return EMPTY_INT_ARRAY;
}
},
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_store_ShortArray.java
|
1,299
|
/**
 * Iterates over all record ids of a store, from 0 up to (but excluding) the
 * store's high id. The high id is re-read lazily whenever the cached value is
 * exhausted, so records appended while iterating can still be visited.
 */
public class StoreIdIterator implements PrimitiveLongIterator
{
    private final RecordStore<?> store;
    private long highId;
    private long id;

    public StoreIdIterator( RecordStore<?> store )
    {
        this.store = store;
    }

    @Override
    public String toString()
    {
        return format( "%s[id=%s/%s; store=%s]", getClass().getSimpleName(), id, highId, store );
    }

    @Override
    public boolean hasNext()
    {
        if ( id >= highId )
        {
            // Cached high id exhausted: refresh it, since the store may have grown.
            highId = store.getHighId();
        }
        return id < highId;
    }

    @Override
    public long next()
    {
        if ( !hasNext() )
        {
            throw new NoSuchElementException(
                    format( "ID [%s] has exceeded the high ID [%s] of %s.", id, highId, store ) );
        }
        return id++;
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_store_StoreIdIterator.java
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.