Unnamed: 0
int64 0
6.7k
| func
stringlengths 12
89.6k
| target
bool 2
classes | project
stringlengths 45
151
|
|---|---|---|---|
300
|
// Tracked before/after state for schema-rule dynamic records, keyed by rule id.
// NOTE(review): this fragment duplicates the schemaRuleChanges field declared inside
// NeoStoreTransaction further down — presumably an extraction artifact; confirm before merging.
private final RecordChanges<Long, Collection<DynamicRecord>, SchemaRule> schemaRuleChanges = new RecordChanges<>(new RecordChanges.Loader<Long, Collection<DynamicRecord>, SchemaRule>() {
@Override
public Collection<DynamicRecord> newUnused(Long key, SchemaRule additionalData)
{
// Allocate fresh dynamic records sized to hold the serialized rule.
return getSchemaStore().allocateFrom(additionalData);
}
@Override
public Collection<DynamicRecord> load(Long key, SchemaRule additionalData)
{
return getSchemaStore().getRecords( key );
}
@Override
public void ensureHeavy(Collection<DynamicRecord> dynamicRecords)
{
// Force-load the payload of every dynamic record in the chain.
SchemaStore schemaStore = getSchemaStore();
for ( DynamicRecord record : dynamicRecords)
{
schemaStore.ensureHeavy(record);
}
}
@Override
public Collection<DynamicRecord> clone(Collection<DynamicRecord> dynamicRecords) {
// Deep-copy each record so the "before" snapshot is isolated from later mutation.
Collection<DynamicRecord> list = new ArrayList<>( dynamicRecords.size() );
for ( DynamicRecord record : dynamicRecords)
{
list.add( record.clone() );
}
return list;
}
}, true);
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_xa_NeoStoreTransaction.java
|
301
|
{
@Override
public RelationshipRecord newUnused( Long key, Void additionalData )
{
return new RelationshipRecord( key );
}
@Override
public RelationshipRecord load( Long key, Void additionalData )
{
return getRelationshipStore().getRecord( key );
}
@Override
public void ensureHeavy( RelationshipRecord record )
{
}
@Override
public RelationshipRecord clone(RelationshipRecord relationshipRecord) {
// Not needed because we don't manage before state for relationship records.
throw new UnsupportedOperationException("Unexpected call to clone on a relationshipRecord");
}
}, false );
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_xa_NeoStoreTransaction.java
|
302
|
{
@Override
public PropertyRecord newUnused( Long key, PrimitiveRecord additionalData )
{
PropertyRecord record = new PropertyRecord( key );
setOwner( record, additionalData );
return record;
}
private void setOwner( PropertyRecord record, PrimitiveRecord owner )
{
if ( owner != null )
{
owner.setIdTo( record );
}
}
@Override
public PropertyRecord load( Long key, PrimitiveRecord additionalData )
{
PropertyRecord record = getPropertyStore().getRecord( key.longValue() );
setOwner( record, additionalData );
return record;
}
@Override
public void ensureHeavy( PropertyRecord record )
{
for ( PropertyBlock block : record.getPropertyBlocks() )
{
getPropertyStore().ensureHeavy( block );
}
}
@Override
public PropertyRecord clone(PropertyRecord propertyRecord)
{
return propertyRecord.clone();
}
}, true );
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_xa_NeoStoreTransaction.java
|
303
|
/**
 * XA transaction over the Neo store: buffers record-level changes (nodes,
 * relationships, properties, tokens, schema rules), turns them into commands
 * at prepare time and applies them at commit time.
 */
public class NeoStoreTransaction extends XaTransaction
{
// Before/after tracking for node records; "true" = manage before-state for recovery.
private final RecordChanges<Long, NodeRecord, Void> nodeRecords =
new RecordChanges<>( new RecordChanges.Loader<Long, NodeRecord, Void>()
{
@Override
public NodeRecord newUnused( Long key, Void additionalData )
{
return new NodeRecord( key, Record.NO_NEXT_RELATIONSHIP.intValue(),
Record.NO_NEXT_PROPERTY.intValue() );
}
@Override
public NodeRecord load( Long key, Void additionalData )
{
return getNodeStore().getRecord( key );
}
@Override
public void ensureHeavy( NodeRecord record )
{
getNodeStore().ensureHeavy( record );
}
@Override
public NodeRecord clone(NodeRecord nodeRecord)
{
return nodeRecord.clone();
}
}, true );
// Property records carry their owning primitive (node/relationship) as additional data
// so ownership can be stamped onto loaded/new records.
private final RecordChanges<Long, PropertyRecord, PrimitiveRecord> propertyRecords =
new RecordChanges<>( new RecordChanges.Loader<Long, PropertyRecord, PrimitiveRecord>()
{
@Override
public PropertyRecord newUnused( Long key, PrimitiveRecord additionalData )
{
PropertyRecord record = new PropertyRecord( key );
setOwner( record, additionalData );
return record;
}
private void setOwner( PropertyRecord record, PrimitiveRecord owner )
{
if ( owner != null )
{
owner.setIdTo( record );
}
}
@Override
public PropertyRecord load( Long key, PrimitiveRecord additionalData )
{
PropertyRecord record = getPropertyStore().getRecord( key.longValue() );
setOwner( record, additionalData );
return record;
}
@Override
public void ensureHeavy( PropertyRecord record )
{
for ( PropertyBlock block : record.getPropertyBlocks() )
{
getPropertyStore().ensureHeavy( block );
}
}
@Override
public PropertyRecord clone(PropertyRecord propertyRecord)
{
return propertyRecord.clone();
}
}, true );
// Relationship records: no before-state managed ("false"), hence clone() is unsupported.
private final RecordChanges<Long, RelationshipRecord, Void> relRecords =
new RecordChanges<>( new RecordChanges.Loader<Long, RelationshipRecord, Void>()
{
@Override
public RelationshipRecord newUnused( Long key, Void additionalData )
{
return new RelationshipRecord( key );
}
@Override
public RelationshipRecord load( Long key, Void additionalData )
{
return getRelationshipStore().getRecord( key );
}
@Override
public void ensureHeavy( RelationshipRecord record )
{
}
@Override
public RelationshipRecord clone(RelationshipRecord relationshipRecord) {
// Not needed because we don't manage before state for relationship records.
throw new UnsupportedOperationException("Unexpected call to clone on a relationshipRecord");
}
}, false );
// Schema rules are stored as chains of dynamic records, tracked per rule id.
private final RecordChanges<Long, Collection<DynamicRecord>, SchemaRule> schemaRuleChanges = new RecordChanges<>(new RecordChanges.Loader<Long, Collection<DynamicRecord>, SchemaRule>() {
@Override
public Collection<DynamicRecord> newUnused(Long key, SchemaRule additionalData)
{
return getSchemaStore().allocateFrom(additionalData);
}
@Override
public Collection<DynamicRecord> load(Long key, SchemaRule additionalData)
{
return getSchemaStore().getRecords( key );
}
@Override
public void ensureHeavy(Collection<DynamicRecord> dynamicRecords)
{
SchemaStore schemaStore = getSchemaStore();
for ( DynamicRecord record : dynamicRecords)
{
schemaStore.ensureHeavy(record);
}
}
@Override
public Collection<DynamicRecord> clone(Collection<DynamicRecord> dynamicRecords) {
Collection<DynamicRecord> list = new ArrayList<>( dynamicRecords.size() );
for ( DynamicRecord record : dynamicRecords)
{
list.add( record.clone() );
}
return list;
}
}, true);
// Token records created by this transaction; lazily initialized (null == none).
private Map<Integer, RelationshipTypeTokenRecord> relationshipTypeTokenRecords;
private Map<Integer, LabelTokenRecord> labelTokenRecords;
private Map<Integer, PropertyKeyTokenRecord> propertyKeyTokenRecords;
private RecordChanges<Long, NeoStoreRecord, Void> neoStoreRecord;
// Commands produced at prepare time (or injected during recovery).
// nodeCommands is a TreeMap so iteration is in node-id order.
private final Map<Long, Command.NodeCommand> nodeCommands = new TreeMap<>();
private final ArrayList<Command.PropertyCommand> propCommands = new ArrayList<>();
private final ArrayList<Command.RelationshipCommand> relCommands = new ArrayList<>();
private final ArrayList<Command.SchemaRuleCommand> schemaRuleCommands = new ArrayList<>();
private ArrayList<Command.RelationshipTypeTokenCommand> relationshipTypeTokenCommands;
private ArrayList<Command.LabelTokenCommand> labelTokenCommands;
private ArrayList<Command.PropertyKeyTokenCommand> propertyKeyTokenCommands;
private Command.NeoStoreCommand neoStoreCommand;
// Lifecycle flags guarding the prepare -> commit ordering.
private boolean committed = false;
private boolean prepared = false;
// Collaborators, all injected via the constructor.
private final long lastCommittedTxWhenTransactionStarted;
private final TransactionState state;
private final CacheAccessBackDoor cacheAccess;
private final IndexingService indexes;
private final NeoStore neoStore;
private final LabelScanStore labelScanStore;
private final IntegrityValidator integrityValidator;
private final KernelTransactionImplementation kernelTransaction;
private final LockService locks;
/**
 * @param lastCommittedTxWhenTransactionStarted is the highest committed transaction id when this transaction
 * begun. No operations in this transaction are allowed to have
 * taken place before that transaction id. This is used by
 * constraint validation - if a constraint was not online when this
 * transaction begun, it will be verified during prepare. If you are
 * writing code against this API and are unsure about what to set
 * this value to, 0 is a safe choice. That will ensure all
 * constraints are checked.
 * @param kernelTransaction is the vanilla sauce to the WriteTransaction apple pie.
 */
NeoStoreTransaction( long lastCommittedTxWhenTransactionStarted, XaLogicalLog log,
TransactionState state, NeoStore neoStore, CacheAccessBackDoor cacheAccess,
IndexingService indexingService, LabelScanStore labelScanStore,
IntegrityValidator integrityValidator, KernelTransactionImplementation kernelTransaction,
LockService locks )
{
// Pure dependency capture; no I/O or store access happens here.
super( log, state );
this.lastCommittedTxWhenTransactionStarted = lastCommittedTxWhenTransactionStarted;
this.neoStore = neoStore;
this.state = state;
this.cacheAccess = cacheAccess;
this.indexes = indexingService;
this.labelScanStore = labelScanStore;
this.integrityValidator = integrityValidator;
this.kernelTransaction = kernelTransaction;
this.locks = locks;
}
/**
 * Exposes the kernel transaction that owns this store transaction.
 * <p>
 * This accessor is a known smell left over from the kernel refactorings:
 * both NeoStoreTransaction and KernelTransaction are publicly consumable
 * while one owns the other. Eventually KernelTransaction should subsume the
 * responsibilities currently split between WriteTransaction and
 * ReadTransaction, at which point this escape hatch goes away.
 *
 * @return the owning {@link KernelTransactionImplementation}, never null.
 */
public KernelTransactionImplementation kernelTransaction()
{
return this.kernelTransaction;
}
/**
 * A transaction is read-only when it produced no record changes, no token
 * creations and its kernel transaction is read-only. A recovered transaction
 * is judged by its injected commands instead of its record-change tracking.
 */
@Override
public boolean isReadOnly()
{
if ( isRecovered() )
{
return nodeCommands.isEmpty() && propCommands.isEmpty()
&& relCommands.isEmpty() && schemaRuleCommands.isEmpty()
&& relationshipTypeTokenCommands == null
&& labelTokenCommands == null
&& propertyKeyTokenCommands == null
&& kernelTransaction.isReadOnly();
}
return nodeRecords.changeSize() == 0 && relRecords.changeSize() == 0
&& schemaRuleChanges.changeSize() == 0 && propertyRecords.changeSize() == 0
&& relationshipTypeTokenRecords == null && labelTokenRecords == null
&& propertyKeyTokenRecords == null
&& kernelTransaction.isReadOnly();
}
// Make this accessible in this package
// (widens the effective visibility of XaTransaction.setRecovered for collaborators here).
@Override
protected void setRecovered()
{
super.setRecovered();
}
// Intentionally a no-op: commands are gathered in bulk inside doPrepare()
// rather than being added one at a time through this hook.
@Override
public void doAddCommand( XaCommand command )
{
// override
}
/**
 * Turns all buffered record changes into commands, validates them, and hands
 * them to the logical log via addCommand(). May only run once, and only on a
 * transaction that has not yet committed.
 *
 * @throws XAException if the transaction is already committed or prepared.
 */
@Override
protected void doPrepare() throws XAException
{
if ( committed )
{
throw new XAException( "Cannot prepare committed transaction["
+ getIdentifier() + "]" );
}
if ( prepared )
{
throw new XAException( "Cannot prepare prepared transaction["
+ getIdentifier() + "]" );
}
kernelTransaction.prepare();
prepared = true;
// Pre-size the command list; the assert further down checks this count stays exact.
int noOfCommands = nodeRecords.changeSize() +
relRecords.changeSize() +
propertyRecords.changeSize() +
schemaRuleChanges.changeSize() +
(propertyKeyTokenRecords != null ? propertyKeyTokenRecords.size() : 0) +
(relationshipTypeTokenRecords != null ? relationshipTypeTokenRecords.size() : 0) +
(labelTokenRecords != null ? labelTokenRecords.size() : 0);
List<Command> commands = new ArrayList<>( noOfCommands );
if ( relationshipTypeTokenRecords != null )
{
relationshipTypeTokenCommands = new ArrayList<>();
for ( RelationshipTypeTokenRecord record : relationshipTypeTokenRecords.values() )
{
Command.RelationshipTypeTokenCommand command =
new Command.RelationshipTypeTokenCommand(
neoStore.getRelationshipTypeStore(), record );
relationshipTypeTokenCommands.add( command );
commands.add( command );
}
}
if ( labelTokenRecords != null )
{
labelTokenCommands = new ArrayList<>();
for ( LabelTokenRecord record : labelTokenRecords.values() )
{
Command.LabelTokenCommand command =
new Command.LabelTokenCommand(
neoStore.getLabelTokenStore(), record );
labelTokenCommands.add( command );
commands.add( command );
}
}
for ( RecordChange<Long, NodeRecord, Void> change : nodeRecords.changes() )
{
NodeRecord record = change.forReadingLinkage();
// Validation happens at prepare time so a broken record never reaches the log.
integrityValidator.validateNodeRecord( record );
Command.NodeCommand command = new Command.NodeCommand(
neoStore.getNodeStore(), change.getBefore(), record );
nodeCommands.put( record.getId(), command );
commands.add( command );
}
for ( RecordChange<Long, RelationshipRecord, Void> record : relRecords.changes() )
{
Command.RelationshipCommand command = new Command.RelationshipCommand(
neoStore.getRelationshipStore(), record.forReadingLinkage() );
relCommands.add( command );
commands.add( command );
}
if ( neoStoreRecord != null )
{
// The neo-store command bypasses the counted list and is added directly;
// it is not part of the noOfCommands bookkeeping above.
for ( RecordChange<Long, NeoStoreRecord, Void> change : neoStoreRecord.changes() )
{
neoStoreCommand = new Command.NeoStoreCommand( neoStore, change.forReadingData() );
addCommand( neoStoreCommand );
}
}
if ( propertyKeyTokenRecords != null )
{
propertyKeyTokenCommands = new ArrayList<>();
for ( PropertyKeyTokenRecord record : propertyKeyTokenRecords.values() )
{
Command.PropertyKeyTokenCommand command =
new Command.PropertyKeyTokenCommand(
neoStore.getPropertyStore().getPropertyKeyTokenStore(), record );
propertyKeyTokenCommands.add( command );
commands.add( command );
}
}
for ( RecordChange<Long, PropertyRecord, PrimitiveRecord> change : propertyRecords.changes() )
{
Command.PropertyCommand command = new Command.PropertyCommand(
neoStore.getPropertyStore(), change.getBefore(), change.forReadingLinkage() );
propCommands.add( command );
commands.add( command );
}
for ( RecordChange<Long, Collection<DynamicRecord>, SchemaRule> change : schemaRuleChanges.changes() )
{
integrityValidator.validateSchemaRule( change.getAdditionalData() );
// -1 tx id placeholder: the real commit tx id is stamped in applyCommit().
Command.SchemaRuleCommand command = new Command.SchemaRuleCommand(
neoStore,
neoStore.getSchemaStore(),
indexes,
change.getBefore(),
change.forChangingData(),
change.getAdditionalData(),
-1 );
schemaRuleCommands.add( command );
commands.add( command );
}
assert commands.size() == noOfCommands : "Expected " + noOfCommands
+ " final commands, got "
+ commands.size() + " instead";
intercept( commands );
for ( Command command : commands )
{
addCommand( command );
}
integrityValidator.validateTransactionStartKnowledge( lastCommittedTxWhenTransactionStarted );
}
/**
 * Hook for subclasses to inspect/augment the command list before it is
 * written to the log. Default is a no-op.
 */
protected void intercept( List<Command> commands )
{
// default no op
}
/**
 * Routes a command read back from the logical log (during recovery) into the
 * matching per-type collection, lazily creating the token-command lists.
 *
 * @throws IllegalArgumentException on a command type this transaction doesn't know.
 */
@Override
protected void injectCommand( XaCommand xaCommand )
{
if ( xaCommand instanceof Command.NodeCommand )
{
NodeCommand nodeCommand = (Command.NodeCommand) xaCommand;
nodeCommands.put( nodeCommand.getKey(), nodeCommand );
}
else if ( xaCommand instanceof Command.RelationshipCommand )
{
relCommands.add( (Command.RelationshipCommand) xaCommand );
}
else if ( xaCommand instanceof Command.PropertyCommand )
{
propCommands.add( (Command.PropertyCommand) xaCommand );
}
else if ( xaCommand instanceof Command.PropertyKeyTokenCommand )
{
if ( propertyKeyTokenCommands == null )
{
propertyKeyTokenCommands = new ArrayList<>();
}
propertyKeyTokenCommands.add( (Command.PropertyKeyTokenCommand) xaCommand );
}
else if ( xaCommand instanceof Command.RelationshipTypeTokenCommand )
{
if ( relationshipTypeTokenCommands == null )
{
relationshipTypeTokenCommands = new ArrayList<>();
}
relationshipTypeTokenCommands.add( (Command.RelationshipTypeTokenCommand) xaCommand );
}
else if ( xaCommand instanceof Command.LabelTokenCommand )
{
if ( labelTokenCommands == null )
{
labelTokenCommands = new ArrayList<>();
}
labelTokenCommands.add( (Command.LabelTokenCommand) xaCommand );
}
else if ( xaCommand instanceof Command.NeoStoreCommand )
{
// At most one neo-store command per transaction is expected.
assert neoStoreCommand == null;
neoStoreCommand = (Command.NeoStoreCommand) xaCommand;
}
else if ( xaCommand instanceof Command.SchemaRuleCommand )
{
schemaRuleCommands.add( (Command.SchemaRuleCommand) xaCommand );
}
else
{
throw new IllegalArgumentException( "Unknown command " + xaCommand );
}
}
/**
 * Undoes this transaction before commit: frees ids handed out for created
 * records (when the store allows it) and evicts every touched entity from
 * the cache. State is always cleared, even if an undo step throws.
 *
 * @throws XAException if the transaction already (partially) committed.
 */
@Override
public void doRollback() throws XAException
{
if ( committed )
{
throw new XAException( "Cannot rollback partialy commited "
+ "transaction[" + getIdentifier() + "]. Recover and "
+ "commit" );
}
try
{
// Whether ids may be returned to the id generators is a store-level decision.
boolean freeIds = neoStore.freeIdsDuringRollback();
if ( relationshipTypeTokenRecords != null )
{
for ( RelationshipTypeTokenRecord record : relationshipTypeTokenRecords.values() )
{
if ( record.isCreated() )
{
if ( freeIds )
{
getRelationshipTypeStore().freeId( record.getId() );
}
// Also free the dynamic records that held the token name.
for ( DynamicRecord dynamicRecord : record.getNameRecords() )
{
if ( dynamicRecord.isCreated() )
{
getRelationshipTypeStore().freeId(
(int) dynamicRecord.getId() );
}
}
}
removeRelationshipTypeFromCache( record.getId() );
}
}
for ( RecordChange<Long, NodeRecord, Void> change : nodeRecords.changes() )
{
NodeRecord record = change.forReadingLinkage();
if ( freeIds && record.isCreated() )
{
getNodeStore().freeId( record.getId() );
}
removeNodeFromCache( record.getId() );
}
for ( RecordChange<Long, RelationshipRecord, Void> change : relRecords.changes() )
{
long id = change.getKey();
RelationshipRecord record = change.forReadingLinkage();
if ( freeIds && change.isCreated() )
{
getRelationshipStore().freeId( id );
}
removeRelationshipFromCache( id );
// Keep cached neighbour nodes' rel-chain positions consistent.
patchDeletedRelationshipNodes( id, record.getFirstNode(), record.getFirstNextRel(),
record.getSecondNode(), record.getSecondNextRel() );
}
if ( neoStoreRecord != null )
{
removeGraphPropertiesFromCache();
}
if ( propertyKeyTokenRecords != null )
{
for ( PropertyKeyTokenRecord record : propertyKeyTokenRecords.values() )
{
if ( record.isCreated() )
{
if ( freeIds )
{
getPropertyStore().getPropertyKeyTokenStore().freeId( record.getId() );
}
for ( DynamicRecord dynamicRecord : record.getNameRecords() )
{
if ( dynamicRecord.isCreated() )
{
getPropertyStore().getPropertyKeyTokenStore().freeId(
(int) dynamicRecord.getId() );
}
}
}
}
}
for ( RecordChange<Long, PropertyRecord, PrimitiveRecord> change : propertyRecords.changes() )
{
PropertyRecord record = change.forReadingLinkage();
// Evict the owning primitive, whichever kind it is.
if ( record.getNodeId() != -1 )
{
removeNodeFromCache( record.getNodeId() );
}
else if ( record.getRelId() != -1 )
{
removeRelationshipFromCache( record.getRelId() );
}
if ( record.isCreated() )
{
if ( freeIds )
{
getPropertyStore().freeId( record.getId() );
}
// String/array values live in separate dynamic stores with their own id spaces.
for ( PropertyBlock block : record.getPropertyBlocks() )
{
for ( DynamicRecord dynamicRecord : block.getValueRecords() )
{
if ( dynamicRecord.isCreated() )
{
if ( dynamicRecord.getType() == PropertyType.STRING.intValue() )
{
getPropertyStore().freeStringBlockId(
dynamicRecord.getId() );
}
else if ( dynamicRecord.getType() == PropertyType.ARRAY.intValue() )
{
getPropertyStore().freeArrayBlockId(
dynamicRecord.getId() );
}
else
{
throw new InvalidRecordException(
"Unknown type on " + dynamicRecord );
}
}
}
}
}
}
for ( RecordChange<Long, Collection<DynamicRecord>, SchemaRule> records : schemaRuleChanges.changes() )
{
// NOTE(review): `id` captures the first record's id but is never used after
// the loop — looks vestigial; confirm before removing.
long id = -1;
for ( DynamicRecord record : records.forChangingData() )
{
if ( id == -1 )
{
id = record.getId();
}
if ( freeIds && record.isCreated() )
{
getSchemaStore().freeId( record.getId() );
}
}
}
}
finally
{
clear();
}
}
// Thin delegates to the cache back door, kept as named methods for readability
// at the call sites in doRollback()/applyCommit().
private void removeRelationshipTypeFromCache( int id )
{
cacheAccess.removeRelationshipTypeFromCache( id );
}
// Patches cached start/end nodes of a deleted relationship so their
// relationship-chain positions stay valid.
private void patchDeletedRelationshipNodes( long id, long firstNodeId, long firstNodeNextRelId, long secondNodeId,
long secondNextRelId )
{
cacheAccess.patchDeletedRelationshipNodes( id, firstNodeId, firstNodeNextRelId, secondNodeId, secondNextRelId );
}
private void removeRelationshipFromCache( long id )
{
cacheAccess.removeRelationshipFromCache( id );
}
private void removeNodeFromCache( long id )
{
cacheAccess.removeNodeFromCache( id );
}
private void removeGraphPropertiesFromCache()
{
cacheAccess.removeGraphPropertiesFromCache();
}
/**
 * Loads the relationship-type token with the given id and publishes it to the
 * cache. When recovering, the store read is forced past id-generator checks
 * (the {@code true} flag) because the token may not be indexed yet.
 *
 * Fix: removed a stray unconditional {@code setRecovered()} call that forced
 * the whole transaction into recovered state as a side effect and made the
 * {@code isRecovered()} ternary below unconditionally take the first branch —
 * inconsistent with the sibling addLabel()/addPropertyKey() methods.
 */
private void addRelationshipType( int id )
{
Token type = isRecovered() ?
neoStore.getRelationshipTypeStore().getToken( id, true ) :
neoStore.getRelationshipTypeStore().getToken( id );
cacheAccess.addRelationshipTypeToken( type );
}
// Loads the label token and publishes it to the cache; during recovery the
// read is forced (true flag) since the token store may not be fully rebuilt.
private void addLabel( int id )
{
Token labelId = isRecovered() ?
neoStore.getLabelTokenStore().getToken( id, true ) :
neoStore.getLabelTokenStore().getToken( id );
cacheAccess.addLabelToken( labelId );
}
// Same pattern as addLabel(), for property-key tokens.
private void addPropertyKey( int id )
{
Token index = isRecovered() ?
neoStore.getPropertyStore().getPropertyKeyTokenStore().getToken( id, true ) :
neoStore.getPropertyStore().getPropertyKeyTokenStore().getToken( id );
cacheAccess.addPropertyKeyToken( index );
}
/**
 * Commits the prepared transaction. A recovered transaction is applied under
 * the store's recovered status; a live one must be the next transaction in
 * sequence after the store's last committed tx.
 *
 * @throws XAException if commit is attempted without a prior prepare.
 */
@Override
public void doCommit() throws XAException
{
if ( !isRecovered() && !prepared )
{
throw new XAException( "Cannot commit non prepared transaction[" + getIdentifier() + "]" );
}
if ( isRecovered() )
{
// Temporarily flip the store into recovery mode, restoring the prior
// status afterwards even if applyCommit throws.
boolean wasInRecovery = neoStore.isInRecoveryMode();
neoStore.setRecoveredStatus( true );
try
{
applyCommit( true );
return;
}
finally
{
neoStore.setRecoveredStatus( wasInRecovery );
}
}
// Guard against out-of-order commits: tx ids must be strictly sequential.
if ( getCommitTxId() != neoStore.getLastCommittedTx() + 1 )
{
throw new RuntimeException( "Tx id: " + getCommitTxId() +
" not next transaction (" + neoStore.getLastCommittedTx() + ")" );
}
applyCommit( false );
}
/**
 * Applies all commands to the stores in a fixed order: tokens first, then
 * primitives (created/modified/deleted), then label-scan and index updates,
 * then schema rules, and finally the neo-store record. Entity locks taken
 * along the way are released when the LockGroup closes.
 *
 * @param isRecovered true when re-applying commands read from the log, which
 *                    additionally repopulates caches and id generators.
 */
private void applyCommit( boolean isRecovered )
{
try ( LockGroup lockGroup = new LockGroup() )
{
committed = true;
CommandSorter sorter = new CommandSorter();
// reltypes
if ( relationshipTypeTokenCommands != null )
{
java.util.Collections.sort( relationshipTypeTokenCommands, sorter );
for ( Command.RelationshipTypeTokenCommand command : relationshipTypeTokenCommands )
{
command.execute();
if ( isRecovered )
{
addRelationshipType( (int) command.getKey() );
}
}
}
// label keys
if ( labelTokenCommands != null )
{
java.util.Collections.sort( labelTokenCommands, sorter );
for ( Command.LabelTokenCommand command : labelTokenCommands )
{
command.execute();
if ( isRecovered )
{
addLabel( (int) command.getKey() );
}
}
}
// property keys
if ( propertyKeyTokenCommands != null )
{
java.util.Collections.sort( propertyKeyTokenCommands, sorter );
for ( Command.PropertyKeyTokenCommand command : propertyKeyTokenCommands )
{
command.execute();
if ( isRecovered )
{
addPropertyKey( (int) command.getKey() );
}
}
}
// primitives
// nodeCommands needs no explicit sort: it is a TreeMap, iterated in key order.
java.util.Collections.sort( relCommands, sorter );
java.util.Collections.sort( propCommands, sorter );
executeCreated( lockGroup, isRecovered, propCommands, relCommands, nodeCommands.values() );
executeModified( lockGroup, isRecovered, propCommands, relCommands, nodeCommands.values() );
executeDeleted( lockGroup, propCommands, relCommands, nodeCommands.values() );
// property change set for index updates
Collection<NodeLabelUpdate> labelUpdates = gatherLabelUpdatesSortedByNodeId();
if ( !labelUpdates.isEmpty() )
{
updateLabelScanStore( labelUpdates );
cacheAccess.applyLabelUpdates( labelUpdates );
}
if ( !nodeCommands.isEmpty() || !propCommands.isEmpty() )
{
indexes.updateIndexes( new LazyIndexUpdates(
getNodeStore(), getPropertyStore(),
groupedNodePropertyCommands( propCommands ), new HashMap<>( nodeCommands ) ) );
}
// schema rules. Execute these after generating the property updates so. If executed
// before and we've got a transaction that sets properties/labels as well as creating an index
// we might end up with this corner-case:
// 1) index rule created and index population job started
// 2) index population job processes some nodes, but doesn't complete
// 3) we gather up property updates and send those to the indexes. The newly created population
// job might get those as updates
// 4) the population job will apply those updates as added properties, and might end up with duplicate
// entries for the same property
for ( SchemaRuleCommand command : schemaRuleCommands )
{
command.setTxId( getCommitTxId() );
command.execute();
switch ( command.getMode() )
{
case DELETE:
cacheAccess.removeSchemaRuleFromCache( command.getKey() );
break;
default:
cacheAccess.addSchemaRule( command.getSchemaRule() );
}
}
if ( neoStoreCommand != null )
{
neoStoreCommand.execute();
if ( isRecovered )
{
removeGraphPropertiesFromCache();
}
}
if ( !isRecovered )
{
updateFirstRelationships();
// Update of the cached primitives will happen when calling commitChangesToCache,
// which should be done after applyCommit and after the XaResourceManager monitor
// has been released.
}
neoStore.setLastCommittedTx( getCommitTxId() );
if ( isRecovered )
{
neoStore.updateIdGenerators();
}
}
finally
{
// clear() will be called in commitChangesToCache outside of the XaResourceManager monitor
}
}
/**
 * Buckets node-attached property commands by their owning node id.
 * Commands whose after-record has no node set (relationship/graph properties)
 * are skipped, since only node indexes consume these updates.
 *
 * @param propCommands property commands of this transaction.
 * @return one list of commands per distinct node id.
 */
private Collection<List<PropertyCommand>> groupedNodePropertyCommands( Iterable<PropertyCommand> propCommands )
{
// A bit too expensive data structure, but don't know off the top of my head how to make it better.
Map<Long, List<PropertyCommand>> byNode = new HashMap<>();
for ( PropertyCommand command : propCommands )
{
PropertyRecord after = command.getAfter();
if ( after.isNodeSet() )
{
Long nodeId = after.getNodeId();
List<PropertyCommand> bucket = byNode.get( nodeId );
if ( bucket == null )
{
bucket = new ArrayList<>();
byNode.put( nodeId, bucket );
}
bucket.add( command );
}
}
return byNode.values();
}
/**
 * Pushes the transaction's copy-on-write primitive state into the cache and
 * then releases all transaction state. Called after applyCommit(), outside
 * the XaResourceManager monitor. State is cleared even on failure.
 */
public void commitChangesToCache()
{
try
{
if ( isRecovered() )
{
return; // recovery already applied its cache effects in applyCommit
}
state.commitCows(); // updates the cached primitives
}
finally
{
clear();
}
}
/**
 * Builds one NodeLabelUpdate per node whose label field changed, sorted by
 * node id. Nodes whose inlined label field is bit-identical before and after,
 * or whose label sets were never loaded, are skipped.
 */
private Collection<NodeLabelUpdate> gatherLabelUpdatesSortedByNodeId()
{
List<NodeLabelUpdate> labelUpdates = new ArrayList<>();
for ( NodeCommand nodeCommand : nodeCommands.values() )
{
NodeLabels labelFieldBefore = parseLabelsField( nodeCommand.getBefore() );
NodeLabels labelFieldAfter = parseLabelsField( nodeCommand.getAfter() );
// Fast path: both inlined and unchanged means no label delta at all.
if ( labelFieldBefore.isInlined() && labelFieldAfter.isInlined()
&& nodeCommand.getBefore().getLabelField() == nodeCommand.getAfter().getLabelField() )
{
continue;
}
long[] labelsBefore = labelFieldBefore.getIfLoaded();
long[] labelsAfter = labelFieldAfter.getIfLoaded();
if ( labelsBefore == null || labelsAfter == null )
{
// Label sets not loaded in this transaction; nothing to report.
continue;
}
labelUpdates.add( NodeLabelUpdate.labelChanges( nodeCommand.getKey(), labelsBefore, labelsAfter ) );
}
Collections.sort(labelUpdates, new NodeLabelUpdateNodeIdComparator());
return labelUpdates;
}
/**
 * Writes all label updates to the label scan store in a single writer session.
 *
 * @throws UnderlyingStorageException wrapping any IOException from the store.
 */
private void updateLabelScanStore( Iterable<NodeLabelUpdate> labelUpdates )
{
try ( LabelScanWriter writer = labelScanStore.newWriter() )
{
for ( NodeLabelUpdate update : labelUpdates )
{
writer.write( update );
}
}
catch ( IOException e )
{
throw new UnderlyingStorageException( e );
}
}
/**
 * Computes the delta between a node's label set before and after a change.
 * Both input arrays must be sorted ascending (ids are sorted in the store),
 * since membership is tested via binary search.
 */
static class LabelChangeSummary
{
private static final long[] NO_LABELS = new long[0];
private final long[] addedLabels;
private final long[] removedLabels;
LabelChangeSummary( long[] labelsBefore, long[] labelsAfter )
{
// Added = present after but not before; removed = present before but not after.
this.addedLabels = difference( labelsAfter, labelsBefore );
this.removedLabels = difference( labelsBefore, labelsAfter );
}
// Returns every id in candidates that is absent from (sorted) reference,
// sharing the NO_LABELS singleton for the empty result.
private static long[] difference( long[] candidates, long[] reference )
{
long[] result = new long[candidates.length];
int cursor = 0;
for ( long candidate : candidates )
{
if ( binarySearch( reference, candidate ) < 0 )
{
result[cursor++] = candidate;
}
}
if ( cursor == 0 )
{
return NO_LABELS;
}
return cursor == result.length ? result : copyOf( result, cursor );
}
public boolean hasAddedLabels()
{
return addedLabels.length > 0;
}
public boolean hasRemovedLabels()
{
return removedLabels.length > 0;
}
public long[] getAddedLabels()
{
return addedLabels;
}
public long[] getRemovedLabels()
{
return removedLabels;
}
}
// Propagates each changed node's first-relationship/first-property pointers
// into the transaction state, for later cache commit.
private void updateFirstRelationships()
{
for ( RecordChange<Long, NodeRecord, Void> change : nodeRecords.changes() )
{
NodeRecord record = change.forReadingLinkage();
state.setFirstIds( record.getId(), record.getNextRel(), record.getNextProp() );
}
}
/**
 * Executes every CREATE-mode command across the given collections, taking an
 * entity write lock first and optionally evicting the entity from the cache.
 */
@SafeVarargs
private final void executeCreated( LockGroup lockGroup, boolean removeFromCache,
Collection<? extends Command>... commands )
{
for ( Collection<? extends Command> batch : commands )
{
for ( Command command : batch )
{
if ( command.getMode() != CREATE )
{
continue;
}
lockEntity( lockGroup, command );
command.execute();
if ( removeFromCache )
{
command.removeFromCache( cacheAccess );
}
}
}
}
/**
 * Executes every UPDATE-mode command across the given collections, taking an
 * entity write lock first and optionally evicting the entity from the cache.
 */
@SafeVarargs
private final void executeModified( LockGroup lockGroup, boolean removeFromCache,
Collection<? extends Command>... commands )
{
for ( Collection<? extends Command> batch : commands )
{
for ( Command command : batch )
{
if ( command.getMode() != UPDATE )
{
continue;
}
lockEntity( lockGroup, command );
command.execute();
if ( removeFromCache )
{
command.removeFromCache( cacheAccess );
}
}
}
}
/**
 * Executes every DELETE-mode command across the given collections. Cache
 * eviction is unconditional here: we always update the disk image and then
 * always invalidate the cache — for relationships this is also expected to
 * patch the relChainPosition in the start and end NodeImpls (if they actually
 * are in cache).
 */
@SafeVarargs
private final void executeDeleted( LockGroup lockGroup, Collection<? extends Command>... commands )
{
for ( Collection<? extends Command> batch : commands )
{
for ( Command command : batch )
{
if ( command.getMode() != DELETE )
{
continue;
}
lockEntity( lockGroup, command );
command.execute();
command.removeFromCache( cacheAccess );
}
}
}
/**
 * Takes the write lock(s) an individual command needs before execution.
 * Node commands lock their own node; property commands lock the owning node
 * (when one is set) so concurrent readers never see a half-applied entity.
 */
private void lockEntity( LockGroup lockGroup, Command command )
{
if ( command instanceof NodeCommand )
{
lockGroup.add( locks.acquireNodeLock( command.getKey(), LockService.LockType.WRITE_LOCK ) );
}
if ( command instanceof Command.PropertyCommand )
{
long ownerNodeId = ((Command.PropertyCommand) command).getNodeId();
if ( ownerNodeId != -1 )
{
lockGroup.add( locks.acquireNodeLock( ownerNodeId, LockService.LockType.WRITE_LOCK ) );
}
}
}
/**
 * Releases all per-transaction state: record-change tracking, token records
 * and all command collections.
 *
 * Fix: also reset {@code labelTokenRecords}. It was the only token-record map
 * not nulled here (its siblings relationshipTypeTokenRecords and
 * propertyKeyTokenRecords both were), so a cleared transaction would retain
 * stale label-token records and e.g. isReadOnly() would keep reporting writes.
 */
private void clear()
{
nodeRecords.clear();
propertyRecords.clear();
relRecords.clear();
schemaRuleChanges.clear();
relationshipTypeTokenRecords = null;
labelTokenRecords = null;
propertyKeyTokenRecords = null;
neoStoreRecord = null;
nodeCommands.clear();
propCommands.clear();
propertyKeyTokenCommands = null;
relCommands.clear();
schemaRuleCommands.clear();
relationshipTypeTokenCommands = null;
labelTokenCommands = null;
neoStoreCommand = null;
}
// Convenience accessors for the individual stores inside the NeoStore.
private RelationshipTypeTokenStore getRelationshipTypeStore()
{
return neoStore.getRelationshipTypeStore();
}
private LabelTokenStore getLabelTokenStore()
{
return neoStore.getLabelTokenStore();
}
// Number of relationships loaded per chain-traversal batch.
private int getRelGrabSize()
{
return neoStore.getRelationshipGrabSize();
}
private NodeStore getNodeStore()
{
return neoStore.getNodeStore();
}
private SchemaStore getSchemaStore()
{
return neoStore.getSchemaStore();
}
private RelationshipStore getRelationshipStore()
{
return neoStore.getRelationshipStore();
}
private PropertyStore getPropertyStore()
{
return neoStore.getPropertyStore();
}
/**
 * Tries to load the light node with the given id.
 *
 * @param nodeId The id of the node to load.
 * @return the node record if it can be found, or {@code null} if the record
 * is invalid (e.g. not in use).
 */
public NodeRecord nodeLoadLight( long nodeId )
{
try
{
return nodeRecords.getOrLoad( nodeId, null ).forReadingLinkage();
}
catch ( InvalidRecordException e )
{
// Record missing or not in use — treated as "not found" rather than an error.
return null;
}
}
/**
 * Tries to load the light relationship with the given id, returns the
 * record on success.
 *
 * @param id The id of the relationship to load.
 * @return The light RelationshipRecord if it was found, null otherwise.
 */
public RelationshipRecord relLoadLight( long id )
{
try
{
return relRecords.getOrLoad( id, null ).forReadingLinkage();
}
catch ( InvalidRecordException e )
{
// Record missing or not in use — treated as "not found" rather than an error.
return null;
}
}
/**
 * Deletes a node by its id, returning its properties which are now removed.
 *
 * @param nodeId The id of the node to delete.
 * @return The properties of the node that were removed during the delete.
 * @throws IllegalStateException if the node is already deleted.
 */
public ArrayMap<Integer, DefinedProperty> nodeDelete( long nodeId )
{
NodeRecord nodeRecord = nodeRecords.getOrLoad( nodeId, null ).forChangingData();
if ( !nodeRecord.inUse() )
{
throw new IllegalStateException( "Unable to delete Node[" + nodeId +
"] since it has already been deleted." );
}
nodeRecord.setInUse( false );
// Clear the label field (and any dynamic label records) along with the node itself.
nodeRecord.setLabelField( 0, Collections.<DynamicRecord>emptyList() );
return getAndDeletePropertyChain( nodeRecord );
}
/**
 * Deletes a relationship by its id, returning its properties which are now
 * removed. It is assumed that the nodes it connects have already been
 * deleted in this
 * transaction.
 *
 * @param id The id of the relationship to delete.
 * @return The properties of the relationship that were removed during the
 * delete.
 * @throws IllegalStateException if the relationship is already deleted.
 */
public ArrayMap<Integer, DefinedProperty> relDelete( long id )
{
RelationshipRecord record = relRecords.getOrLoad( id, null ).forChangingLinkage();
if ( !record.inUse() )
{
throw new IllegalStateException( "Unable to delete relationship[" +
id + "] since it is already deleted." );
}
// Capture+delete properties first, then unlink from both nodes' chains
// before finally marking the record itself as no longer in use.
ArrayMap<Integer, DefinedProperty> propertyMap = getAndDeletePropertyChain( record );
disconnectRelationship( record );
updateNodes( record );
record.setInUse( false );
return propertyMap;
}
/**
 * Walks the property chain of the given primitive, marking every property
 * record (and the dynamic value records it owns) as deleted, and collects
 * the property values as they were BEFORE this transaction.
 *
 * @param primitive the node/relationship whose chain is removed.
 * @return map from property key id to the removed property value.
 */
private ArrayMap<Integer, DefinedProperty> getAndDeletePropertyChain( PrimitiveRecord primitive )
{
    ArrayMap<Integer, DefinedProperty> result = new ArrayMap<>( (byte) 9, false, true );
    long nextProp = primitive.getNextProp();
    while ( nextProp != Record.NO_NEXT_PROPERTY.intValue() )
    {
        RecordChange<Long, PropertyRecord, PrimitiveRecord> propertyChange =
                propertyRecords.getOrLoad( nextProp, primitive );
        // TODO forChanging/forReading piggy-backing
        PropertyRecord propRecord = propertyChange.forChangingData();
        PropertyRecord before = propertyChange.getBefore();
        // Report the values from the before-state, i.e. what is actually removed.
        for ( PropertyBlock block : before.getPropertyBlocks() )
        {
            result.put( block.getKeyIndexId(), block.newPropertyData( getPropertyStore() ) );
        }
        // Free the dynamic records backing each current block's value.
        for ( PropertyBlock block : propRecord.getPropertyBlocks() )
        {
            for ( DynamicRecord valueRecord : block.getValueRecords() )
            {
                assert valueRecord.inUse();
                valueRecord.setInUse( false );
                propRecord.addDeletedRecord( valueRecord );
            }
        }
        // Read the next pointer before the record is wiped.
        nextProp = propRecord.getNextProp();
        propRecord.setInUse( false );
        propRecord.setChanged( primitive );
        // We do not remove them individually, but all together here
        propRecord.getPropertyBlocks().clear();
    }
    return result;
}
/**
 * Unlinks {@code rel} from the relationship chains of both of its nodes by
 * re-pointing the prev/next pointers of its (up to four) neighbouring
 * relationship records around it. A write lock is acquired on every
 * neighbour that gets modified.
 *
 * The four cases (first-node prev/next, second-node prev/next) follow the
 * same pattern, so the work is delegated to a single helper.
 *
 * @param rel the relationship record being removed from its chains.
 */
private void disconnectRelationship( RelationshipRecord rel )
{
    // first node: make its prev neighbour's next pointer skip forward over rel
    relinkNeighbour( rel.getFirstPrevRel(), rel.getFirstNode(), rel.getFirstNextRel(), true, rel );
    // first node: make its next neighbour's prev pointer skip backward over rel
    relinkNeighbour( rel.getFirstNextRel(), rel.getFirstNode(), rel.getFirstPrevRel(), false, rel );
    // second node: same two updates on that node's chain
    relinkNeighbour( rel.getSecondPrevRel(), rel.getSecondNode(), rel.getSecondNextRel(), true, rel );
    relinkNeighbour( rel.getSecondNextRel(), rel.getSecondNode(), rel.getSecondPrevRel(), false, rel );
}
/**
 * Updates one neighbouring relationship of {@code rel} in the chain of
 * {@code nodeId}, pointing it at {@code newPointer} instead of {@code rel}.
 * No-op when there is no neighbour on this side.
 *
 * @param neighbourId id of the neighbour to update, or NO_NEXT_RELATIONSHIP for none.
 * @param nodeId the node whose chain is being repaired.
 * @param newPointer the relationship id the neighbour should point at instead of rel.
 * @param updateNext true to update the neighbour's next pointer, false for its prev pointer.
 * @param rel the relationship being disconnected (used for error reporting only).
 * @throws InvalidRecordException if the neighbour does not reference {@code nodeId}.
 */
private void relinkNeighbour( long neighbourId, long nodeId, long newPointer, boolean updateNext,
        RelationshipRecord rel )
{
    if ( neighbourId == Record.NO_NEXT_RELATIONSHIP.intValue() )
    {
        return;
    }
    // Lock before loading for modification, same as the original per-case code.
    getWriteLock( new LockableRelationship( neighbourId ) );
    RelationshipRecord neighbour = relRecords.getOrLoad( neighbourId, null ).forChangingLinkage();
    boolean changed = false;
    if ( neighbour.getFirstNode() == nodeId )
    {
        if ( updateNext )
        {
            neighbour.setFirstNextRel( newPointer );
        }
        else
        {
            neighbour.setFirstPrevRel( newPointer );
        }
        changed = true;
    }
    if ( neighbour.getSecondNode() == nodeId )
    {
        if ( updateNext )
        {
            neighbour.setSecondNextRel( newPointer );
        }
        else
        {
            neighbour.setSecondPrevRel( newPointer );
        }
        changed = true;
    }
    if ( !changed )
    {
        throw new InvalidRecordException( neighbour + " don't match " + rel );
    }
}
// Acquires a transaction-scoped write lock on the given (lockable stand-in) relationship.
private void getWriteLock( Relationship lockableRel )
{
    state.acquireWriteLock( lockableRel );
}
/**
 * Returns the relationship chain position of the given node as it was
 * BEFORE this transaction (read from the before-state of the record).
 *
 * @param nodeId the node whose chain position to read.
 * @return the first relationship id in the node's committed chain.
 */
public long getRelationshipChainPosition( long nodeId )
{
    return nodeRecords.getOrLoad( nodeId, null ).getBefore().getNextRel();
}
/**
 * Loads the next batch of relationships for the given node, starting from
 * the given chain position, grouped by direction (outgoing / incoming /
 * loop).
 *
 * @param nodeId the node whose relationships to load.
 * @param position the relationship chain position to continue from.
 * @return a pair of the grouped relationship records and the chain position
 *         as it stands after this batch has been loaded.
 */
public Pair<Map<DirectionWrapper, Iterable<RelationshipRecord>>, Long> getMoreRelationships( long nodeId,
        long position )
{
    return getMoreRelationships( nodeId, position, getRelGrabSize(), getRelationshipStore() );
}
/**
 * Repairs the chain head pointers of the nodes of a relationship that is
 * being deleted: if the relationship was the first in a node's chain, the
 * node is re-pointed at the relationship that followed it.
 *
 * @param rel the relationship being removed.
 */
private void updateNodes( RelationshipRecord rel )
{
    // rel was head of the first node's chain?
    if ( rel.getFirstPrevRel() == Record.NO_PREV_RELATIONSHIP.intValue() )
    {
        NodeRecord node = nodeRecords.getOrLoad( rel.getFirstNode(), null ).forChangingLinkage();
        node.setNextRel( rel.getFirstNextRel() );
    }
    // rel was head of the second node's chain?
    if ( rel.getSecondPrevRel() == Record.NO_PREV_RELATIONSHIP.intValue() )
    {
        NodeRecord node = nodeRecords.getOrLoad( rel.getSecondNode(), null ).forChangingLinkage();
        node.setNextRel( rel.getSecondNextRel() );
    }
}
/**
 * Removes the property identified by its key from the relationship with the
 * given id.
 *
 * @param relId The id of the relationship to remove the property from.
 * @param propertyKey The key index of the property.
 * @throws IllegalStateException if the relationship has been deleted.
 */
public void relRemoveProperty( long relId, int propertyKey )
{
    RecordChange<Long, RelationshipRecord, Void> change = relRecords.getOrLoad( relId, null );
    RelationshipRecord record = change.forReadingLinkage();
    if ( !record.inUse() )
    {
        throw new IllegalStateException( "Property remove on relationship[" +
                relId + "] illegal since it has been deleted." );
    }
    assert assertPropertyChain( record );
    removeProperty( record, change, propertyKey );
}
/**
 * Loads the complete property chain for the given relationship and feeds
 * each property to the given receiver.
 *
 * @param relId The id of the relationship whose properties to load.
 * @param light If the properties should be loaded light or not.
 * @param receiver receiver of loaded properties.
 */
public void relLoadProperties( long relId, boolean light, PropertyReceiver receiver )
{
    RecordChange<Long, RelationshipRecord, Void> rel = relRecords.getIfLoaded( relId );
    if ( rel != null )
    {
        // Created in this tx: nothing committed to load, receiver gets nothing.
        if ( rel.isCreated() )
        {
            return;
        }
        if ( !rel.forReadingLinkage().inUse() && !light )
        {
            throw new IllegalStateException( "Relationship[" + relId + "] has been deleted in this tx" );
        }
        // NOTE(review): when light==true and the cached record is deleted, control
        // falls through to the store read below, which then throws
        // InvalidRecordException — confirm this is the intended "light" behavior.
    }
    // Always read the committed record from the store, not the tx cache.
    RelationshipRecord relRecord = getRelationshipStore().getRecord( relId );
    if ( !relRecord.inUse() )
    {
        throw new InvalidRecordException( "Relationship[" + relId + "] not in use" );
    }
    loadProperties( getPropertyStore(), relRecord.getNextProp(), receiver );
}
/**
 * Loads the complete property chain for the given node and feeds each
 * property to the given receiver.
 *
 * @param nodeId The id of the node whose properties to load.
 * @param light If the properties should be loaded light or not.
 * @param receiver receiver of loaded properties.
 */
public void nodeLoadProperties( long nodeId, boolean light, PropertyReceiver receiver )
{
    RecordChange<Long, NodeRecord, Void> node = nodeRecords.getIfLoaded( nodeId );
    if ( node != null )
    {
        // Created in this tx: nothing committed to load, receiver gets nothing.
        if ( node.isCreated() )
        {
            return;
        }
        if ( !node.forReadingLinkage().inUse() && !light )
        {
            throw new IllegalStateException( "Node[" + nodeId + "] has been deleted in this tx" );
        }
    }
    // Always read the committed record from the store, not the tx cache.
    NodeRecord nodeRecord = getNodeStore().getRecord( nodeId );
    if ( !nodeRecord.inUse() )
    {
        // NOTE(review): this checks store state, but the message says "in this tx";
        // relLoadProperties throws InvalidRecordException("... not in use") for the
        // equivalent case — confirm which message/exception is intended.
        throw new IllegalStateException( "Node[" + nodeId + "] has been deleted in this tx" );
    }
    loadProperties( getPropertyStore(), nodeRecord.getNextProp(), receiver );
}
/**
 * Removes the property identified by its key from the node with the given
 * id.
 *
 * @param nodeId The id of the node to remove the property from.
 * @param propertyKey The key index of the property.
 * @throws IllegalStateException if the node has been deleted.
 */
public void nodeRemoveProperty( long nodeId, int propertyKey )
{
    RecordChange<Long, NodeRecord, Void> change = nodeRecords.getOrLoad( nodeId, null );
    NodeRecord record = change.forReadingLinkage();
    if ( !record.inUse() )
    {
        throw new IllegalStateException( "Property remove on node[" +
                nodeId + "] illegal since it has been deleted." );
    }
    assert assertPropertyChain( record );
    removeProperty( record, change, propertyKey );
}
/**
 * Removes the block for the given property key from whichever property
 * record in the primitive's chain contains it. Dynamic value records owned
 * by the block are freed. If the property record becomes empty it is
 * unlinked from the chain; otherwise it is just marked changed.
 *
 * @param primitive the owning node/relationship/graph record.
 * @param primitiveRecordChange the tracked change for the owner.
 * @param propertyKey the key index of the property to remove.
 */
private <P extends PrimitiveRecord> void removeProperty( P primitive,
        RecordChange<Long, P, Void> primitiveRecordChange, int propertyKey )
{
    long propertyId = findPropertyRecordContaining( primitive, propertyKey );
    RecordChange<Long, PropertyRecord, PrimitiveRecord> recordChange =
            propertyRecords.getOrLoad( propertyId, primitiveRecordChange.forReadingLinkage() );
    PropertyRecord propRecord = recordChange.forChangingData();
    if ( !propRecord.inUse() )
    {
        throw new IllegalStateException( "Unable to delete property[" +
                propertyId + "] since it is already deleted." );
    }
    PropertyBlock block = propRecord.removePropertyBlock( propertyKey );
    if ( block == null )
    {
        throw new IllegalStateException( "Property with index["
                + propertyKey
                + "] is not present in property["
                + propertyId + "]" );
    }
    // Free the dynamic records that held the removed value.
    for ( DynamicRecord valueRecord : block.getValueRecords() )
    {
        assert valueRecord.inUse();
        valueRecord.setInUse( false, block.getType().intValue() );
        propRecord.addDeletedRecord( valueRecord );
    }
    if ( propRecord.size() > 0 )
    {
        /*
         * There are remaining blocks in the record. We do not unlink yet.
         */
        propRecord.setChanged( primitiveRecordChange.forReadingLinkage() );
        assert assertPropertyChain( primitiveRecordChange.forReadingLinkage() );
    }
    else
    {
        unlinkPropertyRecord( propRecord, primitiveRecordChange );
    }
}
/**
 * Unlinks an (empty) property record from its owner's property chain by
 * re-pointing the owner head and/or the prev/next property records around
 * it, then marks the record as not in use.
 *
 * @param propRecord the now-empty property record to unlink.
 * @param primitiveRecordChange the tracked change for the owner.
 */
private <P extends PrimitiveRecord> void unlinkPropertyRecord( PropertyRecord propRecord,
        RecordChange<Long, P, Void> primitiveRecordChange )
{
    P primitive = primitiveRecordChange.forReadingLinkage();
    assert assertPropertyChain( primitive );
    assert propRecord.size() == 0;
    long prevProp = propRecord.getPrevProp();
    long nextProp = propRecord.getNextProp();
    // Record was the chain head: move the owner's head pointer forward.
    if ( primitive.getNextProp() == propRecord.getId() )
    {
        assert propRecord.getPrevProp() == Record.NO_PREVIOUS_PROPERTY.intValue() : propRecord
                + " for "
                + primitive;
        primitiveRecordChange.forChangingLinkage().setNextProp( nextProp );
    }
    // Bridge the previous neighbour over this record.
    if ( prevProp != Record.NO_PREVIOUS_PROPERTY.intValue() )
    {
        PropertyRecord prevPropRecord = propertyRecords.getOrLoad( prevProp, primitive ).forChangingLinkage();
        assert prevPropRecord.inUse() : prevPropRecord + "->" + propRecord
                + " for " + primitive;
        prevPropRecord.setNextProp( nextProp );
        prevPropRecord.setChanged( primitive );
    }
    // Bridge the next neighbour over this record.
    if ( nextProp != Record.NO_NEXT_PROPERTY.intValue() )
    {
        PropertyRecord nextPropRecord = propertyRecords.getOrLoad( nextProp, primitive ).forChangingLinkage();
        assert nextPropRecord.inUse() : propRecord + "->" + nextPropRecord
                + " for " + primitive;
        nextPropRecord.setPrevProp( prevProp );
        nextPropRecord.setChanged( primitive );
    }
    propRecord.setInUse( false );
    /*
     * The following two are not needed - the above line does all the work (PropertyStore
     * does not write out the prev/next for !inUse records). It is nice to set this
     * however to check for consistency when assertPropertyChain().
     */
    propRecord.setPrevProp( Record.NO_PREVIOUS_PROPERTY.intValue() );
    propRecord.setNextProp( Record.NO_NEXT_PROPERTY.intValue() );
    propRecord.setChanged( primitive );
    assert assertPropertyChain( primitive );
}
/**
 * Changes the value of an existing property on the relationship with the
 * given id.
 *
 * @param relId The id of the relationship holding the property.
 * @param propertyKey The key index of the property to change.
 * @param value The new value of the property.
 * @return the changed property.
 * @throws IllegalStateException if the relationship has been deleted.
 */
public DefinedProperty relChangeProperty( long relId, int propertyKey, Object value )
{
    RecordChange<Long, RelationshipRecord, Void> change = relRecords.getOrLoad( relId, null );
    RelationshipRecord record = change.forReadingLinkage();
    if ( !record.inUse() )
    {
        throw new IllegalStateException( "Property change on relationship[" +
                relId + "] illegal since it has been deleted." );
    }
    return primitiveChangeProperty( change, propertyKey, value );
}
/**
 * Changes the value of an existing property on the node with the given id.
 *
 * @param nodeId The id of the node holding the property.
 * @param propertyKey The key index of the property to change.
 * @param value The new value of the property.
 * @return the changed property.
 * @throws IllegalStateException if the node has been deleted.
 */
public DefinedProperty nodeChangeProperty( long nodeId, int propertyKey, Object value )
{
    RecordChange<Long, NodeRecord, Void> change = nodeRecords.getOrLoad( nodeId, null );
    NodeRecord record = change.forReadingLinkage();
    if ( !record.inUse() )
    {
        throw new IllegalStateException( "Property change on node[" +
                nodeId + "] illegal since it has been deleted." );
    }
    return primitiveChangeProperty( change, propertyKey, value );
}
/**
 * Walks the property chain of the given primitive and returns the id of the
 * property record that contains a block for the given key.
 *
 * TODO MP: introduces a performance regression. This method exists because
 * the {@link Property} objects handed through the {@link KernelAPI} do not
 * carry the property record id, so it has to be re-discovered here.
 *
 * @param primitive the owner whose chain to search.
 * @param propertyKey the key index to look for.
 * @return the id of the property record containing the key.
 * @throws IllegalStateException if no record in the chain contains the key.
 */
private long findPropertyRecordContaining( PrimitiveRecord primitive, int propertyKey )
{
    for ( long recordId = primitive.getNextProp(); !Record.NO_NEXT_PROPERTY.is( recordId ); )
    {
        PropertyRecord record = propertyRecords.getOrLoad( recordId, primitive ).forReadingLinkage();
        if ( record.getPropertyBlock( propertyKey ) != null )
        {
            return recordId;
        }
        recordId = record.getNextProp();
    }
    throw new IllegalStateException( "No property record in property chain for " + primitive +
            " contained property with key " + propertyKey );
}
/**
 * Changes the value of an existing property on the given primitive. The old
 * value's dynamic records are freed, the new value is encoded in place, and
 * if the record then exceeds the payload size the block is moved to another
 * record in the chain.
 *
 * @param primitiveRecordChange the tracked change for the owner.
 * @param propertyKey the key index of the property to change.
 * @param value the new value.
 * @return the changed property.
 */
private <P extends PrimitiveRecord> DefinedProperty primitiveChangeProperty(
        RecordChange<Long, P, Void> primitiveRecordChange, int propertyKey, Object value )
{
    P primitive = primitiveRecordChange.forReadingLinkage();
    assert assertPropertyChain( primitive );
    long propertyId = findPropertyRecordContaining( primitive, propertyKey );
    PropertyRecord propertyRecord = propertyRecords.getOrLoad( propertyId, primitive ).forChangingData();
    if ( !propertyRecord.inUse() )
    {
        throw new IllegalStateException( "Unable to change property["
                + propertyId
                + "] since it has been deleted." );
    }
    PropertyBlock block = propertyRecord.getPropertyBlock( propertyKey );
    if ( block == null )
    {
        throw new IllegalStateException( "Property with index["
                + propertyKey
                + "] is not present in property["
                + propertyId + "]" );
    }
    propertyRecord.setChanged( primitive );
    // Free the dynamic records that held the previous value.
    for ( DynamicRecord record : block.getValueRecords() )
    {
        assert record.inUse();
        record.setInUse( false, block.getType().intValue() );
        propertyRecord.addDeletedRecord( record );
    }
    // Encode the new value directly into the existing block.
    getPropertyStore().encodeValue( block, propertyKey, value );
    if ( propertyRecord.size() > PropertyType.getPayloadSize() )
    {
        // New value does not fit here: move the block to another record in the chain.
        propertyRecord.removePropertyBlock( propertyKey );
        /*
         * The record should never, ever be above max size. Less obviously, it should
         * never remain empty. If removing a property because it won't fit when changing
         * it leaves the record empty it means that this block was the last one which
         * means that it doesn't fit in an empty record. Where i come from, we call this
         * weird.
         *
         assert propertyRecord.size() <= PropertyType.getPayloadSize() : propertyRecord;
         assert propertyRecord.size() > 0 : propertyRecord;
         */
        addPropertyBlockToPrimitive( block, primitiveRecordChange );
    }
    assert assertPropertyChain( primitive );
    return Property.property( propertyKey, value );
}
/**
 * Encodes the given value into a fresh property block and attaches it to
 * the primitive's property chain.
 *
 * @param node the tracked change for the owner.
 * @param propertyKey the key index of the new property.
 * @param value the value of the new property.
 * @return the added property.
 */
private <P extends PrimitiveRecord> DefinedProperty addPropertyToPrimitive(
        RecordChange<Long, P, Void> node, int propertyKey, Object value )
{
    P owner = node.forReadingLinkage();
    assert assertPropertyChain( owner );
    PropertyBlock block = new PropertyBlock();
    getPropertyStore().encodeValue( block, propertyKey, value );
    addPropertyBlockToPrimitive( block, node );
    assert assertPropertyChain( owner );
    return Property.property( propertyKey, value );
}
/**
 * Adds a property with the given key and value to the relationship with the
 * given id.
 *
 * @param relId The id of the relationship to add the property to.
 * @param propertyKey The key index of the property to add.
 * @param value The value of the property.
 * @return the added property.
 * @throws IllegalStateException if the relationship has been deleted.
 */
public DefinedProperty relAddProperty( long relId, int propertyKey, Object value )
{
    RecordChange<Long, RelationshipRecord, Void> change = relRecords.getOrLoad( relId, null );
    RelationshipRecord record = change.forReadingLinkage();
    if ( !record.inUse() )
    {
        throw new IllegalStateException( "Property add on relationship[" +
                relId + "] illegal since it has been deleted." );
    }
    return addPropertyToPrimitive( change, propertyKey, value );
}
/**
 * Adds a property with the given key and value to the node with the given
 * id.
 *
 * @param nodeId The id of the node to add the property to.
 * @param propertyKey The key index of the property to add.
 * @param value The value of the property.
 * @return the added property.
 * @throws IllegalStateException if the node has been deleted.
 */
public DefinedProperty nodeAddProperty( long nodeId, int propertyKey, Object value )
{
    RecordChange<Long, NodeRecord, Void> change = nodeRecords.getOrLoad( nodeId, null );
    NodeRecord record = change.forReadingLinkage();
    if ( !record.inUse() )
    {
        throw new IllegalStateException( "Property add on node[" +
                nodeId + "] illegal since it has been deleted." );
    }
    return addPropertyToPrimitive( change, propertyKey, value );
}
/**
 * Attaches an encoded property block to the primitive's property chain:
 * either into the first record of the chain if it has room, or into a newly
 * created record that becomes the new chain head.
 *
 * @param block the encoded property block to attach.
 * @param primitiveRecordChange the tracked change for the owner.
 */
private <P extends PrimitiveRecord> void addPropertyBlockToPrimitive(
        PropertyBlock block, RecordChange<Long, P, Void> primitiveRecordChange )
{
    P primitive = primitiveRecordChange.forReadingLinkage();
    assert assertPropertyChain( primitive );
    int newBlockSizeInBytes = block.getSize();
    /*
     * Here we could either iterate over the whole chain or just go for the first record
     * which is the most likely to be the less full one. Currently we opt for the second
     * to perform better.
     */
    PropertyRecord host = null;
    long firstProp = primitive.getNextProp();
    if ( firstProp != Record.NO_NEXT_PROPERTY.intValue() )
    {
        // We do not store in map - might not have enough space
        RecordChange<Long, PropertyRecord, PrimitiveRecord> change = propertyRecords
                .getOrLoad( firstProp, primitive );
        PropertyRecord propRecord = change.forReadingLinkage();
        assert propRecord.getPrevProp() == Record.NO_PREVIOUS_PROPERTY.intValue() : propRecord
                + " for "
                + primitive;
        assert propRecord.inUse() : propRecord;
        int propSize = propRecord.size();
        assert propSize > 0 : propRecord;
        // Only mark the record as changed if the block actually fits.
        if ( propSize + newBlockSizeInBytes <= PropertyType.getPayloadSize() )
        {
            propRecord = change.forChangingData();
            host = propRecord;
            host.addPropertyBlock( block );
            host.setChanged( primitive );
        }
    }
    if ( host == null )
    {
        // First record in chain didn't fit, make new one
        host = propertyRecords.create( getPropertyStore().nextId(), primitive ).forChangingData();
        // Link the new record in as the new chain head, in front of the old head.
        if ( primitive.getNextProp() != Record.NO_NEXT_PROPERTY.intValue() )
        {
            PropertyRecord prevProp = propertyRecords.getOrLoad( primitive.getNextProp(), primitive )
                    .forChangingLinkage();
            assert prevProp.getPrevProp() == Record.NO_PREVIOUS_PROPERTY.intValue();
            prevProp.setPrevProp( host.getId() );
            host.setNextProp( prevProp.getId() );
            prevProp.setChanged( primitive );
        }
        primitiveRecordChange.forChangingLinkage().setNextProp( host.getId() );
        host.addPropertyBlock( block );
        host.setInUse( true );
    }
    // Ok, here host does for the job. Use it
    assert assertPropertyChain( primitive );
}
/**
 * Creates a relationship with the given id of the given type, between the
 * two given nodes, and links it into both nodes' relationship chains.
 *
 * @param id The id of the relationship to create.
 * @param type The id of the relationship type.
 * @param firstNodeId The id of the start node.
 * @param secondNodeId The id of the end node.
 * @throws IllegalStateException if either node has been deleted.
 */
public void relationshipCreate( long id, int type, long firstNodeId, long secondNodeId )
{
    NodeRecord first = nodeRecords.getOrLoad( firstNodeId, null ).forChangingLinkage();
    if ( !first.inUse() )
    {
        throw new IllegalStateException( "First node[" + firstNodeId +
                "] is deleted and cannot be used to create a relationship" );
    }
    NodeRecord second = nodeRecords.getOrLoad( secondNodeId, null ).forChangingLinkage();
    if ( !second.inUse() )
    {
        throw new IllegalStateException( "Second node[" + secondNodeId +
                "] is deleted and cannot be used to create a relationship" );
    }
    RelationshipRecord created = relRecords.create( id, null ).forChangingLinkage();
    created.setLinks( firstNodeId, secondNodeId, type );
    created.setInUse( true );
    created.setCreated();
    connectRelationship( first, second, created );
}
/**
 * Inserts a newly created relationship at the head of both nodes'
 * relationship chains. The old chain heads are re-linked first, before the
 * nodes' head pointers are moved - the order of these statements matters.
 *
 * @param firstNode the start node record.
 * @param secondNode the end node record.
 * @param rel the new relationship record.
 */
private void connectRelationship( NodeRecord firstNode,
        NodeRecord secondNode, RelationshipRecord rel )
{
    assert firstNode.getNextRel() != rel.getId();
    assert secondNode.getNextRel() != rel.getId();
    // The new relationship points at the current chain heads...
    rel.setFirstNextRel( firstNode.getNextRel() );
    rel.setSecondNextRel( secondNode.getNextRel() );
    // ...the old heads are back-linked to it...
    connect( firstNode, rel );
    connect( secondNode, rel );
    // ...and only then do the nodes adopt it as their new head.
    firstNode.setNextRel( rel.getId() );
    secondNode.setNextRel( rel.getId() );
}
/**
 * Back-links the current head of the node's relationship chain to the given
 * (new) relationship, taking a write lock on the head first. No-op when the
 * node has no relationships yet.
 *
 * @param node the node whose chain head to update.
 * @param rel the relationship that is about to become the new chain head.
 * @throws InvalidRecordException if the current head does not reference the node.
 */
private void connect( NodeRecord node, RelationshipRecord rel )
{
    long headRelId = node.getNextRel();
    if ( headRelId == Record.NO_NEXT_RELATIONSHIP.intValue() )
    {
        return;
    }
    getWriteLock( new LockableRelationship( headRelId ) );
    RelationshipRecord head = relRecords.getOrLoad( headRelId, null ).forChangingLinkage();
    boolean matched = false;
    if ( head.getFirstNode() == node.getId() )
    {
        head.setFirstPrevRel( rel.getId() );
        matched = true;
    }
    if ( head.getSecondNode() == node.getId() )
    {
        head.setSecondPrevRel( rel.getId() );
        matched = true;
    }
    if ( !matched )
    {
        throw new InvalidRecordException( node + " dont match " + head );
    }
}
/**
 * Creates a node record with the given id, marked in use and created.
 *
 * @param nodeId The id of the node to create.
 */
public void nodeCreate( long nodeId )
{
    NodeRecord created = nodeRecords.create( nodeId, null ).forChangingData();
    created.setInUse( true );
    created.setCreated();
}
/**
 * Creates a property key token record with the given id, storing the key
 * string in dynamic name records.
 *
 * @param key The key of the property index, as a string.
 * @param id The property index record id.
 */
public void createPropertyKeyToken( String key, int id )
{
    PropertyKeyTokenRecord token = new PropertyKeyTokenRecord( id );
    token.setInUse( true );
    token.setCreated();
    PropertyKeyTokenStore tokenStore = getPropertyStore().getPropertyKeyTokenStore();
    Collection<DynamicRecord> nameRecords = tokenStore.allocateNameRecords( encodeString( key ) );
    // The token references the first dynamic record holding its name.
    token.setNameId( (int) first( nameRecords ).getId() );
    token.addNameRecords( nameRecords );
    addPropertyKeyTokenRecord( token );
}
/**
 * Creates a label token record with the given id, storing the label name in
 * dynamic name records.
 *
 * @param name The name of the label, as a string.
 * @param id The label token record id.
 */
public void createLabelToken( String name, int id )
{
    LabelTokenRecord token = new LabelTokenRecord( id );
    token.setInUse( true );
    token.setCreated();
    LabelTokenStore tokenStore = getLabelTokenStore();
    Collection<DynamicRecord> nameRecords = tokenStore.allocateNameRecords( encodeString( name ) );
    // The token references the first dynamic record holding its name.
    token.setNameId( (int) first( nameRecords ).getId() );
    token.addNameRecords( nameRecords );
    addLabelIdRecord( token );
}
/**
 * Creates a relationship type token record with the given id, storing the
 * type name in dynamic name records.
 *
 * @param id The id of the new relationship type record.
 * @param name The name of the relationship type.
 */
public void createRelationshipTypeToken( int id, String name )
{
    RelationshipTypeTokenRecord token = new RelationshipTypeTokenRecord( id );
    token.setInUse( true );
    token.setCreated();
    Collection<DynamicRecord> nameRecords =
            getRelationshipTypeStore().allocateNameRecords( encodeString( name ) );
    // The token references the first dynamic record holding its name.
    token.setNameId( (int) first( nameRecords ).getId() );
    token.addNameRecords( nameRecords );
    addRelationshipTypeRecord( token );
}
/**
 * Orders {@link Command}s ascending by their record key (id).
 * Serializable so it can travel with serializable sorted collections.
 * All instances are interchangeable, hence the constant equals/hashCode.
 */
static class CommandSorter implements Comparator<Command>, Serializable
{
    @Override
    public int compare( Command o1, Command o2 )
    {
        // Long.compare avoids the overflow that the previous "id1 - id2"
        // subtraction could suffer before its result was clamped to int range.
        return Long.compare( o1.getKey(), o2.getKey() );
    }
    @Override
    public boolean equals( Object o )
    {
        return o instanceof CommandSorter;
    }
    @Override
    public int hashCode()
    {
        return 3217;
    }
}
// Registers a relationship type token record with this transaction,
// lazily initializing the backing map on first use.
void addRelationshipTypeRecord( RelationshipTypeTokenRecord record )
{
    if ( relationshipTypeTokenRecords == null )
    {
        relationshipTypeTokenRecords = new HashMap<>();
    }
    relationshipTypeTokenRecords.put( record.getId(), record );
}
// Registers a label token record with this transaction,
// lazily initializing the backing map on first use.
void addLabelIdRecord( LabelTokenRecord record )
{
    if ( labelTokenRecords == null )
    {
        labelTokenRecords = new HashMap<>();
    }
    labelTokenRecords.put( record.getId(), record );
}
// Registers a property key token record with this transaction,
// lazily initializing the backing map on first use.
void addPropertyKeyTokenRecord( PropertyKeyTokenRecord record )
{
    if ( propertyKeyTokenRecords == null )
    {
        propertyKeyTokenRecords = new HashMap<>();
    }
    propertyKeyTokenRecords.put( record.getId(), record );
}
/**
 * A minimal {@link Relationship} stand-in that carries only an id, used as
 * a lock target (see {@code getWriteLock}) without loading the real
 * relationship. Identity is defined purely by id, so it locks the same
 * resource as the real relationship object would. Every graph operation
 * besides {@code getId}/{@code equals}/{@code hashCode}/{@code toString}
 * is unsupported by design.
 */
private static class LockableRelationship implements Relationship
{
    private final long id;
    LockableRelationship( long id )
    {
        this.id = id;
    }
    @Override
    public void delete()
    {
        throw new UnsupportedOperationException( "Lockable rel" );
    }
    @Override
    public Node getEndNode()
    {
        throw new UnsupportedOperationException( "Lockable rel" );
    }
    @Override
    public long getId()
    {
        return this.id;
    }
    @Override
    public GraphDatabaseService getGraphDatabase()
    {
        throw new UnsupportedOperationException( "Lockable rel" );
    }
    @Override
    public Node[] getNodes()
    {
        throw new UnsupportedOperationException( "Lockable rel" );
    }
    @Override
    public Node getOtherNode( Node node )
    {
        throw new UnsupportedOperationException( "Lockable rel" );
    }
    @Override
    public Object getProperty( String key )
    {
        throw new UnsupportedOperationException( "Lockable rel" );
    }
    @Override
    public Object getProperty( String key, Object defaultValue )
    {
        throw new UnsupportedOperationException( "Lockable rel" );
    }
    @Override
    public Iterable<String> getPropertyKeys()
    {
        throw new UnsupportedOperationException( "Lockable rel" );
    }
    @Override
    public Node getStartNode()
    {
        throw new UnsupportedOperationException( "Lockable rel" );
    }
    @Override
    public RelationshipType getType()
    {
        throw new UnsupportedOperationException( "Lockable rel" );
    }
    @Override
    public boolean isType( RelationshipType type )
    {
        throw new UnsupportedOperationException( "Lockable rel" );
    }
    @Override
    public boolean hasProperty( String key )
    {
        throw new UnsupportedOperationException( "Lockable rel" );
    }
    @Override
    public Object removeProperty( String key )
    {
        throw new UnsupportedOperationException( "Lockable rel" );
    }
    @Override
    public void setProperty( String key, Object value )
    {
        throw new UnsupportedOperationException( "Lockable rel" );
    }
    @Override
    public boolean equals( Object o )
    {
        // Equal to ANY Relationship with the same id, so it can stand in
        // for the real relationship when used as a lock key.
        return o instanceof Relationship && this.getId() == ((Relationship) o).getId();
    }
    @Override
    public int hashCode()
    {
        return (int) ((id >>> 32) ^ id);
    }
    @Override
    public String toString()
    {
        return "Lockable relationship #" + this.getId();
    }
}
/**
 * Sanity-checks the given primitive's property chain: every record is in
 * use, the first record has no prev pointer, the last has no next pointer,
 * and each adjacent pair is doubly linked. Intended to be called inside
 * {@code assert} statements only; always returns {@code true} so failures
 * surface as assertion errors with the chain in the message.
 *
 * @param primitive the owner whose chain to verify.
 * @return {@code true} always (failures throw {@link AssertionError}).
 */
private boolean assertPropertyChain( PrimitiveRecord primitive )
{
    List<PropertyRecord> toCheck = new LinkedList<>();
    long nextIdToFetch = primitive.getNextProp();
    while ( nextIdToFetch != Record.NO_NEXT_PROPERTY.intValue() )
    {
        PropertyRecord propRecord = propertyRecords.getOrLoad( nextIdToFetch, primitive ).forReadingLinkage();
        toCheck.add( propRecord );
        assert propRecord.inUse() : primitive + "->"
                + Arrays.toString( toCheck.toArray() );
        nextIdToFetch = propRecord.getNextProp();
    }
    if ( toCheck.isEmpty() )
    {
        assert primitive.getNextProp() == Record.NO_NEXT_PROPERTY.intValue() : primitive;
        return true;
    }
    PropertyRecord first = toCheck.get( 0 );
    PropertyRecord last = toCheck.get( toCheck.size() - 1 );
    assert first.getPrevProp() == Record.NO_PREVIOUS_PROPERTY.intValue() : primitive
            + "->"
            + Arrays.toString( toCheck.toArray() );
    assert last.getNextProp() == Record.NO_NEXT_PROPERTY.intValue() : primitive
            + "->"
            + Arrays.toString( toCheck.toArray() );
    // Each adjacent pair must point at each other in both directions.
    PropertyRecord current, previous = first;
    for ( int i = 1; i < toCheck.size(); i++ )
    {
        current = toCheck.get( i );
        assert current.getPrevProp() == previous.getId() : primitive
                + "->"
                + Arrays.toString( toCheck.toArray() );
        assert previous.getNextProp() == current.getId() : primitive
                + "->"
                + Arrays.toString( toCheck.toArray() );
        previous = current;
    }
    return true;
}
/**
 * Returns the tracked change for the singleton NeoStore record (the holder
 * of graph-level properties), lazily creating the change tracker on first
 * access. The loader never creates new records - there is exactly one,
 * loaded from {@code neoStore.asRecord()} under key 0.
 *
 * @return the (possibly freshly loaded) change for the NeoStore record.
 */
private RecordChange<Long, NeoStoreRecord, Void> getOrLoadNeoStoreRecord()
{
    if ( neoStoreRecord == null )
    {
        neoStoreRecord = new RecordChanges<>( new RecordChanges.Loader<Long, NeoStoreRecord, Void>()
        {
            @Override
            public NeoStoreRecord newUnused( Long key, Void additionalData )
            {
                // The NeoStore record always exists; it is never allocated here.
                throw new UnsupportedOperationException();
            }
            @Override
            public NeoStoreRecord load( Long key, Void additionalData )
            {
                return neoStore.asRecord();
            }
            @Override
            public void ensureHeavy( NeoStoreRecord record )
            {
            }
            @Override
            public NeoStoreRecord clone(NeoStoreRecord neoStoreRecord) {
                // We do not expect to manage the before state, so this operation will not be called.
                throw new UnsupportedOperationException("Clone on NeoStoreRecord");
            }
        }, false );
    }
    return neoStoreRecord.getOrLoad( 0L, null );
}
/**
 * Adds a graph-level property with the given key and value.
 *
 * @param propertyKey The key index of the property to add.
 * @param value The value of the property.
 * @return the added property.
 */
public DefinedProperty graphAddProperty( int propertyKey, Object value )
{
    PropertyBlock block = new PropertyBlock();
    /*
     * Encoding has to be set here before anything is changed,
     * since an exception could be thrown in encodeValue now and tx not marked
     * rollback only.
     */
    getPropertyStore().encodeValue( block, propertyKey, value );
    RecordChange<Long, NeoStoreRecord, Void> graphRecord = getOrLoadNeoStoreRecord();
    addPropertyBlockToPrimitive( block, graphRecord );
    assert assertPropertyChain( graphRecord.forReadingLinkage() );
    return Property.property( propertyKey, value );
}
/**
 * Changes the value of an existing graph-level property.
 *
 * @param propertyKey The key index of the property to change.
 * @param value The new value of the property.
 * @return the changed property.
 */
public DefinedProperty graphChangeProperty( int propertyKey, Object value )
{
    return primitiveChangeProperty( getOrLoadNeoStoreRecord(), propertyKey, value );
}
/**
 * Removes the graph-level property identified by the given key.
 *
 * @param propertyKey The key index of the property to remove.
 */
public void graphRemoveProperty( int propertyKey )
{
    RecordChange<Long, NeoStoreRecord, Void> change = getOrLoadNeoStoreRecord();
    removeProperty( change.forReadingLinkage(), change, propertyKey );
}
/**
 * Loads the complete graph-level property chain and feeds each property to
 * the given receiver.
 *
 * @param light If the properties should be loaded light or not.
 *        NOTE(review): this parameter is currently unused in the body -
 *        all properties are loaded the same way; confirm intent.
 * @param records receiver of loaded properties.
 */
public void graphLoadProperties( boolean light, PropertyReceiver records )
{
    loadProperties( getPropertyStore(), neoStore.asRecord().getNextProp(), records );
}
/**
 * Creates the dynamic records serializing the given schema rule and marks
 * each of them as in use and created in this transaction.
 *
 * @param schemaRule the rule to create records for.
 */
public void createSchemaRule( SchemaRule schemaRule )
{
    Collection<DynamicRecord> records =
            schemaRuleChanges.create( schemaRule.getId(), schemaRule ).forChangingData();
    for ( DynamicRecord record : records )
    {
        record.setInUse( true );
        record.setCreated();
    }
}
/**
 * Marks every dynamic record backing the given schema rule as no longer
 * in use, effectively dropping the rule in this transaction.
 *
 * @param rule the schema rule to drop.
 */
public void dropSchemaRule( SchemaRule rule )
{
    for ( DynamicRecord record :
            schemaRuleChanges.getOrLoad( rule.getId(), rule ).forChangingData() )
    {
        record.setInUse( false );
    }
}
/**
 * Adds the given label to the node with the given id.
 *
 * @param labelId the id of the label token to add.
 * @param nodeId the id of the node to label.
 */
public void addLabelToNode( int labelId, long nodeId )
{
    NodeRecord record = nodeRecords.getOrLoad( nodeId, null ).forChangingData();
    parseLabelsField( record ).add( labelId, getNodeStore() );
}
/**
 * Removes the given label from the node with the given id.
 *
 * @param labelId the id of the label token to remove.
 * @param nodeId the id of the node to unlabel.
 */
public void removeLabelFromNode( int labelId, long nodeId )
{
    NodeRecord record = nodeRecords.getOrLoad( nodeId, null ).forChangingData();
    parseLabelsField( record ).remove( labelId, getNodeStore() );
}
/**
 * Returns the labels of the given node as committed in the store.
 * Changes made within this transaction are deliberately not considered.
 *
 * @param nodeId the id of the node.
 * @return an iterator over the node's committed label ids.
 */
public PrimitiveLongIterator getLabelsForNode( long nodeId )
{
    // Read the committed record directly, bypassing tx-local record changes.
    NodeRecord committed = getNodeStore().getRecord( nodeId );
    long[] labelIds = parseLabelsField( committed ).get( getNodeStore() );
    return asPrimitiveIterator( labelIds );
}
/**
 * Sets the owning constraint id on the given index rule and rewrites the
 * rule's dynamic records to reflect the updated rule.
 *
 * @param indexRule the index rule to update.
 * @param constraintId the id of the owning constraint.
 */
public void setConstraintIndexOwner( IndexRule indexRule, long constraintId )
{
    IndexRule ownedRule = indexRule.withOwningConstraint( constraintId );
    Collection<DynamicRecord> records =
            schemaRuleChanges.getOrLoad( indexRule.getId(), indexRule ).forChangingData();
    // Replace the old serialized form wholesale with the updated rule's records.
    records.clear();
    records.addAll( getSchemaStore().allocateFrom( ownedRule ) );
}
/**
 * Reads up to {@code grabSize} in-use relationship records from the given
 * node's relationship chain, starting at {@code position}, grouping them by
 * direction relative to the node.
 *
 * @param nodeId id of the node whose chain is traversed.
 * @param position id of the relationship record to start from.
 * @param grabSize maximum number of in-use relationships to collect.
 * @param relStore store to read relationship records from.
 * @return the grouped relationships, paired with the chain position to
 *         continue from on the next call.
 */
private Pair<Map<DirectionWrapper, Iterable<RelationshipRecord>>, Long> getMoreRelationships(
        long nodeId, long position, int grabSize, RelationshipStore relStore )
{
    // initialCapacity=grabSize saves the lists the trouble of resizing
    List<RelationshipRecord> out = new ArrayList<>();
    List<RelationshipRecord> in = new ArrayList<>();
    List<RelationshipRecord> loop = null;
    Map<DirectionWrapper, Iterable<RelationshipRecord>> result = new EnumMap<>( DirectionWrapper.class );
    result.put( DirectionWrapper.OUTGOING, out );
    result.put( DirectionWrapper.INCOMING, in );
    for ( int i = 0; i < grabSize &&
            position != Record.NO_NEXT_RELATIONSHIP.intValue(); i++ )
    {
        RelationshipRecord relRecord = relStore.getChainRecord( position );
        if ( relRecord == null )
        {
            // return what we got so far
            return Pair.of( result, position );
        }
        long firstNode = relRecord.getFirstNode();
        long secondNode = relRecord.getSecondNode();
        if ( relRecord.inUse() )
        {
            if ( firstNode == secondNode )
            {
                if ( loop == null )
                {
                    // This is done lazily because loops are probably quite
                    // rarely encountered
                    loop = new ArrayList<>();
                    result.put( DirectionWrapper.BOTH, loop );
                }
                loop.add( relRecord );
            }
            else if ( firstNode == nodeId )
            {
                out.add( relRecord );
            }
            else if ( secondNode == nodeId )
            {
                in.add( relRecord );
            }
        }
        else
        {
            // Record not in use: don't count it against grabSize.
            i--;
        }
        // Advance along whichever of the two chains belongs to this node.
        if ( firstNode == nodeId )
        {
            position = relRecord.getFirstNextRel();
        }
        else if ( secondNode == nodeId )
        {
            position = relRecord.getSecondNextRel();
        }
        else
        {
            throw new InvalidRecordException( "Node[" + nodeId +
                    "] is neither firstNode[" + firstNode +
                    "] nor secondNode[" + secondNode + "] for Relationship[" + relRecord.getId() + "]" );
        }
    }
    return Pair.of( result, position );
}
/**
 * Feeds every property block found in the given property record chain to
 * the receiver.
 *
 * @param chain the loaded property record chain; may be {@code null}.
 * @param propertyStore store used to materialize property values.
 * @param receiver notified once per property block.
 */
private static void loadPropertyChain( Collection<PropertyRecord> chain, PropertyStore propertyStore,
        PropertyReceiver receiver )
{
    if ( chain == null )
    {
        return; // nothing loaded, nothing to report
    }
    for ( PropertyRecord record : chain )
    {
        for ( PropertyBlock block : record.getPropertyBlocks() )
        {
            receiver.receive( block.newPropertyData( propertyStore ), record.getId() );
        }
    }
}
/**
 * Loads the property chain starting at {@code nextProp} from the given
 * store and feeds each property to the receiver.
 *
 * @param propertyStore store to read the chain from.
 * @param nextProp id of the first property record in the chain.
 * @param receiver notified once per loaded property.
 */
static void loadProperties(
        PropertyStore propertyStore, long nextProp, PropertyReceiver receiver )
{
    Collection<PropertyRecord> chain = propertyStore.getPropertyRecordChain( nextProp );
    if ( chain == null )
    {
        return;
    }
    loadPropertyChain( chain, propertyStore, receiver );
}
/**
 * Callback for receiving properties as they are loaded from the store.
 */
public interface PropertyReceiver
{
    /**
     * Called once per loaded property.
     *
     * @param property the loaded property.
     * @param propertyRecordId id of the property record the property was read from.
     */
    void receive( DefinedProperty property, long propertyRecordId );
}
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_xa_NeoStoreTransaction.java
|
304
|
{
    @Override
    protected NodePropertyUpdate read( NodeRecord node )
    {
        // Only emit an update when the node carries the sought label...
        long[] labels = parseLabelsField( node ).get( nodeStore );
        if ( !containsLabel( soughtLabelId, labels ) )
        {
            return null;
        }
        // ...and a property with the sought key; first matching block wins.
        for ( PropertyBlock property : properties( node ) )
        {
            int propertyKeyId = property.getKeyIndexId();
            if ( soughtPropertyKeyId == propertyKeyId )
            {
                return NodePropertyUpdate.add( node.getId(), propertyKeyId, valueOf( property ), labels );
            }
        }
        return null;
    }
    @Override
    protected void process( NodePropertyUpdate update ) throws FAILURE
    {
        visitor.visit( update );
    }
};
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_xa_NeoStoreIndexStoreView.java
|
305
|
/**
 * {@link InjectedTransactionValidator} that delegates to an
 * {@link IntegrityValidator} to vet injected transactions against the
 * store's knowledge of previously committed transactions.
 */
public class NeoStoreInjectedTransactionValidator implements InjectedTransactionValidator
{
    private final IntegrityValidator integrityValidator;

    public NeoStoreInjectedTransactionValidator( IntegrityValidator validator )
    {
        this.integrityValidator = validator;
    }

    @Override
    public void assertInjectionAllowed( long lastCommittedTxWhenTransactionStarted ) throws XAException
    {
        integrityValidator.validateTransactionStartKnowledge( lastCommittedTxWhenTransactionStarted );
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_xa_NeoStoreInjectedTransactionValidator.java
|
306
|
/**
 * Test helper visitor that collects every visited {@link NodePropertyUpdate}
 * into a set for later inspection.
 */
class NodeUpdateCollectingVisitor implements Visitor<NodePropertyUpdate, Exception>
{
    private final Set<NodePropertyUpdate> updates = new HashSet<>();

    @Override
    public boolean visit( NodePropertyUpdate update ) throws Exception
    {
        this.updates.add( update );
        return false; // false = keep visiting
    }

    Set<NodePropertyUpdate> getUpdates()
    {
        return this.updates;
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_nioneo_xa_NeoStoreIndexStoreViewTest.java
|
307
|
{
    // Lazily creates one Lock mock per node id so tests can verify
    // acquire/release interactions per individual node.
    @Override
    public Object answer( InvocationOnMock invocation ) throws Throwable
    {
        Long nodeId = (Long) invocation.getArguments()[0];
        Lock lock = lockMocks.get( nodeId );
        if ( lock == null )
        {
            lockMocks.put( nodeId, lock = mock( Lock.class ) );
        }
        return lock;
    }
} );
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_nioneo_xa_NeoStoreIndexStoreViewTest.java
|
308
|
/**
 * Tests for {@code NeoStoreIndexStoreView}: verifies label scans, handling of
 * deleted nodes, per-node locking during scans, and property reads.
 */
public class NeoStoreIndexStoreViewTest
{
    @Rule
    public TargetDirectory.TestDirectory testDirectory = TargetDirectory.testDirForTest( getClass() );
    Label label = DynamicLabel.label( "Person" );
    GraphDatabaseAPI graphDb;
    NeoStoreIndexStoreView storeView;
    int labelId;
    int propertyKeyId;
    Node alistair;
    Node stefan;
    private LockService locks;
    @Test
    public void shouldScanExistingNodesForALabel() throws Exception
    {
        // given
        NodeUpdateCollectingVisitor visitor = new NodeUpdateCollectingVisitor();
        @SuppressWarnings( "unchecked" )
        Visitor<NodeLabelUpdate,Exception> labelVisitor = mock( Visitor.class );
        StoreScan<Exception> storeScan =
                storeView.visitNodes( new int[] { labelId }, new int[] { propertyKeyId }, visitor, labelVisitor );
        // when
        storeScan.run();
        // then: both labeled nodes must have produced a property update
        assertEquals(
                asSet(
                        NodePropertyUpdate.add( alistair.getId(), propertyKeyId, "Alistair", new long[] { labelId } ),
                        NodePropertyUpdate.add( stefan.getId(), propertyKeyId, "Stefan", new long[] { labelId } )
                ), visitor.getUpdates() );
    }
    @Test
    public void shouldIgnoreDeletedNodesDuringScan() throws Exception
    {
        // given: the two nodes exist but are deleted before the scan
        deleteAlistairAndStefanNodes();
        NodeUpdateCollectingVisitor visitor = new NodeUpdateCollectingVisitor();
        @SuppressWarnings( "unchecked" )
        Visitor<NodeLabelUpdate,Exception> labelVisitor = mock( Visitor.class );
        StoreScan<Exception> storeScan =
                storeView.visitNodes( new int[] { labelId }, new int[] { propertyKeyId }, visitor, labelVisitor );
        // when
        storeScan.run();
        // then: deleted nodes must not produce any updates
        assertEquals( emptySetOf( NodePropertyUpdate.class ), visitor.getUpdates() );
    }
    @Test
    public void shouldLockNodesWhileReadingThem() throws Exception
    {
        // given
        @SuppressWarnings("unchecked")
        Visitor<NodePropertyUpdate, Exception> visitor = mock( Visitor.class );
        StoreScan<Exception> storeScan = storeView
                .visitNodesWithPropertyAndLabel( new IndexDescriptor( labelId, propertyKeyId ), visitor );
        // when
        storeScan.run();
        // then: one lock per node, acquired and released in node-id order
        assertEquals( "allocated locks: " + lockMocks.keySet(), 2, lockMocks.size() );
        Lock lock0 = lockMocks.get( 0L );
        Lock lock1 = lockMocks.get( 1L );
        assertNotNull( "Lock[node=0] never acquired", lock0 );
        assertNotNull( "Lock[node=1] never acquired", lock1 );
        InOrder order = inOrder( locks, lock0, lock1 );
        order.verify( locks ).acquireNodeLock( 0, LockService.LockType.READ_LOCK );
        order.verify( lock0 ).release();
        order.verify( locks ).acquireNodeLock( 1, LockService.LockType.READ_LOCK );
        order.verify( lock1 ).release();
        order.verifyNoMoreInteractions();
    }
    @Test
    public void shouldReadProperties() throws PropertyNotFoundException, EntityNotFoundException
    {
        Property property = storeView.getProperty( alistair.getId(), propertyKeyId );
        assertTrue( property.valueEquals( "Alistair" ) );
    }
    Map<Long, Lock> lockMocks = new HashMap<>();
    @Before
    public void before() throws KernelException
    {
        String graphDbPath = testDirectory.directory().getAbsolutePath();
        graphDb = (GraphDatabaseAPI) new GraphDatabaseFactory().newEmbeddedDatabase( graphDbPath );
        createAlistairAndStefanNodes();
        getOrCreateIds();
        NeoStore neoStore = new StoreAccess( graphDb ).getRawNeoStore();
        // Lock service mock that hands out one Lock mock per node id,
        // recorded in lockMocks for verification in tests.
        locks = mock( LockService.class, new Answer()
        {
            @Override
            public Object answer( InvocationOnMock invocation ) throws Throwable
            {
                Long nodeId = (Long) invocation.getArguments()[0];
                Lock lock = lockMocks.get( nodeId );
                if ( lock == null )
                {
                    lockMocks.put( nodeId, lock = mock( Lock.class ) );
                }
                return lock;
            }
        } );
        storeView = new NeoStoreIndexStoreView( locks, neoStore );
    }
    @After
    public void after()
    {
        graphDb.shutdown();
    }
    private void createAlistairAndStefanNodes()
    {
        try ( Transaction tx = graphDb.beginTx() )
        {
            alistair = graphDb.createNode( label );
            alistair.setProperty( "name", "Alistair" );
            stefan = graphDb.createNode( label );
            stefan.setProperty( "name", "Stefan" );
            tx.success();
        }
    }
    private void deleteAlistairAndStefanNodes()
    {
        try ( Transaction tx = graphDb.beginTx() )
        {
            alistair.delete();
            stefan.delete();
            tx.success();
        }
    }
    private void getOrCreateIds() throws KernelException
    {
        try ( Transaction tx = graphDb.beginTx() )
        {
            ThreadToStatementContextBridge bridge =
                    graphDb.getDependencyResolver().resolveDependency( ThreadToStatementContextBridge.class );
            try ( Statement statement = bridge.instance() )
            {
                labelId = statement.dataWriteOperations().labelGetOrCreateForName( "Person" );
                propertyKeyId = statement.dataWriteOperations().propertyKeyGetOrCreateForName( "name" );
            }
            tx.success();
        }
    }
    /** Collects visited property updates into a set for assertions. */
    class NodeUpdateCollectingVisitor implements Visitor<NodePropertyUpdate, Exception>
    {
        private final Set<NodePropertyUpdate> updates = new HashSet<>();
        @Override
        public boolean visit( NodePropertyUpdate element ) throws Exception
        {
            updates.add( element );
            return false;
        }
        Set<NodePropertyUpdate> getUpdates()
        {
            return updates;
        }
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_nioneo_xa_NeoStoreIndexStoreViewTest.java
|
309
|
/**
 * Aggregates the label update plus all property updates produced for one
 * node during a store scan.
 */
private static class Update implements Iterable<NodePropertyUpdate>
{
    private final NodeLabelUpdate labels;
    private final List<NodePropertyUpdate> propertyUpdates = new ArrayList<>();

    Update( long nodeId, long[] labels )
    {
        // Scanned nodes have no previously known labels, hence the empty "before" array.
        this.labels = labelChanges( nodeId, EMPTY_LONG_ARRAY, labels );
    }

    void add( NodePropertyUpdate update )
    {
        this.propertyUpdates.add( update );
    }

    @Override
    public Iterator<NodePropertyUpdate> iterator()
    {
        return this.propertyUpdates.iterator();
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_xa_NeoStoreIndexStoreView.java
|
310
|
/**
 * Iterates over every property block of a node by lazily walking the node's
 * committed property record chain.
 */
private class PropertyBlockIterator extends PrefetchingIterator<PropertyBlock>
{
    private final Iterator<PropertyRecord> records;
    private Iterator<PropertyBlock> blocks = IteratorUtil.emptyIterator();

    PropertyBlockIterator( NodeRecord node )
    {
        long firstPropertyId = node.getCommittedNextProp();
        records = firstPropertyId == Record.NO_NEXT_PROPERTY.intValue()
                ? IteratorUtil.<PropertyRecord>emptyIterator()
                : propertyStore.getPropertyRecordChain( firstPropertyId ).iterator();
    }

    @Override
    protected PropertyBlock fetchNextOrNull()
    {
        while ( true )
        {
            if ( blocks.hasNext() )
            {
                return blocks.next();
            }
            if ( !records.hasNext() )
            {
                return null; // chain exhausted
            }
            // Current record's blocks exhausted: step to the next record.
            blocks = records.next().getPropertyBlocks().iterator();
        }
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_xa_NeoStoreIndexStoreView.java
|
311
|
/**
 * Base class for full node store scans: iterates all node ids, reads each
 * in-use node record while holding that node's read lock, then processes the
 * result after the lock is released.
 *
 * @param <RESULT> per-node result type produced by {@link #read}.
 * @param <FAILURE> exception type thrown by {@link #process}.
 */
private abstract class NodeStoreScan<RESULT, FAILURE extends Exception> implements StoreScan<FAILURE>
{
    // volatile: stop() may be called from another thread while run() loops.
    private volatile boolean continueScanning;

    /** Reads scan-relevant data from an in-use node; may return null to skip the node. */
    protected abstract RESULT read( NodeRecord node );

    /** Processes a non-null result produced by {@link #read}. */
    protected abstract void process( RESULT result ) throws FAILURE;

    @Override
    public void run() throws FAILURE
    {
        PrimitiveLongIterator nodeIds = new StoreIdIterator( nodeStore );
        continueScanning = true;
        while ( continueScanning && nodeIds.hasNext() )
        {
            long id = nodeIds.next();
            RESULT result = null;
            // Record is read under the node's read lock; the lock is released
            // (try-with-resources) before the result is processed.
            try ( Lock ignored = locks.acquireNodeLock( id, LockService.LockType.READ_LOCK ) )
            {
                NodeRecord record = nodeStore.forceGetRecord( id );
                if ( record.inUse() )
                {
                    result = read( record );
                }
            }
            if ( result != null )
            {
                process( result );
            }
        }
    }

    @Override
    public void stop()
    {
        continueScanning = false;
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_xa_NeoStoreIndexStoreView.java
|
312
|
{
    // Lazy Iterable: each call walks the node's property chain afresh.
    @Override
    public Iterator<PropertyBlock> iterator()
    {
        return new PropertyBlockIterator( node );
    }
};
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_xa_NeoStoreIndexStoreView.java
|
313
|
{
    @Override
    protected Update read( NodeRecord node )
    {
        long[] labels = parseLabelsField( node ).get( nodeStore );
        Update update = new Update( node.getId(), labels );
        // Nodes without any sought label still yield a (property-less) update
        // carrying just the label information.
        if ( !containsAnyLabel( labelIds, labels ) )
        {
            return update;
        }
        // Labeled continue: once a block matches one sought key, move on to
        // the next property block.
        properties: for ( PropertyBlock property : properties( node ) )
        {
            int propertyKeyId = property.getKeyIndexId();
            for ( int sought : propertyKeyIds )
            {
                if ( propertyKeyId == sought )
                {
                    update.add( NodePropertyUpdate
                            .add( node.getId(), propertyKeyId, valueOf( property ), labels ) );
                    continue properties;
                }
            }
        }
        return update;
    }
    @Override
    protected void process( Update update ) throws FAILURE
    {
        // Emit the label update first, then the per-property updates.
        labelUpdateVisitor.visit( update.labels );
        for ( NodePropertyUpdate propertyUpdate : update )
        {
            propertyUpdateVisitor.visit( propertyUpdate );
        }
    }
};
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_nioneo_xa_NeoStoreIndexStoreView.java
|
314
|
/**
 * Tests that {@link StoreRecoverer} detects the need for recovery based on
 * the presence of the active logical log marker file.
 */
public class TestStoreRecoverer
{
    @Test
    public void shouldNotWantToRecoverIntactStore() throws Exception
    {
        // FIX: removed the redundant 'File store = null' dead initialization.
        File store = createIntactStore();
        StoreRecoverer recoverer = new StoreRecoverer( fileSystem );
        assertThat( recoverer.recoveryNeededAt( store, new HashMap<String, String>() ), is( false ) );
    }

    @Test
    public void shouldWantToRecoverBrokenStore() throws Exception
    {
        File store = createIntactStore();
        // Deleting the active log marker makes the store look like it crashed.
        fileSystem.deleteFile( new File( store, "nioneo_logical.log.active" ) );
        StoreRecoverer recoverer = new StoreRecoverer( fileSystem );
        assertThat( recoverer.recoveryNeededAt( store, new HashMap<String, String>() ), is( true ) );
    }

    @Test
    public void shouldBeAbleToRecoverBrokenStore() throws Exception
    {
        File store = createIntactStore();
        fileSystem.deleteFile( new File( store, "nioneo_logical.log.active" ) );
        StoreRecoverer recoverer = new StoreRecoverer( fileSystem );
        assertThat( recoverer.recoveryNeededAt( store, new HashMap<String, String>() ), is( true ) );
        // Don't call recoverer.recover, because currently it's hard coded to start an embedded db
        new TestGraphDatabaseFactory().setFileSystem( fileSystem ).newImpermanentDatabase( store.getPath() ).shutdown();
        assertThat( recoverer.recoveryNeededAt( store, new HashMap<String, String>() ), is( false ) );
    }

    /** Creates a cleanly shut-down store in the "dir" directory. */
    private File createIntactStore() throws IOException
    {
        File storeDir = new File( "dir" );
        new TestGraphDatabaseFactory().setFileSystem( fileSystem ).newImpermanentDatabase( storeDir.getPath() ).shutdown();
        return storeDir;
    }

    private final EphemeralFileSystemAbstraction fileSystem = new EphemeralFileSystemAbstraction();
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_recovery_TestStoreRecoverer.java
|
315
|
/**
 * {@link UpgradeConfiguration} that consults a {@link Config} to decide
 * whether automatic store upgrades are permitted.
 */
public class ConfigMapUpgradeConfiguration implements UpgradeConfiguration
{
    // FIX: field is assigned only in the constructor — made final.
    private final Config config;

    public ConfigMapUpgradeConfiguration( Config config )
    {
        this.config = config;
    }

    /**
     * @throws UpgradeNotAllowedByConfigurationException if
     * {@code allow_store_upgrade} is not enabled in the configuration.
     */
    @Override
    public void checkConfigurationAllowsAutomaticUpgrade()
    {
        if ( !config.get( GraphDatabaseSettings.allow_store_upgrade ) )
        {
            throw new UpgradeNotAllowedByConfigurationException();
        }
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_storemigration_ConfigMapUpgradeConfiguration.java
|
316
|
/**
 * Tests for {@link ConfigMapUpgradeConfiguration}: upgrades must be rejected
 * unless {@code allow_store_upgrade} is explicitly set to {@code true}.
 */
public class ConfigMapUpgradeConfigurationTestIT
{
    @Test
    public void shouldNotAllowAutomaticUpgradeIfConfigParameterIsMissing()
    {
        Config config = defaultConfig();
        assertFalse( config.get( GraphDatabaseSettings.allow_store_upgrade ) );
        try
        {
            new ConfigMapUpgradeConfiguration( config ).checkConfigurationAllowsAutomaticUpgrade();
            fail( "Should throw exception" );
        }
        catch ( UpgradeNotAllowedByConfigurationException e )
        {
            // expected
        }
    }

    @Test
    public void shouldNotAllowAutomaticUpgradeIfConfigParameterIsFalse()
    {
        Config config = defaultConfig( stringMap( GraphDatabaseSettings.allow_store_upgrade.name(), "false" ) );
        try
        {
            new ConfigMapUpgradeConfiguration( config ).checkConfigurationAllowsAutomaticUpgrade();
            fail( "Should throw exception" );
        }
        catch ( UpgradeNotAllowedByConfigurationException e )
        {
            // expected
        }
    }

    @Test
    public void shouldAllowAutomaticUpgradeIfConfigParameterIsTrue()
    {
        // FIX: this test was a copy-paste duplicate of the "false" case —
        // it set the parameter to "false" and expected an exception despite
        // its name. With the parameter "true", the check must NOT throw.
        Config config = defaultConfig( stringMap( GraphDatabaseSettings.allow_store_upgrade.name(), "true" ) );
        new ConfigMapUpgradeConfiguration( config ).checkConfigurationAllowsAutomaticUpgrade();
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_storemigration_ConfigMapUpgradeConfigurationTestIT.java
|
317
|
/**
 * Minimal XA resource used by tests; any other {@code DummyXaResource}
 * instance is considered to belong to the same resource manager.
 */
static class DummyXaResource extends XaResourceHelpImpl
{
    DummyXaResource( XaResourceManager xaRm )
    {
        super( xaRm, null );
    }

    @Override
    public boolean isSameRM( XAResource resource )
    {
        return DummyXaResource.class.isInstance( resource );
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_transaction_DummyXaDataSource.java
|
318
|
/**
 * Tracks the overall health of the kernel. After {@link #panic(Throwable)}
 * has been called, {@link #assertHealthy(Class)} keeps throwing until
 * {@link #healed()} resets the state.
 */
public class KernelHealth
{
    // FIX: corrected spelling of "necessary" in the user-facing message.
    private static final String panicMessage = "Kernel has encountered some problem, "
            + "please perform necessary action (tx recovery/restart)";
    // Keep that cozy name for legacy purposes
    private volatile boolean tmOk = true; // TODO rather skip volatile if possible here.
    private final KernelPanicEventGenerator kpe;
    private final StringLogger log;
    private Throwable causeOfPanic;

    public KernelHealth( KernelPanicEventGenerator kpe, Logging logging )
    {
        this.kpe = kpe;
        this.log = logging.getMessagesLog( getClass() );
    }

    /**
     * Throws an exception of the given type, carrying the panic cause, if the
     * kernel has panicked; does nothing while healthy.
     *
     * @param panicDisguise exception type to throw; must declare a
     * (String, Throwable) or (String) constructor.
     */
    public <EXCEPTION extends Throwable> void assertHealthy( Class<EXCEPTION> panicDisguise )
            throws EXCEPTION
    {
        if ( !tmOk )
        {
            EXCEPTION exception = null;
            try
            {
                try
                {
                    exception = panicDisguise.getConstructor( String.class, Throwable.class )
                            .newInstance( panicMessage, causeOfPanic );
                }
                catch ( NoSuchMethodException e )
                {
                    // Fall back to a (String) constructor, attaching the cause manually.
                    exception = withCause( panicDisguise.getConstructor( String.class )
                            .newInstance( panicMessage ), causeOfPanic );
                }
            }
            catch ( Exception e )
            {
                // FIX: corrected spelling of "impossible" in the error message.
                throw new Error( panicMessage + ". An exception of type " + panicDisguise.getName() +
                        " was requested to be thrown but that proved impossible", e );
            }
            throw exception;
        }
    }

    /**
     * Marks the kernel as panicked with the given (non-null) cause. Calls made
     * while already panicked are ignored, preserving the original cause.
     */
    public void panic( Throwable cause )
    {
        if ( !tmOk )
        {
            return;
        }
        if ( cause == null )
        {
            throw new IllegalArgumentException( "Must provide a cause for the kernel panic" );
        }
        this.causeOfPanic = cause;
        this.tmOk = false;
        // Keeps the "setting TM not OK string for grep:ability
        log.error( "setting TM not OK. " + panicMessage, cause );
        kpe.generateEvent( ErrorState.TX_MANAGER_NOT_OK, causeOfPanic );
    }

    /** Clears the panic state, marking the kernel healthy again. */
    public void healed()
    {
        tmOk = true;
        causeOfPanic = null;
        log.info( "Kernel health set to OK" );
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_transaction_KernelHealth.java
|
319
|
/**
 * {@link TransactionManagerProvider} that exposes the JOTM-backed
 * transaction manager under the {@code NAME} service key.
 */
public static class Provider extends TransactionManagerProvider
{
    public Provider()
    {
        super( NAME );
    }
    @Override
    public AbstractTransactionManager loadTransactionManager(
            String txLogDir, XaDataSourceManager xaDataSourceManager, KernelPanicEventGenerator kpe,
            RemoteTxHook rollbackHook, StringLogger msgLog,
            FileSystemAbstraction fileSystem, TransactionStateFactory stateFactory )
    {
        // Only the data source manager and state factory are used by the
        // JOTM-backed manager; the other collaborators are ignored.
        return new JOTMTransactionManager( xaDataSourceManager, stateFactory );
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_transaction_JOTMTransactionManager.java
|
320
|
{
    // No-op: recovered XAResources are not handed back anywhere in this setup.
    @Override
    public void returnXAResource( String rmName, XAResource rmXares )
    {
    }
};
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_transaction_JOTMTransactionManager.java
|
321
|
/**
 * {@link AbstractTransactionManager} backed by a JOTM instance. Binds an RMI
 * registry on port 1099 (creating it if absent) during construction, and
 * delegates all JTA operations to the embedded JOTM transaction manager.
 * Test-only implementation.
 */
public class JOTMTransactionManager extends AbstractTransactionManager
{
    @Override
    public int getEventIdentifier()
    {
        return 0;
    }
    /** Service provider registering this manager under the {@code NAME} key. */
    public static class Provider extends TransactionManagerProvider
    {
        public Provider()
        {
            super( NAME );
        }
        @Override
        public AbstractTransactionManager loadTransactionManager(
                String txLogDir, XaDataSourceManager xaDataSourceManager, KernelPanicEventGenerator kpe,
                RemoteTxHook rollbackHook, StringLogger msgLog,
                FileSystemAbstraction fileSystem, TransactionStateFactory stateFactory )
        {
            return new JOTMTransactionManager( xaDataSourceManager, stateFactory );
        }
    }
    public static final String NAME = "JOTM";
    private final TransactionManager current;
    private final Jotm jotm;
    private final XaDataSourceManager xaDataSourceManager;
    // NOTE(review): entries are added in begin() but never appear to be
    // removed on commit/rollback — possible growth over time; confirm whether
    // this matters for the tests using this class.
    private final Map<Transaction, TransactionState> states = new HashMap<>();
    private final TransactionStateFactory stateFactory;
    private JOTMTransactionManager( XaDataSourceManager xaDataSourceManager, TransactionStateFactory stateFactory )
    {
        this.xaDataSourceManager = xaDataSourceManager;
        this.stateFactory = stateFactory;
        Registry registry = null;
        try
        {
            registry = LocateRegistry.getRegistry( 1099 );
        }
        catch ( RemoteException re )
        {
            // Nothing yet, we can still create it.
        }
        if ( registry == null )
        {
            try
            {
                LocateRegistry.createRegistry( 1099 );
            }
            catch ( RemoteException re )
            {
                // Something is fishy here, plus it is impossible to continue.
                // So we die.
                throw new Error( re );
            }
        }
        try
        {
            jotm = new Jotm( true, false );
            current = jotm.getTransactionManager();
        }
        catch ( NamingException ne )
        {
            throw new Error( "Error during JOTM creation", ne );
        }
    }
    /**
     * Starts the registry and binds a JOTM instance to it. Registers the
     * resource adapters declared by the neo data source manager to get ready
     * for possible recovery.
     */
    @Override
    public void init()
    {
    }
    @Override
    public void begin() throws NotSupportedException, SystemException
    {
        current.begin();
        // Track per-transaction state so getTransactionState() can find it.
        Transaction tx = getTransaction();
        states.put( tx, stateFactory.create( tx ) );
    }
    @Override
    public void commit() throws RollbackException, HeuristicMixedException,
            HeuristicRollbackException, SecurityException,
            IllegalStateException, SystemException
    {
        current.commit();
    }
    @Override
    public int getStatus() throws SystemException
    {
        return current.getStatus();
    }
    @Override
    public Transaction getTransaction() throws SystemException
    {
        if ( current == null )
        {
            return null;
        }
        return current.getTransaction();
    }
    @Override
    public void resume( Transaction arg0 ) throws InvalidTransactionException,
            IllegalStateException, SystemException
    {
        current.resume( arg0 );
    }
    @Override
    public void rollback() throws IllegalStateException, SecurityException,
            SystemException
    {
        current.rollback();
    }
    @Override
    public void setRollbackOnly() throws IllegalStateException, SystemException
    {
        current.setRollbackOnly();
    }
    @Override
    public void setTransactionTimeout( int arg0 ) throws SystemException
    {
        current.setTransactionTimeout( arg0 );
    }
    @Override
    public Transaction suspend() throws SystemException
    {
        return current.suspend();
    }
    @Override
    public void start() throws Throwable
    {
    }
    /**
     * Stops the JOTM instance.
     */
    @Override
    public void stop()
    {
        jotm.stop();
    }
    @Override
    public void shutdown() throws Throwable
    {
    }
    public Jotm getJotmTxManager()
    {
        return jotm;
    }
    @Override
    public void doRecovery() throws Throwable
    {
        // Register each data source's XAResource with JOTM's recovery
        // machinery, then kick off resource manager recovery.
        TransactionResourceManager trm = new TransactionResourceManager()
        {
            @Override
            public void returnXAResource( String rmName, XAResource rmXares )
            {
            }
        };
        try
        {
            for ( XaDataSource xaDs : xaDataSourceManager.getAllRegisteredDataSources() )
            {
                Current.getTransactionRecovery().registerResourceManager( xaDs.getName(),
                        xaDs.getXaConnection().getXaResource(), xaDs.getName(), trm );
            }
            Current.getTransactionRecovery().startResourceManagerRecovery();
        }
        catch ( XAException e )
        {
            throw new Error( "Error registering xa datasource", e );
        }
    }
    @Override
    public TransactionState getTransactionState()
    {
        try
        {
            TransactionState state = states.get( getTransaction() );
            return state != null ? state : TransactionState.NO_STATE;
        }
        catch ( SystemException e )
        {
            throw new RuntimeException( e );
        }
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_transaction_JOTMTransactionManager.java
|
322
|
/**
 * Runtime exception signalling that an illegal resource was encountered.
 * Mirrors the standard four {@link RuntimeException} constructors.
 */
public class IllegalResourceException extends RuntimeException
{
    /** Creates an exception with neither message nor cause. */
    public IllegalResourceException()
    {
    }

    /** Creates an exception carrying the given detail message. */
    public IllegalResourceException( String message )
    {
        super( message );
    }

    /** Creates an exception carrying both a detail message and a cause. */
    public IllegalResourceException( String message, Throwable cause )
    {
        super( message, cause );
    }

    /** Creates an exception carrying only a cause. */
    public IllegalResourceException( Throwable cause )
    {
        super( cause );
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_transaction_IllegalResourceException.java
|
323
|
/**
 * {@link FakeXAResource} that can be configured to throw from
 * {@code commit}, for testing failure handling in commit paths.
 */
public static class FailingFakeXAResource extends FakeXAResource
{
    // FIX: assigned only in the constructor — made final.
    private final boolean failInCommit;

    public FailingFakeXAResource( String name, boolean failInCommit )
    {
        super( name );
        this.failInCommit = failInCommit;
    }

    @Override
    public void commit( Xid xid, boolean onePhase )
    {
        if ( failInCommit )
        {
            throw new RuntimeException( "I was told to fail" );
        }
        super.commit( xid, onePhase );
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_transaction_FakeXAResource.java
|
324
|
/**
 * {@link XAResource} test double that records every XA method invocation as a
 * {@link MethodCall}, so tests can later inspect the call sequence via
 * {@link #getAndRemoveMethodCalls()}.
 */
public class FakeXAResource implements XAResource
{
    // FIX: name is assigned only in the constructor — made final
    // (previously 'private String name = null').
    private final String name;
    private int transactionTimeout = 0;
    private ArrayList<MethodCall> methodCalls = new ArrayList<>();

    public FakeXAResource( String name )
    {
        this.name = name;
    }

    public String getName()
    {
        return name;
    }

    @Override
    public String toString()
    {
        return name;
    }

    /** Returns all recorded calls and resets the internal call list. */
    synchronized MethodCall[] getAndRemoveMethodCalls()
    {
        if ( methodCalls.size() > 0 )
        {
            MethodCall[] methodCallArray = new MethodCall[methodCalls.size()];
            methodCallArray = methodCalls.toArray( methodCallArray );
            methodCalls = new ArrayList<>();
            return methodCallArray;
        }
        return new MethodCall[0];
    }

    private synchronized void addMethodCall( MethodCall methodCall )
    {
        methodCalls.add( methodCall );
    }

    @Override
    public void commit( Xid xid, boolean onePhase )
    {
        // FIX: Boolean.valueOf instead of the deprecated Boolean(boolean) ctor.
        addMethodCall( new MethodCall( "commit", new Object[] { xid,
                Boolean.valueOf( onePhase ) }, new String[] {
                "javax.transaction.xa.Xid", "java.lang.Boolean" } ) );
    }

    @Override
    public void end( Xid xid, int flags )
    {
        // FIX: Integer.valueOf instead of the deprecated Integer(int) ctor.
        addMethodCall( new MethodCall( "end", new Object[] { xid,
                Integer.valueOf( flags ) }, new String[] { "javax.transaction.xa.Xid",
                "java.lang.Integer" } ) );
    }

    @Override
    public void forget( Xid xid )
    {
        addMethodCall( new MethodCall( "forget", new Object[] { xid },
                new String[] { "javax.transaction.xa.Xid" } ) );
    }

    @Override
    public int getTransactionTimeout()
    {
        return transactionTimeout;
    }

    @Override
    public boolean setTransactionTimeout( int timeout )
    {
        transactionTimeout = timeout;
        return true;
    }

    @Override
    public boolean isSameRM( XAResource xares )
    {
        if ( xares instanceof FakeXAResource )
        {
            if ( this.name.equals( ((FakeXAResource) xares).getName() ) )
            {
                return true;
            }
        }
        return false;
    }

    @Override
    public int prepare( Xid xid )
    {
        addMethodCall( new MethodCall( "prepare", new Object[] { xid },
                new String[] { "javax.transaction.xa.Xid" } ) );
        return XAResource.XA_OK;
    }

    @Override
    public Xid[] recover( int flag )
    {
        addMethodCall( new MethodCall( "recover", new Object[] { Integer.valueOf(
                flag ) }, new String[] { "java.lang.Integer" } ) );
        return new Xid[0];
    }

    @Override
    public void rollback( Xid xid )
    {
        addMethodCall( new MethodCall( "rollback", new Object[] { xid },
                new String[] { "javax.transaction.xa.Xid" } ) );
    }

    @Override
    public void start( Xid xid, int flags )
    {
        addMethodCall( new MethodCall( "start", new Object[] { xid,
                Integer.valueOf( flags ) }, new String[] { "javax.transaction.xa.Xid",
                "java.lang.Integer" } ) );
    }

    /**
     * {@link FakeXAResource} that can be told to fail in {@code commit}.
     */
    public static class FailingFakeXAResource extends FakeXAResource
    {
        private final boolean failInCommit;

        public FailingFakeXAResource( String name, boolean failInCommit )
        {
            super( name );
            this.failInCommit = failInCommit;
        }

        @Override
        public void commit( Xid xid, boolean onePhase )
        {
            if ( failInCommit )
            {
                throw new RuntimeException( "I was told to fail" );
            }
            super.commit( xid, onePhase );
        }
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_transaction_FakeXAResource.java
|
325
|
{
    @Override
    public Boolean doWork( Boolean ignore )
    {
        try
        {
            // Resume the suspended JTA transaction in this worker thread.
            tm.resume( jtaTx );
            // Then
            return node.hasLabel( Labels.MY_LABEL );
        }
        catch ( Exception e )
        {
            throw new RuntimeException( e );
        }
        finally
        {
            // Suspend again so the originating thread can resume and close it.
            try
            {
                tm.suspend();
            }
            catch ( SystemException e )
            {
                throw new RuntimeException( e );
            }
        }
    }
} );
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_transaction_ExternalTransactionControlIT.java
|
326
|
/**
 * Integration tests for controlling Neo4j transactions through the external
 * JTA {@link TransactionManager}: suspend/resume, direct begin/commit, and
 * moving a transaction between threads.
 */
public class ExternalTransactionControlIT
{
    public @Rule
    ImpermanentDatabaseRule dbRule = new ImpermanentDatabaseRule();
    private enum Labels implements Label
    {
        MY_LABEL;
    }
    @Test
    public void shouldAllowSuspendingAndResumingTransactions() throws Exception
    {
        // Given
        //noinspection deprecation
        GraphDatabaseAPI db = dbRule.getGraphDatabaseAPI();
        TransactionManager tm = db.getDependencyResolver().resolveDependency( TransactionManager.class );
        Node node = createNode();
        // And that I have added a label to a node in a transaction
        try ( org.neo4j.graphdb.Transaction ignored = db.beginTx() )
        {
            node.addLabel( Labels.MY_LABEL );
            // When: suspending detaches the tx from this thread, so its
            // uncommitted changes must not be visible from a fresh tx
            Transaction jtaTx = tm.suspend();
            // Then
            assertThat(node, inTx(db, not( hasLabel( Labels.MY_LABEL ) )));
            // And when
            tm.resume( jtaTx );
            // Then
            assertTrue("The label should be visible when I've resumed the transaction.", node.hasLabel( Labels.MY_LABEL ));
        }
    }
    @Test
    public void shouldBeAbleToUseJTATransactionManagerForTxManagement() throws Exception
    {
        // Given
        //noinspection deprecation
        GraphDatabaseAPI db = dbRule.getGraphDatabaseAPI();
        TransactionManager tm = db.getDependencyResolver().resolveDependency( TransactionManager.class );
        // When: tx lifecycle driven entirely through the JTA API
        tm.begin();
        Node node = db.createNode();
        node.addLabel( Labels.MY_LABEL );
        tm.commit();
        // Then
        assertThat(node, inTx(db, hasLabel( Labels.MY_LABEL )));
    }
    @Test
    public void shouldBeAbleToMoveTransactionToAnotherThread() throws Exception
    {
        // Given
        //noinspection deprecation
        GraphDatabaseAPI db = dbRule.getGraphDatabaseAPI();
        final TransactionManager tm = db.getDependencyResolver().resolveDependency( TransactionManager.class );
        final Node node = createNode();
        // And that I have added a label to a node in a transaction
        try ( org.neo4j.graphdb.Transaction ignored = db.beginTx() )
        {
            node.addLabel( Labels.MY_LABEL );
            // And that I suspend the transaction in this thread
            final Transaction jtaTx = tm.suspend();
            boolean result;
            try
            {
                // When: resume the tx on a different thread and read from it there
                OtherThreadExecutor<Boolean> otherThread = new OtherThreadExecutor<>( "Thread to resume tx in", null );
                result = otherThread.execute( new OtherThreadExecutor.WorkerCommand<Boolean, Boolean>()
                {
                    @Override
                    public Boolean doWork( Boolean ignore )
                    {
                        try
                        {
                            tm.resume( jtaTx );
                            // Then
                            return node.hasLabel( Labels.MY_LABEL );
                        }
                        catch ( Exception e )
                        {
                            throw new RuntimeException( e );
                        }
                        finally
                        {
                            // Suspend again so the owner thread can resume it below.
                            try
                            {
                                tm.suspend();
                            }
                            catch ( SystemException e )
                            {
                                throw new RuntimeException( e );
                            }
                        }
                    }
                } );
            }
            finally
            {
                // Need to resume this transaction so that we can close it cleanly.
                tm.resume( jtaTx );
            }
            // Then
            assertTrue("The label should be visible when I've resumed the transaction.", result);
        }
    }
    /** Creates and returns a fresh node in its own committed transaction. */
    private Node createNode()
    {
        GraphDatabaseService db = dbRule.getGraphDatabaseService();
        try ( org.neo4j.graphdb.Transaction tx = db.beginTx() )
        {
            Node node = db.createNode();
            tx.success();
            return node;
        }
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_transaction_ExternalTransactionControlIT.java
|
327
|
static class DummyXaConnection extends XaConnectionHelpImpl
{
private XAResource xaResource = null;
public DummyXaConnection( XaResourceManager xaRm )
{
super( xaRm );
xaResource = new DummyXaResource( xaRm );
}
@Override
public XAResource getXaResource()
{
return xaResource;
}
public void doStuff1() throws XAException
{
validate();
getTransaction().addCommand( new DummyCommand( 1 ) );
}
public void doStuff2() throws XAException
{
validate();
getTransaction().addCommand( new DummyCommand( 2 ) );
}
public void enlistWithTx( TransactionManager tm ) throws Exception
{
tm.getTransaction().enlistResource( xaResource );
}
public void delistFromTx( TransactionManager tm ) throws Exception
{
tm.getTransaction().delistResource( xaResource,
XAResource.TMSUCCESS );
}
public int getTransactionId() throws Exception
{
return getTransaction().getIdentifier();
}
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_transaction_DummyXaDataSource.java
|
328
|
public class LockException extends RuntimeException
{
public LockException()
{
super();
}
public LockException( String message )
{
super( message );
}
public LockException( String message, Throwable cause )
{
super( message, cause );
}
public LockException( Throwable cause )
{
super( cause );
}
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_transaction_LockException.java
|
329
|
static class DummyTransactionFactory extends XaTransactionFactory
{
@Override
public XaTransaction create( long lastCommittedTxWhenTransactionStarted, TransactionState state )
{
return new DummyTransaction( getLogicalLog(), state );
}
@Override
public void flushAll()
{
}
@Override
public long getAndSetNewVersion()
{
return 0;
}
@Override
public long getCurrentVersion()
{
return 0;
}
@Override
public void setVersion( long version )
{
}
@Override
public long getLastCommittedTx()
{
return 0;
}
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_transaction_DummyXaDataSource.java
|
330
|
private static class DummyTransaction extends XaTransaction
{
private final java.util.List<XaCommand> commandList = new java.util.ArrayList<XaCommand>();
public DummyTransaction( XaLogicalLog log, TransactionState state )
{
super( log, state );
setCommitTxId( 0 );
}
@Override
public void doAddCommand( XaCommand command )
{
commandList.add( command );
}
@Override
public void doPrepare()
{
}
@Override
public void doRollback()
{
}
@Override
public void doCommit()
{
}
@Override
public boolean isReadOnly()
{
return false;
}
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_transaction_DummyXaDataSource.java
|
331
|
static class DummyCommandFactory extends XaCommandFactory
{
@Override
public XaCommand readCommand( ReadableByteChannel byteChannel,
ByteBuffer buffer ) throws IOException
{
buffer.clear();
buffer.limit( 4 );
if ( byteChannel.read( buffer ) == 4 )
{
buffer.flip();
return new DummyCommand( buffer.getInt() );
}
return null;
}
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_transaction_DummyXaDataSource.java
|
332
|
private static class DummyCommand extends XaCommand
{
private int type = -1;
DummyCommand( int type )
{
this.type = type;
}
@Override
public void execute()
{
}
// public void writeToFile( FileChannel fileChannel, ByteBuffer buffer )
// throws IOException
@Override
public void writeToFile( LogBuffer buffer ) throws IOException
{
// buffer.clear();
buffer.putInt( type );
// buffer.flip();
// fileChannel.write( buffer );
}
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_transaction_DummyXaDataSource.java
|
333
|
{
@Override
public <T> T resolveDependency( Class<T> type, SelectionStrategy selector )
{
return type.cast( new Config( MapUtil.stringMap(
GraphDatabaseSettings.intercept_committing_transactions.name(),
Settings.FALSE,
GraphDatabaseSettings.intercept_deserialized_transactions.name(),
Settings.FALSE
) ) );
}
} ), false );
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_transaction_DummyXaDataSource.java
|
334
|
public class DummyXaDataSource extends XaDataSource
{
private XaContainer xaContainer = null;
public DummyXaDataSource( byte[] branchId, String name, XaFactory xaFactory,
TransactionStateFactory stateFactory, File logFile ) throws InstantiationException
{
super( branchId, name );
try
{
xaContainer = xaFactory.newXaContainer( this, logFile, new DummyCommandFactory(),
ALLOW_ALL, new DummyTransactionFactory(), stateFactory, new TransactionInterceptorProviders(
Iterables.<TransactionInterceptorProvider>empty(),
new DependencyResolver.Adapter()
{
@Override
public <T> T resolveDependency( Class<T> type, SelectionStrategy selector )
{
return type.cast( new Config( MapUtil.stringMap(
GraphDatabaseSettings.intercept_committing_transactions.name(),
Settings.FALSE,
GraphDatabaseSettings.intercept_deserialized_transactions.name(),
Settings.FALSE
) ) );
}
} ), false );
xaContainer.openLogicalLog();
}
catch ( IOException e )
{
throw new InstantiationException( "" + e );
}
}
@Override
public void init()
{
}
@Override
public void start()
{
}
@Override
public void stop()
{
xaContainer.close();
// cleanup dummy resource log
// deleteAllResourceFiles();
}
@Override
public void shutdown()
{
}
@Override
public XaConnection getXaConnection()
{
return new DummyXaConnection( xaContainer.getResourceManager() );
}
@Override
public long getLastCommittedTxId()
{
return 0;
}
static class DummyXaResource extends XaResourceHelpImpl
{
DummyXaResource( XaResourceManager xaRm )
{
super( xaRm, null );
}
@Override
public boolean isSameRM( XAResource resource )
{
return resource instanceof DummyXaResource;
}
}
static class DummyXaConnection extends XaConnectionHelpImpl
{
private XAResource xaResource = null;
public DummyXaConnection( XaResourceManager xaRm )
{
super( xaRm );
xaResource = new DummyXaResource( xaRm );
}
@Override
public XAResource getXaResource()
{
return xaResource;
}
public void doStuff1() throws XAException
{
validate();
getTransaction().addCommand( new DummyCommand( 1 ) );
}
public void doStuff2() throws XAException
{
validate();
getTransaction().addCommand( new DummyCommand( 2 ) );
}
public void enlistWithTx( TransactionManager tm ) throws Exception
{
tm.getTransaction().enlistResource( xaResource );
}
public void delistFromTx( TransactionManager tm ) throws Exception
{
tm.getTransaction().delistResource( xaResource,
XAResource.TMSUCCESS );
}
public int getTransactionId() throws Exception
{
return getTransaction().getIdentifier();
}
}
private static class DummyCommand extends XaCommand
{
private int type = -1;
DummyCommand( int type )
{
this.type = type;
}
@Override
public void execute()
{
}
// public void writeToFile( FileChannel fileChannel, ByteBuffer buffer )
// throws IOException
@Override
public void writeToFile( LogBuffer buffer ) throws IOException
{
// buffer.clear();
buffer.putInt( type );
// buffer.flip();
// fileChannel.write( buffer );
}
}
static class DummyCommandFactory extends XaCommandFactory
{
@Override
public XaCommand readCommand( ReadableByteChannel byteChannel,
ByteBuffer buffer ) throws IOException
{
buffer.clear();
buffer.limit( 4 );
if ( byteChannel.read( buffer ) == 4 )
{
buffer.flip();
return new DummyCommand( buffer.getInt() );
}
return null;
}
}
private static class DummyTransaction extends XaTransaction
{
private final java.util.List<XaCommand> commandList = new java.util.ArrayList<XaCommand>();
public DummyTransaction( XaLogicalLog log, TransactionState state )
{
super( log, state );
setCommitTxId( 0 );
}
@Override
public void doAddCommand( XaCommand command )
{
commandList.add( command );
}
@Override
public void doPrepare()
{
}
@Override
public void doRollback()
{
}
@Override
public void doCommit()
{
}
@Override
public boolean isReadOnly()
{
return false;
}
}
static class DummyTransactionFactory extends XaTransactionFactory
{
@Override
public XaTransaction create( long lastCommittedTxWhenTransactionStarted, TransactionState state )
{
return new DummyTransaction( getLogicalLog(), state );
}
@Override
public void flushAll()
{
}
@Override
public long getAndSetNewVersion()
{
return 0;
}
@Override
public long getCurrentVersion()
{
return 0;
}
@Override
public void setVersion( long version )
{
}
@Override
public long getLastCommittedTx()
{
return 0;
}
}
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_transaction_DummyXaDataSource.java
|
335
|
public class DoSomeTransactionsThenWait
{
public static void main( String[] args ) throws Exception
{
String storeDir = args[0];
GraphDatabaseService db = new GraphDatabaseFactory().newEmbeddedDatabase( storeDir );
int count = Integer.parseInt( args[1] );
for ( int i = 0; i < count; i++ )
{
Transaction tx = db.beginTx();
try
{
db.createNode();
tx.success();
}
finally
{
tx.finish();
}
}
touch( storeDir, "done" );
while ( true ) Thread.sleep( 1000 );
}
private static void touch( String storeDir, String name ) throws Exception
{
new File( storeDir, name ).createNewFile();
}
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_transaction_DoSomeTransactionsThenWait.java
|
336
|
class Adapter implements DataSourceRegistrationListener
{
@Override
public void registeredDataSource( XaDataSource ds )
{
}
@Override
public void unregisteredDataSource( XaDataSource ds )
{
}
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_transaction_DataSourceRegistrationListener.java
|
337
|
public class CommitNotificationFailedException extends RuntimeException
{
public CommitNotificationFailedException( Throwable cause )
{
super( cause );
}
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_transaction_CommitNotificationFailedException.java
|
338
|
public class KernelHealthTest
{
@Test
public void shouldGenerateKernelPanicEvents() throws Exception
{
// GIVEN
KernelPanicEventGenerator generator = mock( KernelPanicEventGenerator.class );
KernelHealth kernelHealth = new KernelHealth( generator, new SingleLoggingService( DEV_NULL ) );
kernelHealth.healed();
// WHEN
Exception cause = new Exception( "My own fault" );
kernelHealth.panic( cause );
kernelHealth.panic( cause );
// THEN
verify( generator, times( 1 ) ).generateEvent( TX_MANAGER_NOT_OK, cause );
}
@Test
public void shouldLogKernelPanicEvent() throws Exception
{
// GIVEN
BufferingLogging logging = new BufferingLogging();
KernelHealth kernelHealth = new KernelHealth( mock( KernelPanicEventGenerator.class ), logging );
kernelHealth.healed();
// WHEN
String message = "Listen everybody... panic!";
kernelHealth.panic( new Exception( message ) );
// THEN
assertThat( logging.toString(), containsString( message ) );
}
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_transaction_KernelHealthTest.java
|
339
|
public class LockManagerImpl implements LockManager
{
private final Map<Object,RWLock> resourceLockMap = new HashMap<>();
private final RagManager ragManager;
public LockManagerImpl( RagManager ragManager )
{
this.ragManager = ragManager;
}
@Override
public long getDetectedDeadlockCount()
{
return ragManager.getDeadlockCount();
}
@Override
public void getReadLock( Object resource, Transaction tx )
throws DeadlockDetectedException, IllegalResourceException
{
getRWLockForAcquiring( resource, tx ).acquireReadLock( tx );
}
@Override
public boolean tryReadLock( Object resource, Transaction tx )
throws IllegalResourceException
{
return getRWLockForAcquiring( resource, tx ).tryAcquireReadLock( tx );
}
@Override
public void getWriteLock( Object resource, Transaction tx )
throws DeadlockDetectedException, IllegalResourceException
{
getRWLockForAcquiring( resource, tx ).acquireWriteLock( tx );
}
@Override
public boolean tryWriteLock( Object resource, Transaction tx )
throws IllegalResourceException
{
return getRWLockForAcquiring( resource, tx ).tryAcquireWriteLock( tx );
}
@Override
public void releaseReadLock( Object resource, Transaction tx )
throws LockNotFoundException, IllegalResourceException
{
getRWLockForReleasing( resource, tx, 1, 0 ).releaseReadLock( tx );
}
@Override
public void releaseWriteLock( Object resource, Transaction tx )
throws LockNotFoundException, IllegalResourceException
{
getRWLockForReleasing( resource, tx, 0, 1 ).releaseWriteLock( tx );
}
@Override
public void dumpLocksOnResource( Object resource, Logging logging )
{
StringLogger logger = logging.getMessagesLog( LockManager.class );
RWLock lock;
synchronized ( resourceLockMap )
{
if ( !resourceLockMap.containsKey( resource ) )
{
logger.info( "No locks on " + resource );
return;
}
lock = resourceLockMap.get( resource );
}
logger.logLongMessage( "Dump locks on resource " + resource, lock );
}
@Override
public List<LockInfo> getAllLocks()
{
return eachLock( new ListAppendingVisitor() ).result;
}
@Override
public List<LockInfo> getAwaitedLocks( long minWaitTime )
{
return eachAwaitedLock( new ListAppendingVisitor(), minWaitTime ).result;
}
/**
* Visit all locks.
*
* The supplied visitor may not block.
*
* @param visitor visitor for visiting each lock.
*/
private <V extends Visitor<LockInfo, RuntimeException>> V eachLock( V visitor )
{
synchronized ( resourceLockMap )
{
for ( RWLock lock : resourceLockMap.values() )
{
if ( visitor.visit( lock.info() ) )
{
break;
}
}
}
return visitor;
}
/**
* Visit all locks that some thread has been waiting for at least the
* supplied number of milliseconds.
*
* The supplied visitor may not block.
*
* @param visitor visitor for visiting each lock that has had a thread
* waiting at least the specified time.
* @param minWaitTime the number of milliseconds a thread should have waited
* on a lock for it to be visited.
*/
private <V extends Visitor<LockInfo, RuntimeException>> V eachAwaitedLock( V visitor, long minWaitTime )
{
long waitStart = System.currentTimeMillis() - minWaitTime;
synchronized ( resourceLockMap )
{
for ( RWLock lock : resourceLockMap.values() )
{
if ( lock.acceptVisitorIfWaitedSinceBefore( visitor, waitStart ) )
{
break;
}
}
}
return visitor;
}
@Override
public void dumpRagStack( Logging logging )
{
logging.getMessagesLog( getClass() ).logLongMessage( "RAG stack", ragManager );
}
@Override
public void dumpAllLocks( Logging logging )
{
DumpVisitor dump = new DumpVisitor( logging );
eachLock( dump );
dump.done();
}
private void assertValidArguments( Object resource, Transaction tx )
{
if ( resource == null || tx == null )
{
throw new IllegalResourceException( "Null parameter" );
}
}
private RWLock getRWLockForAcquiring( Object resource, Transaction tx )
{
assertValidArguments( resource, tx );
synchronized ( resourceLockMap )
{
RWLock lock = resourceLockMap.get( resource );
if ( lock == null )
{
lock = new RWLock( resource, ragManager );
resourceLockMap.put( resource, lock );
}
lock.mark();
return lock;
}
}
private RWLock getRWLockForReleasing( Object resource, Transaction tx, int readCountPrerequisite,
int writeCountPrerequisite )
{
assertValidArguments( resource, tx );
synchronized ( resourceLockMap )
{
RWLock lock = resourceLockMap.get( resource );
if ( lock == null )
{
throw new LockNotFoundException( "Lock not found for: "
+ resource + " tx:" + tx );
}
if ( !lock.isMarked() && lock.getReadCount() == readCountPrerequisite &&
lock.getWriteCount() == writeCountPrerequisite &&
lock.getWaitingThreadsCount() == 0 )
{
resourceLockMap.remove( resource );
}
return lock;
}
}
private static class ListAppendingVisitor implements Visitor<LockInfo, RuntimeException>
{
private final List<LockInfo> result = new ArrayList<>();
@Override
public boolean visit( LockInfo element )
{
result.add( element );
return false;
}
}
private static class DumpVisitor implements Visitor<LockInfo, RuntimeException>
{
private final StringLogger logger;
DumpVisitor( Logging logging )
{
logger = logging.getMessagesLog( LockManager.class );
}
int emptyLockCount = 0;
@Override
public boolean visit( LockInfo lock )
{
if ( lock.getWriteCount() > 0 || lock.getReadCount() > 0 )
{
dumpStack( lock );
}
else
{
if ( lock.getWaitingThreadsCount() > 0 )
{
dumpStack( lock );
}
emptyLockCount++;
}
return false;
}
private void dumpStack( LockInfo lock )
{
logger.info( "Total lock count: readCount=" + lock.getReadCount() + " writeCount="
+ lock.getWriteCount() + " for "
+ lock.getResourceType().toString( lock.getResourceId() ) );
logger.info( "Waiting list:" );
StringBuilder waitlist = new StringBuilder();
String sep = "";
for ( WaitingThread we : lock.getWaitingThreads() )
{
waitlist.append( sep ).append( "[tid=" ).append( we.getThreadId() ).append( "(" ).append(
we.getReadCount() ).append( "r," ).append( we.getWriteCount() ).append( "w )," ).append(
we.isWaitingOnWriteLock() ? "Write" : "Read" ).append( "Lock]" );
sep = ", ";
}
logger.info( waitlist.toString() );
for ( LockingTransaction tle : lock.getLockingTransactions() )
{
logger.info( "" + tle.getTransaction() + "(" + tle.getReadCount() + "r," + tle.getWriteCount()
+ "w)" );
}
}
void done()
{
if ( emptyLockCount > 0 )
{
logger.info( "There are " + emptyLockCount + " empty locks" );
}
else
{
logger.info( "There are no empty locks" );
}
}
}
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_transaction_LockManagerImpl.java
|
340
|
public class CurrentDatabase
{
private final StoreVersionCheck storeVersionCheck;
private static final Map<String, String> fileNamesToTypeDescriptors = new HashMap<String, String>();
static
{
fileNamesToTypeDescriptors.put( NeoStore.DEFAULT_NAME, NeoStore.TYPE_DESCRIPTOR );
fileNamesToTypeDescriptors.put( "neostore.nodestore.db", NodeStore.TYPE_DESCRIPTOR );
fileNamesToTypeDescriptors.put( "neostore.propertystore.db", PropertyStore.TYPE_DESCRIPTOR );
fileNamesToTypeDescriptors.put( "neostore.propertystore.db.arrays", DynamicArrayStore.TYPE_DESCRIPTOR );
fileNamesToTypeDescriptors.put( "neostore.propertystore.db.index", PropertyKeyTokenStore.TYPE_DESCRIPTOR );
fileNamesToTypeDescriptors.put( "neostore.propertystore.db.index.keys", DynamicStringStore.TYPE_DESCRIPTOR );
fileNamesToTypeDescriptors.put( "neostore.propertystore.db.strings", DynamicStringStore.TYPE_DESCRIPTOR );
fileNamesToTypeDescriptors.put( "neostore.relationshipstore.db", RelationshipStore.TYPE_DESCRIPTOR );
fileNamesToTypeDescriptors.put( "neostore.relationshiptypestore.db", RelationshipTypeTokenStore.TYPE_DESCRIPTOR );
fileNamesToTypeDescriptors.put( "neostore.relationshiptypestore.db.names", DynamicStringStore.TYPE_DESCRIPTOR );
}
public CurrentDatabase(StoreVersionCheck storeVersionCheck)
{
this.storeVersionCheck = storeVersionCheck;
}
public boolean storeFilesAtCurrentVersion( File storeDirectory )
{
for ( String fileName : fileNamesToTypeDescriptors.keySet() )
{
String expectedVersion = buildTypeDescriptorAndVersion( fileNamesToTypeDescriptors.get( fileName ) );
if ( !storeVersionCheck.hasVersion(
new File( storeDirectory, fileName ), expectedVersion ).first().isSuccessful() )
{
return false;
}
}
return true;
}
public static Collection<String> fileNames()
{
return fileNamesToTypeDescriptors.keySet();
}
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_storemigration_CurrentDatabase.java
|
341
|
private abstract static class AcquireLockCommand implements WorkerCommand<LockWorkerState, Void>
{
@Override
public Void doWork( LockWorkerState state )
{
try
{
acquireLock( state );
state.deadlockOnLastWait = false;
}
catch ( DeadlockDetectedException e )
{
state.deadlockOnLastWait = true;
}
return null;
}
protected abstract void acquireLock( LockWorkerState state );
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_transaction_LockWorker.java
|
342
|
private static class TestEmbeddedGraphDatabase extends EmbeddedGraphDatabase
{
public TestEmbeddedGraphDatabase( String storeDir, Map<String, String> params )
{
super( storeDir,
params,
dependencies() );
}
private static Dependencies dependencies()
{
GraphDatabaseFactoryState state = new GraphDatabaseFactoryState();
state.addKernelExtensions( Arrays.asList(
new InMemoryIndexProviderFactory(),
new InMemoryLabelScanStoreExtension() ) );
state.setCacheProviders( Arrays.<CacheProvider>asList( new SoftCacheProvider() ) );
state.setTransactionInterceptorProviders( Iterables.<TransactionInterceptorProvider>empty() );
return state.databaseDependencies();
}
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_transaction_PartialTransactionFailureIT.java
|
343
|
{
@Override
public void run()
{
Transaction tx = db.beginTx();
try
{
x.createRelationshipTo( y, DynamicRelationshipType.withName( "r" ) );
tx.success();
latch.await();
tx.finish();
}
catch ( Exception ignore )
{
// We don't care about our transactions failing, as long as we
// can recover our database to a consistent state.
}
}
};
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_transaction_PartialTransactionFailureIT.java
|
344
|
final EmbeddedGraphDatabase db = new TestEmbeddedGraphDatabase( storeDir, params ) {
@Override
protected FileSystemAbstraction createFileSystemAbstraction()
{
return new AdversarialFileSystemAbstraction( adversary );
}
};
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_transaction_PartialTransactionFailureIT.java
|
345
|
public class PartialTransactionFailureIT
{
@Rule
public TargetDirectory.TestDirectory dir =
TargetDirectory.testDirForTest( PartialTransactionFailureIT.class );
@Test
public void concurrentlyCommittingTransactionsMustNotRotateOutLoggedCommandsOfFailingTransaction()
throws Exception
{
final ClassGuardedAdversary adversary = new ClassGuardedAdversary(
new CountingAdversary( 1, false ),
"org.neo4j.kernel.impl.nioneo.xa.Command$RelationshipCommand" );
adversary.disable();
Map<String, String> params = stringMap(
"logical_log_rotation_threshold", "1",
"use_memory_mapped_buffers", "false");
String storeDir = dir.directory().getAbsolutePath();
final EmbeddedGraphDatabase db = new TestEmbeddedGraphDatabase( storeDir, params ) {
@Override
protected FileSystemAbstraction createFileSystemAbstraction()
{
return new AdversarialFileSystemAbstraction( adversary );
}
};
Node a, b, c, d;
Transaction tx = db.beginTx();
try
{
a = db.createNode();
b = db.createNode();
c = db.createNode();
d = db.createNode();
tx.success();
}
finally
{
tx.finish();
}
adversary.enable();
CountDownLatch latch = new CountDownLatch( 1 );
Thread t1 = new Thread( createRelationship( db, a, b, latch ) );
Thread t2 = new Thread( createRelationship( db, c, d, latch ) );
t1.start();
t2.start();
// Wait for both threads to get going
t1.join( 10 );
t2.join( 10 );
latch.countDown();
// Wait for the transactions to finish
t1.join( 25000 );
t2.join( 25000 );
db.shutdown();
// We should observe the store in a consistent state
EmbeddedGraphDatabase db2 = new TestEmbeddedGraphDatabase( storeDir, stringMap() );
tx = db2.beginTx();
try
{
Node x = db2.getNodeById( a.getId() );
Node y = db2.getNodeById( b.getId() );
Node z = db2.getNodeById( c.getId() );
Node w = db2.getNodeById( d.getId() );
Iterator<Relationship> itrRelX = x.getRelationships().iterator();
Iterator<Relationship> itrRelY = y.getRelationships().iterator();
Iterator<Relationship> itrRelZ = z.getRelationships().iterator();
Iterator<Relationship> itrRelW = w.getRelationships().iterator();
if ( itrRelX.hasNext() != itrRelY.hasNext() )
{
fail( "Node x and y have inconsistent relationship counts" );
}
else if ( itrRelX.hasNext() )
{
Relationship rel = itrRelX.next();
assertEquals( rel, itrRelY.next() );
assertFalse( itrRelX.hasNext() );
assertFalse( itrRelY.hasNext() );
}
if ( itrRelZ.hasNext() != itrRelW.hasNext() )
{
fail( "Node z and w have inconsistent relationship counts" );
}
else if ( itrRelZ.hasNext() )
{
Relationship rel = itrRelZ.next();
assertEquals( rel, itrRelW.next() );
assertFalse( itrRelZ.hasNext() );
assertFalse( itrRelW.hasNext() );
}
}
finally
{
try
{
tx.finish();
}
finally
{
db2.shutdown();
}
}
}
private Runnable createRelationship(
final EmbeddedGraphDatabase db,
final Node x,
final Node y,
final CountDownLatch latch )
{
return new Runnable()
{
@Override
public void run()
{
Transaction tx = db.beginTx();
try
{
x.createRelationshipTo( y, DynamicRelationshipType.withName( "r" ) );
tx.success();
latch.await();
tx.finish();
}
catch ( Exception ignore )
{
// We don't care about our transactions failing, as long as we
// can recover our database to a consistent state.
}
}
};
}
private static class TestEmbeddedGraphDatabase extends EmbeddedGraphDatabase
{
public TestEmbeddedGraphDatabase( String storeDir, Map<String, String> params )
{
super( storeDir,
params,
dependencies() );
}
private static Dependencies dependencies()
{
GraphDatabaseFactoryState state = new GraphDatabaseFactoryState();
state.addKernelExtensions( Arrays.asList(
new InMemoryIndexProviderFactory(),
new InMemoryLabelScanStoreExtension() ) );
state.setCacheProviders( Arrays.<CacheProvider>asList( new SoftCacheProvider() ) );
state.setTransactionInterceptorProviders( Iterables.<TransactionInterceptorProvider>empty() );
return state.databaseDependencies();
}
}
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_transaction_PartialTransactionFailureIT.java
|
346
|
private static class OtherDummyXaConnection implements XaConnection
{
private XAResource xaResource = null;
public OtherDummyXaConnection( XAResource xaResource )
{
this.xaResource = xaResource;
}
@Override
public XAResource getXaResource()
{
return xaResource;
}
@Override
public void destroy()
{
}
@Override
public boolean enlistResource( Transaction javaxTx )
throws SystemException, RollbackException
{
return javaxTx.enlistResource( xaResource );
}
@Override
public boolean delistResource( Transaction tx, int tmsuccess )
throws IllegalStateException, SystemException
{
return tx.delistResource( xaResource, tmsuccess );
}
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_transaction_OtherDummyXaDataSource.java
|
347
|
public class OtherDummyXaDataSource extends XaDataSource
{
private XAResource xaResource = null;
public OtherDummyXaDataSource( String name, byte[] branchId, XAResource xaResource )
{
super( branchId, name );
this.xaResource = xaResource;
}
@Override
public XaConnection getXaConnection()
{
return new OtherDummyXaConnection( xaResource );
}
@Override
public long getLastCommittedTxId()
{
return 0;
}
@Override
public void init()
{
}
@Override
public void start()
{
}
@Override
public void stop()
{
}
@Override
public void shutdown()
{
}
private static class OtherDummyXaConnection implements XaConnection
{
private XAResource xaResource = null;
public OtherDummyXaConnection( XAResource xaResource )
{
this.xaResource = xaResource;
}
@Override
public XAResource getXaResource()
{
return xaResource;
}
@Override
public void destroy()
{
}
@Override
public boolean enlistResource( Transaction javaxTx )
throws SystemException, RollbackException
{
return javaxTx.enlistResource( xaResource );
}
@Override
public boolean delistResource( Transaction tx, int tmsuccess )
throws IllegalStateException, SystemException
{
return tx.delistResource( xaResource, tmsuccess );
}
}
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_transaction_OtherDummyXaDataSource.java
|
348
|
class MethodCall
{
private String methodName = null;
private Object args[] = null;
private String signatures[] = null;
MethodCall( String methodName, Object args[], String signatures[] )
{
if ( args.length != signatures.length )
{
throw new IllegalArgumentException(
"Args length not equal to signatures length." );
}
this.methodName = methodName;
this.args = args;
this.signatures = signatures;
}
String getMethodName()
{
return methodName;
}
Object[] getArgs()
{
return args;
}
String[] getSignatures()
{
return signatures;
}
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_transaction_MethodCall.java
|
349
|
class LockWorkerState
{
final LockManager grabber;
volatile boolean deadlockOnLastWait;
final List<String> completedOperations = new ArrayList<String>();
String doing;
final Transaction tx = mock( Transaction.class );
public LockWorkerState( LockManager grabber )
{
this.grabber = grabber;
}
public void doing( String doing )
{
this.doing = doing;
}
public void done()
{
this.completedOperations.add( this.doing );
this.doing = null;
}
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_transaction_LockWorkerState.java
|
350
|
public abstract class XaConnectionHelpImpl implements XaConnection
{
private final XaResourceManager xaRm;
public XaConnectionHelpImpl( XaResourceManager xaRm )
{
if ( xaRm == null )
{
throw new IllegalArgumentException( "XaResourceManager is null" );
}
this.xaRm = xaRm;
}
/**
* Returns the XAResource associated with this connection.
*
* @return The XAResource for this connection
*/
@Override
public abstract XAResource getXaResource();
@Override
public boolean enlistResource( Transaction javaxTx )
throws SystemException, RollbackException
{
return javaxTx.enlistResource( getXaResource() );
}
@Override
public boolean delistResource( Transaction tx, int tmsuccess ) throws IllegalStateException, SystemException
{
return tx.delistResource( getXaResource(), tmsuccess );
}
@Override
public void destroy()
{
// kill xaResource
xaRm.destroy( getXaResource() );
}
/**
* Makes sure the resource is enlisted as active in the transaction.
*
* @throws XAException
* If resource not enlisted or suspended
*/
public void validate() throws XAException
{
xaRm.validate( getXaResource() );
}
/**
* Creates a {@link XaTransaction} for the managed {@link XAResource}.
* @return the created transaction.
* @throws XAException if there were already an associated transaction for this resource and xid.
*/
protected XaTransaction createTransaction() throws XAException
{
return xaRm.createTransaction( getXaResource() );
}
/**
* Returns the {@link XaTransaction} associated with this connection. If
* transaction is already completed it will still be returned.
*
* @return The {@link XaTransaction} associated with this connection
* @throws XAException
* If the transaction hasn't completed and the resource isn't
* enlisted
*/
protected XaTransaction getTransaction() throws XAException
{
XAResource xar = getXaResource();
XaTransaction xat = null;
if ( xar instanceof XaResourceHelpImpl )
{
xat = ((XaResourceHelpImpl) xar).getCompletedTx();
}
if ( xat != null )
{
return xat;
}
return xaRm.getXaTransaction( xar );
}
/**
* Will clear the resource manager of all transactions. Used for testing
* purpose only. Do not use this method unless you know what you're doing
* since it will corrupt the state between the resource and the global
* transaction manager.
*/
public void clearAllTransactions()
{
xaRm.reset();
}
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_transaction_xaframework_XaConnectionHelpImpl.java
|
351
|
{
@Override
protected void acquireLock( LockWorkerState state )
{
state.doing( "-W " + resource );
state.grabber.releaseWriteLock( resource, state.tx );
state.done();
}
}, true );
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_transaction_LockWorker.java
|
352
|
/**
 * Visitor that logs diagnostic information about each {@link LockInfo} it sees:
 * held locks get a full dump, idle locks with waiters get a dump too, and locks
 * with no activity at all are merely counted and summarized by {@link #done()}.
 */
private static class DumpVisitor implements Visitor<LockInfo, RuntimeException>
{
    private final StringLogger logger;
    // Number of locks seen with no readers and no writers.
    int emptyLockCount = 0;

    DumpVisitor( Logging logging )
    {
        logger = logging.getMessagesLog( LockManager.class );
    }

    @Override
    public boolean visit( LockInfo lock )
    {
        boolean held = lock.getWriteCount() > 0 || lock.getReadCount() > 0;
        if ( held )
        {
            dumpStack( lock );
        }
        else
        {
            // Not held, but still worth dumping if threads are queued on it.
            if ( lock.getWaitingThreadsCount() > 0 )
            {
                dumpStack( lock );
            }
            emptyLockCount++;
        }
        // Never terminate early: every lock should be visited.
        return false;
    }

    // Logs hold counts, the waiting-thread queue and the locking transactions.
    private void dumpStack( LockInfo lock )
    {
        logger.info( "Total lock count: readCount=" + lock.getReadCount() + " writeCount="
                     + lock.getWriteCount() + " for "
                     + lock.getResourceType().toString( lock.getResourceId() ) );
        logger.info( "Waiting list:" );
        StringBuilder buffer = new StringBuilder();
        String separator = "";
        for ( WaitingThread waiting : lock.getWaitingThreads() )
        {
            buffer.append( separator )
                  .append( "[tid=" ).append( waiting.getThreadId() )
                  .append( "(" ).append( waiting.getReadCount() )
                  .append( "r," ).append( waiting.getWriteCount() )
                  .append( "w )," )
                  .append( waiting.isWaitingOnWriteLock() ? "Write" : "Read" )
                  .append( "Lock]" );
            separator = ", ";
        }
        logger.info( buffer.toString() );
        for ( LockingTransaction lockingTx : lock.getLockingTransactions() )
        {
            logger.info( "" + lockingTx.getTransaction() + "(" + lockingTx.getReadCount() + "r,"
                         + lockingTx.getWriteCount() + "w)" );
        }
    }

    // Emits the final summary line about completely idle locks.
    void done()
    {
        logger.info( emptyLockCount > 0
                ? "There are " + emptyLockCount + " empty locks"
                : "There are no empty locks" );
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_transaction_LockManagerImpl.java
|
353
|
{
@Override
protected void acquireLock( LockWorkerState state )
{
state.doing( "-R " + resource );
state.grabber.releaseReadLock( resource, state.tx );
state.done();
}
}, true );
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_transaction_LockWorker.java
|
354
|
{
@Override
protected void acquireLock( LockWorkerState state )
{
state.doing( "+W " + resource + ", wait:" + wait );
state.grabber.getWriteLock( resource, state.tx );
state.done();
}
}, wait );
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_transaction_LockWorker.java
|
355
|
{
@Override
protected void acquireLock( LockWorkerState state )
{
state.doing( "+R " + resource + ", wait:" + wait );
state.grabber.getReadLock( resource, state.tx );
state.done();
}
}, wait );
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_transaction_LockWorker.java
|
356
|
/**
 * Test helper that acquires and releases locks on a dedicated worker thread,
 * letting tests orchestrate contention and deadlock scenarios between threads.
 */
public class LockWorker extends OtherThreadExecutor<LockWorkerState>
{
    public LockWorker( String name, LockManager grabber )
    {
        super( name, new LockWorkerState( grabber ) );
    }

    /**
     * Executes the command on the worker thread. With {@code wait} the call
     * blocks until the command finishes; otherwise it returns once the worker
     * thread is observed to be in a waiting state (presumably blocked on the
     * lock it tried to take — confirm against waitUntilWaiting()).
     */
    private Future<Void> perform( AcquireLockCommand acquireLockCommand, boolean wait ) throws Exception
    {
        Future<Void> future = executeDontWait( acquireLockCommand );
        if ( wait )
            awaitFuture( future );
        else
            waitUntilWaiting();
        return future;
    }

    public Future<Void> getReadLock( final ResourceObject resource, final boolean wait ) throws Exception
    {
        return perform( new AcquireLockCommand()
        {
            @Override
            protected void acquireLock( LockWorkerState state )
            {
                // "+R" marks a read-lock acquisition in the operation log.
                state.doing( "+R " + resource + ", wait:" + wait );
                state.grabber.getReadLock( resource, state.tx );
                state.done();
            }
        }, wait );
    }

    public Future<Void> getWriteLock( final ResourceObject resource, final boolean wait ) throws Exception
    {
        return perform( new AcquireLockCommand()
        {
            @Override
            protected void acquireLock( LockWorkerState state )
            {
                // "+W" marks a write-lock acquisition in the operation log.
                state.doing( "+W " + resource + ", wait:" + wait );
                state.grabber.getWriteLock( resource, state.tx );
                state.done();
            }
        }, wait );
    }

    public void releaseReadLock( final ResourceObject resource ) throws Exception
    {
        perform( new AcquireLockCommand()
        {
            @Override
            protected void acquireLock( LockWorkerState state )
            {
                state.doing( "-R " + resource );
                state.grabber.releaseReadLock( resource, state.tx );
                state.done();
            }
        }, true );
    }

    public void releaseWriteLock( final ResourceObject resource ) throws Exception
    {
        perform( new AcquireLockCommand()
        {
            @Override
            protected void acquireLock( LockWorkerState state )
            {
                state.doing( "-W " + resource );
                state.grabber.releaseWriteLock( resource, state.tx );
                state.done();
            }
        }, true );
    }

    // True if the most recent acquisition on this worker ended in a detected deadlock.
    public boolean isLastGetLockDeadLock()
    {
        return state.deadlockOnLastWait;
    }

    @Override
    public boolean visit( LineLogger logger )
    {
        boolean result = super.visit( logger );
        // Dump the worker's completed and in-flight operations for failure diagnosis.
        logger.logLine( "What have I done up until now?" );
        for ( String op : state.completedOperations )
            logger.logLine( op );
        logger.logLine( "Doing right now:" );
        logger.logLine( state.doing );
        return result;
    }

    public static ResourceObject newResourceObject( String name )
    {
        return new ResourceObject( name );
    }

    /** Simple named object used as the lock target in tests. */
    public static class ResourceObject
    {
        private final String name;

        ResourceObject( String name )
        {
            this.name = name;
        }

        @Override
        public String toString()
        {
            return this.name;
        }
    }

    /**
     * Command template that records a deadlock (instead of propagating the
     * exception) so tests can query it via {@link #isLastGetLockDeadLock()}.
     */
    private abstract static class AcquireLockCommand implements WorkerCommand<LockWorkerState, Void>
    {
        @Override
        public Void doWork( LockWorkerState state )
        {
            try
            {
                acquireLock( state );
                state.deadlockOnLastWait = false;
            }
            catch ( DeadlockDetectedException e )
            {
                // Expected in deadlock scenarios: remember it rather than failing the worker.
                state.deadlockOnLastWait = true;
            }
            return null;
        }

        protected abstract void acquireLock( LockWorkerState state );
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_transaction_LockWorker.java
|
357
|
/**
 * Produces a file containing diagnostic state (lock manager dumps plus each
 * worker's operation history) when a lock-related test fails.
 */
public class LockWorkFailureDump
{
    private final Class<?> testClass;

    public LockWorkFailureDump( Class<?> testClass )
    {
        this.testClass = testClass;
    }

    /**
     * Dumps all diagnostic state into a timestamped file under the test's
     * target directory and returns that file.
     */
    public File dumpState( LockManager lm, LockWorker... workers )
    {
        LifeSupport lifecycle = new LifeSupport();
        File dumpFile = forTest( testClass ).file( "failure-dump-" + currentTimeMillis() );
        Logging logging = lifecycle.add( new SingleLoggingService( logger( dumpFile ) ) );
        lifecycle.start();
        try
        {
            // Locks currently held by the lock manager
            lm.dumpAllLocks( logging );
            // Resource allocation graph state
            lm.dumpRagStack( logging );
            // Per-worker state: what each worker has done and is doing now
            for ( LockWorker w : workers )
            {
                logging.getMessagesLog( getClass() ).logLongMessage( "Worker " + w, w );
            }
            return dumpFile;
        }
        finally
        {
            lifecycle.shutdown();
        }
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_transaction_LockWorkFailureDump.java
|
358
|
// Exclusive lock type: delegates to the write-lock operations of the
// transaction state and lock manager.
WRITE
{
    @Override
    public LockElement acquire( TransactionState state, Object resource )
    {
        return state.acquireWriteLock( resource );
    }

    @Override
    public void release( LockManager lockManager, Object resource, Transaction tx )
    {
        lockManager.releaseWriteLock( resource, tx );
    }
};
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_transaction_LockType.java
|
359
|
// Shared lock type: delegates to the read-lock operations of the
// transaction state and lock manager.
READ
{
    @Override
    public LockElement acquire( TransactionState state, Object resource )
    {
        return state.acquireReadLock( resource );
    }

    @Override
    public void release( LockManager lockManager, Object resource, Transaction tx )
    {
        lockManager.releaseReadLock( resource, tx );
    }
},
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_transaction_LockType.java
|
360
|
/**
 * Signals that a lock expected to exist could not be found
 * (e.g. on release of a lock not held — confirm against callers).
 */
public class LockNotFoundException extends LockException
{
    public LockNotFoundException( String message )
    {
        super( message );
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_transaction_LockNotFoundException.java
|
361
|
// Visitor that accumulates every visited LockInfo into a list. Returns false
// from visit() so iteration covers all locks (assumes false means "continue"
// in this Visitor contract — confirm). The result field is presumably read
// directly by the enclosing class.
private static class ListAppendingVisitor implements Visitor<LockInfo, RuntimeException>
{
    private final List<LockInfo> result = new ArrayList<>();

    @Override
    public boolean visit( LockInfo element )
    {
        result.add( element );
        return false;
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_transaction_LockManagerImpl.java
|
362
|
{
@Override
protected TxIdGenerator createTxIdGenerator()
{
return idGenerator;
}
};
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_transaction_CommitContentionTests.java
|
363
|
{
@Override
public void run()
{
createNode();
}
} );
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_transaction_CommitContentionTests.java
|
364
|
{
public boolean skip;
@Override
public long generate( XaDataSource dataSource, int identifier ) throws XAException
{
return dataSource.getLastCommittedTxId() + 1;
}
@Override
public void committed( XaDataSource dataSource, int identifier, long txId,
Integer externalAuthorServerId )
{
// skip signal and waiting for second transaction
if ( skip == true )
{
return;
}
skip = true;
signalFirstTransactionStartedPushing();
waitForSecondTransactionToFinish();
}
@Override
public int getCurrentMasterId()
{
return 42;
}
@Override
public int getMyId()
{
return 87;
}
};
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_transaction_CommitContentionTests.java
|
365
|
/**
 * Integration test for StoreMigrator: migrates checked-in legacy stores and
 * verifies the resulting database content, id reuse, legacy indexes and
 * property-key-token de-duplication.
 *
 * Fixes applied: renamed typo'd helper assertNuDuplicates -> assertNoDuplicates,
 * corrected the fail() message in verifyNodeIdsReused (it fetches node 1, not 2),
 * and used the Java 7 diamond operator (the file already uses try-with-resources).
 */
public class StoreMigratorIT
{
    @Test
    public void shouldMigrate() throws IOException
    {
        // GIVEN
        LegacyStore legacyStore = new LegacyStore( fs,
                new File( getClass().getResource( "legacystore/exampledb/neostore" ).getFile() ) );
        NeoStore neoStore = storeFactory.createNeoStore( storeFileName );
        // WHEN
        new StoreMigrator( monitor ).migrate( legacyStore, neoStore );
        legacyStore.close();
        // THEN
        neoStore = storeFactory.newNeoStore( storeFileName );
        verifyNeoStore( neoStore );
        neoStore.close();
        assertEquals( 100, monitor.events.size() );
        assertTrue( monitor.started );
        assertTrue( monitor.finished );
        GraphDatabaseService database = new GraphDatabaseFactory().newEmbeddedDatabase( storeDir );
        DatabaseContentVerifier verifier = new DatabaseContentVerifier( database );
        verifier.verifyNodes();
        verifier.verifyRelationships();
        verifier.verifyNodeIdsReused();
        verifier.verifyRelationshipIdsReused();
        verifier.verifyLegacyIndex();
        // CLEANUP
        database.shutdown();
    }

    @Test
    public void shouldDedupUniquePropertyIndexKeys() throws Exception
    {
        // GIVEN
        // a store that contains two nodes with property "name" of which there are two key tokens
        // that should be merged in the store migration
        LegacyStore legacyStore = new LegacyStore( fs,
                new File( getClass().getResource( "legacystore/propkeydupdb/neostore" ).getFile() ) );
        NeoStore neoStore = storeFactory.createNeoStore( storeFileName );
        // WHEN
        new StoreMigrator( monitor ).migrate( legacyStore, neoStore );
        legacyStore.close();
        // THEN
        // verify that the "name" property for both the involved nodes
        GraphDatabaseService db = new GraphDatabaseFactory().newEmbeddedDatabase( storeDir );
        Node nodeA = getNodeWithName( db, "A" );
        assertThat( nodeA, inTx( db, hasProperty( "name" ).withValue( "A" ) ) );
        Node nodeB = getNodeWithName( db, "B" );
        assertThat( nodeB, inTx( db, hasProperty( "name" ).withValue( "B" ) ) );
        Node nodeC = getNodeWithName( db, "C" );
        assertThat( nodeC, inTx( db, hasProperty( "name" ).withValue( "C" ) ) );
        assertThat( nodeC, inTx( db, hasProperty( "other" ).withValue( "a value" ) ) );
        assertThat( nodeC, inTx( db, hasProperty( "third" ).withValue( "something" ) ) );
        db.shutdown();
        // THEN
        // verify that there are no duplicate keys in the store
        PropertyKeyTokenStore tokenStore =
                storeFactory.newPropertyKeyTokenStore( new File( storeFileName + PROPERTY_KEY_TOKEN_STORE_NAME ) );
        Token[] tokens = tokenStore.getTokens( MAX_VALUE );
        tokenStore.close();
        assertNoDuplicates( tokens );
    }

    // Asserts that every token name occurs exactly once in the given array.
    private void assertNoDuplicates( Token[] tokens )
    {
        Set<String> visited = new HashSet<>();
        for ( Token token : tokens )
        {
            assertTrue( visited.add( token.name() ) );
        }
    }

    // Finds the node whose "name" property equals the given name, or throws.
    private Node getNodeWithName( GraphDatabaseService db, String name )
    {
        Transaction tx = db.beginTx();
        try
        {
            for ( Node node : GlobalGraphOperations.at( db ).getAllNodes() )
            {
                if ( name.equals( node.getProperty( "name", null ) ) )
                {
                    tx.success();
                    return node;
                }
            }
        }
        finally
        {
            tx.finish();
        }
        throw new IllegalArgumentException( name + " not found" );
    }

    private final FileSystemAbstraction fs = new DefaultFileSystemAbstraction();
    private final String storeDir = TargetDirectory.forTest( getClass() ).makeGraphDbDir().getAbsolutePath();
    private final ListAccumulatorMigrationProgressMonitor monitor = new ListAccumulatorMigrationProgressMonitor();
    private StoreFactory storeFactory;
    private File storeFileName;

    @Before
    public void setUp()
    {
        Config config = MigrationTestUtils.defaultConfig();
        File outputDir = new File( storeDir );
        storeFileName = new File( outputDir, NeoStore.DEFAULT_NAME );
        storeFactory = new StoreFactory( config, new DefaultIdGeneratorFactory(),
                new DefaultWindowPoolFactory(), fs, StringLogger.DEV_NULL, new DefaultTxHook() );
    }

    // Expected values are fixed properties of the checked-in legacy store.
    private void verifyNeoStore( NeoStore neoStore )
    {
        assertEquals( 1317392957120l, neoStore.getCreationTime() );
        assertEquals( -472309512128245482l, neoStore.getRandomNumber() );
        assertEquals( 1l, neoStore.getVersion() );
        assertEquals( CommonAbstractStore.ALL_STORES_VERSION, NeoStore.versionLongToString( neoStore.getStoreVersion() ) );
        assertEquals( 1004l, neoStore.getLastCommittedTx() );
    }

    /** Verifies the migrated database content against the known legacy fixture. */
    private static class DatabaseContentVerifier
    {
        private final String longString = MigrationTestUtils.makeLongString();
        private final int[] longArray = MigrationTestUtils.makeLongArray();
        private final GraphDatabaseService database;

        public DatabaseContentVerifier( GraphDatabaseService database )
        {
            this.database = database;
        }

        private void verifyRelationships()
        {
            Transaction tx = database.beginTx();
            int traversalCount = 0;
            for ( Relationship rel : GlobalGraphOperations.at( database ).getAllRelationships() )
            {
                traversalCount++;
                verifyProperties( rel );
            }
            tx.success();
            tx.finish();
            assertEquals( 500, traversalCount );
        }

        private void verifyNodes()
        {
            int nodeCount = 0;
            Transaction tx = database.beginTx();
            for ( Node node : GlobalGraphOperations.at( database ).getAllNodes() )
            {
                nodeCount++;
                // Node 0 is the reference node and carries no fixture properties.
                if ( node.getId() > 0 )
                {
                    verifyProperties( node );
                }
            }
            tx.success();
            tx.finish();
            assertEquals( 501, nodeCount );
        }

        // Each entity in the fixture carries one property per PropertyType.
        private void verifyProperties( PropertyContainer node )
        {
            assertEquals( Integer.MAX_VALUE, node.getProperty( PropertyType.INT.name() ) );
            assertEquals( longString, node.getProperty( PropertyType.STRING.name() ) );
            assertEquals( true, node.getProperty( PropertyType.BOOL.name() ) );
            assertEquals( Double.MAX_VALUE, node.getProperty( PropertyType.DOUBLE.name() ) );
            assertEquals( Float.MAX_VALUE, node.getProperty( PropertyType.FLOAT.name() ) );
            assertEquals( Long.MAX_VALUE, node.getProperty( PropertyType.LONG.name() ) );
            assertEquals( Byte.MAX_VALUE, node.getProperty( PropertyType.BYTE.name() ) );
            assertEquals( Character.MAX_VALUE, node.getProperty( PropertyType.CHAR.name() ) );
            assertArrayEquals( longArray, (int[]) node.getProperty( PropertyType.ARRAY.name() ) );
            assertEquals( Short.MAX_VALUE, node.getProperty( PropertyType.SHORT.name() ) );
            assertEquals( "short", node.getProperty( PropertyType.SHORT_STRING.name() ) );
        }

        private void verifyNodeIdsReused()
        {
            Transaction transaction = database.beginTx();
            try
            {
                database.getNodeById( 1 );
                fail( "Node 1 should not exist" );
            }
            catch ( NotFoundException e )
            {
                //expected
            }
            finally
            {
                transaction.finish();
            }
            transaction = database.beginTx();
            try
            {
                Node newNode = database.createNode();
                assertEquals( 1, newNode.getId() );
                transaction.success();
            }
            finally
            {
                transaction.finish();
            }
        }

        private void verifyRelationshipIdsReused()
        {
            Transaction transaction = database.beginTx();
            try
            {
                Node node1 = database.createNode();
                Node node2 = database.createNode();
                Relationship relationship1 = node1.createRelationshipTo( node2, withName( "REUSE" ) );
                assertEquals( 0, relationship1.getId() );
                transaction.success();
            }
            finally
            {
                transaction.finish();
            }
        }

        public void verifyLegacyIndex()
        {
            try ( Transaction tx = database.beginTx() )
            {
                String[] nodeIndexes = database.index().nodeIndexNames();
                String[] relationshipIndexes = database.index().relationshipIndexNames();
                assertArrayEquals( new String[] { "nodekey" }, nodeIndexes );
                assertArrayEquals( new String[] { "relkey" }, relationshipIndexes );
                tx.success();
            }
        }
    }

    /** Progress monitor that records every callback so the tests can assert on them. */
    private class ListAccumulatorMigrationProgressMonitor implements MigrationProgressMonitor
    {
        private final List<Integer> events = new ArrayList<>();
        private boolean started = false;
        private boolean finished = false;

        @Override
        public void started()
        {
            started = true;
        }

        @Override
        public void percentComplete( int percent )
        {
            events.add( percent );
        }

        @Override
        public void finished()
        {
            finished = true;
        }
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_storemigration_StoreMigratorIT.java
|
366
|
/**
 * Verifies store-upgrade behavior when a previous upgrade attempt was
 * interrupted: a failed migration can be retried, but a pre-existing upgrade
 * backup blocks further attempts to avoid damaging it.
 */
public class StoreUpgraderInterruptionTestIT
{
    @Test
    public void shouldSucceedWithUpgradeAfterPreviousAttemptDiedDuringMigration() throws IOException
    {
        File workingDirectory = new File( "target/" + StoreUpgraderInterruptionTestIT.class.getSimpleName() );
        MigrationTestUtils.prepareSampleLegacyDatabase( fileSystem, workingDirectory );
        // Migrator that completes the migration but then dies, simulating a crash.
        StoreMigrator failingStoreMigrator = new StoreMigrator( new SilentMigrationProgressMonitor() )
        {
            @Override
            public void migrate( LegacyStore legacyStore, NeoStore neoStore ) throws IOException
            {
                super.migrate( legacyStore, neoStore );
                throw new RuntimeException( "This upgrade is failing" );
            }
        };
        assertTrue( allStoreFilesHaveVersion( fileSystem, workingDirectory, LEGACY_VERSION ) );
        try
        {
            newUpgrader( failingStoreMigrator, new DatabaseFiles( fileSystem ) ).attemptUpgrade(
                    new File( workingDirectory, NeoStore.DEFAULT_NAME ) );
            fail( "Should throw exception" );
        }
        catch ( RuntimeException e )
        {
            assertEquals( "This upgrade is failing", e.getMessage() );
        }
        // Working directory must be untouched after the failed attempt...
        assertTrue( allStoreFilesHaveVersion( fileSystem, workingDirectory, LEGACY_VERSION ) );
        // ...so a second, healthy attempt can succeed.
        newUpgrader( new StoreMigrator( new SilentMigrationProgressMonitor() ), new DatabaseFiles(fileSystem) )
                .attemptUpgrade( new File( workingDirectory, NeoStore.DEFAULT_NAME ) );
        assertTrue( allStoreFilesHaveVersion( fileSystem, workingDirectory, ALL_STORES_VERSION ) );
    }

    // Builds an upgrader with default config/id-generator/filesystem wiring.
    private StoreUpgrader newUpgrader( StoreMigrator migrator, DatabaseFiles files )
    {
        return new StoreUpgrader( defaultConfig(), alwaysAllowed(), new UpgradableDatabase( new StoreVersionCheck( fileSystem ) ), migrator,
                files, defaultIdGeneratorFactory(), defaultFileSystemAbstraction() );
    }

    @Test
    public void shouldFailOnSecondAttemptIfPreviousAttemptMadeABackupToAvoidDamagingBackup() throws IOException
    {
        File workingDirectory = new File( "target/" + StoreUpgraderInterruptionTestIT.class.getSimpleName() );
        MigrationTestUtils.prepareSampleLegacyDatabase( fileSystem, workingDirectory );
        // DatabaseFiles that creates the backup directory but then dies mid-backup.
        DatabaseFiles failsOnBackup = new DatabaseFiles( fileSystem )
        {
            @Override
            public void moveToBackupDirectory( File workingDirectory, File backupDirectory )
            {
                fileSystem.mkdir( backupDirectory );
                throw new RuntimeException( "Failing to backup working directory" );
            }
        };
        assertTrue( allStoreFilesHaveVersion( fileSystem, workingDirectory, LEGACY_VERSION ) );
        try
        {
            newUpgrader( new StoreMigrator( new SilentMigrationProgressMonitor() ), failsOnBackup ).attemptUpgrade( new File( workingDirectory, NeoStore.DEFAULT_NAME ) );
            fail( "Should throw exception" );
        }
        catch ( RuntimeException e )
        {
            assertEquals( "Failing to backup working directory", e.getMessage() );
        }
        // The leftover backup directory must block the second attempt.
        try
        {
            newUpgrader( new StoreMigrator( new SilentMigrationProgressMonitor() ) , new DatabaseFiles( fileSystem ) )
                    .attemptUpgrade( new File( workingDirectory, NeoStore.DEFAULT_NAME ) );
            fail( "Should throw exception" );
        }
        catch ( Exception e )
        {
            assertTrue( e.getMessage().startsWith( "Cannot proceed with upgrade because there is an existing upgrade backup in the way at " ) );
        }
    }

    private final FileSystemAbstraction fileSystem = new DefaultFileSystemAbstraction();
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_storemigration_StoreUpgraderInterruptionTestIT.java
|
367
|
/**
 * Thrown when a store file carries no version marker, typically meaning the
 * original database was not shut down cleanly before the upgrade attempt.
 */
public static class UpgradingStoreVersionNotFoundException extends UnableToUpgradeException
{
    private static final String MESSAGE =
            "'%s' does not contain a store version, please ensure that the original database was shut down in a clean state.";

    public UpgradingStoreVersionNotFoundException( String filenameWithoutStoreVersion )
    {
        super( String.format( MESSAGE, filenameWithoutStoreVersion ) );
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_storemigration_StoreUpgrader.java
|
368
|
/** Thrown when a store file required for the upgrade is missing. */
public static class UpgradeMissingStoreFilesException extends UnableToUpgradeException
{
    private static final String MESSAGE = "Missing required store file '%s'.";

    public UpgradeMissingStoreFilesException( String filenameExpectedToExist )
    {
        super( String.format( MESSAGE, filenameExpectedToExist ) );
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_storemigration_StoreUpgrader.java
|
369
|
/**
 * Thrown when a store file's version is not one this upgrader knows how to
 * migrate from.
 */
public static class UnexpectedUpgradingStoreVersionException extends UnableToUpgradeException
{
    private static final String MESSAGE =
            "'%s' has a store version number that we cannot upgrade from. Expected '%s' but file is version '%s'.";

    public UnexpectedUpgradingStoreVersionException( String filename, String expectedVersion, String actualVersion )
    {
        super( String.format( MESSAGE, filename, expectedVersion, actualVersion ) );
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_storemigration_StoreUpgrader.java
|
370
|
/** Base (unchecked) exception for any failure during a store upgrade. */
public static class UnableToUpgradeException extends RuntimeException
{
    public UnableToUpgradeException( Exception cause )
    {
        super( cause );
    }

    public UnableToUpgradeException( String message )
    {
        super( message );
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_storemigration_StoreUpgrader.java
|
371
|
/**
 * Orchestrates an automatic store upgrade: migrates the old store into an
 * isolated "upgrade" directory, backs up the original into "upgrade_backup",
 * then moves the migrated files into place. Statement order in
 * attemptUpgrade() is significant for crash recovery.
 */
public class StoreUpgrader
{
    private final Config originalConfig;
    private final UpgradeConfiguration upgradeConfiguration;
    private final UpgradableDatabase upgradableDatabase;
    private final StoreMigrator storeMigrator;
    private final DatabaseFiles databaseFiles;
    private final IdGeneratorFactory idGeneratorFactory;
    private final FileSystemAbstraction fileSystem;

    public StoreUpgrader( Config originalConfig, UpgradeConfiguration upgradeConfiguration,
                          UpgradableDatabase upgradableDatabase, StoreMigrator storeMigrator,
                          DatabaseFiles databaseFiles, IdGeneratorFactory idGeneratorFactory,
                          FileSystemAbstraction fileSystem )
    {
        this.idGeneratorFactory = idGeneratorFactory;
        this.fileSystem = fileSystem;
        this.originalConfig = originalConfig;
        this.upgradeConfiguration = upgradeConfiguration;
        this.upgradableDatabase = upgradableDatabase;
        this.storeMigrator = storeMigrator;
        this.databaseFiles = databaseFiles;
    }

    /**
     * Runs the full upgrade for the store at the given neostore file.
     * Throws UnableToUpgradeException (or a subclass) on any failure;
     * configuration and upgradability are checked before anything is touched.
     */
    public void attemptUpgrade( File storageFileName )
    {
        upgradeConfiguration.checkConfigurationAllowsAutomaticUpgrade();
        upgradableDatabase.checkUpgradeable( storageFileName );
        File workingDirectory = storageFileName.getParentFile();
        File upgradeDirectory = new File( workingDirectory, "upgrade" );
        File backupDirectory = new File( workingDirectory, "upgrade_backup" );
        // Migrate first, into isolation, so the original store stays intact
        // until the migration has fully succeeded.
        migrateToIsolatedDirectory( storageFileName, upgradeDirectory );
        databaseFiles.moveToBackupDirectory( workingDirectory, backupDirectory );
        backupMessagesLogLeavingInPlaceForNewDatabaseMessages( workingDirectory, backupDirectory );
        databaseFiles.moveToWorkingDirectory( upgradeDirectory, workingDirectory );
    }

    // Copies messages.log into the backup while leaving the original in place,
    // so the new database keeps appending to the existing log.
    private void backupMessagesLogLeavingInPlaceForNewDatabaseMessages( File workingDirectory, File backupDirectory )
    {
        try
        {
            File originalLog = new File( workingDirectory, StringLogger.DEFAULT_NAME );
            if ( fileSystem.fileExists( originalLog ))
            {
                fileSystem.copyFile( originalLog, new File( backupDirectory, StringLogger.DEFAULT_NAME ) );
            }
        }
        catch ( IOException e )
        {
            throw new UnableToUpgradeException( e );
        }
    }

    // Creates a fresh NeoStore in the upgrade directory (wiping any leftovers
    // from a previous interrupted attempt) and runs the migrator into it.
    private void migrateToIsolatedDirectory( File storageFileName, File upgradeDirectory )
    {
        if (upgradeDirectory.exists()) {
            try
            {
                fileSystem.deleteRecursively( upgradeDirectory );
            }
            catch ( IOException e )
            {
                throw new UnableToUpgradeException( e );
            }
        }
        fileSystem.mkdir( upgradeDirectory );
        File upgradeFileName = new File( upgradeDirectory, NeoStore.DEFAULT_NAME );
        Map<String, String> upgradeConfig = new HashMap<String, String>( originalConfig.getParams() );
        upgradeConfig.put( "neo_store", upgradeFileName.getPath() );
        Config upgradeConfiguration = new Config( upgradeConfig );
        // NOTE(review): txHook is passed as null here — presumably unused
        // during migration; confirm against StoreFactory.
        NeoStore neoStore = new StoreFactory( upgradeConfiguration, idGeneratorFactory, new DefaultWindowPoolFactory(),
                fileSystem, StringLogger.DEV_NULL, null ).createNeoStore( upgradeFileName );
        try
        {
            storeMigrator.migrate( new LegacyStore( fileSystem, storageFileName ),
                    neoStore );
        }
        catch ( IOException e )
        {
            throw new UnableToUpgradeException( e );
        }
        catch ( Exception e )
        {
            throw Exceptions.launderedException( e );
        }
        finally
        {
            neoStore.close();
        }
    }

    /** Base (unchecked) exception for any failure during a store upgrade. */
    public static class UnableToUpgradeException extends RuntimeException
    {
        public UnableToUpgradeException( Exception cause )
        {
            super( cause );
        }

        public UnableToUpgradeException( String message )
        {
            super( message );
        }
    }

    /** Thrown when a store file required for the upgrade is missing. */
    public static class UpgradeMissingStoreFilesException extends UnableToUpgradeException
    {
        private static final String MESSAGE = "Missing required store file '%s'.";

        public UpgradeMissingStoreFilesException( String filenameExpectedToExist )
        {
            super( String.format( MESSAGE, filenameExpectedToExist ) );
        }
    }

    /** Thrown when a store file carries no version marker (unclean shutdown). */
    public static class UpgradingStoreVersionNotFoundException extends UnableToUpgradeException
    {
        private static final String MESSAGE =
                "'%s' does not contain a store version, please ensure that the original database was shut down in a clean state.";

        public UpgradingStoreVersionNotFoundException( String filenameWithoutStoreVersion )
        {
            super( String.format( MESSAGE, filenameWithoutStoreVersion ) );
        }
    }

    /** Thrown when a store file's version cannot be upgraded from. */
    public static class UnexpectedUpgradingStoreVersionException extends UnableToUpgradeException
    {
        private static final String MESSAGE =
                "'%s' has a store version number that we cannot upgrade from. Expected '%s' but file is version '%s'.";

        public UnexpectedUpgradingStoreVersionException( String filename, String expectedVersion, String actualVersion )
        {
            super( String.format( MESSAGE, filename, expectedVersion, actualVersion ) );
        }
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_storemigration_StoreUpgrader.java
|
372
|
/**
 * Integration tests for automatic store upgrade on database startup: a clean
 * legacy store upgrades, an uncleanly-shut-down store aborts, and a corrupt
 * store aborts.
 *
 * Fixes applied: removed the dead local variable 'database' in
 * shouldAbortOnCorruptStore (the return value of newGraphDatabase() was never
 * used), declared config maps as Map for consistency with the other test, and
 * used the diamond operator (the surrounding codebase is on Java 7).
 */
public class StoreUpgradeIntegrationTest
{
    @Test
    public void shouldUpgradeAutomaticallyOnDatabaseStartup() throws IOException
    {
        prepareSampleLegacyDatabase( fileSystem, workingDirectory );
        assertTrue( allStoreFilesHaveVersion( fileSystem, workingDirectory, LEGACY_VERSION ) );
        Map<String, String> params = new HashMap<>();
        params.put( GraphDatabaseSettings.allow_store_upgrade.name(), "true" );
        GraphDatabaseService database = new GraphDatabaseFactory()
                .newEmbeddedDatabaseBuilder( workingDirectory.getPath() ).setConfig( params ).newGraphDatabase();
        database.shutdown();
        assertTrue( "Some store files did not have the correct version",
                allStoreFilesHaveVersion( fileSystem, workingDirectory, ALL_STORES_VERSION ) );
    }

    @Test
    public void shouldAbortOnNonCleanlyShutdown() throws Throwable
    {
        prepareSampleLegacyDatabase( fileSystem, workingDirectory );
        assertTrue( allStoreFilesHaveVersion( fileSystem, workingDirectory, LEGACY_VERSION ) );
        // Truncating all files removes the version trailers, simulating a
        // database that was not shut down cleanly.
        StoreUpgraderTestIT.truncateAllFiles( fileSystem, workingDirectory );
        Map<String, String> params = new HashMap<>();
        params.put( GraphDatabaseSettings.allow_store_upgrade.name(), "true" );
        try
        {
            new GraphDatabaseFactory().newEmbeddedDatabaseBuilder(
                    workingDirectory.getPath()).setConfig( params ).newGraphDatabase();
            fail( "Should have been unable to start upgrade on old version" );
        }
        catch ( RuntimeException e )
        {
            assertThat( Exceptions.rootCause( e ), Matchers.instanceOf(
                    StoreUpgrader.UpgradingStoreVersionNotFoundException.class ) );
        }
    }

    @Test
    public void shouldAbortOnCorruptStore() throws IOException
    {
        prepareSampleLegacyDatabase( fileSystem, workingDirectory );
        assertTrue( allStoreFilesHaveVersion( fileSystem, workingDirectory, LEGACY_VERSION ) );
        // Corrupt a single store file by truncating it.
        truncateFile(fileSystem, new File( workingDirectory,
                "neostore.propertystore.db.index.keys" ),
                "StringPropertyStore " + LEGACY_VERSION );
        Map<String, String> params = new HashMap<>();
        params.put( GraphDatabaseSettings.allow_store_upgrade.name(), "true" );
        try
        {
            new GraphDatabaseFactory()
                    .newEmbeddedDatabaseBuilder( workingDirectory.getPath() ).setConfig( params ).newGraphDatabase();
            fail( "Should have been unable to start upgrade on old version" );
        }
        catch ( RuntimeException e )
        {
            assertThat( Exceptions.rootCause( e ), Matchers.instanceOf( UnableToUpgradeException.class ) );
        }
    }

    private final FileSystemAbstraction fileSystem = new DefaultFileSystemAbstraction();
    private final File workingDirectory = TargetDirectory.forTest( getClass() ).makeGraphDbDir();
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_storemigration_StoreUpgradeIntegrationTest.java
|
373
|
/**
 * Progress monitor that records every callback it receives; the fields are
 * read directly by the enclosing test to assert on migration progress.
 */
private class ListAccumulatorMigrationProgressMonitor implements MigrationProgressMonitor
{
    // Every reported completion percentage, in arrival order.
    private final List<Integer> events = new ArrayList<Integer>();
    private boolean started = false;
    private boolean finished = false;

    @Override
    public void started()
    {
        started = true;
    }

    @Override
    public void percentComplete( int percent )
    {
        events.add( percent );
    }

    @Override
    public void finished()
    {
        finished = true;
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_storemigration_StoreMigratorIT.java
|
374
|
private static class DatabaseContentVerifier
{
private final String longString = MigrationTestUtils.makeLongString();
private final int[] longArray = MigrationTestUtils.makeLongArray();
private final GraphDatabaseService database;
public DatabaseContentVerifier( GraphDatabaseService database )
{
this.database = database;
}
private void verifyRelationships()
{
Transaction tx = database.beginTx();
int traversalCount = 0;
for ( Relationship rel : GlobalGraphOperations.at( database ).getAllRelationships() )
{
traversalCount++;
verifyProperties( rel );
}
tx.success();
tx.finish();
assertEquals( 500, traversalCount );
}
private void verifyNodes()
{
int nodeCount = 0;
Transaction tx = database.beginTx();
for ( Node node : GlobalGraphOperations.at( database ).getAllNodes() )
{
nodeCount++;
if ( node.getId() > 0 )
{
verifyProperties( node );
}
}
tx.success();
tx.finish();
assertEquals( 501, nodeCount );
}
private void verifyProperties( PropertyContainer node )
{
assertEquals( Integer.MAX_VALUE, node.getProperty( PropertyType.INT.name() ) );
assertEquals( longString, node.getProperty( PropertyType.STRING.name() ) );
assertEquals( true, node.getProperty( PropertyType.BOOL.name() ) );
assertEquals( Double.MAX_VALUE, node.getProperty( PropertyType.DOUBLE.name() ) );
assertEquals( Float.MAX_VALUE, node.getProperty( PropertyType.FLOAT.name() ) );
assertEquals( Long.MAX_VALUE, node.getProperty( PropertyType.LONG.name() ) );
assertEquals( Byte.MAX_VALUE, node.getProperty( PropertyType.BYTE.name() ) );
assertEquals( Character.MAX_VALUE, node.getProperty( PropertyType.CHAR.name() ) );
assertArrayEquals( longArray, (int[]) node.getProperty( PropertyType.ARRAY.name() ) );
assertEquals( Short.MAX_VALUE, node.getProperty( PropertyType.SHORT.name() ) );
assertEquals( "short", node.getProperty( PropertyType.SHORT_STRING.name() ) );
}
private void verifyNodeIdsReused()
{
Transaction transaction = database.beginTx();
try
{
database.getNodeById( 1 );
fail( "Node 2 should not exist" );
}
catch ( NotFoundException e )
{
//expected
}
finally {
transaction.finish();
}
transaction = database.beginTx();
try
{
Node newNode = database.createNode();
assertEquals( 1, newNode.getId() );
transaction.success();
}
finally
{
transaction.finish();
}
}
private void verifyRelationshipIdsReused()
{
Transaction transaction = database.beginTx();
try
{
Node node1 = database.createNode();
Node node2 = database.createNode();
Relationship relationship1 = node1.createRelationshipTo( node2, withName( "REUSE" ) );
assertEquals( 0, relationship1.getId() );
transaction.success();
}
finally
{
transaction.finish();
}
}
public void verifyLegacyIndex()
{
try ( Transaction tx = database.beginTx() )
{
String[] nodeIndexes = database.index().nodeIndexNames();
String[] relationshipIndexes = database.index().relationshipIndexNames();
assertArrayEquals( new String[] { "nodekey" }, nodeIndexes );
assertArrayEquals( new String[] { "relkey" }, relationshipIndexes );
tx.success();
}
}
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_storemigration_StoreMigratorIT.java
|
375
|
/**
 * Encapsulates one migration run from a legacy store to a new-format store.
 * The steps in {@link #migrate()} are order-sensitive: the neostore header is
 * copied first, then nodes and property key tokens are rewritten, and only
 * after both stores are closed are the unchanged store files copied over.
 */
protected class Migration
{
private final LegacyStore legacyStore;
private final NeoStore neoStore;
// Max node id in the legacy store; used only as the denominator for progress reporting.
private final long totalEntities;
private int percentComplete;
public Migration( LegacyStore legacyStore, NeoStore neoStore )
{
this.legacyStore = legacyStore;
this.neoStore = neoStore;
totalEntities = legacyStore.getNodeStoreReader().getMaxId();
}
// Runs the full migration. Note that this closes both neoStore and
// legacyStore before the raw file copies below, so no store may be used
// through its store API after the "Close" step.
private void migrate() throws IOException
{
// Migrate
migrateNeoStore( neoStore );
migrateNodes( neoStore.getNodeStore() );
migratePropertyIndexes( neoStore.getPropertyStore() );
// Close
neoStore.close();
legacyStore.close();
// Just copy unchanged stores that don't need migration
legacyStore.copyRelationshipStore( neoStore );
legacyStore.copyRelationshipTypeTokenStore( neoStore );
legacyStore.copyRelationshipTypeTokenNameStore( neoStore );
legacyStore.copyDynamicStringPropertyStore( neoStore );
legacyStore.copyDynamicArrayPropertyStore( neoStore );
legacyStore.copyLegacyIndexStoreFile( neoStore.getStorageFileName().getParentFile() );
}
// Rewrites property key tokens (collapsing duplicate names) and then
// rewrites every property record to point at the surviving token ids.
private void migratePropertyIndexes( PropertyStore propertyStore ) throws IOException
{
Token[] tokens = legacyStore.getPropertyIndexReader().readTokens();
// dedup and write new property key token store (incl. names)
Map<Integer, Integer> propertyKeyTranslation =
dedupAndWritePropertyKeyTokenStore( propertyStore, tokens );
// read property store, replace property key ids
migratePropertyStore( propertyKeyTranslation, propertyStore );
}
// Copies the neostore header verbatim, then stamps it with the current store version.
private void migrateNeoStore( NeoStore neoStore ) throws IOException
{
legacyStore.copyNeoStore( neoStore );
neoStore.setStoreVersion( NeoStore.versionStringToLong( CommonAbstractStore.ALL_STORES_VERSION ) );
}
// Writes one token record per distinct token name and returns a map from
// every legacy token id to the id of the record written for its name.
private Map<Integer, Integer> dedupAndWritePropertyKeyTokenStore( PropertyStore propertyStore,
Token[] tokens /*ordered ASC*/ )
{
PropertyKeyTokenStore keyTokenStore = propertyStore.getPropertyKeyTokenStore();
Map<Integer/*duplicate*/, Integer/*use this instead*/> translations = new HashMap<Integer, Integer>();
Map<String, Integer> createdTokens = new HashMap<String, Integer>();
for ( Token token : tokens )
{
Integer id = createdTokens.get( token.name() );
if ( id == null )
{ // Not a duplicate, add to store
id = (int) keyTokenStore.nextId();
PropertyKeyTokenRecord record = new PropertyKeyTokenRecord( id );
Collection<DynamicRecord> nameRecords =
keyTokenStore.allocateNameRecords( encode( token.name() ) );
record.setNameId( (int) first( nameRecords ).getId() );
record.addNameRecords( nameRecords );
record.setInUse( true );
record.setCreated();
keyTokenStore.updateRecord( record );
createdTokens.put( token.name(), id );
}
translations.put( token.id(), id );
}
return translations;
}
// Streams all legacy property records, translating each block's key id via
// the supplied map, and frees the id range between consecutive in-use records.
private void migratePropertyStore( Map<Integer, Integer> propertyKeyTranslation,
PropertyStore propertyStore ) throws IOException
{
long lastInUseId = -1;
for ( PropertyRecord propertyRecord : loop( legacyStore.getPropertyStoreReader().readPropertyStore() ) )
{
// Translate property keys
for ( PropertyBlock block : propertyRecord.getPropertyBlocks() )
{
int key = block.getKeyIndexId();
Integer translation = propertyKeyTranslation.get( key );
if ( translation != null )
{
block.setKeyIndexId( translation );
}
}
propertyStore.setHighId( propertyRecord.getId()+1 );
propertyStore.updateRecord( propertyRecord );
for ( long id = lastInUseId+1; id < propertyRecord.getId(); id++ )
{
propertyStore.freeId( id );
}
lastInUseId = propertyRecord.getId();
}
}
// Copies node records into the new store, freeing the ids of records that
// are not in use, and reports progress based on node id.
private void migrateNodes( NodeStore nodeStore ) throws IOException
{
for ( NodeRecord nodeRecord : loop( legacyStore.getNodeStoreReader().readNodeStore() ) )
{
reportProgress( nodeRecord.getId() );
nodeStore.setHighId( nodeRecord.getId() + 1 );
if ( nodeRecord.inUse() )
{
nodeStore.updateRecord( nodeRecord );
}
else
{
nodeStore.freeId( nodeRecord.getId() );
}
}
legacyStore.getNodeStoreReader().close();
}
// Forwards a whole-percent progress update to the monitor, at most once per percent.
private void reportProgress( long id )
{
int newPercent = (int) (id * 100 / totalEntities);
if ( newPercent > percentComplete )
{
percentComplete = newPercent;
progressMonitor.percentComplete( percentComplete );
}
}
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_storemigration_StoreMigrator.java
|
376
|
/**
 * Verifies that pushing transaction updates (the {@link TxIdGenerator#committed}
 * callback) does not block other transactions from committing: the first
 * transaction is held inside {@code committed(..)} until a second transaction
 * has finished, which can only happen if commits do not contend on each other.
 */
public class CommitContentionTests
{
    private static final TargetDirectory target = forTest( CommitContentionTests.class );

    // Released when the first transaction has started pushing its updates.
    final Semaphore semaphore1 = new Semaphore( 1 );
    // Released when the second transaction has finished committing.
    final Semaphore semaphore2 = new Semaphore( 1 );
    // Captures any failure observed on the background thread so the test thread can rethrow it.
    final AtomicReference<Exception> reference = new AtomicReference<>();

    final TxIdGenerator txIdGenerator = new TxIdGenerator()
    {
        // Set after the first commit so that only the first transaction blocks in committed(..).
        public boolean skip;

        @Override
        public long generate( XaDataSource dataSource, int identifier ) throws XAException
        {
            return dataSource.getLastCommittedTxId() + 1;
        }

        @Override
        public void committed( XaDataSource dataSource, int identifier, long txId,
                Integer externalAuthorServerId )
        {
            // skip signal and waiting for second transaction
            if ( skip )
            {
                return;
            }
            skip = true;
            signalFirstTransactionStartedPushing();
            waitForSecondTransactionToFinish();
        }

        @Override
        public int getCurrentMasterId()
        {
            return 42;
        }

        @Override
        public int getMyId()
        {
            return 87;
        }
    };

    @Rule
    public TargetDirectory.TestDirectory storeLocation = target.testDirectory();
    private GraphDatabaseService db;

    @Before
    public void before() throws Exception
    {
        // Drain both semaphores so the test blocks until the corresponding signals.
        semaphore1.acquire();
        semaphore2.acquire();
        db = createDb( txIdGenerator );
    }

    @After
    public void after() throws Exception
    {
        db.shutdown();
    }

    @Test
    public void shouldNotContendOnCommitWhenPushingUpdates() throws Exception
    {
        Thread thread = startFirstTransactionWhichBlocksDuringPushUntilSecondTransactionFinishes();
        runAndFinishSecondTransaction();
        thread.join();
        assertNoFailures();
    }

    // Rethrows any exception captured on the background thread as a test failure.
    private void assertNoFailures()
    {
        Exception e = reference.get();
        if ( e != null )
        {
            throw new AssertionError( e );
        }
    }

    private void runAndFinishSecondTransaction()
    {
        createNode();
        signalSecondTransactionFinished();
    }

    private void createNode()
    {
        try ( Transaction transaction = db.beginTx() )
        {
            db.createNode();
            transaction.success();
        }
    }

    // Starts a background transaction and waits until it is inside committed(..).
    private Thread startFirstTransactionWhichBlocksDuringPushUntilSecondTransactionFinishes() throws
            InterruptedException
    {
        Thread thread = new Thread( new Runnable()
        {
            @Override
            public void run()
            {
                createNode();
            }
        } );
        thread.start();
        waitForFirstTransactionToStartPushing();
        return thread;
    }

    // Builds a cache-less embedded database whose tx id generator is the test's instrumented one.
    private GraphDatabaseService createDb( final TxIdGenerator idGenerator )
    {
        GraphDatabaseFactoryState state = new GraphDatabaseFactoryState();
        state.setCacheProviders( asList( (CacheProvider) new NoCacheProvider() ) );
        state.setTransactionInterceptorProviders( Arrays.<TransactionInterceptorProvider>asList() );
        //noinspection deprecation
        return new EmbeddedGraphDatabase( storeLocation.absolutePath(), stringMap( cache_type.name(),
                NoCacheProvider.NAME ), state.databaseDependencies() )
        {
            @Override
            protected TxIdGenerator createTxIdGenerator()
            {
                return idGenerator;
            }
        };
    }

    private void waitForFirstTransactionToStartPushing() throws InterruptedException
    {
        if ( !semaphore1.tryAcquire( 10, SECONDS ) )
        {
            throw new IllegalStateException( "First transaction never started pushing" );
        }
    }

    private void signalFirstTransactionStartedPushing()
    {
        semaphore1.release();
    }

    private void signalSecondTransactionFinished()
    {
        semaphore2.release();
    }

    private void waitForSecondTransactionToFinish()
    {
        try
        {
            boolean acquired = semaphore2.tryAcquire( 10, SECONDS );
            if ( !acquired )
            {
                reference.set( new IllegalStateException( "Second transaction never finished" ) );
            }
        }
        catch ( InterruptedException e )
        {
            // Record rather than rethrow: this runs inside the commit path of the db.
            reference.set( e );
        }
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_transaction_CommitContentionTests.java
|
377
|
/**
 * Migrates a legacy-format store to the current store format, reporting
 * progress through a {@link MigrationProgressMonitor}. The actual work is
 * done by the nested {@link Migration}, one instance per migration run.
 */
public class StoreMigrator
{
private final MigrationProgressMonitor progressMonitor;
public StoreMigrator( MigrationProgressMonitor progressMonitor )
{
this.progressMonitor = progressMonitor;
}
/**
 * Runs a full migration of {@code legacyStore} into {@code neoStore}.
 * Both stores are closed by the migration before raw file copies are made.
 */
public void migrate( LegacyStore legacyStore, NeoStore neoStore ) throws IOException
{
progressMonitor.started();
new Migration( legacyStore, neoStore ).migrate();
progressMonitor.finished();
}
// One migration run. The steps in migrate() are order-sensitive: header
// first, then nodes and tokens, then close, then verbatim file copies.
protected class Migration
{
private final LegacyStore legacyStore;
private final NeoStore neoStore;
// Max node id in the legacy store; denominator for progress percentage.
private final long totalEntities;
private int percentComplete;
public Migration( LegacyStore legacyStore, NeoStore neoStore )
{
this.legacyStore = legacyStore;
this.neoStore = neoStore;
totalEntities = legacyStore.getNodeStoreReader().getMaxId();
}
private void migrate() throws IOException
{
// Migrate
migrateNeoStore( neoStore );
migrateNodes( neoStore.getNodeStore() );
migratePropertyIndexes( neoStore.getPropertyStore() );
// Close
neoStore.close();
legacyStore.close();
// Just copy unchanged stores that don't need migration
legacyStore.copyRelationshipStore( neoStore );
legacyStore.copyRelationshipTypeTokenStore( neoStore );
legacyStore.copyRelationshipTypeTokenNameStore( neoStore );
legacyStore.copyDynamicStringPropertyStore( neoStore );
legacyStore.copyDynamicArrayPropertyStore( neoStore );
legacyStore.copyLegacyIndexStoreFile( neoStore.getStorageFileName().getParentFile() );
}
// Rewrites property key tokens (collapsing duplicate names), then points
// every property record at the surviving token ids.
private void migratePropertyIndexes( PropertyStore propertyStore ) throws IOException
{
Token[] tokens = legacyStore.getPropertyIndexReader().readTokens();
// dedup and write new property key token store (incl. names)
Map<Integer, Integer> propertyKeyTranslation =
dedupAndWritePropertyKeyTokenStore( propertyStore, tokens );
// read property store, replace property key ids
migratePropertyStore( propertyKeyTranslation, propertyStore );
}
// Copies the neostore header verbatim, then stamps the current store version.
private void migrateNeoStore( NeoStore neoStore ) throws IOException
{
legacyStore.copyNeoStore( neoStore );
neoStore.setStoreVersion( NeoStore.versionStringToLong( CommonAbstractStore.ALL_STORES_VERSION ) );
}
// Writes one token record per distinct name; returns legacy-id -> new-id map.
private Map<Integer, Integer> dedupAndWritePropertyKeyTokenStore( PropertyStore propertyStore,
Token[] tokens /*ordered ASC*/ )
{
PropertyKeyTokenStore keyTokenStore = propertyStore.getPropertyKeyTokenStore();
Map<Integer/*duplicate*/, Integer/*use this instead*/> translations = new HashMap<Integer, Integer>();
Map<String, Integer> createdTokens = new HashMap<String, Integer>();
for ( Token token : tokens )
{
Integer id = createdTokens.get( token.name() );
if ( id == null )
{ // Not a duplicate, add to store
id = (int) keyTokenStore.nextId();
PropertyKeyTokenRecord record = new PropertyKeyTokenRecord( id );
Collection<DynamicRecord> nameRecords =
keyTokenStore.allocateNameRecords( encode( token.name() ) );
record.setNameId( (int) first( nameRecords ).getId() );
record.addNameRecords( nameRecords );
record.setInUse( true );
record.setCreated();
keyTokenStore.updateRecord( record );
createdTokens.put( token.name(), id );
}
translations.put( token.id(), id );
}
return translations;
}
// Streams legacy property records, translating key ids and freeing the id
// gaps between consecutive in-use records.
private void migratePropertyStore( Map<Integer, Integer> propertyKeyTranslation,
PropertyStore propertyStore ) throws IOException
{
long lastInUseId = -1;
for ( PropertyRecord propertyRecord : loop( legacyStore.getPropertyStoreReader().readPropertyStore() ) )
{
// Translate property keys
for ( PropertyBlock block : propertyRecord.getPropertyBlocks() )
{
int key = block.getKeyIndexId();
Integer translation = propertyKeyTranslation.get( key );
if ( translation != null )
{
block.setKeyIndexId( translation );
}
}
propertyStore.setHighId( propertyRecord.getId()+1 );
propertyStore.updateRecord( propertyRecord );
for ( long id = lastInUseId+1; id < propertyRecord.getId(); id++ )
{
propertyStore.freeId( id );
}
lastInUseId = propertyRecord.getId();
}
}
// Copies node records, freeing ids of unused records; reports progress by node id.
private void migrateNodes( NodeStore nodeStore ) throws IOException
{
for ( NodeRecord nodeRecord : loop( legacyStore.getNodeStoreReader().readNodeStore() ) )
{
reportProgress( nodeRecord.getId() );
nodeStore.setHighId( nodeRecord.getId() + 1 );
if ( nodeRecord.inUse() )
{
nodeStore.updateRecord( nodeRecord );
}
else
{
nodeStore.freeId( nodeRecord.getId() );
}
}
legacyStore.getNodeStoreReader().close();
}
// Forwards a whole-percent progress update to the monitor, at most once per percent.
private void reportProgress( long id )
{
int newPercent = (int) (id * 100 / totalEntities);
if ( newPercent > percentComplete )
{
percentComplete = newPercent;
progressMonitor.percentComplete( percentComplete );
}
}
}
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_storemigration_StoreMigrator.java
|
378
|
/**
 * Command line tool that migrates a legacy-format store into a fresh target
 * directory and then starts the migrated database once to verify it opens.
 *
 * Usage: {@code StoreMigrationTool <legacy store directory> <target store directory>}
 */
public class StoreMigrationTool
{
    public static void main( String[] args ) throws IOException
    {
        // Fail with a usage message instead of an ArrayIndexOutOfBoundsException
        // when invoked with too few arguments.
        if ( args.length < 2 )
        {
            System.err.println( "Usage: StoreMigrationTool <legacy store directory> <target store directory>" );
            System.exit( 1 );
        }
        String legacyStoreDirectory = args[0];
        String targetStoreDirectory = args[1];
        new StoreMigrationTool().run( legacyStoreDirectory, targetStoreDirectory, StringLogger.SYSTEM );
    }

    private void run( String legacyStoreDirectory, String targetStoreDirectory, StringLogger log ) throws IOException
    {
        LegacyStore legacyStore = new LegacyStore( new DefaultFileSystemAbstraction(),
                new File( new File( legacyStoreDirectory ), NeoStore.DEFAULT_NAME ) );
        Map<String, String> config = new HashMap<String, String>();
        File targetStoreDirectoryFile = new File( targetStoreDirectory );
        // Refuse to migrate into an existing directory so a previous (possibly
        // partial) migration is never silently overwritten.
        if ( targetStoreDirectoryFile.exists() )
        {
            throw new IllegalStateException( "Cannot migrate to a directory that already exists, " +
                    "please delete first and re-run" );
        }
        boolean success = targetStoreDirectoryFile.mkdirs();
        if ( !success )
        {
            throw new IllegalStateException( "Failed to create directory" );
        }
        File targetStoreFile = new File( targetStoreDirectory, NeoStore.DEFAULT_NAME );
        config.put( "neo_store", targetStoreFile.getPath() );
        FileSystemAbstraction fileSystem = new DefaultFileSystemAbstraction();
        NeoStore neoStore = new StoreFactory( new Config( config, GraphDatabaseSettings.class ),
                new DefaultIdGeneratorFactory(),
                new DefaultWindowPoolFactory(), fileSystem, log, null ).createNeoStore( targetStoreFile );
        long startTime = System.currentTimeMillis();
        // migrate(..) closes both legacyStore and neoStore internally.
        new StoreMigrator( new VisibleMigrationProgressMonitor( log, System.out ) ).migrate( legacyStore, neoStore );
        long duration = System.currentTimeMillis() - startTime;
        System.out.printf( "Migration completed in %d s%n", duration / 1000 );
        neoStore.close();
        // Smoke test: start and stop the migrated database once.
        GraphDatabaseService database =
                new GraphDatabaseFactory().newEmbeddedDatabase( targetStoreDirectoryFile.getPath() );
        database.shutdown();
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_storemigration_StoreMigrationTool.java
|
379
|
{
@Override
public boolean accept( StoreFile item )
{
return item.existsInBoth;
}
};
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_storemigration_StoreFile.java
|
380
|
/**
 * Test {@link UpgradeConfiguration} that never vetoes an automatic upgrade:
 * the check is a no-op, so upgrades always proceed.
 */
static class AlwaysAllowedUpgradeConfiguration implements UpgradeConfiguration
{
@Override
public void checkConfigurationAllowsAutomaticUpgrade()
{
}
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_storemigration_MigrationTestUtils.java
|
381
|
/**
 * Shared helpers for the store-migration tests: building configs and sample
 * data, locating the bundled legacy sample database, corrupting store files
 * (wrong version string, truncation), and comparing store directories.
 */
public class MigrationTestUtils
{
    public static Config defaultConfig()
    {
        return defaultConfig( MapUtil.stringMap() );
    }

    public static Config defaultConfig( Map<String, String> inputParams )
    {
        return new Config( inputParams, GraphDatabaseSettings.class );
    }

    /** Returns the array {0, 1, ..., 99}, large enough to force dynamic-record storage. */
    public static int[] makeLongArray()
    {
        int[] longArray = new int[100];
        for ( int i = 0; i < 100; i++ )
        {
            longArray[i] = i;
        }
        return longArray;
    }

    /** Returns "characters" repeated 100 times, long enough to force dynamic-record storage. */
    static String makeLongString()
    {
        StringBuilder builder = new StringBuilder();
        for ( int i = 0; i < 100; i++ )
        {
            builder.append( "characters" );
        }
        return builder.toString();
    }

    /**
     * Overwrites the trailing version string of {@code storeFile} with
     * {@code versionString}. The channel is closed even if writing fails.
     */
    static void changeVersionNumber( FileSystemAbstraction fileSystem, File storeFile, String versionString )
            throws IOException
    {
        byte[] versionBytes = UTF8.encode( versionString );
        StoreChannel fileChannel = fileSystem.open( storeFile, "rw" );
        try
        {
            fileChannel.position( fileSystem.getFileSize( storeFile ) - versionBytes.length );
            fileChannel.write( ByteBuffer.wrap( versionBytes ) );
        }
        finally
        {
            // Close in finally so a failed position/write does not leak the channel.
            fileChannel.close();
        }
    }

    /**
     * Cuts off the trailing version string of {@code storeFile}, simulating a
     * store that was not shut down cleanly.
     */
    static void truncateFile( FileSystemAbstraction fileSystem, File storeFile,
            String suffixToDetermineTruncationLength ) throws IOException
    {
        byte[] versionBytes = UTF8.encode( suffixToDetermineTruncationLength );
        StoreChannel fileChannel = fileSystem.open( storeFile, "rw" );
        try
        {
            fileChannel.truncate( fileSystem.getFileSize( storeFile ) - versionBytes.length );
        }
        finally
        {
            fileChannel.close();
        }
    }

    /** Truncates {@code storeFile} to exactly {@code newLength} bytes. */
    static void truncateToFixedLength( FileSystemAbstraction fileSystem, File storeFile, int newLength )
            throws IOException
    {
        StoreChannel fileChannel = fileSystem.open( storeFile, "rw" );
        try
        {
            fileChannel.truncate( newLength );
        }
        finally
        {
            fileChannel.close();
        }
    }

    public static void prepareSampleLegacyDatabase( EphemeralFileSystemAbstraction workingFs,
            File workingDirectory ) throws IOException
    {
        File resourceDirectory = findOldFormatStoreDirectory();
        workingFs.copyRecursivelyFromOtherFs( resourceDirectory, new DefaultFileSystemAbstraction(), workingDirectory );
    }

    public static void prepareSampleLegacyDatabase( FileSystemAbstraction workingFs, File workingDirectory ) throws IOException
    {
        File resourceDirectory = findOldFormatStoreDirectory();
        workingFs.deleteRecursively( workingDirectory );
        workingFs.mkdirs( workingDirectory );
        // TODO only works with DefaultFileSystemAbstraction
        FileUtils.copyRecursively( resourceDirectory, workingDirectory );
    }

    public static File findOldFormatStoreDirectory()
    {
        return findDatabaseDirectory( LegacyStore.class, "exampledb" );
    }

    /**
     * Locates a bundled sample database directory on the classpath, resolved
     * relative to {@code resourceName}, and fails fast with a descriptive
     * error if any part of the expected layout is missing.
     */
    public static File findDatabaseDirectory( Class<?> resourceName, String directoryName )
    {
        URL legacyStoreResource = resourceName.getResource( directoryName + "/neostore" );
        File storeFile = new File( legacyStoreResource.getFile() );
        if ( ! storeFile.exists() )
        {
            throw new RuntimeException( format( "Cannot find %s", storeFile ) );
        }
        File parentFile = storeFile.getParentFile();
        if ( parentFile == null )
        {
            throw new RuntimeException( format( "No parent for %s", storeFile ) );
        }
        if ( ! parentFile.exists() )
        {
            throw new RuntimeException( format( "Cannot find %s", parentFile ) );
        }
        return parentFile;
    }

    /**
     * Checks that every legacy store file in {@code workingDirectory} ends
     * with exactly the given version string.
     */
    public static boolean allStoreFilesHaveVersion( FileSystemAbstraction fileSystem, File workingDirectory,
            String version ) throws IOException
    {
        for ( StoreFile storeFile : StoreFile.legacyStoreFiles() )
        {
            StoreChannel channel = fileSystem.open( new File( workingDirectory, storeFile.storeFileName() ), "r" );
            String foundVersion;
            try
            {
                int length = UTF8.encode( version ).length;
                byte[] bytes = new byte[length];
                ByteBuffer buffer = ByteBuffer.wrap( bytes );
                channel.position( channel.size() - length );
                channel.read( buffer );
                foundVersion = UTF8.decode( bytes );
            }
            finally
            {
                // Close in finally so a short or failing read does not leak the channel.
                channel.close();
            }
            if ( !version.equals( foundVersion ) )
            {
                return false;
            }
        }
        return true;
    }

    public static boolean containsAnyLogicalLogs( FileSystemAbstraction fileSystem, File directory )
    {
        for ( File workingFile : fileSystem.listFiles( directory ) )
        {
            if ( workingFile.getName().contains( "nioneo_logical" ))
            {
                // Found one; no need to scan the rest of the directory.
                return true;
            }
        }
        return false;
    }

    public static boolean containsAnyStoreFiles( FileSystemAbstraction fileSystem, File directory )
    {
        for ( StoreFile file : StoreFile.values() )
        {
            if ( fileSystem.fileExists( new File( directory, file.storeFileName() ) ) )
            {
                return true;
            }
        }
        return false;
    }

    /**
     * Asserts byte-for-byte equality of every regular file in {@code original}
     * with its same-named counterpart in {@code other}.
     */
    public static void verifyFilesHaveSameContent( FileSystemAbstraction fileSystem, File original,
            File other ) throws IOException
    {
        for ( File originalFile : fileSystem.listFiles( original ) )
        {
            File otherFile = new File( other, originalFile.getName() );
            if ( !fileSystem.isDirectory( originalFile ) )
            {
                StoreChannel originalChannel = fileSystem.open( originalFile, "r" );
                StoreChannel otherChannel = fileSystem.open( otherFile, "r" );
                try
                {
                    ByteBuffer buffer = ByteBuffer.allocate( 1 );
                    while( true )
                    {
                        if ( !readAndFlip( originalChannel, buffer, 1 ) )
                        {
                            break;
                        }
                        int originalByte = buffer.get();
                        if ( !readAndFlip( otherChannel, buffer, 1 ) )
                        {
                            fail( "Files have different sizes" );
                        }
                        assertEquals( "Different content in " + originalFile.getName(), originalByte, buffer.get() );
                    }
                }
                finally
                {
                    originalChannel.close();
                    otherChannel.close();
                }
            }
        }
    }

    // Never vetoes an automatic upgrade; see alwaysAllowed().
    static class AlwaysAllowedUpgradeConfiguration implements UpgradeConfiguration
    {
        @Override
        public void checkConfigurationAllowsAutomaticUpgrade()
        {
        }
    }

    public static UpgradeConfiguration alwaysAllowed()
    {
        return new AlwaysAllowedUpgradeConfiguration();
    }

    /** The isolated sub-directory a migration writes into before files are moved back. */
    public static File isolatedMigrationDirectoryOf( File dbDirectory )
    {
        return new File( dbDirectory, "upgrade" );
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_storemigration_MigrationTestUtils.java
|
382
|
/**
 * Accepts the filenames of transaction log artifacts: the active-log marker,
 * logical logs (current and backups) and the transaction manager's tx logs.
 */
private static final class LogicalLogFilenameFilter implements
        FilenameFilter
{
    private static final String[] logFilenamePatterns = { "active_tx_log",
            "nioneo_logical\\.log.*", /* covers current log, active log marker
                                         and backups */
            "tm_tx_log\\..*" };

    @Override
    public boolean accept( File dir, String name )
    {
        boolean matched = false;
        for ( int i = 0; !matched && i < logFilenamePatterns.length; i++ )
        {
            matched = name.matches( logFilenamePatterns[i] );
        }
        return matched;
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_storemigration_LogFiles.java
|
383
|
/**
 * Helper for moving transaction log files between store directories during
 * an upgrade.
 */
public class LogFiles
{
    // Accepts the filenames of transaction log artifacts.
    private static final class LogicalLogFilenameFilter implements
            FilenameFilter
    {
        private static final String[] logFilenamePatterns = { "active_tx_log",
                "nioneo_logical\\.log.*", /* covers current log, active log marker
                                             and backups */
                "tm_tx_log\\..*" };

        @Override
        public boolean accept( File dir, String name )
        {
            for ( String pattern : logFilenamePatterns )
            {
                if ( name.matches( pattern ) )
                {
                    return true;
                }
            }
            return false;
        }
    }

    /**
     * Moves all logical logs of a database from one directory
     * to another. Since it just renames files (the standard way of moving with
     * JDK6) from and to must be on the same disk partition.
     * @param fs the file system to operate on
     *
     * @param fromDirectory The directory that hosts the database and its logs
     * @param toDirectory The directory to move the log files to
     * @throws IOException If any of the move operations fail for any reason.
     */
    public static void move( FileSystemAbstraction fs, File fromDirectory,
            File toDirectory ) throws IOException
    {
        assert fs.isDirectory( fromDirectory );
        assert fs.isDirectory( toDirectory );
        FilenameFilter filter = new LogicalLogFilenameFilter();
        for ( File logFile : fs.listFiles( fromDirectory ) )
        {
            // Braced body: the original unbraced if invited accidental
            // statement-scoping bugs on future edits.
            if ( filter.accept( fromDirectory, logFile.getName() ) )
            {
                StoreFile.moveFile( fs, logFile.getName(), fromDirectory, toDirectory );
            }
        }
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_storemigration_LogFiles.java
|
384
|
/**
 * Moves a database's store and log files between the working directory and
 * the backup / isolated-upgrade directories used during a store upgrade.
 */
public class DatabaseFiles
{
    private final FileSystemAbstraction fs;

    public DatabaseFiles( FileSystemAbstraction fs )
    {
        this.fs = fs;
    }

    /**
     * Moves the legacy store files out of {@code workingDirectory} into a
     * freshly created {@code backupDirectory}; refuses to run if a backup
     * from a previous attempt is already present.
     */
    public void moveToBackupDirectory( File workingDirectory, File backupDirectory )
    {
        if ( !fs.fileExists( backupDirectory ) )
        {
            fs.mkdir( backupDirectory );
            move( workingDirectory, backupDirectory, StoreFile.legacyStoreFiles() );
            return;
        }
        throw new StoreUpgrader.UnableToUpgradeException( String.format( "Cannot proceed with upgrade " +
                "because there is an existing upgrade backup in the way at %s from a previous upgrade attempt. " +
                "If you do not need this backup please delete it or move it out of the way before re-attempting upgrade.",
                backupDirectory.getAbsolutePath() ) );
    }

    /** Moves the freshly migrated (current-format) store files back into the working directory. */
    public void moveToWorkingDirectory( File upgradeDirectory, File workingDirectory )
    {
        move( upgradeDirectory, workingDirectory, StoreFile.currentStoreFiles() );
    }

    // Moves the given store files plus all log files, wrapping any I/O
    // failure in the upgrade-specific unchecked exception.
    private void move( File source, File destination, Iterable<StoreFile> files )
    {
        try
        {
            StoreFile.move( fs, source, destination, files );
            LogFiles.move( fs, source, destination );
        }
        catch ( IOException e )
        {
            throw new StoreUpgrader.UnableToUpgradeException( e );
        }
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_storemigration_DatabaseFiles.java
|
385
|
/**
 * Tests for {@link CurrentDatabase}: a store directory is only "at current
 * version" if every store file passes the version check.
 */
public class CurrentDatabaseTest
{
@Test
public void shouldRejectStoreWhereOneFileHasTheWrongVersion() throws Exception
{
File workingDirectory = new File( "target/" + CurrentDatabaseTest.class.getSimpleName() );
StoreVersionCheck storeVersionCheck = mock( StoreVersionCheck.class );
// Stub exactly one file (the node store) to fail the version check...
when( storeVersionCheck.hasVersion( eq( new File( workingDirectory, "neostore.nodestore.db" ) ), anyString() ) )
.thenReturn( Pair.<Outcome,String>of( Outcome.missingStoreFile, null ) );
// ...while every other file passes. Note the not(eq(..)) matcher.
when( storeVersionCheck.hasVersion( not( eq( new File( workingDirectory, "neostore.nodestore.db" ) ) ),
anyString() ) )
.thenReturn( Pair.<Outcome,String>of( Outcome.ok, null ) );
// A single failing file must make the whole directory be rejected.
assertFalse( new CurrentDatabase( storeVersionCheck ).storeFilesAtCurrentVersion( workingDirectory ) );
}
@Test
public void shouldAcceptStoreWhenAllFilesHaveTheCorrectVersion()
{
File workingDirectory = new File( "target/" + CurrentDatabaseTest.class.getSimpleName() );
StoreVersionCheck storeVersionCheck = mock( StoreVersionCheck.class );
// All files pass the version check, so the directory must be accepted.
when( storeVersionCheck.hasVersion( any( File.class ), anyString() ) ).thenReturn(
Pair.<Outcome,String>of( Outcome.ok, null ) );
assertTrue( new CurrentDatabase( storeVersionCheck ).storeFilesAtCurrentVersion( workingDirectory ) );
}
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_storemigration_CurrentDatabaseTest.java
|
386
|
{
@Override
public void migrate( LegacyStore legacyStore, NeoStore neoStore ) throws IOException
{
super.migrate( legacyStore, neoStore );
throw new RuntimeException( "This upgrade is failing" );
}
};
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_storemigration_StoreUpgraderInterruptionTestIT.java
|
387
|
{
@Override
public void moveToBackupDirectory( File workingDirectory, File backupDirectory )
{
fileSystem.mkdir( backupDirectory );
throw new RuntimeException( "Failing to backup working directory" );
}
};
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_storemigration_StoreUpgraderInterruptionTestIT.java
|
388
|
/**
 * Integration tests for {@link StoreUpgrader} against a real (ephemeral-fs
 * copy of a) legacy sample database: successful upgrade, backup creation,
 * configuration veto, and refusal on corrupt or wrong-version stores.
 */
public class StoreUpgraderTestIT
{
@Test
public void shouldUpgradeAnOldFormatStore() throws IOException
{
// Precondition: the sample db really is at the legacy version.
assertTrue( allStoreFilesHaveVersion( fileSystem, dbDirectory, LegacyStore.LEGACY_VERSION ) );
newUpgrader( alwaysAllowed(), new StoreMigrator( new SilentMigrationProgressMonitor() ),
new DatabaseFiles( fileSystem ) ).attemptUpgrade( new File( dbDirectory, NeoStore.DEFAULT_NAME ) );
// After upgrade: current version, old logs gone, isolated migration dir cleaned up.
assertTrue( allStoreFilesHaveVersion( fileSystem, dbDirectory, ALL_STORES_VERSION ) );
assertFalse( containsAnyLogicalLogs( fileSystem, dbDirectory ) );
assertFalse( containsAnyStoreFiles( fileSystem, isolatedMigrationDirectoryOf( dbDirectory ) ) );
}
@Test
public void shouldLeaveACopyOfOriginalStoreFilesInBackupDirectory() throws IOException
{
newUpgrader( alwaysAllowed(), new StoreMigrator( new SilentMigrationProgressMonitor() ), new DatabaseFiles( fileSystem ) )
.attemptUpgrade( new File( dbDirectory, NeoStore.DEFAULT_NAME ) );
// The backup must be byte-identical to the pristine legacy store, logs included.
File backupDirectory = new File( dbDirectory, "upgrade_backup" );
verifyFilesHaveSameContent( fileSystem, MigrationTestUtils.findOldFormatStoreDirectory(), backupDirectory );
assertTrue( containsAnyLogicalLogs( fileSystem, backupDirectory ) );
}
@Test
public void shouldBackupOriginalStoreEvenIfMessagesLogIsMissing() throws IOException
{
// given
fileSystem.deleteFile( new File( dbDirectory, StringLogger.DEFAULT_NAME ) );
// when
newUpgrader( alwaysAllowed(), new StoreMigrator( new SilentMigrationProgressMonitor() ), new DatabaseFiles( fileSystem ) )
.attemptUpgrade( new File( dbDirectory, NeoStore.DEFAULT_NAME ) );
// then
File backupDirectory = new File( dbDirectory, "upgrade_backup" );
assertFalse( fileSystem.fileExists( new File( dbDirectory, StringLogger.DEFAULT_NAME ) ) );
assertFalse( fileSystem.fileExists( new File( backupDirectory, StringLogger.DEFAULT_NAME ) ) );
}
@Test
public void shouldHaltUpgradeIfUpgradeConfigurationVetoesTheProcess() throws IOException
{
// A configuration whose check always throws, simulating a user veto.
UpgradeConfiguration vetoingUpgradeConfiguration = new UpgradeConfiguration()
{
@Override
public void checkConfigurationAllowsAutomaticUpgrade()
{
throw new UpgradeNotAllowedByConfigurationException( "vetoed" );
}
};
try
{
newUpgrader( vetoingUpgradeConfiguration, new StoreMigrator( new SilentMigrationProgressMonitor() ),
new DatabaseFiles( fileSystem ) ).attemptUpgrade( new File( dbDirectory, NeoStore.DEFAULT_NAME ) );
fail( "Should throw exception" );
}
catch ( UpgradeNotAllowedByConfigurationException e )
{
// expected
}
}
@Test
public void shouldLeaveAllFilesUntouchedIfWrongVersionNumberFound() throws IOException
{
File comparisonDirectory = new File( "target/" + StoreUpgraderTestIT.class.getSimpleName()
+ "shouldLeaveAllFilesUntouchedIfWrongVersionNumberFound-comparison" );
// Corrupt one store file with an unexpected version, keep a pristine copy for comparison.
changeVersionNumber( fileSystem, new File( dbDirectory, "neostore.nodestore.db" ), "v0.9.5" );
fileSystem.deleteRecursively( comparisonDirectory );
fileSystem.copyRecursively( dbDirectory, comparisonDirectory );
try
{
newUpgrader( alwaysAllowed(), new StoreMigrator( new SilentMigrationProgressMonitor() ),
new DatabaseFiles( fileSystem ) ).attemptUpgrade( new File( dbDirectory, NeoStore.DEFAULT_NAME ) );
fail( "Should throw exception" );
}
catch ( StoreUpgrader.UnexpectedUpgradingStoreVersionException e )
{
// expected
}
// A failed upgrade must not have modified the store directory at all.
verifyFilesHaveSameContent( fileSystem, comparisonDirectory, dbDirectory );
}
@Test
public void shouldRefuseToUpgradeIfAnyOfTheStoresWeNotShutDownCleanly() throws IOException
{
File comparisonDirectory = new File( "target/" + StoreUpgraderTestIT.class.getSimpleName()
+ "shouldRefuseToUpgradeIfAnyOfTheStoresWeNotShutDownCleanly-comparison" );
// Truncating the trailing version string simulates an unclean shutdown of one store.
truncateFile( fileSystem, new File( dbDirectory, "neostore.propertystore.db.index.keys" ),
"StringPropertyStore v0.9.9" );
fileSystem.deleteRecursively( comparisonDirectory );
fileSystem.copyRecursively( dbDirectory, comparisonDirectory );
try
{
newUpgrader( alwaysAllowed(), new StoreMigrator( new SilentMigrationProgressMonitor() ),
new DatabaseFiles( fileSystem ) ).attemptUpgrade( new File( dbDirectory, NeoStore.DEFAULT_NAME ) );
fail( "Should throw exception" );
}
catch ( StoreUpgrader.UpgradingStoreVersionNotFoundException e )
{
// expected
}
verifyFilesHaveSameContent( fileSystem, comparisonDirectory, dbDirectory );
}
@Test
public void shouldRefuseToUpgradeIfAllOfTheStoresWereNotShutDownCleanly() throws IOException
{
File comparisonDirectory = new File( "target/" + StoreUpgraderTestIT.class.getSimpleName()
+ "shouldRefuseToUpgradeIfAllOfTheStoresWeNotShutDownCleanly-comparison" );
truncateAllFiles( fileSystem, dbDirectory );
fileSystem.deleteRecursively( comparisonDirectory );
fileSystem.copyRecursively( dbDirectory, comparisonDirectory );
try
{
newUpgrader( alwaysAllowed(), new StoreMigrator( new SilentMigrationProgressMonitor() ),
new DatabaseFiles( fileSystem ) ).attemptUpgrade( new File( dbDirectory, NeoStore.DEFAULT_NAME ) );
fail( "Should throw exception" );
}
catch ( StoreUpgrader.UpgradingStoreVersionNotFoundException e )
{
// expected
}
verifyFilesHaveSameContent( fileSystem, comparisonDirectory, dbDirectory );
}
// Truncates the version suffix off every legacy store file in the directory.
public static void truncateAllFiles( FileSystemAbstraction fileSystem, File workingDirectory ) throws IOException
{
for ( StoreFile storeFile : StoreFile.legacyStoreFiles() )
{
truncateFile( fileSystem, new File( workingDirectory, storeFile.storeFileName() ), storeFile.legacyVersion() );
}
}
@Rule public EphemeralFileSystemRule fs = new EphemeralFileSystemRule();
private final File dbDirectory = new File( "dir" );
private EphemeralFileSystemAbstraction fileSystem;
private StoreUpgrader newUpgrader( UpgradeConfiguration config, StoreMigrator migrator, DatabaseFiles files )
{
return new StoreUpgrader( defaultConfig(), config, new UpgradableDatabase( new StoreVersionCheck( fs.get() ) ), migrator,
files, new DefaultIdGeneratorFactory(), fs.get() );
}
@Before
public void before() throws Exception
{
// Copy the bundled legacy sample database into the ephemeral file system.
fileSystem = fs.get();
prepareSampleLegacyDatabase( fileSystem, dbDirectory );
}
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_storemigration_StoreUpgraderTestIT.java
|
389
|
{
@Override
public void checkConfigurationAllowsAutomaticUpgrade()
{
throw new UpgradeNotAllowedByConfigurationException( "vetoed" );
}
};
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_storemigration_StoreUpgraderTestIT.java
|
390
|
/**
 * Base class for Spring transaction integration: delegates every JTA
 * operation to the {@link TransactionManager} resolved on each call from the
 * graph database's dependency resolver (see the note below on why the TM
 * itself is never cached).
 */
public abstract class BaseSpringTransactionImpl
{
/*
 * The GD API reference below is used exclusively for accessing
 * the TransactionManager. It is on purpose _not_ replaced with a
 * reference to that however. In HA settings the reference passed is
 * to a HAGD which when restarted has its TM changed. If we kept a
 * reference to the TM it would be valid until the next internal
 * restart. In contrast, this way always looks up the "real"
 * reference and keeps the Spring integration working even when
 * HA master switches happen.
 */
private final GraphDatabaseAPI neo4j;
public BaseSpringTransactionImpl( GraphDatabaseAPI neo4j )
{
this.neo4j = neo4j;
}
// Resolved fresh on every call; see the class-level note on HA restarts.
private TransactionManager getTxManager()
{
return neo4j.getDependencyResolver().resolveDependency( TransactionManager.class );
}
/** Begins a new transaction on the calling thread. */
public void begin() throws NotSupportedException, SystemException
{
getTxManager().begin();
}
/** Commits the transaction associated with the calling thread. */
public void commit() throws RollbackException, HeuristicMixedException,
HeuristicRollbackException, SecurityException, IllegalStateException,
SystemException
{
getTransaction().commit();
}
/** @return the JTA status code of the calling thread's transaction. */
public int getStatus() throws SystemException
{
return getTxManager().getStatus();
}
/** @return the transaction associated with the calling thread, or null. */
public Transaction getTransaction() throws SystemException
{
return getTxManager().getTransaction();
}
/** Resumes the given (previously suspended) transaction on the calling thread. */
public void resume( Transaction tx ) throws InvalidTransactionException,
IllegalStateException, SystemException
{
getTxManager().resume(tx);
}
/** Rolls back the transaction associated with the calling thread. */
public void rollback() throws IllegalStateException, SecurityException,
SystemException
{
getTransaction().rollback();
}
/** Marks the calling thread's transaction so its only possible outcome is rollback. */
public void setRollbackOnly() throws IllegalStateException, SystemException
{
getTransaction().setRollbackOnly();
}
/** Sets the timeout, in seconds, for transactions subsequently begun on this thread. */
public void setTransactionTimeout( int sec ) throws SystemException
{
getTxManager().setTransactionTimeout(sec);
}
/** Suspends and returns the calling thread's transaction. */
public Transaction suspend() throws SystemException
{
return getTxManager().suspend();
}
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_transaction_BaseSpringTransactionImpl.java
|
391
|
/**
 * Base class for Neo4j transaction managers, extending the JTA
 * {@link TransactionManager} with lifecycle hooks, recovery, and access
 * to per-transaction state.
 */
public abstract class AbstractTransactionManager implements TransactionManager, Lifecycle
{
    public abstract void doRecovery() throws Throwable;

    /**
     * Returns the {@link TransactionState} associated with the current transaction.
     * If no transaction is active for the current thread {@link TransactionState#NO_STATE}
     * should be returned.
     *
     * @return state associated with the current transaction for this thread.
     */
    public abstract TransactionState getTransactionState();

    public abstract int getEventIdentifier();

    /** Begins a transaction; the {@code forceMode} hint is ignored by default. */
    public void begin( ForceMode forceMode ) throws NotSupportedException, SystemException
    {
        begin();
    }

    /**
     * @return which {@link ForceMode} the transaction tied to the calling
     * thread will have when committing. Default is {@link ForceMode#forced}
     */
    public ForceMode getForceMode()
    {
        return ForceMode.forced;
    }

    /**
     * @return the error that happened during recovery, if recovery has taken place, null otherwise.
     */
    public Throwable getRecoveryError()
    {
        return null;
    }

    /**
     * Asserts the calling thread is inside a transaction, throwing
     * {@link NotInTransactionException} otherwise.
     */
    public void assertInTransaction()
    {
        Transaction current;
        try
        {
            current = getTransaction();
        }
        catch ( SystemException e )
        {
            throw new IllegalStateException( "Unable to determine transaction state", e );
        }
        if ( current == null )
        {
            throw new NotInTransactionException();
        }
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_transaction_AbstractTransactionManager.java
|
392
|
/**
 * A {@link MigrationProgressMonitor} that reports progress both to a
 * {@link PrintStream} (dots for the interactive user) and to the store's
 * {@link StringLogger} (messages for the log file).
 */
public class VisibleMigrationProgressMonitor implements MigrationProgressMonitor
{
    private final StringLogger logger;
    private final PrintStream out;

    public VisibleMigrationProgressMonitor( StringLogger logger, PrintStream out )
    {
        this.logger = logger;
        this.out = out;
    }

    /** Writes the message to both the console stream and the log. */
    private void emit( String message )
    {
        out.println( message );
        logger.logMessage( message, true );
    }

    @Override
    public void started()
    {
        emit( "Starting upgrade of database store files" );
    }

    @Override
    public void percentComplete( int percent )
    {
        // One dot per callback on the console; log only every tenth percent
        // to keep the log file readable.
        out.print( "." );
        out.flush();
        if ( percent % 10 == 0 )
        {
            logger.logMessage( format( "Store upgrade %d%% complete", percent ), true );
        }
    }

    @Override
    public void finished()
    {
        // Terminate the dot line before printing the completion message.
        out.println();
        emit( "Finished upgrade of database store files" );
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_storemigration_monitoring_VisibleMigrationProgressMonitor.java
|
393
|
public class SilentMigrationProgressMonitor implements MigrationProgressMonitor
{
public void started()
{
}
public void percentComplete( int percent )
{
}
public void finished()
{
}
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_storemigration_monitoring_SilentMigrationProgressMonitor.java
|
394
|
/**
 * Verifies that the current code can read label/property indexes written in a
 * pre-built store shipped as a test resource ("neostore" and siblings).
 */
public class IndexFormatCompatibilityTest
{
    @Rule
    public TargetDirectory.TestDirectory storeDir = TargetDirectory.testDirForTest( getClass() );

    private GraphDatabaseService db;

    @Before
    public void startDatabase() throws IOException
    {
        // Copy the canned store into a fresh directory so the test never
        // mutates the checked-in resource, then start a database on it.
        String file = getClass().getResource( "neostore" ).getFile();
        FileUtils.copyRecursively( new File( file ).getParentFile(), storeDir.directory() );
        db = new GraphDatabaseFactory().newEmbeddedDatabase( storeDir.directory().getPath() );
    }

    @After
    public void shutdownDatabase()
    {
        // Guard against db being null when startDatabase() failed before the
        // assignment; an unguarded shutdown() would throw NPE here and mask
        // the original setup failure.
        if ( db != null )
        {
            db.shutdown();
        }
    }

    @Test
    public void shouldFindCorrectNodesUsingIndexedPropertyLookup() throws Exception
    {
        try ( Transaction tx = db.beginTx() )
        {
            assertEquals( IteratorUtil.<Integer>asSet(),
                    externalIds( db.findNodesByLabelAndProperty( label( "Person" ), "age", 0 ) ) );
            assertEquals( asSet( 0 ),
                    externalIds( db.findNodesByLabelAndProperty( label( "Person" ), "age", 1 ) ) );
            assertEquals( asSet( 1, 4 ),
                    externalIds( db.findNodesByLabelAndProperty( label( "Person" ), "age", 2 ) ) );
            assertEquals( asSet( 2, 5, 7 ),
                    externalIds( db.findNodesByLabelAndProperty( label( "Person" ), "age", 3 ) ) );
            assertEquals( asSet( 3, 6, 8, 9 ),
                    externalIds( db.findNodesByLabelAndProperty( label( "Person" ), "age", 4 ) ) );
            tx.success();
        }
    }

    @Test
    public void shouldFindCorrectNodesUsingUniquePropertyLookup() throws Exception
    {
        try ( Transaction tx = db.beginTx() )
        {
            assertEquals( 1, age( single( db.findNodesByLabelAndProperty( label( "Person" ), "externalId", 0 ) ) ) );
            assertEquals( 2, age( single( db.findNodesByLabelAndProperty( label( "Person" ), "externalId", 1 ) ) ) );
            assertEquals( 3, age( single( db.findNodesByLabelAndProperty( label( "Person" ), "externalId", 2 ) ) ) );
            assertEquals( 4, age( single( db.findNodesByLabelAndProperty( label( "Person" ), "externalId", 3 ) ) ) );
            assertEquals( 2, age( single( db.findNodesByLabelAndProperty( label( "Person" ), "externalId", 4 ) ) ) );
            assertTrue( asList( db.findNodesByLabelAndProperty( label( "Person" ), "externalId", 10 ) ).isEmpty() );
            tx.success();
        }
    }

    /** Collects the distinct "externalId" property values of the given nodes. */
    private Set<Integer> externalIds( Iterable<Node> nodes )
    {
        HashSet<Integer> externalIds = new HashSet<>();
        for ( Node node : nodes )
        {
            externalIds.add( ((Number) node.getProperty( "externalId" )).intValue() );
        }
        return externalIds;
    }

    /** Reads the "age" property of a node as an int. */
    private int age( Node node )
    {
        return ((Number) node.getProperty( "age" )).intValue();
    }
}
| false
|
community_lucene-index_src_test_java_org_neo4j_kernel_impl_storemigration_legacystore_indexcompat_IndexFormatCompatibilityTest.java
|
395
|
/**
 * Integration test reading node records out of a canned legacy-format store.
 */
public class ReadRecordsTestIT
{
    private final FileSystemAbstraction fs = new DefaultFileSystemAbstraction();

    @Test
    public void shouldReadNodeRecords() throws IOException
    {
        URL nodeStoreFile = getClass().getResource( "exampledb/neostore.nodestore.db" );
        // try-with-resources: the original closed the reader only after the
        // assertions, leaking the underlying channel whenever one failed.
        try ( LegacyNodeStoreReader nodeStoreReader =
                new LegacyNodeStoreReader( fs, new File( nodeStoreFile.getFile() ) ) )
        {
            assertEquals( 1002, nodeStoreReader.getMaxId() );
            Iterator<NodeRecord> records = nodeStoreReader.readNodeStore();
            int nodeCount = 0;
            for ( NodeRecord record : loop( records ) )
            {
                if ( record.inUse() )
                {
                    nodeCount++;
                }
            }
            assertEquals( 501, nodeCount );
        }
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_storemigration_legacystore_ReadRecordsTestIT.java
|
396
|
/**
 * Unit test for {@link LegacyStore}'s version-string length invariant.
 */
public class LegacyStoreTest
{
    @Test
    public void shouldFailIfEncodedVersionLengthDiffers() throws Exception
    {
        // GIVEN a legacy version whose encoded length differs from the current one
        IllegalStateException caught = null;
        try
        {
            // WHEN
            assertLegacyAndCurrentVersionHaveSameLength( "111", ALL_STORES_VERSION );
        }
        catch ( IllegalStateException e )
        {
            caught = e;
        }

        // THEN the check must have rejected the mismatch with a clear message
        if ( caught == null )
        {
            fail( "Should have thrown exception" );
        }
        assertThat( caught.getMessage(), containsString( "remain the same between versions" ) );
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_storemigration_legacystore_LegacyStoreTest.java
|
397
|
/**
 * Reader/copier for a store laid out in the legacy {@code v0.A.0} format.
 * Opens readers over the legacy node, property-index and property store files,
 * and copies unchanged store files into a new location while rewriting the
 * version trailer at the end of each file to the current version string.
 */
public class LegacyStore implements Closeable
{
    // The on-disk version string this class knows how to read/copy from.
    public static final String LEGACY_VERSION = "v0.A.0";

    // Base path of the legacy neostore file; sibling store files are derived
    // by appending the per-store name suffixes from StoreFactory.
    private final File storageFileName;
    // All readers opened in initStorage(), closed together in close().
    private final Collection<Closeable> allStoreReaders = new ArrayList<Closeable>();
    private LegacyNodeStoreReader nodeStoreReader;
    private LegacyPropertyIndexStoreReader propertyIndexReader;
    private LegacyPropertyStoreReader propertyStoreReader;
    private final FileSystemAbstraction fs;

    public LegacyStore( FileSystemAbstraction fs, File storageFileName ) throws IOException
    {
        this.fs = fs;
        this.storageFileName = storageFileName;
        // Fail fast: trailer rewriting (setStoreVersionTrailer) overwrites the
        // old version in place, which only works if both encode to the same
        // number of bytes.
        assertLegacyAndCurrentVersionHaveSameLength( LEGACY_VERSION, CommonAbstractStore.ALL_STORES_VERSION );
        initStorage();
    }

    /**
     * Store files that don't need migration are just copied and have their trailing versions replaced
     * by the current version. For this to work the legacy version and the current version must have the
     * same encoded length.
     */
    static void assertLegacyAndCurrentVersionHaveSameLength( String legacyVersion, String currentVersion )
    {
        if ( UTF8.encode( legacyVersion ).length != UTF8.encode( currentVersion ).length )
        {
            throw new IllegalStateException( "Encoded version string length must remain the same between versions" );
        }
    }

    /** Opens readers over the legacy node, property-key-token and property store files. */
    protected void initStorage() throws IOException
    {
        allStoreReaders.add( nodeStoreReader = new LegacyNodeStoreReader( fs, new File( getStorageFileName().getPath() + StoreFactory.NODE_STORE_NAME ) ) );
        allStoreReaders.add( propertyIndexReader = new LegacyPropertyIndexStoreReader( fs, new File( getStorageFileName().getPath() + StoreFactory.PROPERTY_KEY_TOKEN_STORE_NAME ) ) );
        allStoreReaders.add( propertyStoreReader = new LegacyPropertyStoreReader( fs, new File( getStorageFileName().getPath() + StoreFactory.PROPERTY_STORE_NAME ) ) );
    }

    public File getStorageFileName()
    {
        return storageFileName;
    }

    /** Reads the next 4 bytes of the buffer as an unsigned 32-bit value. */
    public static long getUnsignedInt(ByteBuffer buf)
    {
        return buf.getInt()&0xFFFFFFFFL;
    }

    /**
     * Recombines a 32-bit base with its high-bit modifier into a pointer;
     * the special INTEGER_MINUS_ONE sentinel (with no modifier) maps to -1
     * ("no record").
     */
    protected static long longFromIntAndMod( long base, long modifier )
    {
        return modifier == 0 && base == IdGeneratorImpl.INTEGER_MINUS_ONE ? -1 : base|modifier;
    }

    @Override
    public void close() throws IOException
    {
        // NOTE(review): if an early close() throws, later readers stay open —
        // acceptable for a one-shot migration tool, but worth confirming.
        for ( Closeable storeReader : allStoreReaders )
        {
            storeReader.close();
        }
    }

    /**
     * Copies one store file (and its paired ".id" file) to the target location,
     * then overwrites the trailing version string with {@code versionTrailer}.
     */
    private void copyStore( File targetBaseStorageFileName, String storeNamePart, String versionTrailer )
            throws IOException
    {
        File targetStoreFileName = new File( targetBaseStorageFileName.getPath() + storeNamePart );
        fs.copyFile( new File( storageFileName + storeNamePart ), targetStoreFileName );
        setStoreVersionTrailer( targetStoreFileName, versionTrailer );
        fs.copyFile(
                new File( storageFileName + storeNamePart + ".id" ),
                new File( targetBaseStorageFileName + storeNamePart + ".id" ) );
    }

    /** Overwrites the last {@code trailer.length} bytes of the file in place. */
    private void setStoreVersionTrailer( File targetStoreFileName, String versionTrailer ) throws IOException
    {
        try ( StoreChannel fileChannel = fs.open( targetStoreFileName, "rw" ) )
        {
            byte[] trailer = UTF8.encode( versionTrailer );
            fileChannel.position( fileChannel.size() - trailer.length );
            fileChannel.write( ByteBuffer.wrap( trailer ) );
        }
    }

    // The copyXxxStore methods below each copy one unchanged legacy store file
    // into the new store's location, stamping the current type-and-version
    // descriptor for that store type.

    public void copyNeoStore( NeoStore neoStore ) throws IOException
    {
        copyStore( neoStore.getStorageFileName(), "", neoStore.getTypeAndVersionDescriptor() );
    }

    public void copyRelationshipStore( NeoStore neoStore ) throws IOException
    {
        copyStore( neoStore.getStorageFileName(), StoreFactory.RELATIONSHIP_STORE_NAME,
                buildTypeDescriptorAndVersion( RelationshipStore.TYPE_DESCRIPTOR ) );
    }

    public void copyRelationshipTypeTokenStore( NeoStore neoStore ) throws IOException
    {
        copyStore( neoStore.getStorageFileName(), StoreFactory.RELATIONSHIP_TYPE_TOKEN_STORE_NAME,
                buildTypeDescriptorAndVersion( RelationshipTypeTokenStore.TYPE_DESCRIPTOR ) );
    }

    public void copyRelationshipTypeTokenNameStore( NeoStore neoStore ) throws IOException
    {
        copyStore( neoStore.getStorageFileName(), StoreFactory.RELATIONSHIP_TYPE_TOKEN_NAMES_STORE_NAME,
                buildTypeDescriptorAndVersion( DynamicStringStore.TYPE_DESCRIPTOR ) );
    }

    public void copyPropertyStore( NeoStore neoStore ) throws IOException
    {
        copyStore( neoStore.getStorageFileName(), StoreFactory.PROPERTY_STORE_NAME,
                buildTypeDescriptorAndVersion( PropertyStore.TYPE_DESCRIPTOR ) );
    }

    public void copyPropertyKeyTokenStore( NeoStore neoStore ) throws IOException
    {
        copyStore( neoStore.getStorageFileName(), StoreFactory.PROPERTY_KEY_TOKEN_STORE_NAME,
                buildTypeDescriptorAndVersion( PropertyKeyTokenStore.TYPE_DESCRIPTOR ) );
    }

    public void copyPropertyKeyTokenNameStore( NeoStore neoStore ) throws IOException
    {
        copyStore( neoStore.getStorageFileName(), StoreFactory.PROPERTY_KEY_TOKEN_NAMES_STORE_NAME,
                buildTypeDescriptorAndVersion( DynamicStringStore.TYPE_DESCRIPTOR ) );
    }

    public void copyDynamicStringPropertyStore( NeoStore neoStore ) throws IOException
    {
        copyStore( neoStore.getStorageFileName(), StoreFactory.PROPERTY_STRINGS_STORE_NAME,
                buildTypeDescriptorAndVersion( DynamicStringStore.TYPE_DESCRIPTOR ) );
    }

    public void copyDynamicArrayPropertyStore( NeoStore neoStore ) throws IOException
    {
        copyStore( neoStore.getStorageFileName(), StoreFactory.PROPERTY_ARRAYS_STORE_NAME,
                buildTypeDescriptorAndVersion( DynamicArrayStore.TYPE_DESCRIPTOR ) );
    }

    public LegacyNodeStoreReader getNodeStoreReader()
    {
        return nodeStoreReader;
    }

    public LegacyPropertyIndexStoreReader getPropertyIndexReader()
    {
        return propertyIndexReader;
    }

    public LegacyPropertyStoreReader getPropertyStoreReader()
    {
        return propertyStoreReader;
    }

    /**
     * Fills {@code buffer} with up to {@code nrOfBytes} from the channel and
     * flips it for reading. IOExceptions are wrapped as RuntimeException so
     * this can be used from iterator callbacks.
     */
    static void readIntoBuffer( StoreChannel fileChannel, ByteBuffer buffer, int nrOfBytes )
    {
        buffer.clear();
        buffer.limit( nrOfBytes );
        try
        {
            fileChannel.read( buffer );
        }
        catch ( IOException e )
        {
            throw new RuntimeException( e );
        }
        buffer.flip();
    }

    /** Copies the legacy index.db file into the target directory, if present. */
    public void copyLegacyIndexStoreFile( File toDirectory ) throws IOException
    {
        File legacyDirectory = storageFileName.getParentFile();
        File fromFile = new File( legacyDirectory, IndexStore.INDEX_DB_FILE_NAME );
        if ( fromFile.exists() )
        {
            File toFile = new File( toDirectory, IndexStore.INDEX_DB_FILE_NAME );
            fs.copyFile( fromFile, toFile );
        }
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_storemigration_legacystore_LegacyStore.java
|
398
|
{
    // Record-id cursor; starts one before the first record so the
    // pre-increment in the loop below visits id 0 first.
    private long id = -1;
    // Scratch buffer exactly one record wide, reused across reads.
    ByteBuffer buffer = allocateDirect( RECORD_SIZE );

    @Override
    protected PropertyRecord fetchNextOrNull()
    {
        // Scan forward through the store, skipping records not marked
        // in-use; returns null when the store is exhausted.
        while ( ++id <= maxId )
        {
            readIntoBuffer( fileChannel, buffer, RECORD_SIZE );
            PropertyRecord record = readPropertyRecord( id, buffer );
            if ( record.inUse() )
            {
                return record;
            }
        }
        return null;
    }
};
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_storemigration_legacystore_LegacyPropertyStoreReader.java
|
399
|
/**
 * Sequential reader for the legacy ({@code v0.A.0}) property store file.
 * Decodes fixed-size 41-byte property records, including their packed
 * prev/next chain pointers and inline property blocks.
 */
public class LegacyPropertyStoreReader implements Closeable
{
    // Version trailer expected at the end of the legacy property store file.
    public static final String FROM_VERSION = "PropertyStore " + LegacyStore.LEGACY_VERSION;
    public static final int RECORD_SIZE =
            1/*next and prev high bits*/ + 4/*next*/ + 4/*prev*/ + 32 /*property blocks*/; // = 41
    private final StoreChannel fileChannel;
    // Number of whole records in the file (excluding the version trailer).
    private final long maxId;

    public LegacyPropertyStoreReader( FileSystemAbstraction fs, File file ) throws IOException
    {
        fileChannel = fs.open( file, "r" );
        int endHeaderSize = UTF8.encode( FROM_VERSION ).length;
        maxId = (fileChannel.size() - endHeaderSize) / RECORD_SIZE;
    }

    /**
     * Returns a lazy iterator over all in-use property records, in id order.
     * Records not marked in-use are skipped.
     */
    public Iterator<PropertyRecord> readPropertyStore() throws IOException
    {
        return new PrefetchingIterator<PropertyRecord>()
        {
            private long id = -1;
            ByteBuffer buffer = allocateDirect( RECORD_SIZE );

            @Override
            protected PropertyRecord fetchNextOrNull()
            {
                // NOTE(review): with maxId being the record COUNT, the
                // inclusive bound `<= maxId` appears to attempt one read past
                // the last record (into the version trailer); presumably that
                // trailing read never parses as in-use — worth confirming
                // whether the bound should be `< maxId`.
                while ( ++id <= maxId )
                {
                    readIntoBuffer( fileChannel, buffer, RECORD_SIZE );
                    PropertyRecord record = readPropertyRecord( id, buffer );
                    if ( record.inUse() )
                    {
                        return record;
                    }
                }
                return null;
            }
        };
    }

    /**
     * Decodes one 41-byte record from {@code buffer}. The record is marked
     * in-use iff at least one property block decodes successfully.
     */
    protected PropertyRecord readPropertyRecord( long id, ByteBuffer buffer )
    {
        PropertyRecord record = new PropertyRecord( id );
        /*
         * [pppp,nnnn] previous, next high bits
         */
        byte modifiers = buffer.get();
        long prevMod = ( ( modifiers & 0xF0L ) << 28 );
        long nextMod = ( ( modifiers & 0x0FL ) << 32 );
        long prevProp = getUnsignedInt( buffer );
        long nextProp = getUnsignedInt( buffer );
        record.setPrevProp( longFromIntAndMod( prevProp, prevMod ) );
        record.setNextProp( longFromIntAndMod( nextProp, nextMod ) );
        while ( buffer.hasRemaining() )
        {
            PropertyBlock newBlock = getPropertyBlock( buffer );
            if ( newBlock != null )
            {
                record.addPropertyBlock( newBlock );
                record.setInUse( true );
            }
            else
            {
                // We assume that storage is defragged
                break;
            }
        }
        return record;
    }

    /**
     * Decodes one property block starting at the buffer's position, or returns
     * null if the 8-byte header does not encode a valid property type
     * (interpreted as "no more blocks in this record").
     */
    private PropertyBlock getPropertyBlock( ByteBuffer buffer )
    {
        long header = buffer.getLong();
        PropertyType type = PropertyType.getPropertyType( header, true );
        if ( type == null )
        {
            return null;
        }
        PropertyBlock toReturn = new PropertyBlock();
        // toReturn.setInUse( true );
        // The header long doubles as the first value block.
        int numBlocks = type.calculateNumberOfBlocksUsed( header );
        long[] blockData = new long[numBlocks];
        blockData[0] = header; // we already have that
        for ( int i = 1; i < numBlocks; i++ )
        {
            blockData[i] = buffer.getLong();
        }
        toReturn.setValueBlocks( blockData );
        return toReturn;
    }

    @Override
    public void close() throws IOException
    {
        fileChannel.close();
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_storemigration_legacystore_LegacyPropertyStoreReader.java
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.