Unnamed: 0
int64 0
6.7k
| func
stringlengths 12
89.6k
| target
bool 2
classes | project
stringlengths 45
151
|
|---|---|---|---|
4,800
|
{
@Override
public Response<LockResult> lock( Master master, RequestContext context, long... ids )
{
return master.acquireNodeReadLock( context, ids );
}
}, LOCK_SERIALIZER )
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_HaRequestType201.java
|
4,801
|
{
@Override
public Response<LockResult> lock( Master master, RequestContext context, long... ids )
{
return master.acquireRelationshipWriteLock( context, ids );
}
}, LOCK_SERIALIZER )
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_HaRequestType201.java
|
4,802
|
/**
 * Thrown when populating an index fails, either because of an underlying exception
 * or with an explanatory message. Maps to {@code Status.Schema.IndexCreationFailure}.
 */
public class IndexPopulationFailedKernelException extends KernelException
{
    // Template shared by both constructors; arguments: user description, label id, property key id.
    private static final String MESSAGE_TEMPLATE = "Failed to populate index for %s [labelId: %d, propertyKeyId %d]";

    /** Population failed because of an underlying exception. */
    public IndexPopulationFailedKernelException( IndexDescriptor descriptor, String indexUserDescription,
            Throwable cause )
    {
        super( Status.Schema.IndexCreationFailure, cause, MESSAGE_TEMPLATE, indexUserDescription,
                descriptor.getLabelId(), descriptor.getPropertyKeyId() );
    }

    /** Population failed with an explanatory message rather than a cause. */
    public IndexPopulationFailedKernelException( IndexDescriptor descriptor, String indexUserDescription,
            String message )
    {
        super( Status.Schema.IndexCreationFailure, MESSAGE_TEMPLATE + ", due to " + message,
                indexUserDescription, descriptor.getLabelId(), descriptor.getPropertyKeyId() );
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_api_exceptions_index_IndexPopulationFailedKernelException.java
|
4,803
|
/**
 * {@link RemoteTxHook} used on HA slaves: transaction begin/finish events are forwarded
 * to the master, and any transactions streamed back in the response are applied locally.
 *
 * Fix: removed the private field {@code seen} (a concurrent set of Integers) — it was
 * initialized but never read or written anywhere in the class, i.e. dead state.
 */
public class SlaveTxHook implements RemoteTxHook
{
    private final Master master;
    private final HaXaDataSourceManager xaDsm;
    private final StringLogger log; // currently unused by this class; kept for the injected dependency
    private final RequestContextFactory contextFactory;

    public SlaveTxHook( Master master, HaXaDataSourceManager xaDsm,
            TxHookModeSwitcher.RequestContextFactoryResolver contextFactory, StringLogger log )
    {
        this.master = master;
        this.xaDsm = xaDsm;
        this.log = log;
        // Resolve the actual factory eagerly, once.
        this.contextFactory = contextFactory.get();
    }

    /**
     * Initializes the transaction on the master exactly once per local transaction,
     * applying any transactions the master streams back.
     */
    @Override
    public void remotelyInitializeTransaction( int eventIdentifier, TransactionState state )
    {
        if ( !state.isRemotelyInitialized() )
        {
            // Mark first, to ensure we never create more than one transaction on the master (exception below could
            // cause retries).
            state.markAsRemotelyInitialized();
            Response<Void> response = master.initializeTx( contextFactory.newRequestContext( eventIdentifier ) );
            xaDsm.applyTransactions( response );
        }
    }

    /** Tells the master the transaction finished (committed or rolled back). */
    @Override
    public void remotelyFinishTransaction( int eventIdentifier, boolean success )
    {
        Response<Void> response = master.finishTransaction(
                contextFactory.newRequestContext( eventIdentifier ), success );
        xaDsm.applyTransactions( response );
    }

    /** Ids are never freed during rollback on a slave. */
    @Override
    public boolean freeIdsDuringRollback()
    {
        return false;
    }
}
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_transaction_SlaveTxHook.java
|
4,804
|
// A wait/notify helper that tolerates the notification arriving before the wait,
// in which case the wait returns immediately instead of blocking.
private static class CompletionNotifier
{
// True when a completion has been signalled and not yet consumed by a waiter.
private boolean notified;
// Signals a completion and wakes all current waiters.
synchronized void completed()
{
notified = true;
notifyAll();
}
// Waits (bounded) for a completion, or returns immediately if one is already pending.
synchronized void waitForAnyCompletion()
{
if ( !notified )
{
// Already false on this branch; this assignment is redundant but harmless.
notified = false;
try
{
// Bounded wait: a missed/late notification costs at most ~2s, never a hang.
wait( 2000 /*wait timeout just for safety*/ );
}
catch ( InterruptedException e )
{
Thread.interrupted();
// Hmm, ok we got interrupted. No biggy I'd guess
}
}
else
{
// Consume the pending notification and return without waiting.
notified = false;
}
}
@Override
public String toString()
{
return "CompletionNotifier{id=" + System.identityHashCode( this ) +
",notified=" + notified +
'}';
}
}
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_transaction_MasterTxIdGenerator.java
|
4,805
|
{
@Override
public Void call()
{
try
{
pusher.queuePush( dataSource, slave, txId );
return null;
}
finally
{
notifier.completed();
}
}
};
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_transaction_MasterTxIdGenerator.java
|
4,806
|
{
@Override
public boolean accept( Slave item )
{
return item.getServerId() != externalAuthorServerId.intValue();
}
} );
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_transaction_MasterTxIdGenerator.java
|
4,807
|
{
@Override
protected void triggered( Throwable failure )
{
log.error( "Slave commit threw " + (failure instanceof ComException ? "communication" : "" )
+ " exception", failure );
}
};
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_transaction_MasterTxIdGenerator.java
|
4,808
|
{
@Override
public int getTxPushFactor()
{
return config.get( HaSettings.tx_push_factor );
}
@Override
public int getServerId()
{
return config.get( ClusterSettings.server_id );
}
@Override
public SlavePriority getReplicationStrategy()
{
return slavePriority;
}
};
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_transaction_MasterTxIdGenerator.java
|
4,809
|
{
@Override
public int getTxPushFactor()
{
return config.get( HaSettings.tx_push_factor );
}
@Override
public int getServerId()
{
return config.get( ClusterSettings.server_id );
}
@Override
public SlavePriority getReplicationStrategy()
{
switch ( config.get( HaSettings.tx_push_strategy ) )
{
case fixed:
return SlavePriorities.fixed();
case round_robin:
return SlavePriorities.roundRobin();
default:
throw new RuntimeException( "Unknown replication strategy " );
}
}
};
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_transaction_MasterTxIdGenerator.java
|
4,810
|
/**
 * Master-side {@link TxIdGenerator} which, after a transaction has committed locally,
 * eagerly pushes it to a configurable number of slaves (the "tx push factor"), choosing
 * slaves in the order given by a {@link SlavePriority} strategy. Failures to replicate
 * are logged, never propagated to the committing thread.
 */
public class MasterTxIdGenerator implements TxIdGenerator, Lifecycle
{
// Settings snapshot consumed at start(): push factor, own server id, slave ordering.
public interface Configuration
{
int getTxPushFactor();
int getServerId();
SlavePriority getReplicationStrategy();
}
// Configuration backed directly by Config, with the push strategy resolved from settings.
public static Configuration from( final Config config )
{
return new Configuration()
{
@Override
public int getTxPushFactor()
{
return config.get( HaSettings.tx_push_factor );
}
@Override
public int getServerId()
{
return config.get( ClusterSettings.server_id );
}
@Override
public SlavePriority getReplicationStrategy()
{
switch ( config.get( HaSettings.tx_push_strategy ) )
{
case fixed:
return SlavePriorities.fixed();
case round_robin:
return SlavePriorities.roundRobin();
default:
throw new RuntimeException( "Unknown replication strategy " );
}
}
};
}
// Variant with an explicitly supplied replication strategy instead of one read from settings.
public static Configuration from( final Config config, final SlavePriority slavePriority )
{
return new Configuration()
{
@Override
public int getTxPushFactor()
{
return config.get( HaSettings.tx_push_factor );
}
@Override
public int getServerId()
{
return config.get( ClusterSettings.server_id );
}
@Override
public SlavePriority getReplicationStrategy()
{
return slavePriority;
}
};
}
// Snapshotted from config in start().
private int desiredReplicationFactor;
private SlavePriority replicationStrategy;
// Pool running one task per slave being pushed to; created in start(), shut down in stop().
private ExecutorService slaveCommitters;
private final StringLogger log;
private final Configuration config;
private final Slaves slaves;
private final CommitPusher pusher;
// Rate-limits logging of slave commit failures: at most once per 5s per failure class.
private final CappedOperation<Throwable> slaveCommitFailureLogger = new CappedOperation<Throwable>(
CappedOperation.time( 5, TimeUnit.SECONDS ),
CappedOperation.differentItemClasses() )
{
@Override
protected void triggered( Throwable failure )
{
log.error( "Slave commit threw " + (failure instanceof ComException ? "communication" : "" )
+ " exception", failure );
}
};
public MasterTxIdGenerator( Configuration config, StringLogger log, Slaves slaves, CommitPusher pusher )
{
this.config = config;
this.log = log;
this.slaves = slaves;
this.pusher = pusher;
}
@Override
public void init() throws Throwable
{
}
@Override
public void start() throws Throwable
{
this.slaveCommitters = Executors.newCachedThreadPool( new NamedThreadFactory( "slave-committer" ) );
desiredReplicationFactor = config.getTxPushFactor();
replicationStrategy = config.getReplicationStrategy();
}
@Override
public void stop() throws Throwable
{
this.slaveCommitters.shutdown();
}
@Override
public void shutdown() throws Throwable
{
}
// Tx id generation itself is delegated to the default implementation unchanged.
@Override
public long generate( final XaDataSource dataSource, final int identifier ) throws XAException
{
return TxIdGenerator.DEFAULT.generate( dataSource, identifier );
}
/**
 * Pushes the committed transaction to slaves in parallel until the desired replication
 * factor is met, replacing failed committers with new ones while untried slaves remain.
 * Never throws: insufficient replication and unexpected errors are only logged.
 *
 * @param externalAuthorServerId if non-null, the slave that authored this tx (it already
 * has it), so it is excluded from pushing and the factor is reduced by one.
 */
@Override
public void committed( XaDataSource dataSource, int identifier, long txId, Integer externalAuthorServerId )
{
int replicationFactor = desiredReplicationFactor;
if ( externalAuthorServerId != null )
{
replicationFactor--;
}
if ( replicationFactor == 0 )
{
return;
}
Collection<Future<Void>> committers = new HashSet<>();
try
{
// TODO: Move this logic into {@link CommitPusher}
// Commit at the configured amount of slaves in parallel.
int successfulReplications = 0;
Iterator<Slave> slaveList = filter( replicationStrategy.prioritize( slaves.getSlaves() ).iterator(),
externalAuthorServerId );
CompletionNotifier notifier = new CompletionNotifier();
// Start as many initial committers as needed
for ( int i = 0; i < replicationFactor && slaveList.hasNext(); i++ )
{
committers.add( slaveCommitters.submit( slaveCommitter( dataSource, slaveList.next(),
txId, notifier ) ) );
}
// Wait for them and perhaps spawn new ones for failing committers until we're done
// or until we have no more slaves to try out.
Collection<Future<Void>> toAdd = new ArrayList<>();
Collection<Future<Void>> toRemove = new ArrayList<>();
while ( !committers.isEmpty() && successfulReplications < replicationFactor )
{
toAdd.clear();
toRemove.clear();
for ( Future<Void> committer : committers )
{
if ( !committer.isDone() )
{
continue;
}
if ( isSuccessful( committer ) )
// This committer was successful, increment counter
{
successfulReplications++;
}
else if ( slaveList.hasNext() )
// This committer failed, spawn another one
{
toAdd.add( slaveCommitters.submit( slaveCommitter( dataSource, slaveList.next(),
txId, notifier ) ) );
}
toRemove.add( committer );
}
// Incorporate the results into committers collection
if ( !toAdd.isEmpty() )
{
committers.addAll( toAdd );
}
if ( !toRemove.isEmpty() )
{
committers.removeAll( toRemove );
}
if ( !committers.isEmpty() )
// There are committers doing work right now, so go and wait for
// any of the committers to be done so that we can reevaluate
// the situation again.
{
notifier.waitForAnyCompletion();
}
}
// We did the best we could, have we committed successfully on enough slaves?
if ( !(successfulReplications >= replicationFactor) )
{
log.logMessage( "Transaction " + txId + " for " + dataSource.getName()
+ " couldn't commit on enough slaves, desired " + replicationFactor
+ ", but could only commit at " + successfulReplications );
}
}
catch ( Throwable t )
{
log.logMessage( "Unknown error commit master transaction at slave", t );
}
finally
{
// Cancel all ongoing committers in the executor
for ( Future<Void> committer : committers )
{
committer.cancel( false );
}
}
}
// Excludes the external author's slave (it already has the tx); null id means no filtering.
private Iterator<Slave> filter( Iterator<Slave> slaves, final Integer externalAuthorServerId )
{
return externalAuthorServerId == null ? slaves : new FilteringIterator<Slave>( slaves, new Predicate<Slave>()
{
@Override
public boolean accept( Slave item )
{
return item.getServerId() != externalAuthorServerId.intValue();
}
} );
}
// Treats interruption, execution failure and cancellation all as "not successful".
// NOTE(review): on InterruptedException the interrupt flag is not restored — confirm intended.
private boolean isSuccessful( Future<Void> committer )
{
try
{
committer.get();
return true;
}
catch ( InterruptedException e )
{
return false;
}
catch ( ExecutionException e )
{
slaveCommitFailureLogger.event( e.getCause() );
return false;
}
catch ( CancellationException e )
{
return false;
}
}
/**
 * A version of wait/notify which can handle that a notify comes before the
 * call to wait, in which case the call to wait will return immediately.
 *
 * @author Mattias Persson
 */
private static class CompletionNotifier
{
// True when a completion has been signalled and not yet consumed by a waiter.
private boolean notified;
synchronized void completed()
{
notified = true;
notifyAll();
}
synchronized void waitForAnyCompletion()
{
if ( !notified )
{
// Already false on this branch; assignment is redundant but harmless.
notified = false;
try
{
// Bounded wait: a missed/late notification costs at most ~2s, never a hang.
wait( 2000 /*wait timeout just for safety*/ );
}
catch ( InterruptedException e )
{
Thread.interrupted();
// Hmm, ok we got interrupted. No biggy I'd guess
}
}
else
{
// Consume the pending notification and return without waiting.
notified = false;
}
}
@Override
public String toString()
{
return "CompletionNotifier{id=" + System.identityHashCode( this ) +
",notified=" + notified +
'}';
}
}
// Wraps a push to one slave; always signals the notifier, on success or failure.
private Callable<Void> slaveCommitter( final XaDataSource dataSource,
final Slave slave, final long txId, final CompletionNotifier notifier )
{
return new Callable<Void>()
{
@Override
public Void call()
{
try
{
pusher.queuePush( dataSource, slave, txId );
return null;
}
finally
{
notifier.completed();
}
}
};
}
public int getCurrentMasterId()
{
return config.getServerId();
}
@Override
public int getMyId()
{
return config.getServerId();
}
}
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_transaction_MasterTxIdGenerator.java
|
4,811
|
/**
 * Transaction hook used on the HA master. Behaves exactly like {@link DefaultTxHook}
 * except that ids are never freed during rollback.
 */
public class MasterTxHook extends DefaultTxHook
{
// Presumably ids must not be reused because other cluster members may have observed
// them — TODO confirm against id-generator semantics.
@Override
public boolean freeIdsDuringRollback()
{
return false;
}
}
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_transaction_MasterTxHook.java
|
4,812
|
{
@Override
public Object call() throws Exception
{
return null;
}
});
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_transaction_CommitPusher.java
|
4,813
|
/**
 * Future representing one queued "make this slave pull up to txId" request.
 * The wrapped Callable is a no-op; completion is signalled explicitly by the
 * worker thread via {@link #done()} or {@link #setException(Throwable)}.
 *
 * Fix: {@code slave} and {@code txId} are set once in the constructor and never
 * reassigned, so they are now declared final.
 */
private static class PullUpdateFuture
        extends FutureTask<Object>
{
    private final Slave slave;
    private final long txId;

    public PullUpdateFuture( Slave slave, long txId )
    {
        // No-op callable: this task is never actually run, only completed explicitly.
        super( new Callable<Object>()
        {
            @Override
            public Object call() throws Exception
            {
                return null;
            }
        } );
        this.slave = slave;
        this.txId = txId;
    }

    /**
     * Marks this future as successfully completed: sets the (null) result first,
     * releasing any threads blocked in get(), then invokes the superclass hook.
     */
    @Override
    public void done()
    {
        super.set( null );
        super.done();
    }

    /** Widens visibility from protected to public so the worker thread can report failures. */
    @Override
    public void setException( Throwable t )
    {
        super.setException( t );
    }

    /** @return the slave this request targets. */
    public Slave getSlave()
    {
        return slave;
    }

    /** @return the transaction id the slave should pull up to. */
    private long getTxId()
    {
        return txId;
    }
}
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_transaction_CommitPusher.java
|
4,814
|
{
List<PullUpdateFuture> currentPulls = new ArrayList<>();
@Override
public void run()
{
try
{
while (true)
{
// Poll queue and call pullUpdate
currentPulls.clear();
currentPulls.add( finalQueue.take() );
finalQueue.drainTo( currentPulls );
try
{
PullUpdateFuture pullUpdateFuture = currentPulls.get( 0 );
Response<Void> response = pullUpdateFuture.getSlave().pullUpdates( dataSource.getName(), pullUpdateFuture.getTxId() );
response.close();
// Notify the futures
for ( PullUpdateFuture currentPull : currentPulls )
{
currentPull.done();
}
}
catch ( Exception e )
{
// Notify the futures
for ( PullUpdateFuture currentPull : currentPulls )
{
currentPull.setException( e );
}
}
}
}
catch ( InterruptedException e )
{
// Quit
}
}
} );
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_transaction_CommitPusher.java
|
4,815
|
/**
 * Pushes committed transactions to slaves by asking each slave to pull updates.
 * Requests targeting the same slave are funneled through one bounded queue drained
 * by one dedicated worker, so concurrent pushes to a slave are batched into a
 * single pullUpdates round-trip.
 */
public class CommitPusher extends LifecycleAdapter
{
// Future representing one queued pull request; the no-op Callable is never run —
// completion is signalled explicitly by the worker via done()/setException().
private static class PullUpdateFuture
extends FutureTask<Object>
{
private Slave slave;
private long txId;
public PullUpdateFuture( Slave slave, long txId )
{
super( new Callable<Object>()
{
@Override
public Object call() throws Exception
{
return null;
}
});
this.slave = slave;
this.txId = txId;
}
// Sets the (null) result first, releasing callers blocked in get().
@Override
public void done()
{
super.set( null );
super.done();
}
// Widens visibility from protected to public so the worker can report failures.
@Override
public void setException( Throwable t )
{
super.setException( t );
}
public Slave getSlave()
{
return slave;
}
private long getTxId()
{
return txId;
}
}
// One queue per slave server id, each drained by its own scheduled worker.
// NOTE(review): read in queuePush() without synchronization while createNewQueue() is
// synchronized — confirm visibility of newly created queues across threads.
private final Map<Integer, BlockingQueue<PullUpdateFuture>> pullUpdateQueues = new HashMap<>( );
private final JobScheduler scheduler;
public CommitPusher( JobScheduler scheduler )
{
this.scheduler = scheduler;
}
/**
 * Queues a push of txId to the given slave and blocks until the slave has pulled it.
 * Failures reported by the worker are rethrown, wrapped in RuntimeException unless
 * the cause already is one.
 */
public void queuePush( final XaDataSource dataSource, Slave slave, final long txId )
{
PullUpdateFuture pullRequest = new PullUpdateFuture(slave, txId);
BlockingQueue<PullUpdateFuture> queue = pullUpdateQueues.get( slave.getServerId() );
// Create a new queue if needed
queue = queue == null ? createNewQueue( dataSource, slave ) : queue;
// Add our request to the queue
// Busy-spins with yield when the bounded queue is full, rather than blocking in put().
while( !queue.offer( pullRequest ) )
{
Thread.yield();
}
try
{
// Wait for request to finish
pullRequest.get();
}
catch ( InterruptedException e )
{
Thread.interrupted(); // Clear interrupt flag
throw new RuntimeException( e );
}
catch ( ExecutionException e )
{
if (e.getCause() instanceof RuntimeException)
throw ((RuntimeException)e.getCause());
else
throw new RuntimeException( e.getCause() );
}
}
// Creates the queue and its worker at most once per slave; synchronized (with a re-check)
// so concurrent first pushes to the same slave cannot create two workers.
private synchronized BlockingQueue<PullUpdateFuture> createNewQueue( final XaDataSource dataSource, Slave slave )
{
BlockingQueue<PullUpdateFuture> queue = pullUpdateQueues.get( slave.getServerId() );
if (queue == null)
{
// Create queue and worker
queue = new ArrayBlockingQueue<>( 100 );
pullUpdateQueues.put( slave.getServerId(), queue );
final BlockingQueue<PullUpdateFuture> finalQueue = queue;
scheduler.schedule( masterTransactionPushing, new Runnable()
{
List<PullUpdateFuture> currentPulls = new ArrayList<>();
@Override
public void run()
{
try
{
// Loop until interrupted; take() blocks between batches.
while (true)
{
// Poll queue and call pullUpdate
currentPulls.clear();
currentPulls.add( finalQueue.take() );
finalQueue.drainTo( currentPulls );
try
{
// One pullUpdates call with the first (oldest) request's txId serves the batch.
// NOTE(review): assumes this single pull also covers later requests in the
// batch — confirm against pullUpdates semantics.
PullUpdateFuture pullUpdateFuture = currentPulls.get( 0 );
Response<Void> response = pullUpdateFuture.getSlave().pullUpdates( dataSource.getName(), pullUpdateFuture.getTxId() );
response.close();
// Notify the futures
for ( PullUpdateFuture currentPull : currentPulls )
{
currentPull.done();
}
}
catch ( Exception e )
{
// Notify the futures
for ( PullUpdateFuture currentPull : currentPulls )
{
currentPull.setException( e );
}
}
}
}
catch ( InterruptedException e )
{
// Quit
}
}
} );
}
return queue;
}
}
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_transaction_CommitPusher.java
|
4,816
|
/**
 * Shell app that forces this database instance to pull updates from the HA master
 * via the {@link UpdatePuller} dependency.
 */
public class Pullupdates extends NonTransactionProvidingApp
{
@Override
protected Continuation exec( AppCommandParser parser, Session session, Output out )
throws ShellException, RemoteException
{
try
{
getServer().getDb().getDependencyResolver().resolveDependency( UpdatePuller.class ).pullUpdates();
}
catch ( IllegalArgumentException e )
{
// Presumably thrown when no UpdatePuller is registered, i.e. the database is not
// highly available — confirm. NOTE(review): the original cause is not attached.
throw new ShellException( "Couldn't pull updates. Not a highly available database?" );
}
return Continuation.INPUT_COMPLETE;
}
}
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_shell_Pullupdates.java
|
4,817
|
/**
 * Kernel data for a highly available database, additionally exposing cluster
 * membership information for management/JMX purposes.
 */
public class HighlyAvailableKernelData extends KernelData implements Lifecycle
{
    private final HighlyAvailableGraphDatabase db;
    private final ClusterMembers memberInfo;
    private final ClusterDatabaseInfoProvider memberInfoProvider;

    public HighlyAvailableKernelData( HighlyAvailableGraphDatabase db, ClusterMembers memberInfo,
            ClusterDatabaseInfoProvider databaseInfo )
    {
        super( db.getConfig() );
        this.db = db;
        this.memberInfo = memberInfo;
        this.memberInfoProvider = databaseInfo;
    }

    // Lifecycle: nothing to do on init/start/stop; shutdown delegates to KernelData.

    @Override
    public void init() throws Throwable
    {
    }

    @Override
    public void start() throws Throwable
    {
    }

    @Override
    public void stop() throws Throwable
    {
    }

    @Override
    public void shutdown()
    {
        super.shutdown();
    }

    @Override
    public Version version()
    {
        return Version.getKernel();
    }

    @Override
    public GraphDatabaseAPI graphDatabase()
    {
        return db;
    }

    /** Snapshot of all currently known cluster members. */
    public ClusterMemberInfo[] getClusterInfo()
    {
        List<ClusterMemberInfo> infos = new ArrayList<>();
        for ( ClusterMember member : memberInfo.getMembers() )
        {
            infos.add( new ClusterMemberInfo( member.getMemberId().toString(),
                    member.getHAUri() != null, member.isAlive(), member.getHARole(),
                    toArray( String.class, map( Functions.TO_STRING, member.getRoleURIs() ) ),
                    toArray( String.class, map( Functions.TO_STRING, member.getRoles() ) ) ) );
        }
        return infos.toArray( new ClusterMemberInfo[infos.size()] );
    }

    /** Info about this member, from the configured provider. */
    public ClusterDatabaseInfo getMemberInfo()
    {
        return memberInfoProvider.getInfo();
    }
}
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_management_HighlyAvailableKernelData.java
|
4,818
|
/**
 * JMX implementation of the HighAvailability bean, backed by
 * {@link HighlyAvailableKernelData}.
 */
private static class HighAvailabilityImpl extends Neo4jMBean implements HighAvailability
{
    private final HighlyAvailableKernelData kernelData;

    HighAvailabilityImpl( ManagementData management )
            throws NotCompliantMBeanException
    {
        super( management );
        this.kernelData = (HighlyAvailableKernelData) management.getKernelData();
    }

    HighAvailabilityImpl( ManagementData management, boolean isMXBean )
    {
        super( management, isMXBean );
        this.kernelData = (HighlyAvailableKernelData) management.getKernelData();
    }

    // Read-only attributes below delegate to the kernel data's member info.

    @Override
    public String getInstanceId()
    {
        return kernelData.getMemberInfo().getInstanceId();
    }

    @Override
    public ClusterMemberInfo[] getInstancesInCluster()
    {
        return kernelData.getClusterInfo();
    }

    @Override
    public String getRole()
    {
        return kernelData.getMemberInfo().getHaRole();
    }

    @Override
    public boolean isAvailable()
    {
        return kernelData.getMemberInfo().isAvailable();
    }

    @Override
    public boolean isAlive()
    {
        return kernelData.getMemberInfo().isAlive();
    }

    /** Formatted last-update timestamp, or "N/A" when none has happened yet (timestamp 0). */
    @Override
    public String getLastUpdateTime()
    {
        long lastUpdate = kernelData.getMemberInfo().getLastUpdateTime();
        return lastUpdate == 0 ? "N/A" : Format.date( lastUpdate );
    }

    @Override
    public long getLastCommittedTxId()
    {
        return kernelData.getMemberInfo().getLastCommittedTxId();
    }

    /** JMX operation: triggers a pull from the master and reports elapsed time or the failure. */
    @Override
    public String update()
    {
        long startTime = System.currentTimeMillis();
        try
        {
            kernelData.graphDatabase().getDependencyResolver().resolveDependency(
                    UpdatePuller.class ).pullUpdates();
        }
        catch ( Exception e )
        {
            return "Update failed: " + e;
        }
        long elapsed = System.currentTimeMillis() - startTime;
        return "Update completed in " + elapsed + "ms";
    }
}
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_management_HighAvailabilityBean.java
|
4,819
|
// Registers the HighAvailability JMX bean; only active for HA databases.
@Service.Implementation(ManagementBeanProvider.class)
public final class HighAvailabilityBean extends ManagementBeanProvider
{
public HighAvailabilityBean()
{
super( HighAvailability.class );
}
// Returns null (no bean) when the database is not highly available.
@Override
protected Neo4jMBean createMXBean( ManagementData management ) throws NotCompliantMBeanException
{
if ( !isHA( management ) )
{
return null;
}
return new HighAvailabilityImpl( management, true );
}
@Override
protected Neo4jMBean createMBean( ManagementData management ) throws NotCompliantMBeanException
{
if ( !isHA( management ) )
{
return null;
}
return new HighAvailabilityImpl( management );
}
private static boolean isHA( ManagementData management )
{
return management.getKernelData().graphDatabase() instanceof HighlyAvailableGraphDatabase;
}
// Bean implementation: read-only attributes delegate to HighlyAvailableKernelData.
private static class HighAvailabilityImpl extends Neo4jMBean implements HighAvailability
{
private final HighlyAvailableKernelData kernelData;
HighAvailabilityImpl( ManagementData management )
throws NotCompliantMBeanException
{
super( management );
this.kernelData = (HighlyAvailableKernelData) management.getKernelData();
}
HighAvailabilityImpl( ManagementData management, boolean isMXBean )
{
super( management, isMXBean );
this.kernelData = (HighlyAvailableKernelData) management.getKernelData();
}
@Override
public String getInstanceId()
{
return kernelData.getMemberInfo().getInstanceId();
}
@Override
public ClusterMemberInfo[] getInstancesInCluster()
{
return kernelData.getClusterInfo();
}
@Override
public String getRole()
{
return kernelData.getMemberInfo().getHaRole();
}
@Override
public boolean isAvailable()
{
return kernelData.getMemberInfo().isAvailable();
}
@Override
public boolean isAlive()
{
return kernelData.getMemberInfo().isAlive();
}
// Returns "N/A" when no update has happened yet (timestamp 0).
@Override
public String getLastUpdateTime()
{
long lastUpdateTime = kernelData.getMemberInfo().getLastUpdateTime();
return lastUpdateTime == 0 ? "N/A" : Format.date( lastUpdateTime );
}
@Override
public long getLastCommittedTxId()
{
return kernelData.getMemberInfo().getLastCommittedTxId();
}
// JMX operation: triggers a pull from the master, reporting elapsed time or the failure.
@Override
public String update()
{
long time = System.currentTimeMillis();
try
{
kernelData.graphDatabase().getDependencyResolver().resolveDependency(
UpdatePuller.class ).pullUpdates();
}
catch ( Exception e )
{
return "Update failed: " + e;
}
time = System.currentTimeMillis() - time;
return "Update completed in " + time + "ms";
}
}
}
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_management_HighAvailabilityBean.java
|
4,820
|
/**
 * Assembles {@link ClusterDatabaseInfo} for this cluster member from membership data,
 * the last committed tx id and the last update time.
 *
 * Fix: the un-braced {@code if (...) return null;} is now braced per the file's
 * brace-always convention; behavior is unchanged.
 */
public class ClusterDatabaseInfoProvider
{
    private final ClusterMembers members;
    private final LastTxIdGetter txIdGetter;
    private final LastUpdateTime lastUpdateTime;

    public ClusterDatabaseInfoProvider( ClusterMembers members, LastTxIdGetter txIdGetter,
            LastUpdateTime lastUpdateTime )
    {
        this.members = members;
        this.txIdGetter = txIdGetter;
        this.lastUpdateTime = lastUpdateTime;
    }

    /**
     * @return info about this member, or {@code null} when this member is not yet
     * known to the cluster (callers must handle the null).
     */
    public ClusterDatabaseInfo getInfo()
    {
        ClusterMember self = members.getSelf();
        if ( self == null )
        {
            return null;
        }
        return new ClusterDatabaseInfo( new ClusterMemberInfo( self.getMemberId().toString(), self.getHAUri() != null,
                true, self.getHARole(),
                Iterables.toArray(String.class, Iterables.map( Functions.TO_STRING, self.getRoleURIs() ) ),
                Iterables.toArray(String.class, Iterables.map( Functions.TO_STRING, self.getRoles() ) ) ),
                txIdGetter.getLastTxId(), lastUpdateTime.getLastUpdateTime() );
    }
}
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_management_ClusterDatabaseInfoProvider.java
|
4,821
|
{
@Override
public void registeredDataSource( XaDataSource ds )
{
if ( ds instanceof NeoStoreXaDataSource )
{
storePath = extractStorePath( management );
}
}
@Override
public void unregisteredDataSource( XaDataSource ds )
{
if ( ds instanceof NeoStoreXaDataSource )
{
storePath = null;
}
}
} );
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_management_BranchedStoreBean.java
|
4,822
|
{
@Override
public void registeredDataSource( XaDataSource ds )
{
if ( ds instanceof NeoStoreXaDataSource )
{
storePath = extractStorePath( management );
}
}
@Override
public void unregisteredDataSource( XaDataSource ds )
{
if ( ds instanceof NeoStoreXaDataSource )
{
storePath = null;
}
}
} );
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_management_BranchedStoreBean.java
|
4,823
|
/**
 * Reads the last committed transaction id directly from the neo store file on disk.
 */
public class OnDiskLastTxIdGetter implements LastTxIdGetter
{
    private final File storeDirectory;

    public OnDiskLastTxIdGetter( File storeDirectory )
    {
        this.storeDirectory = storeDirectory;
    }

    /**
     * @return the last committed tx id from the store, or -1 when no neo store file exists.
     */
    @Override
    public long getLastTxId()
    {
        File neoStoreFile = new File( storeDirectory, NeoStore.DEFAULT_NAME );
        if ( !neoStoreFile.exists() )
        {
            return -1;
        }
        return new NeoStoreUtil( storeDirectory ).getLastCommittedTx();
    }
}
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_transaction_OnDiskLastTxIdGetter.java
|
4,824
|
/**
 * {@link TxIdGenerator} used on HA slaves: committing a slave-local transaction is
 * delegated to the master, which assigns the tx id and streams back transactions
 * for the slave to apply.
 */
public class SlaveTxIdGenerator implements TxIdGenerator
{
private final int serverId;
private final Master master;
private final int masterId;
private final RequestContextFactory requestContextFactory;
private final HaXaDataSourceManager xaDsm;
private final AbstractTransactionManager txManager;
public SlaveTxIdGenerator( int serverId, Master master, int masterId, RequestContextFactory requestContextFactory,
HaXaDataSourceManager xaDsm, AbstractTransactionManager txManager )
{
this.serverId = serverId;
this.masterId = masterId;
this.requestContextFactory = requestContextFactory;
this.master = master;
this.xaDsm = xaDsm;
this.txManager = txManager;
}
/**
 * Commits the prepared transaction on the master and returns the master-assigned tx id.
 * Communication failures surface as XAException(XA_HEURCOM); an XAException found in a
 * RuntimeException's cause chain is re-thrown as an XAException with the same error code.
 */
@Override
public long generate( XaDataSource dataSource, int identifier ) throws XAException
{
try
{
// For the first resource to commit against, make sure the master tx is initialized. This is sub
// optimal to do here, since we are under a synchronized block, but writing to master from slaves
// is discouraged in any case. For details of the background for this call, see TransactionState
// and its isRemoteInitialized method.
TransactionState txState = txManager.getTransactionState();
txState.getTxHook().remotelyInitializeTransaction( txManager.getEventIdentifier(), txState );
Response<Long> response = master.commitSingleResourceTransaction(
requestContextFactory.newRequestContext( dataSource ), dataSource.getName(),
myPreparedTransactionToCommit( dataSource, identifier ) );
// Apply any transactions the master streamed back before reporting the new tx id.
xaDsm.applyTransactions( response );
return response.response().longValue();
}
catch ( ComException e )
{
throw Exceptions.withCause( new XAException( XAException.XA_HEURCOM ), e );
}
catch (RuntimeException e)
{
// If the original issue was caused by an XAException, wrap the whole thing in an XA exception with
// the same error code and message.
Throwable currentException = e.getCause();
while(currentException != null)
{
if( currentException instanceof XAException )
{
throw Exceptions.withCause( new XAException( ((XAException) currentException).errorCode ), e );
}
currentException = currentException.getCause();
}
// If no XAException involved, just throw the runtime exception.
throw e;
}
}
// Tells the master this tx id is committed so it can push it onwards; the response
// is closed immediately since its payload is not used here.
@Override
public void committed( XaDataSource dataSource, int identifier, long txId, Integer externalAuthorServerId )
{
master.pushTransaction(
requestContextFactory.newRequestContext( identifier ), dataSource.getName(), txId ).close();
}
@Override
public int getCurrentMasterId()
{
return masterId;
}
@Override
public int getMyId()
{
return serverId;
}
// Extractor that writes the locally prepared transaction into the buffer sent to the master.
// Channel-based extraction is unsupported; only the LogBuffer form is implemented.
private TxExtractor myPreparedTransactionToCommit( final XaDataSource dataSource, final int identifier )
{
return new TxExtractor()
{
@Override
public ReadableByteChannel extract()
{
throw new UnsupportedOperationException();
}
@Override
public void extract( LogBuffer buffer )
{
try
{
dataSource.getPreparedTransaction( identifier, buffer );
}
catch ( IOException e )
{
throw new RuntimeException( e );
}
}
};
}
}
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_transaction_SlaveTxIdGenerator.java
|
4,825
|
@Service.Implementation(ManagementBeanProvider.class)
public final class BranchedStoreBean extends ManagementBeanProvider
{
public BranchedStoreBean()
{
super( BranchedStore.class );
}
@Override
protected Neo4jMBean createMXBean( ManagementData management )
throws NotCompliantMBeanException
{
if ( !isHA( management ) )
{
return null;
}
return new BranchedStoreImpl( management, true );
}
@Override
protected Neo4jMBean createMBean( ManagementData management )
throws NotCompliantMBeanException
{
if ( !isHA( management ) )
{
return null;
}
return new BranchedStoreImpl( management );
}
private static boolean isHA( ManagementData management )
{
return management.getKernelData().graphDatabase() instanceof HighlyAvailableGraphDatabase;
}
private static class BranchedStoreImpl extends Neo4jMBean implements
BranchedStore
{
private File storePath;
protected BranchedStoreImpl( final ManagementData management )
throws NotCompliantMBeanException
{
super( management );
XaDataSourceManager xadsm = management.getKernelData().graphDatabase().getDependencyResolver()
.resolveDependency( XaDataSourceManager.class );
xadsm.addDataSourceRegistrationListener( new DataSourceRegistrationListener()
{
@Override
public void registeredDataSource( XaDataSource ds )
{
if ( ds instanceof NeoStoreXaDataSource )
{
storePath = extractStorePath( management );
}
}
@Override
public void unregisteredDataSource( XaDataSource ds )
{
if ( ds instanceof NeoStoreXaDataSource )
{
storePath = null;
}
}
} );
}
protected BranchedStoreImpl( final ManagementData management, boolean isMXBean )
{
super( management, isMXBean );
XaDataSourceManager xadsm = management.getKernelData().graphDatabase().getDependencyResolver()
.resolveDependency( XaDataSourceManager.class );
xadsm.addDataSourceRegistrationListener( new DataSourceRegistrationListener()
{
@Override
public void registeredDataSource( XaDataSource ds )
{
if ( ds instanceof NeoStoreXaDataSource )
{
storePath = extractStorePath( management );
}
}
@Override
public void unregisteredDataSource( XaDataSource ds )
{
if ( ds instanceof NeoStoreXaDataSource )
{
storePath = null;
}
}
} );
}
private File extractStorePath( ManagementData management )
{
NeoStoreXaDataSource nioneodb = management.getKernelData().graphDatabase().getDependencyResolver()
.resolveDependency( XaDataSourceManager.class ).getNeoStoreDataSource();
File path;
try
{
path = new File( nioneodb.getStoreDir() ).getCanonicalFile().getAbsoluteFile();
}
catch ( IOException e )
{
path = new File( nioneodb.getStoreDir() ).getAbsoluteFile();
}
return path;
}
@Override
public BranchedStoreInfo[] getBranchedStores()
{
if ( storePath == null )
{
return new BranchedStoreInfo[0];
}
List<BranchedStoreInfo> toReturn = new LinkedList<BranchedStoreInfo>();
for ( File branchDirectory : BranchedDataPolicy.getBranchedDataRootDirectory( storePath ).listFiles() )
{
if ( !branchDirectory.isDirectory() )
{
continue;
}
toReturn.add( parseBranchedStore( branchDirectory ) );
}
return toReturn.toArray( new BranchedStoreInfo[]{} );
}
// Builds a BranchedStoreInfo for a single branched-store directory. The
// directory name doubles as both the branch label and its timestamp.
// NOTE(review): Long.parseLong throws NumberFormatException for a non-numeric
// directory name -- assumed not to occur under the branched-data root.
private BranchedStoreInfo parseBranchedStore( File branchDirectory )
{
File neostoreFile = new File( branchDirectory, NeoStore.DEFAULT_NAME );
// Last committed transaction id recorded in the branched neostore file.
long txId = NeoStore.getTxId( new DefaultFileSystemAbstraction(), neostoreFile );
long timestamp = Long.parseLong( branchDirectory.getName() );
return new BranchedStoreInfo( branchDirectory.getName(), txId, timestamp );
}
}
}
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_management_BranchedStoreBean.java
|
4,826
|
// Anonymous extractor: channel-based extraction is unsupported; the prepared
// transaction is instead streamed straight into the supplied log buffer by
// the data source. 'dataSource' and 'identifier' are captured from the
// enclosing scope (outside this view).
{
@Override
public ReadableByteChannel extract()
{
// Only buffer-based extraction is supported by this implementation.
throw new UnsupportedOperationException();
}
@Override
public void extract( LogBuffer buffer )
{
try
{
// Write the prepared transaction identified by 'identifier' into the buffer.
dataSource.getPreparedTransaction( identifier, buffer );
}
catch ( IOException e )
{
// Propagate as unchecked; callers treat extraction failure as fatal.
throw new RuntimeException( e );
}
}
};
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_transaction_SlaveTxIdGenerator.java
|
4,827
|
// Anonymous test function: performs a schema operation (index creation) and
// then asserts that the supplied data operation fails with the expected
// "mixed schema/data transaction" error message.
{
@Override
public Void apply( GraphDatabaseService graphDb )
{
// given
graphDb.schema().indexFor( label( "Label1" ) ).on( "key1" ).create();
// when
try
{
function.apply( graphDb );
fail( "expected exception" );
}
// then
catch ( Exception e )
{
assertEquals( "Cannot perform data updates in a transaction that has performed schema updates.",
e.getMessage() );
}
return null;
}
};
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_api_DataAndSchemaTransactionSeparationIT.java
|
4,828
|
// Integration test verifying that data updates (node/relationship creation,
// property writes) are rejected inside a transaction that has already
// performed schema updates, while read operations remain allowed.
public class DataAndSchemaTransactionSeparationIT
{
public final @Rule DatabaseRule db = new ImpermanentDatabaseRule();
// Wraps 'function' so that it runs after an index-creation schema operation
// and is expected to fail with the mixed-transaction error message.
private static Function<GraphDatabaseService, Void> expectFailureAfterSchemaOperation(
final Function<GraphDatabaseService, ?> function )
{
return new Function<GraphDatabaseService, Void>()
{
@Override
public Void apply( GraphDatabaseService graphDb )
{
// given
graphDb.schema().indexFor( label( "Label1" ) ).on( "key1" ).create();
// when
try
{
function.apply( graphDb );
fail( "expected exception" );
}
// then
catch ( Exception e )
{
assertEquals( "Cannot perform data updates in a transaction that has performed schema updates.",
e.getMessage() );
}
return null;
}
};
}
// Wraps 'function' so that it runs after an index-creation schema operation
// and is expected to succeed (used for read-only operations).
private static Function<GraphDatabaseService, Void> succeedAfterSchemaOperation(
final Function<GraphDatabaseService, ?> function )
{
return new Function<GraphDatabaseService, Void>()
{
@Override
public Void apply( GraphDatabaseService graphDb )
{
// given
graphDb.schema().indexFor( label( "Label1" ) ).on( "key1" ).create();
// when/then
function.apply( graphDb );
return null;
}
};
}
@Test
public void shouldNotAllowNodeCreationInSchemaTransaction() throws Exception
{
db.executeAndRollback( expectFailureAfterSchemaOperation( createNode() ) );
}
@Test
public void shouldNotAllowRelationshipCreationInSchemaTransaction() throws Exception
{
// given
final Pair<Node, Node> nodes = db.executeAndCommit( aPairOfNodes() );
// then
db.executeAndRollback( expectFailureAfterSchemaOperation( relate( nodes ) ) );
}
@Test
@SuppressWarnings("unchecked")
public void shouldNotAllowPropertyWritesInSchemaTransaction() throws Exception
{
// given
Pair<Node, Node> nodes = db.executeAndCommit( aPairOfNodes() );
Relationship relationship = db.executeAndCommit( relate( nodes ) );
// when
for ( Function<GraphDatabaseService, ?> operation : new Function[]{
propertyWrite( Node.class, nodes.first(), "key1", "value1" ),
propertyWrite( Relationship.class, relationship, "key1", "value1" ),
} )
{
// then
db.executeAndRollback( expectFailureAfterSchemaOperation( operation ) );
}
}
@Test
@SuppressWarnings("unchecked")
public void shouldAllowPropertyReadsInSchemaTransaction() throws Exception
{
// given
Pair<Node, Node> nodes = db.executeAndCommit( aPairOfNodes() );
Relationship relationship = db.executeAndCommit( relate( nodes ) );
db.executeAndCommit( propertyWrite( Node.class, nodes.first(), "key1", "value1" ) );
db.executeAndCommit( propertyWrite( Relationship.class, relationship, "key1", "value1" ) );
// when
for ( Function<GraphDatabaseService, ?> operation : new Function[]{
propertyRead( Node.class, nodes.first(), "key1" ),
propertyRead( Relationship.class, relationship, "key1" ),
} )
{
// then
db.executeAndRollback( succeedAfterSchemaOperation( operation ) );
}
}
// Factory for a function that creates a single node.
private static Function<GraphDatabaseService, Node> createNode()
{
return new Function<GraphDatabaseService, Node>()
{
@Override
public Node apply( GraphDatabaseService graphDb )
{
return graphDb.createNode();
}
};
}
// Factory for a property read on the given entity; assertion failures are
// rewritten to include the operation name for clearer diagnostics.
private static <T extends PropertyContainer> Function<GraphDatabaseService, Object> propertyRead(
Class<T> type, final T entity, final String key )
{
return new FailureRewrite<Object>( type.getSimpleName() + ".getProperty()" )
{
@Override
Object perform( GraphDatabaseService graphDb )
{
return entity.getProperty( key );
}
};
}
// Factory for a property write on the given entity (see propertyRead).
private static <T extends PropertyContainer> Function<GraphDatabaseService, Void> propertyWrite(
Class<T> type, final T entity, final String key, final Object value )
{
return new FailureRewrite<Void>( type.getSimpleName() + ".setProperty()" )
{
@Override
Void perform( GraphDatabaseService graphDb )
{
entity.setProperty( key, value );
return null;
}
};
}
// Factory creating two unrelated nodes, used as relationship endpoints.
private static Function<GraphDatabaseService, Pair<Node, Node>> aPairOfNodes()
{
return new Function<GraphDatabaseService, Pair<Node, Node>>()
{
@Override
public Pair<Node, Node> apply( GraphDatabaseService graphDb )
{
return Pair.of( graphDb.createNode(), graphDb.createNode() );
}
};
}
// Factory creating a RELATED relationship between the given pair of nodes.
private static Function<GraphDatabaseService, Relationship> relate( final Pair<Node, Node> nodes )
{
return new Function<GraphDatabaseService, Relationship>()
{
@Override
public Relationship apply( GraphDatabaseService graphDb )
{
return nodes.first().createRelationshipTo( nodes.other(), withName( "RELATED" ) );
}
};
}
// Base for operations that prefix AssertionError messages with the name of
// the operation being performed, preserving the original stack trace.
private static abstract class FailureRewrite<T> implements Function<GraphDatabaseService, T>
{
private final String message;
FailureRewrite( String message )
{
this.message = message;
}
@Override
public T apply( GraphDatabaseService graphDb )
{
try
{
return perform( graphDb );
}
catch ( AssertionError e )
{
AssertionError error = new AssertionError( message + ": " + e.getMessage() );
error.setStackTrace( e.getStackTrace() );
throw error;
}
}
abstract T perform( GraphDatabaseService graphDb );
}
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_api_DataAndSchemaTransactionSeparationIT.java
|
4,829
|
// Decorator over the entity read/write operations that enforces uniqueness
// constraints: before a label is added to a node or a property is set on one,
// every applicable uniqueness constraint is validated against the backing
// index under an index-entry lock. All other operations are pure delegation.
public class ConstraintEnforcingEntityOperations implements EntityOperations
{
private final EntityWriteOperations entityWriteOperations;
private final EntityReadOperations entityReadOperations;
private final SchemaReadOperations schemaReadOperations;
public ConstraintEnforcingEntityOperations(
EntityWriteOperations entityWriteOperations,
EntityReadOperations entityReadOperations,
SchemaReadOperations schemaReadOperations )
{
this.entityWriteOperations = entityWriteOperations;
this.entityReadOperations = entityReadOperations;
this.schemaReadOperations = schemaReadOperations;
}
// Validates every uniqueness constraint on the label against the node's
// already-defined property values before delegating the label addition.
@Override
public boolean nodeAddLabel( KernelStatement state, long nodeId, int labelId )
throws EntityNotFoundException, ConstraintValidationKernelException
{
Iterator<UniquenessConstraint> constraints = schemaReadOperations.constraintsGetForLabel( state, labelId );
while ( constraints.hasNext() )
{
UniquenessConstraint constraint = constraints.next();
int propertyKeyId = constraint.propertyKeyId();
Property property = entityReadOperations.nodeGetProperty( state, nodeId, propertyKeyId );
if ( property.isDefined() )
{
validateNoExistingNodeWithLabelAndProperty( state, labelId, (DefinedProperty) property, nodeId );
}
}
return entityWriteOperations.nodeAddLabel( state, nodeId, labelId );
}
// Validates the new property value against any uniqueness constraint on each
// of the node's labels before delegating the write.
@Override
public Property nodeSetProperty( KernelStatement state, long nodeId, DefinedProperty property )
throws EntityNotFoundException, ConstraintValidationKernelException
{
PrimitiveIntIterator labelIds = entityReadOperations.nodeGetLabels( state, nodeId );
while ( labelIds.hasNext() )
{
int labelId = labelIds.next();
int propertyKeyId = property.propertyKeyId();
Iterator<UniquenessConstraint> constraintIterator =
schemaReadOperations.constraintsGetForLabelAndPropertyKey( state, labelId, propertyKeyId );
if ( constraintIterator.hasNext() )
{
validateNoExistingNodeWithLabelAndProperty( state, labelId, property, nodeId );
}
}
return entityWriteOperations.nodeSetProperty( state, nodeId, property );
}
// Checks the constraint-backing index for another node with the same
// label/property value. The index-entry write lock serialises concurrent
// writers of the same value; the index must be ONLINE for the check to be
// trustworthy. Index problems surface as UnableToValidateConstraint.
private void validateNoExistingNodeWithLabelAndProperty( KernelStatement state, int labelId,
DefinedProperty property, long modifiedNode )
throws ConstraintValidationKernelException
{
try
{
Object value = property.value();
IndexDescriptor indexDescriptor = new IndexDescriptor( labelId, property.propertyKeyId() );
assertIndexOnline( state, indexDescriptor );
state.locks().acquireIndexEntryWriteLock( labelId, property.propertyKeyId(), property.valueAsString() );
PrimitiveLongIterator existingNodes = entityReadOperations.nodesGetFromIndexLookup(
state, indexDescriptor, value );
while ( existingNodes.hasNext() )
{
long existingNode = existingNodes.next();
// The node being modified may legitimately appear in the index already.
if ( existingNode != modifiedNode )
{
throw new UniqueConstraintViolationKernelException( labelId, property.propertyKeyId(), value,
existingNode );
}
}
}
catch ( IndexNotFoundKernelException | IndexBrokenKernelException e )
{
throw new UnableToValidateConstraintKernelException( e );
}
}
// Fails unless the backing index is ONLINE; any other state means the
// constraint cannot be reliably enforced.
private void assertIndexOnline( KernelStatement state, IndexDescriptor indexDescriptor )
throws IndexNotFoundKernelException, IndexBrokenKernelException
{
switch ( schemaReadOperations.indexGetState( state, indexDescriptor ) )
{
case ONLINE:
return;
default:
throw new IndexBrokenKernelException( schemaReadOperations.indexGetFailure( state, indexDescriptor ) );
}
}
// Simply delegate the rest of the invocations
@Override
public void nodeDelete( KernelStatement state, long nodeId )
{
entityWriteOperations.nodeDelete( state, nodeId );
}
@Override
public void relationshipDelete( KernelStatement state, long relationshipId )
{
entityWriteOperations.relationshipDelete( state, relationshipId );
}
@Override
public boolean nodeRemoveLabel( KernelStatement state, long nodeId, int labelId ) throws EntityNotFoundException
{
return entityWriteOperations.nodeRemoveLabel( state, nodeId, labelId );
}
@Override
public Property relationshipSetProperty( KernelStatement state, long relationshipId, DefinedProperty property )
throws EntityNotFoundException
{
return entityWriteOperations.relationshipSetProperty( state, relationshipId, property );
}
@Override
public Property graphSetProperty( KernelStatement state, DefinedProperty property )
{
return entityWriteOperations.graphSetProperty( state, property );
}
@Override
public Property nodeRemoveProperty( KernelStatement state, long nodeId, int propertyKeyId )
throws EntityNotFoundException
{
return entityWriteOperations.nodeRemoveProperty( state, nodeId, propertyKeyId );
}
@Override
public Property relationshipRemoveProperty( KernelStatement state, long relationshipId, int propertyKeyId )
throws EntityNotFoundException
{
return entityWriteOperations.relationshipRemoveProperty( state, relationshipId, propertyKeyId );
}
@Override
public Property graphRemoveProperty( KernelStatement state, int propertyKeyId )
{
return entityWriteOperations.graphRemoveProperty( state, propertyKeyId );
}
@Override
public PrimitiveLongIterator nodesGetForLabel( KernelStatement state, int labelId )
{
return entityReadOperations.nodesGetForLabel( state, labelId );
}
@Override
public PrimitiveLongIterator nodesGetFromIndexLookup( KernelStatement state, IndexDescriptor index, Object value )
throws IndexNotFoundKernelException
{
return entityReadOperations.nodesGetFromIndexLookup( state, index, value );
}
// Unique index lookup with a lock-upgrade protocol: take a READ lock on the
// index entry first; if no node is found, upgrade to a WRITE lock (so a
// concurrent creator is excluded), retry, and downgrade back to READ when
// the retry finds a node. The statement order here is significant.
@Override
public long nodeGetUniqueFromIndexLookup(
KernelStatement state,
IndexDescriptor index,
Object value )
throws IndexNotFoundKernelException, IndexBrokenKernelException
{
assertIndexOnline( state, index );
int labelId = index.getLabelId();
int propertyKeyId = index.getPropertyKeyId();
String stringVal = "";
if ( null != value )
{
DefinedProperty property = Property.property( propertyKeyId, value );
stringVal = property.valueAsString();
}
// If we find the node - hold a READ lock. If we don't find a node - hold a WRITE lock.
LockHolder holder = state.locks();
try ( ReleasableLock r = holder.getReleasableIndexEntryReadLock( labelId, propertyKeyId, stringVal ) )
{
long nodeId = entityReadOperations.nodeGetUniqueFromIndexLookup( state, index, value );
if ( NO_SUCH_NODE == nodeId )
{
r.release(); // and change to a WRITE lock
try ( ReleasableLock w = holder.getReleasableIndexEntryWriteLock( labelId, propertyKeyId, stringVal ) )
{
nodeId = entityReadOperations.nodeGetUniqueFromIndexLookup( state, index, value );
if ( NO_SUCH_NODE != nodeId ) // we found it under the WRITE lock
{ // downgrade to a READ lock
holder.getReleasableIndexEntryReadLock( labelId, propertyKeyId, stringVal )
.registerWithTransaction();
w.release();
}
}
}
return nodeId;
}
}
@Override
public boolean nodeHasLabel( KernelStatement state, long nodeId, int labelId ) throws EntityNotFoundException
{
return entityReadOperations.nodeHasLabel( state, nodeId, labelId );
}
@Override
public PrimitiveIntIterator nodeGetLabels( KernelStatement state, long nodeId ) throws EntityNotFoundException
{
return entityReadOperations.nodeGetLabels( state, nodeId );
}
@Override
public Property nodeGetProperty( KernelStatement state, long nodeId, int propertyKeyId ) throws EntityNotFoundException
{
return entityReadOperations.nodeGetProperty( state, nodeId, propertyKeyId );
}
@Override
public Property relationshipGetProperty( KernelStatement state, long relationshipId, int propertyKeyId ) throws
EntityNotFoundException
{
return entityReadOperations.relationshipGetProperty( state, relationshipId, propertyKeyId );
}
@Override
public Property graphGetProperty( KernelStatement state, int propertyKeyId )
{
return entityReadOperations.graphGetProperty( state, propertyKeyId );
}
@Override
public PrimitiveLongIterator nodeGetPropertyKeys( KernelStatement state, long nodeId ) throws EntityNotFoundException
{
return entityReadOperations.nodeGetPropertyKeys( state, nodeId );
}
@Override
public Iterator<DefinedProperty> nodeGetAllProperties( KernelStatement state, long nodeId ) throws EntityNotFoundException
{
return entityReadOperations.nodeGetAllProperties( state, nodeId );
}
@Override
public PrimitiveLongIterator relationshipGetPropertyKeys( KernelStatement state, long relationshipId ) throws
EntityNotFoundException
{
return entityReadOperations.relationshipGetPropertyKeys( state, relationshipId );
}
@Override
public Iterator<DefinedProperty> relationshipGetAllProperties( KernelStatement state, long relationshipId ) throws
EntityNotFoundException
{
return entityReadOperations.relationshipGetAllProperties( state, relationshipId );
}
@Override
public PrimitiveLongIterator graphGetPropertyKeys( KernelStatement state )
{
return entityReadOperations.graphGetPropertyKeys( state );
}
@Override
public Iterator<DefinedProperty> graphGetAllProperties( KernelStatement state )
{
return entityReadOperations.graphGetAllProperties( state );
}
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_api_ConstraintEnforcingEntityOperations.java
|
4,830
|
// Annotation processor for @Service.Implementation: for each annotated class,
// records its qualified name under META-INF/services/<service-interface> so
// the JDK ServiceLoader can discover it.
@SupportedSourceVersion( SourceVersion.RELEASE_7 )
@SupportedAnnotationTypes( "org.neo4j.helpers.Service.Implementation" )
public class ServiceProcessor extends AnnotationProcessor
{
@SuppressWarnings( "unchecked" )
@Override
protected void process( TypeElement annotationType, Element annotated, AnnotationMirror annotation,
Map<? extends ExecutableElement, ? extends AnnotationValue> values ) throws IOException
{
// The annotation has a single member whose value is an array of the
// service interfaces the annotated class implements.
AnnotationValue member = values.values().iterator().next();
List<? extends AnnotationValue> services = (List<? extends AnnotationValue>) member.getValue();
String implementation = ( (TypeElement) annotated ).getQualifiedName().toString();
for ( AnnotationValue entry : services )
{
TypeMirror serviceType = (TypeMirror) entry.getValue();
// One registry file per service type; the implementation name is appended.
addTo( implementation, "META-INF", "services", serviceType.toString() );
}
}
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_annotations_ServiceProcessor.java
|
4,831
|
// Annotation processor for @Documented: when the annotation carries no
// explicit value, the element's JavaDoc comment is extracted and written back
// into the annotation at compile time via the CompilationManipulator.
@SupportedSourceVersion( SourceVersion.RELEASE_7 )
@SupportedAnnotationTypes( "org.neo4j.kernel.impl.annotations.Documented" )
public class DocumentationProcessor extends AnnotationProcessor
{
// Sentinel meaning "no explicit value given"; read reflectively from the
// annotation's declared default so the two cannot drift apart.
private static final String DEFAULT_VALUE;
static
{
String defaultValue = Documented.DEFAULT_VALUE;
try
{
defaultValue = (String) Documented.class.getMethod( "value" ).getDefaultValue();
}
catch ( Exception e )
{
// OK
}
DEFAULT_VALUE = defaultValue;
}
@Override
protected void process( TypeElement annotationType, Element annotated, AnnotationMirror annotation,
Map<? extends ExecutableElement, ? extends AnnotationValue> values ) throws IOException
{
// @Documented has exactly one member ("value"); anything else is malformed.
if ( values.size() != 1 )
{
error( annotated, annotation,
"Annotation values don't match the expectation" );
return;
}
String value = (String) values.values().iterator().next().getValue();
// Only fill in the value when the author left it at the default.
if ( DEFAULT_VALUE.equals( value ) || value == null )
{
String javadoc = processingEnv.getElementUtils().getDocComment( annotated );
if ( javadoc == null )
{
error( annotated, annotation,
"Cannot extract JavaDoc documentation comment for "
+ annotated );
// return no period, since that could mess up Title generation;
javadoc = "Documentation not available";
}
if ( !updateAnnotationValue( annotated, annotation, "value", javadoc ) )
{
warn( annotated, annotation, "Failed to update annotation value" );
}
}
}
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_annotations_DocumentationProcessor.java
|
4,832
|
// javac-specific implementation that rewrites the compiler's own AST
// (com.sun.tools.javac internals) to update annotation values and attach new
// annotations during annotation processing. Relies on non-public compiler
// APIs, hence the "restriction" suppression.
@SuppressWarnings( "restriction" )
private static class JavacManipulator extends CompilationManipulator
{
private final AnnotationProcessor proc;
private final com.sun.source.util.Trees trees;
private final com.sun.tools.javac.tree.TreeMaker maker;
private final com.sun.tools.javac.model.JavacElements elements;
JavacManipulator( AnnotationProcessor proc, ProcessingEnvironment env )
{
// The javac Context gives access to the tree factory and name table.
com.sun.tools.javac.util.Context context = ( (com.sun.tools.javac.processing.JavacProcessingEnvironment) env )
.getContext();
this.proc = proc;
this.trees = com.sun.source.util.Trees.instance( env );
this.maker = com.sun.tools.javac.tree.TreeMaker.instance( context );
this.elements = com.sun.tools.javac.model.JavacElements.instance( context );
}
// Sets annotation member 'key' to 'value' in the AST: overwrites the RHS of
// an existing key=value pair, or appends a new assignment otherwise.
// Returns false when the tree is not a javac annotation node.
@Override
boolean updateAnnotationValue( Element annotated, AnnotationMirror annotation, String key, String value )
{
com.sun.source.tree.Tree leaf = trees.getTree( annotated, annotation );
if ( leaf instanceof com.sun.tools.javac.tree.JCTree.JCAnnotation )
{
com.sun.tools.javac.tree.JCTree.JCAnnotation annot = (com.sun.tools.javac.tree.JCTree.JCAnnotation) leaf;
for ( com.sun.tools.javac.tree.JCTree.JCExpression expr : annot.args )
{
if ( expr instanceof com.sun.tools.javac.tree.JCTree.JCAssign )
{
com.sun.tools.javac.tree.JCTree.JCAssign assign = (com.sun.tools.javac.tree.JCTree.JCAssign) expr;
if ( assign.lhs instanceof com.sun.tools.javac.tree.JCTree.JCIdent )
{
com.sun.tools.javac.tree.JCTree.JCIdent ident = (com.sun.tools.javac.tree.JCTree.JCIdent) assign.lhs;
if ( ident.name.contentEquals( key ) )
{
assign.rhs = maker.Literal( value );
return true;
}
}
}
}
annot.args = annot.args.append( assignment( key, value ) );
return true;
}
return false;
}
// Prepends an annotation of 'annotationType' to a method, class or variable
// declaration unless one with the same (simple) name is already present.
// Returns false for unsupported declaration kinds.
@Override
boolean addAnnotation( Element target, String annotationType, Map<String, Object> parameters )
{
com.sun.source.tree.Tree leaf = trees.getPath( target ).getLeaf();
final com.sun.tools.javac.tree.JCTree.JCModifiers modifiers;
if ( leaf instanceof com.sun.tools.javac.tree.JCTree.JCMethodDecl )
{
com.sun.tools.javac.tree.JCTree.JCMethodDecl method = (com.sun.tools.javac.tree.JCTree.JCMethodDecl) leaf;
modifiers = method.mods != null ? method.mods : ( method.mods = makeModifiers( target, 0 ) );
}
else if ( leaf instanceof com.sun.tools.javac.tree.JCTree.JCClassDecl )
{
com.sun.tools.javac.tree.JCTree.JCClassDecl clazz = (com.sun.tools.javac.tree.JCTree.JCClassDecl) leaf;
modifiers = clazz.mods != null ? clazz.mods : ( clazz.mods = makeModifiers( target, 0 ) );
}
else if ( leaf instanceof com.sun.tools.javac.tree.JCTree.JCVariableDecl )
{
com.sun.tools.javac.tree.JCTree.JCVariableDecl param = (com.sun.tools.javac.tree.JCTree.JCVariableDecl) leaf;
modifiers = param.mods != null ? param.mods : ( param.mods = makeModifiers( target, 0 ) );
}
else
{
return false;
}
for ( com.sun.tools.javac.tree.JCTree.JCAnnotation annotation : modifiers.annotations )
{
if ( annotation.annotationType instanceof com.sun.tools.javac.tree.JCTree.JCIdent )
{
com.sun.tools.javac.tree.JCTree.JCIdent ident = (com.sun.tools.javac.tree.JCTree.JCIdent) annotation.annotationType;
if ( ident.getName().contentEquals( annotationType ) ) return false;
}
}
modifiers.annotations = modifiers.annotations.prepend( maker.Annotation( typeName( annotationType ),
makeParams( parameters ) ) );
return true;
}
// Builds a (possibly dotted) identifier expression for a type name.
private com.sun.tools.javac.tree.JCTree.JCExpression typeName( String typeName )
{
String[] parts = typeName.split( "\\.", -1 );
com.sun.tools.javac.tree.JCTree.JCExpression exp = maker.Ident( elements.getName( parts[0] ) );
for ( int i = 1; i < parts.length; i++ )
exp = maker.Select( exp, elements.getName( parts[i] ) );
return exp;
}
// Creates an empty modifiers node when the declaration had none.
private com.sun.tools.javac.tree.JCTree.JCModifiers makeModifiers( Element target, long flags )
{
proc.warn( target, "No modifiers, creating default" );
return maker.Modifiers( flags,
com.sun.tools.javac.util.List.<com.sun.tools.javac.tree.JCTree.JCAnnotation>nil() );
}
// Converts a key->value map to a list of key=value annotation arguments.
private com.sun.tools.javac.util.List<com.sun.tools.javac.tree.JCTree.JCExpression> makeParams(
Map<String, Object> parameters )
{
com.sun.tools.javac.util.List<com.sun.tools.javac.tree.JCTree.JCExpression> result = com.sun.tools.javac.util.List
.<com.sun.tools.javac.tree.JCTree.JCExpression>nil();
for ( Map.Entry<String, Object> entry : parameters.entrySet() )
{
result = result.prepend( assignment( entry.getKey(), entry.getValue() ) );
}
return result;
}
private com.sun.tools.javac.tree.JCTree.JCAssign assignment( String key, Object value )
{
return maker.Assign( maker.Ident( elements.getName( key ) ), maker.Literal( value ) );
}
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_annotations_CompilationManipulator.java
|
4,833
|
// Enum constant identifying the standard javac processing environment; its
// factory creates the javac-specific AST manipulator.
JAVAC( "com.sun.tools.javac.processing.JavacProcessingEnvironment" )
{
@Override
CompilationManipulator create( AnnotationProcessor proc, ProcessingEnvironment env )
{
return new JavacManipulator( proc, env );
}
};
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_annotations_CompilationManipulator.java
|
4,834
|
// Abstraction over compiler-specific AST manipulation used by annotation
// processors to rewrite annotation values and attach annotations at compile
// time. Concrete support is discovered from the known Environment entries;
// load() returns null when the running compiler is not supported.
abstract class CompilationManipulator
{
// Returns a manipulator for the first environment that matches the running
// compiler, or null when none does (callers must handle null).
static CompilationManipulator load( AnnotationProcessor proc, ProcessingEnvironment processingEnv )
{
for ( Environment env : Environment.values() )
{
CompilationManipulator manipulator = env.load( proc, processingEnv );
if ( manipulator != null ) return manipulator;
}
return null;
}
// Known compiler environments; each constant knows the environment class it
// matches and how to create the corresponding manipulator.
private enum Environment
{
JAVAC( "com.sun.tools.javac.processing.JavacProcessingEnvironment" )
{
@Override
CompilationManipulator create( AnnotationProcessor proc, ProcessingEnvironment env )
{
return new JavacManipulator( proc, env );
}
};
// The environment class, or null when it is not on the classpath.
private final Class<?> environment;
private Environment( String environment )
{
this.environment = loadClass( environment );
}
// Creates the manipulator when this environment matches; swallows both
// exceptions and linkage errors so that an unusable environment is simply
// treated as unsupported.
CompilationManipulator load( AnnotationProcessor proc, ProcessingEnvironment env )
{
try
{
if ( environment != null && environment.isInstance( env ) && canLoad( env ) )
{
return create( proc, env );
}
}
catch ( Exception e )
{
return null;
}
catch ( LinkageError e )
{
return null;
}
return null;
}
// Hook for constants that need an extra availability check.
boolean canLoad( @SuppressWarnings( "unused" ) ProcessingEnvironment env )
{
return true;
}
abstract CompilationManipulator create( AnnotationProcessor proc, ProcessingEnvironment env );
// Best-effort class lookup; returns null rather than failing when the
// compiler-internal class is absent.
private static Class<?> loadClass( String className )
{
try
{
return Class.forName( className );
}
catch ( Throwable e )
{
return null;
}
}
}
// Sets annotation member 'key' to 'value' on the given annotation usage.
abstract boolean updateAnnotationValue( Element annotated, AnnotationMirror annotation, String key, String value );
// Adds an annotation of the given type (with parameters) to the target.
abstract boolean addAnnotation( Element target, String annotationType, Map<String, Object> parameters );
// javac implementation: rewrites the compiler's own AST via non-public
// com.sun.tools.javac APIs, hence the "restriction" suppression.
@SuppressWarnings( "restriction" )
private static class JavacManipulator extends CompilationManipulator
{
private final AnnotationProcessor proc;
private final com.sun.source.util.Trees trees;
private final com.sun.tools.javac.tree.TreeMaker maker;
private final com.sun.tools.javac.model.JavacElements elements;
JavacManipulator( AnnotationProcessor proc, ProcessingEnvironment env )
{
// The javac Context gives access to the tree factory and name table.
com.sun.tools.javac.util.Context context = ( (com.sun.tools.javac.processing.JavacProcessingEnvironment) env )
.getContext();
this.proc = proc;
this.trees = com.sun.source.util.Trees.instance( env );
this.maker = com.sun.tools.javac.tree.TreeMaker.instance( context );
this.elements = com.sun.tools.javac.model.JavacElements.instance( context );
}
// Overwrites the RHS of an existing key=value argument, or appends a new
// assignment; returns false when the tree is not a javac annotation node.
@Override
boolean updateAnnotationValue( Element annotated, AnnotationMirror annotation, String key, String value )
{
com.sun.source.tree.Tree leaf = trees.getTree( annotated, annotation );
if ( leaf instanceof com.sun.tools.javac.tree.JCTree.JCAnnotation )
{
com.sun.tools.javac.tree.JCTree.JCAnnotation annot = (com.sun.tools.javac.tree.JCTree.JCAnnotation) leaf;
for ( com.sun.tools.javac.tree.JCTree.JCExpression expr : annot.args )
{
if ( expr instanceof com.sun.tools.javac.tree.JCTree.JCAssign )
{
com.sun.tools.javac.tree.JCTree.JCAssign assign = (com.sun.tools.javac.tree.JCTree.JCAssign) expr;
if ( assign.lhs instanceof com.sun.tools.javac.tree.JCTree.JCIdent )
{
com.sun.tools.javac.tree.JCTree.JCIdent ident = (com.sun.tools.javac.tree.JCTree.JCIdent) assign.lhs;
if ( ident.name.contentEquals( key ) )
{
assign.rhs = maker.Literal( value );
return true;
}
}
}
}
annot.args = annot.args.append( assignment( key, value ) );
return true;
}
return false;
}
// Prepends the annotation to a method/class/variable declaration unless an
// annotation with the same (simple) name is already present; returns false
// for unsupported declaration kinds.
@Override
boolean addAnnotation( Element target, String annotationType, Map<String, Object> parameters )
{
com.sun.source.tree.Tree leaf = trees.getPath( target ).getLeaf();
final com.sun.tools.javac.tree.JCTree.JCModifiers modifiers;
if ( leaf instanceof com.sun.tools.javac.tree.JCTree.JCMethodDecl )
{
com.sun.tools.javac.tree.JCTree.JCMethodDecl method = (com.sun.tools.javac.tree.JCTree.JCMethodDecl) leaf;
modifiers = method.mods != null ? method.mods : ( method.mods = makeModifiers( target, 0 ) );
}
else if ( leaf instanceof com.sun.tools.javac.tree.JCTree.JCClassDecl )
{
com.sun.tools.javac.tree.JCTree.JCClassDecl clazz = (com.sun.tools.javac.tree.JCTree.JCClassDecl) leaf;
modifiers = clazz.mods != null ? clazz.mods : ( clazz.mods = makeModifiers( target, 0 ) );
}
else if ( leaf instanceof com.sun.tools.javac.tree.JCTree.JCVariableDecl )
{
com.sun.tools.javac.tree.JCTree.JCVariableDecl param = (com.sun.tools.javac.tree.JCTree.JCVariableDecl) leaf;
modifiers = param.mods != null ? param.mods : ( param.mods = makeModifiers( target, 0 ) );
}
else
{
return false;
}
for ( com.sun.tools.javac.tree.JCTree.JCAnnotation annotation : modifiers.annotations )
{
if ( annotation.annotationType instanceof com.sun.tools.javac.tree.JCTree.JCIdent )
{
com.sun.tools.javac.tree.JCTree.JCIdent ident = (com.sun.tools.javac.tree.JCTree.JCIdent) annotation.annotationType;
if ( ident.getName().contentEquals( annotationType ) ) return false;
}
}
modifiers.annotations = modifiers.annotations.prepend( maker.Annotation( typeName( annotationType ),
makeParams( parameters ) ) );
return true;
}
// Builds a (possibly dotted) identifier expression for a type name.
private com.sun.tools.javac.tree.JCTree.JCExpression typeName( String typeName )
{
String[] parts = typeName.split( "\\.", -1 );
com.sun.tools.javac.tree.JCTree.JCExpression exp = maker.Ident( elements.getName( parts[0] ) );
for ( int i = 1; i < parts.length; i++ )
exp = maker.Select( exp, elements.getName( parts[i] ) );
return exp;
}
// Creates an empty modifiers node when the declaration had none.
private com.sun.tools.javac.tree.JCTree.JCModifiers makeModifiers( Element target, long flags )
{
proc.warn( target, "No modifiers, creating default" );
return maker.Modifiers( flags,
com.sun.tools.javac.util.List.<com.sun.tools.javac.tree.JCTree.JCAnnotation>nil() );
}
// Converts a key->value map to a list of key=value annotation arguments.
private com.sun.tools.javac.util.List<com.sun.tools.javac.tree.JCTree.JCExpression> makeParams(
Map<String, Object> parameters )
{
com.sun.tools.javac.util.List<com.sun.tools.javac.tree.JCTree.JCExpression> result = com.sun.tools.javac.util.List
.<com.sun.tools.javac.tree.JCTree.JCExpression>nil();
for ( Map.Entry<String, Object> entry : parameters.entrySet() )
{
result = result.prepend( assignment( entry.getKey(), entry.getValue() ) );
}
return result;
}
private com.sun.tools.javac.tree.JCTree.JCAssign assignment( String key, Object value )
{
return maker.Assign( maker.Ident( elements.getName( key ) ), maker.Literal( value ) );
}
}
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_annotations_CompilationManipulator.java
|
4,835
|
public abstract class AnnotationProcessor extends AbstractProcessor
{
private CompilationManipulator manipulator = null;
@Override
public synchronized void init( @SuppressWarnings( "hiding" ) ProcessingEnvironment processingEnv )
{
super.init( processingEnv );
manipulator = CompilationManipulator.load( this, processingEnv );
if ( manipulator == null )
processingEnv.getMessager().printMessage( Kind.NOTE,
"Cannot write values to this compiler: " + processingEnv.getClass().getName() );
}
@Override
public boolean process( Set<? extends TypeElement> annotations, RoundEnvironment roundEnv )
{
for ( TypeElement type : annotations )
{
for ( Element annotated : roundEnv.getElementsAnnotatedWith( type ) )
{
for ( AnnotationMirror mirror : annotated.getAnnotationMirrors() )
{
if ( mirror.getAnnotationType().asElement().equals( type ) )
{
try
{
process( type, annotated, mirror, processingEnv.getElementUtils()
.getElementValuesWithDefaults( mirror ) );
}
catch ( Exception e )
{
e.printStackTrace();
processingEnv.getMessager().printMessage( Kind.ERROR, "Internal error: " + e.toString(),
annotated, mirror );
}
}
}
}
}
return false;
}
protected final void warn( Element element, String message )
{
processingEnv.getMessager().printMessage( Kind.WARNING, message, element );
}
protected final void warn( Element element, AnnotationMirror annotation, String message )
{
processingEnv.getMessager().printMessage( Kind.WARNING, message, element, annotation );
}
protected final void error( Element element, String message )
{
processingEnv.getMessager().printMessage( Kind.ERROR, message, element );
}
protected final void error( Element element, AnnotationMirror annotation, String message )
{
processingEnv.getMessager().printMessage( Kind.ERROR, message, element, annotation );
}
protected final boolean updateAnnotationValue( Element annotated, AnnotationMirror annotation, String key,
String value )
{
return manipulator != null && manipulator.updateAnnotationValue( annotated, annotation, key, value );
}
protected final boolean addAnnotation( Element target, Class<? extends Annotation> annotation, Object value )
{
return addAnnotation( target, annotation, Collections.singletonMap( "value", value ) );
}
protected final boolean addAnnotation( Element target, Class<? extends Annotation> annotation, String key,
Object value )
{
return addAnnotation( target, annotation, Collections.singletonMap( key, value ) );
}
protected final boolean addAnnotation( Element target, Class<? extends Annotation> annotation )
{
return addAnnotation( target, annotation, Collections.<String, Object>emptyMap() );
}
protected final boolean addAnnotation( Element target, Class<? extends Annotation> annotation,
Map<String, Object> parameters )
{
return manipulator != null && manipulator.addAnnotation( target, nameOf( annotation ), parameters );
}
private static String nameOf( Class<? extends Annotation> annotation )
{
return annotation.getName().replace( '$', '.' );
}
protected abstract void process( TypeElement annotationType, Element annotated, AnnotationMirror annotation,
Map<? extends ExecutableElement, ? extends AnnotationValue> values ) throws IOException;
private static Pattern nl = Pattern.compile( "\n" );
/**
 * Appends {@code line} to the generated resource file identified by {@code path}
 * (relative to CLASS_OUTPUT), creating the file and its directories if needed.
 * Lines already present in the file are not appended again, so repeated annotation
 * processing rounds do not grow the file.
 *
 * @param line the line to append, compared verbatim against existing lines
 * @param path path segments joined with '/' to form the resource name
 * @throws IOException if the resource cannot be read, created or written
 */
void addTo( String line, String... path ) throws IOException
{
    FileObject fo = processingEnv.getFiler().getResource( StandardLocation.CLASS_OUTPUT, "", path( path ) );
    URI uri = fo.toUri();
    File file;
    try
    {
        file = new File( uri );
    }
    catch ( Exception e )
    {
        // Some filers hand out non-hierarchical URIs that File(URI) rejects;
        // fall back to interpreting the raw string form as a path.
        file = new File( uri.toString() );
    }
    if ( file.exists() )
    {
        // Dedupe: bail out if the exact line is already present.
        for ( String previous : nl.split( fo.getCharContent( true ), 0 ) )
        {
            if ( line.equals( previous ) )
            {
                return;
            }
        }
    }
    else
    {
        File parent = file.getParentFile();
        // mkdirs() returns false both on failure and when the directory already exists,
        // so only fail when the directory is actually missing afterwards. The original
        // ignored the result entirely and would fail later with a confusing write error.
        if ( parent != null && !parent.mkdirs() && !parent.isDirectory() )
        {
            throw new IOException( "Unable to create directory " + parent );
        }
    }
    // try-with-resources: the original chained append().close() and leaked the
    // writer (and its file handle) if append() threw before close() was reached.
    try ( java.io.Writer writer = newFilePrintWriter( file, UTF_8 ) )
    {
        writer.append( line ).append( "\n" );
    }
}
/**
 * Builds a '/'-separated resource name from the given path segments.
 * An empty array yields the empty string.
 */
private String path( String[] path )
{
    StringBuilder result = new StringBuilder();
    for ( int i = 0; i < path.length; i++ )
    {
        if ( i > 0 )
        {
            result.append( '/' );
        }
        result.append( path[i] );
    }
    return result.toString();
}
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_impl_annotations_AnnotationProcessor.java
|
4,836
|
/**
 * Test double: a data source whose connection must never be touched.
 * {@link #getXaConnection()} throws, so any access during unregistration/shutdown
 * fails the test loudly.
 */
private static class DataSourceThatRefusesAccessToResources extends XaDataSource
{
public DataSourceThatRefusesAccessToResources( String name )
{
// Empty branch id; only the name matters for registration/unregistration.
super( new byte[]{}, name );
}
@Override
public XaConnection getXaConnection()
{
throw new RuntimeException( "Should not access connection during shutdown." );
}
// Lifecycle callbacks are deliberate no-ops: the test only exercises (un)registration.
@Override
public void init() throws Throwable { }
@Override
public void start() throws Throwable { }
@Override
public void stop() throws Throwable { }
@Override
public void shutdown() throws Throwable { }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_XaDataSourceManagerTest.java
|
4,837
|
/**
 * Verifies that {@code XaDataSourceManager} does not touch a data source's resources
 * (i.e. never asks for an {@code XaConnection}) while unregistering it.
 */
public class XaDataSourceManagerTest
{
@Test
public void shouldNotAccessResourcesDuringShutdown()
{
// null logger: assumed to be tolerated by this constructor for test use -- TODO confirm
XaDataSourceManager manager = new XaDataSourceManager(null);
manager.registerDataSource( new DataSourceThatRefusesAccessToResources( "the-data-source" ) );
// Would blow up from getXaConnection() if unregistration accessed the connection.
manager.unregisterDataSource( "the-data-source" );
}
/**
 * Test double: a data source whose connection must never be touched;
 * getXaConnection() throws to make any such access fail the test.
 */
private static class DataSourceThatRefusesAccessToResources extends XaDataSource
{
public DataSourceThatRefusesAccessToResources( String name )
{
// Empty branch id; only the name matters here.
super( new byte[]{}, name );
}
@Override
public XaConnection getXaConnection()
{
throw new RuntimeException( "Should not access connection during shutdown." );
}
// Lifecycle callbacks are deliberate no-ops.
@Override
public void init() throws Throwable { }
@Override
public void start() throws Throwable { }
@Override
public void stop() throws Throwable { }
@Override
public void shutdown() throws Throwable { }
}
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_XaDataSourceManagerTest.java
|
4,838
|
{
@Override
public void deadlock( DebuggedThread thread )
{
// Another thread wants to get into the synchronized region,
// time for the sleeping thread in there to make progress
thread.resume();
}
};
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_TestPropertyDataRace.java
|
4,839
|
{
@Override
public void run()
{
Transaction txn = graphdb.beginTx();
try
{
while ( true )
{
try
{
prepare.await();
break;
}
catch ( InterruptedException e )
{
Thread.interrupted(); // reset
}
}
for ( String key : one.getPropertyKeys() )
{
one.removeProperty( key );
}
txn.success();
}
finally
{
txn.finish();
}
clearCaches();
done.countDown();
}
}.start();
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_TestPropertyDataRace.java
|
4,840
|
{
@Override
public void run()
{
Transaction txn = graphdb.beginTx();
try
{
for ( String key : one.getPropertyKeys() )
{
one.removeProperty( key );
}
clearCaches();
prepare.countDown();
txn.success();
}
finally
{
txn.finish();
}
txn = graphdb.beginTx();
try
{
two.setProperty( "node", "two" );
txn.success();
}
finally
{
txn.finish();
}
countDown( done );
}
}.start();
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_TestPropertyDataRace.java
|
4,841
|
@ForeignBreakpoints( {
@ForeignBreakpoints.BreakpointDef( type = "org.neo4j.kernel.impl.core.ArrayBasedPrimitive",
method = "setProperties" ),
@ForeignBreakpoints.BreakpointDef( type = "org.neo4j.kernel.impl.core.NodeManager",
method = "getNodeIfCached" ) } )
@RunWith( SubProcessTestRunner.class )
@Ignore( "Ignored in 2.0 due to half-way refactoring moving properties into kernel API. " +
"Unignore and change appropriately when it's done" )
public class TestPropertyDataRace
{
@ClassRule
public static EmbeddedDatabaseRule database = new EmbeddedDatabaseRule();
public static final TargetDirectory targetDir = TargetDirectory.forTest( TestPropertyDataRace.class );
@Test
@EnabledBreakpoints( { "enable breakpoints", "done" } )
public void readingMutatorVersusCommittingMutator() throws Exception
{
final Node one, two;
final GraphDatabaseService graphdb = database.getGraphDatabaseService();
Transaction tx = graphdb.beginTx();
try
{
one = graphdb.createNode();
two = graphdb.createNode();
one.setProperty( "node", "one" );
tx.success();
}
finally
{
tx.finish();
}
clearCaches();
final CountDownLatch done = new CountDownLatch( 2 ), prepare = new CountDownLatch( 1 );
new Thread( "committing mutator" )
{
@Override
public void run()
{
Transaction txn = graphdb.beginTx();
try
{
for ( String key : one.getPropertyKeys() )
{
one.removeProperty( key );
}
clearCaches();
prepare.countDown();
txn.success();
}
finally
{
txn.finish();
}
txn = graphdb.beginTx();
try
{
two.setProperty( "node", "two" );
txn.success();
}
finally
{
txn.finish();
}
countDown( done );
}
}.start();
new Thread( "reading mutator" )
{
@Override
public void run()
{
Transaction txn = graphdb.beginTx();
try
{
while ( true )
{
try
{
prepare.await();
break;
}
catch ( InterruptedException e )
{
Thread.interrupted(); // reset
}
}
for ( String key : one.getPropertyKeys() )
{
one.removeProperty( key );
}
txn.success();
}
finally
{
txn.finish();
}
clearCaches();
done.countDown();
}
}.start();
if ( !done.await( 1, MINUTES ) )
{
File dumpDirectory = targetDir.cleanDirectory( "dump" );
dumpVmInfo( dumpDirectory );
new DumpProcessInformation( new SystemOutLogging(), dumpDirectory ).doThreadDump(
stringContains( SubProcess.class.getSimpleName() ) );
fail( "Test didn't complete within a reasonable time, dumping process information to " + dumpDirectory );
}
for ( String key : two.getPropertyKeys() )
{
assertEquals( "two", two.getProperty( key ) );
}
}
@BreakpointTrigger( "enable breakpoints" )
private void clearCaches()
{
database.getGraphDatabaseAPI().getDependencyResolver().resolveDependency( NodeManager.class ).clearCache();
}
@BreakpointTrigger( "done" )
private void countDown( CountDownLatch latch )
{
latch.countDown();
}
private static DebuggedThread thread;
private static final DebuggerDeadlockCallback RESUME_THREAD = new DebuggerDeadlockCallback()
{
@Override
public void deadlock( DebuggedThread thread )
{
// Another thread wants to get into the synchronized region,
// time for the sleeping thread in there to make progress
thread.resume();
}
};
@BreakpointHandler( "enable breakpoints" )
public static void onEnableBreakpoints( BreakPoint self,
@BreakpointHandler( "getNodeIfCached" ) BreakPoint getNodeIfCached,
@BreakpointHandler( "setProperties" ) BreakPoint setProperties )
{
if ( getNodeIfCached.isEnabled() )
{
setProperties.enable();
self.disable();
}
else
{
getNodeIfCached.enable();
}
}
@BreakpointHandler( "setProperties" )
public static void onSetProperties( BreakPoint self, DebugInterface di )
{
self.disable();
if ( thread != null )
{
thread.resume();
}
thread = di.thread().suspend( RESUME_THREAD );
}
@BreakpointHandler( "getNodeIfCached" )
public static void onGetNodeIfCached( BreakPoint self, DebugInterface di )
{
self.disable();
if ( thread == null )
{
thread = di.thread().suspend( null );
}
}
@BreakpointHandler( "done" )
public static void onDone()
{
thread.resume();
thread = null;
}
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_TestPropertyDataRace.java
|
4,842
|
{
@Override
public Statement apply( Statement base, Description description )
{
tearDownDb();
setupGraphDatabase(description.getTestClass().getName(),
description.getTestClass().getAnnotation( RequiresPersistentGraphDatabase.class ).value());
return base;
}
};
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_AbstractNeo4jTestCase.java
|
4,843
|
@AbstractNeo4jTestCase.RequiresPersistentGraphDatabase( false )
public abstract class AbstractNeo4jTestCase
{
@Retention( RetentionPolicy.RUNTIME )
@Target( ElementType.TYPE )
@Inherited
public @interface RequiresPersistentGraphDatabase
{
boolean value() default true;
}
protected static final File NEO4J_BASE_DIR = new File( "target", "var" );
public static final @ClassRule TestRule START_GRAPHDB = new TestRule()
{
@Override
public Statement apply( Statement base, Description description )
{
tearDownDb();
setupGraphDatabase(description.getTestClass().getName(),
description.getTestClass().getAnnotation( RequiresPersistentGraphDatabase.class ).value());
return base;
}
};
private static ThreadLocal<GraphDatabaseAPI> threadLocalGraphDb = new ThreadLocal<>();
private static ThreadLocal<String> currentTestClassName = new ThreadLocal<>();
private static ThreadLocal<Boolean> requiresPersistentGraphDatabase = new ThreadLocal<>();
private GraphDatabaseAPI graphDb;
private Transaction tx;
protected AbstractNeo4jTestCase()
{
graphDb = threadLocalGraphDb.get();
}
public GraphDatabaseService getGraphDb()
{
return graphDb;
}
private static void setupGraphDatabase( String testClassName, boolean requiresPersistentGraphDatabase )
{
AbstractNeo4jTestCase.requiresPersistentGraphDatabase.set( requiresPersistentGraphDatabase );
AbstractNeo4jTestCase.currentTestClassName.set( testClassName );
if ( requiresPersistentGraphDatabase )
{
try
{
FileUtils.deleteRecursively( new File( getStorePath( "neo-test" ) ) );
}
catch ( IOException e )
{
throw new RuntimeException( e );
}
}
threadLocalGraphDb.set( (GraphDatabaseAPI) (requiresPersistentGraphDatabase ?
new TestGraphDatabaseFactory().newEmbeddedDatabase( getStorePath( "neo-test" ) ) :
new TestGraphDatabaseFactory().newImpermanentDatabase()) );
}
public GraphDatabaseAPI getGraphDbAPI()
{
return graphDb;
}
protected boolean restartGraphDbBetweenTests()
{
return false;
}
public Transaction getTransaction()
{
return tx;
}
public static String getStorePath( String endPath )
{
return new File( NEO4J_BASE_DIR, currentTestClassName.get() + "-" + endPath ).getAbsolutePath();
}
@Before
public void setUpTest()
{
if ( restartGraphDbBetweenTests() && graphDb == null )
{
setupGraphDatabase( currentTestClassName.get(), requiresPersistentGraphDatabase.get());
graphDb = threadLocalGraphDb.get();
}
tx = graphDb.beginTx();
}
@After
public void tearDownTest()
{
if ( tx != null )
{
tx.finish();
}
if ( restartGraphDbBetweenTests() )
{
tearDownDb();
}
}
@AfterClass
public static void tearDownDb()
{
try
{
if ( threadLocalGraphDb.get() != null ) threadLocalGraphDb.get().shutdown();
}
finally
{
threadLocalGraphDb.remove();
}
}
public void setTransaction( Transaction tx )
{
this.tx = tx;
}
public Transaction newTransaction()
{
if ( tx != null )
{
tx.success();
tx.finish();
}
tx = graphDb.beginTx();
return tx;
}
public void commit()
{
if ( tx != null )
{
tx.success();
tx.finish();
tx = null;
}
}
public void finish()
{
if ( tx != null )
{
tx.finish();
tx = null;
}
}
public void rollback()
{
if ( tx != null )
{
tx.failure();
tx.finish();
tx = null;
}
}
public NodeManager getNodeManager()
{
return graphDb.getDependencyResolver().resolveDependency( NodeManager.class );
}
public static void deleteFileOrDirectory( String dir )
{
deleteFileOrDirectory( new File( dir ) );
}
public static void deleteFileOrDirectory( File file )
{
if ( !file.exists() )
{
return;
}
if ( file.isDirectory() )
{
for ( File child : file.listFiles() )
{
deleteFileOrDirectory( child );
}
}
else
{
file.delete();
}
}
protected void clearCache()
{
getGraphDbAPI().getDependencyResolver().resolveDependency( NodeManager.class ).clearCache();
}
protected long propertyRecordsInUse()
{
return propertyStore().getNumberOfIdsInUse();
}
protected long dynamicStringRecordsInUse()
{
return dynamicRecordsInUse( "stringPropertyStore" );
}
protected long dynamicArrayRecordsInUse()
{
return dynamicRecordsInUse( "arrayPropertyStore" );
}
private long dynamicRecordsInUse( String fieldName )
{
try
{
Field storeField = PropertyStore.class.getDeclaredField( fieldName );
storeField.setAccessible( true );
return ( (AbstractDynamicStore) storeField.get( propertyStore() ) ).getNumberOfIdsInUse();
}
catch ( Exception e )
{
throw new RuntimeException( e );
}
}
protected PropertyStore propertyStore()
{
XaDataSourceManager dsMgr = graphDb.getDependencyResolver().resolveDependency( XaDataSourceManager.class );
return dsMgr.getNeoStoreDataSource().getXaConnection().getPropertyStore();
}
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_AbstractNeo4jTestCase.java
|
4,844
|
/**
 * Swaps the active {@link TxIdGenerator} implementation when this HA member's role
 * changes: the master variant generates ids locally and pushes committed transactions
 * to slaves, while the slave variant delegates to the master.
 */
public class TxIdGeneratorModeSwitcher extends AbstractModeSwitcher<TxIdGenerator>
{
private final HaXaDataSourceManager xaDsm;
private final DelegateInvocationHandler<Master> master;
private final RequestContextFactory requestContextFactory;
private final StringLogger msgLog;
private final Config config;
private final Slaves slaves;
private final AbstractTransactionManager tm;
private final JobScheduler scheduler;
public TxIdGeneratorModeSwitcher( HighAvailabilityMemberStateMachine stateMachine,
DelegateInvocationHandler<TxIdGenerator> delegate, HaXaDataSourceManager xaDsm,
DelegateInvocationHandler<Master> master,
RequestContextFactory requestContextFactory,
StringLogger msgLog, Config config, Slaves slaves, AbstractTransactionManager tm,
JobScheduler scheduler
)
{
super( stateMachine, delegate );
this.xaDsm = xaDsm;
this.master = master;
this.requestContextFactory = requestContextFactory;
this.msgLog = msgLog;
this.config = config;
this.slaves = slaves;
this.tm = tm;
this.scheduler = scheduler;
}
@Override
protected TxIdGenerator getMasterImpl()
{
// Master side: pushes committed transactions out to slaves via the CommitPusher.
return new MasterTxIdGenerator( MasterTxIdGenerator.from( config ), msgLog, slaves, new CommitPusher( scheduler ) );
}
@Override
protected TxIdGenerator getSlaveImpl( URI serverHaUri )
{
// NOTE(review): master.cement() presumably pins the current master delegate so the
// generator keeps talking to the same master -- confirm against DelegateInvocationHandler.
return new SlaveTxIdGenerator( config.get( ClusterSettings.server_id ), master.cement(),
HighAvailabilityModeSwitcher.getServerId( serverHaUri ), requestContextFactory, xaDsm, tm);
}
}
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_transaction_TxIdGeneratorModeSwitcher.java
|
4,845
|
/**
 * Swaps the active {@link RemoteTxHook} implementation when this HA member's role
 * changes: masters use a local no-op-style hook, slaves use a hook that initializes
 * transactions remotely on the master.
 */
public class TxHookModeSwitcher extends AbstractModeSwitcher<RemoteTxHook>
{
private final DelegateInvocationHandler<Master> master;
private final RequestContextFactoryResolver requestContextFactory;
private final StringLogger log;
private final DependencyResolver resolver;
public TxHookModeSwitcher( HighAvailabilityMemberStateMachine stateMachine,
DelegateInvocationHandler<RemoteTxHook> delegate,
DelegateInvocationHandler<Master> master,
RequestContextFactoryResolver requestContextFactory, StringLogger log,
DependencyResolver resolver )
{
super( stateMachine, delegate );
this.master = master;
this.requestContextFactory = requestContextFactory;
this.log = log;
this.resolver = resolver;
}
@Override
protected RemoteTxHook getMasterImpl()
{
return new MasterTxHook();
}
@Override
protected RemoteTxHook getSlaveImpl( URI serverHaUri )
{
// NOTE(review): master.cement() presumably pins the current master delegate -- confirm.
return new SlaveTxHook( master.cement(), resolver.resolveDependency( HaXaDataSourceManager.class ),
requestContextFactory, log );
}
// Indirection that defers resolving the RequestContextFactory until it is needed.
public interface RequestContextFactoryResolver
{
RequestContextFactory get();
}
}
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_transaction_TxHookModeSwitcher.java
|
4,846
|
/**
 * JMX bean implementation exposing information about branched stores (stores set
 * aside after an HA branch). Tracks the current store directory by listening for
 * (un)registration of the neo store data source.
 */
private static class BranchedStoreImpl extends Neo4jMBean implements
        BranchedStore
{
    // Current store directory, or null when no neo store data source is registered.
    private File storePath;

    protected BranchedStoreImpl( final ManagementData management )
            throws NotCompliantMBeanException
    {
        super( management );
        registerStorePathListener( management );
    }

    protected BranchedStoreImpl( final ManagementData management, boolean isMXBean )
    {
        super( management, isMXBean );
        registerStorePathListener( management );
    }

    /**
     * Keeps {@link #storePath} in sync with the registered neo store data source.
     * Extracted because both constructors previously duplicated this listener inline.
     */
    private void registerStorePathListener( final ManagementData management )
    {
        XaDataSourceManager xadsm = management.getKernelData().graphDatabase().getDependencyResolver()
                .resolveDependency( XaDataSourceManager.class );
        xadsm.addDataSourceRegistrationListener( new DataSourceRegistrationListener()
        {
            @Override
            public void registeredDataSource( XaDataSource ds )
            {
                if ( ds instanceof NeoStoreXaDataSource )
                {
                    storePath = extractStorePath( management );
                }
            }

            @Override
            public void unregisteredDataSource( XaDataSource ds )
            {
                if ( ds instanceof NeoStoreXaDataSource )
                {
                    storePath = null;
                }
            }
        } );
    }

    /**
     * Resolves the store directory of the registered neo store data source,
     * preferring the canonical form but falling back to the absolute path if
     * canonicalization fails.
     */
    private File extractStorePath( ManagementData management )
    {
        NeoStoreXaDataSource nioneodb = management.getKernelData().graphDatabase().getDependencyResolver()
                .resolveDependency( XaDataSourceManager.class ).getNeoStoreDataSource();
        File path;
        try
        {
            path = new File( nioneodb.getStoreDir() ).getCanonicalFile().getAbsoluteFile();
        }
        catch ( IOException e )
        {
            path = new File( nioneodb.getStoreDir() ).getAbsoluteFile();
        }
        return path;
    }

    @Override
    public BranchedStoreInfo[] getBranchedStores()
    {
        if ( storePath == null )
        {
            return new BranchedStoreInfo[0];
        }
        File[] branchDirectories = BranchedDataPolicy.getBranchedDataRootDirectory( storePath ).listFiles();
        if ( branchDirectories == null )
        {
            // listFiles() returns null when the branched-data root does not exist
            // (no branching has happened yet); the original would have thrown NPE here.
            return new BranchedStoreInfo[0];
        }
        List<BranchedStoreInfo> toReturn = new LinkedList<BranchedStoreInfo>();
        for ( File branchDirectory : branchDirectories )
        {
            if ( !branchDirectory.isDirectory() )
            {
                continue;
            }
            toReturn.add( parseBranchedStore( branchDirectory ) );
        }
        return toReturn.toArray( new BranchedStoreInfo[toReturn.size()] );
    }

    /**
     * Reads the last committed tx id out of the branched neostore file; the directory
     * name is the branching timestamp (epoch millis).
     */
    private BranchedStoreInfo parseBranchedStore( File branchDirectory )
    {
        File neostoreFile = new File( branchDirectory, NeoStore.DEFAULT_NAME );
        long txId = NeoStore.getTxId( new DefaultFileSystemAbstraction(), neostoreFile );
        long timestamp = Long.parseLong( branchDirectory.getName() );
        return new BranchedStoreInfo( branchDirectory.getName(), txId, timestamp );
    }
}
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_management_BranchedStoreBean.java
|
4,847
|
public class SlaveLockManager implements LockManager
{
private final RequestContextFactory requestContextFactory;
private final LockManagerImpl local;
private final Master master;
private final HaXaDataSourceManager xaDsm;
private final AbstractTransactionManager txManager;
private final RemoteTxHook txHook;
private final AvailabilityGuard availabilityGuard;
private final Configuration config;
public static interface Configuration
{
long getAvailabilityTimeout();
}
public SlaveLockManager( RagManager ragManager, RequestContextFactory requestContextFactory, Master master,
HaXaDataSourceManager xaDsm, AbstractTransactionManager txManager, RemoteTxHook txHook,
AvailabilityGuard availabilityGuard, Configuration config )
{
this.requestContextFactory = requestContextFactory;
this.xaDsm = xaDsm;
this.txManager = txManager;
this.txHook = txHook;
this.availabilityGuard = availabilityGuard;
this.config = config;
this.local = new LockManagerImpl( ragManager );
this.master = master;
}
@Override
public long getDetectedDeadlockCount()
{
return local.getDetectedDeadlockCount();
}
@Override
public void getReadLock( Object resource, Transaction tx ) throws DeadlockDetectedException, IllegalResourceException
{
if ( getReadLockOnMaster( resource ) )
{
if ( !local.tryReadLock( resource, tx ) )
{
throw new LocalDeadlockDetectedException( local, tx, resource, READ );
}
}
}
private boolean getReadLockOnMaster( Object resource )
{
Response<LockResult> response;
if ( resource instanceof Node )
{
makeSureTxHasBeenInitialized();
response = master.acquireNodeReadLock( requestContextFactory.newRequestContext(),
((Node) resource).getId() );
}
else if ( resource instanceof Relationship )
{
makeSureTxHasBeenInitialized();
response = master.acquireRelationshipReadLock( requestContextFactory.newRequestContext(),
((Relationship) resource).getId() );
}
else if ( resource instanceof GraphProperties )
{
makeSureTxHasBeenInitialized();
response = master.acquireGraphReadLock( requestContextFactory.newRequestContext() );
}
else if ( resource instanceof IndexLock )
{
makeSureTxHasBeenInitialized();
IndexLock indexLock = (IndexLock) resource;
response = master.acquireIndexReadLock( requestContextFactory.newRequestContext(), indexLock.getIndex(),
indexLock.getKey() );
}
else
{
return true;
}
return receiveLockResponse( response );
}
private boolean receiveLockResponse( Response<LockResult> response )
{
LockResult result = xaDsm.applyTransactions( response );
switch ( result.getStatus() )
{
case DEAD_LOCKED:
throw new DeadlockDetectedException( result.getDeadlockMessage() );
case NOT_LOCKED:
throw new UnsupportedOperationException();
case OK_LOCKED:
break;
default:
throw new UnsupportedOperationException( result.toString() );
}
return true;
}
@Override
public void getWriteLock( Object resource, Transaction tx ) throws DeadlockDetectedException, IllegalResourceException
{
if ( getWriteLockOnMaster( resource ) )
{
if ( !local.tryWriteLock( resource, tx ) )
{
throw new LocalDeadlockDetectedException( local, tx, resource, WRITE );
}
}
}
@Override
public boolean tryReadLock( Object resource, Transaction tx ) throws LockNotFoundException,
IllegalResourceException
{
throw newUnsupportedDirectTryLockUsageException();
}
@Override
public boolean tryWriteLock( Object resource, Transaction tx ) throws LockNotFoundException,
IllegalResourceException
{
throw newUnsupportedDirectTryLockUsageException();
}
private UnsupportedOperationException newUnsupportedDirectTryLockUsageException()
{
return new UnsupportedOperationException( "At the time of adding \"try lock\" semantics there was no usage of " +
getClass().getSimpleName() + " calling it directly. It was designed to be called on a local " +
LockManager.class.getSimpleName() + " delegated to from within the waiting version" );
}
private boolean getWriteLockOnMaster( Object resource )
{
Response<LockResult> response;
if ( resource instanceof Node )
{
makeSureTxHasBeenInitialized();
response = master.acquireNodeWriteLock( requestContextFactory.newRequestContext(),
((Node) resource).getId() );
}
else if ( resource instanceof Relationship )
{
makeSureTxHasBeenInitialized();
response = master.acquireRelationshipWriteLock( requestContextFactory.newRequestContext(),
((Relationship) resource).getId() );
}
else if ( resource instanceof GraphProperties )
{
makeSureTxHasBeenInitialized();
response = master.acquireGraphWriteLock( requestContextFactory.newRequestContext() );
}
else if ( resource instanceof IndexEntryLock )
{
makeSureTxHasBeenInitialized();
IndexEntryLock lock = (IndexEntryLock) resource;
response = master.acquireIndexEntryWriteLock( requestContextFactory.newRequestContext(),
lock.labelId(), lock.propertyKeyId(), lock.propertyValue() );
}
else if ( resource instanceof IndexLock )
{
makeSureTxHasBeenInitialized();
IndexLock indexLock = (IndexLock) resource;
response = master.acquireIndexWriteLock( requestContextFactory.newRequestContext(), indexLock.getIndex(),
indexLock.getKey() );
}
else
{
throw new IllegalArgumentException("Don't know how to take lock on resource: '" + resource + "'.");
}
return receiveLockResponse( response );
}
@Override
public void releaseReadLock( Object resource, Transaction tx ) throws LockNotFoundException,
IllegalResourceException
{
local.releaseReadLock( resource, tx );
}
@Override
public void releaseWriteLock( Object resource, Transaction tx ) throws LockNotFoundException,
IllegalResourceException
{
local.releaseWriteLock( resource, tx );
}
@Override
public void dumpLocksOnResource( Object resource, Logging logging )
{
local.dumpLocksOnResource( resource, logging );
}
@Override
public List<LockInfo> getAllLocks()
{
return local.getAllLocks();
}
@Override
public List<LockInfo> getAwaitedLocks( long minWaitTime )
{
return local.getAwaitedLocks( minWaitTime );
}
@Override
public void dumpRagStack( Logging logging )
{
local.dumpRagStack( logging );
}
@Override
public void dumpAllLocks( Logging logging )
{
local.dumpAllLocks( logging );
}
private void makeSureTxHasBeenInitialized()
{
if ( !availabilityGuard.isAvailable( config.getAvailabilityTimeout() ) )
{
// TODO Specific exception instead?
throw new RuntimeException( "Timed out waiting for database to allow operations to proceed. "
+ availabilityGuard.describeWhoIsBlocking() );
}
txHook.remotelyInitializeTransaction( txManager.getEventIdentifier(), txManager.getTransactionState() );
}
}
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_lock_SlaveLockManager.java
|
4,848
|
/**
 * Factory methods for {@link SlavePriority} strategies that decide in which order
 * slaves are visited, e.g. when pushing committed transactions.
 */
public abstract class SlavePriorities
{
    // Purely a factory.
    private SlavePriorities()
    {
    }

    /**
     * @return {@link SlavePriority} which returns the slaves in the order that
     * they are given in the {@code slaves} array.
     */
    public static SlavePriority givenOrder()
    {
        return new SlavePriority()
        {
            @Override
            public Iterable<Slave> prioritize( Iterable<Slave> slaves )
            {
                return slaves;
            }
        };
    }

    /**
     * @return {@link SlavePriority} which returns the slaves in a round robin
     * fashion, more precisely the start index in the array increments with
     * each iteration, ordered by server id ascending.
     */
    public static SlavePriority roundRobin()
    {
        return new SlavePriority()
        {
            final AtomicInteger index = new AtomicInteger();

            @Override
            public Iterable<Slave> prioritize( final Iterable<Slave> slaves )
            {
                final List<Slave> slaveList = sortSlaves( slaves, true );
                final int size = slaveList.size();
                if ( size == 0 )
                {
                    // Guard: the modulo arithmetic below would divide by zero.
                    return slaveList;
                }
                return new Iterable<Slave>()
                {
                    @Override
                    public Iterator<Slave> iterator()
                    {
                        return new PrefetchingIterator<Slave>()
                        {
                            // Mask the sign bit so a wrapped-around counter can never
                            // produce a negative start index.
                            private final int start = ( index.getAndIncrement() & Integer.MAX_VALUE ) % size;
                            private int count;

                            @Override
                            protected Slave fetchNextOrNull()
                            {
                                int id = count++;
                                // Strict '<': the original '<=' yielded size+1 elements,
                                // visiting the first slave twice per round.
                                return id < size ? slaveList.get( (start + id) % size ) : null;
                            }
                        };
                    }
                };
            }
        };
    }

    /**
     * @return {@link SlavePriority} which returns the slaves in the same fixed order
     * sorted by server id in descending order.
     */
    public static SlavePriority fixed()
    {
        return new SlavePriority()
        {
            @Override
            public Iterable<Slave> prioritize( final Iterable<Slave> slaves )
            {
                return sortSlaves( slaves, false );
            }
        };
    }

    // Copies the slaves into a list sorted by server id, ascending or descending.
    private static List<Slave> sortSlaves( final Iterable<Slave> slaves, boolean asc )
    {
        ArrayList<Slave> slaveList = Iterables.addAll( new ArrayList<Slave>(), slaves );
        Collections.sort( slaveList, asc ? SERVER_ID_COMPARATOR : REVERSE_SERVER_ID_COMPARATOR );
        return slaveList;
    }

    private static final Comparator<Slave> SERVER_ID_COMPARATOR = new Comparator<Slave>()
    {
        @Override
        public int compare( Slave first, Slave second )
        {
            // Integer.compare avoids the overflow pitfall of subtracting ids.
            return Integer.compare( first.getServerId(), second.getServerId() );
        }
    };

    private static final Comparator<Slave> REVERSE_SERVER_ID_COMPARATOR = reverseOrder( SERVER_ID_COMPARATOR );
}
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_com_master_SlavePriorities.java
|
4,849
|
/**
 * Serializes a lock-acquisition request for a set of entities.
 * Wire format: an int count followed by that many long entity ids.
 */
protected static class AcquireLockSerializer implements Serializer
{
// Ids of the entities (e.g. nodes or relationships) to lock.
private final long[] entities;
AcquireLockSerializer( long... entities )
{
this.entities = entities;
}
public void write( ChannelBuffer buffer ) throws IOException
{
buffer.writeInt( entities.length );
for ( long entity : entities )
{
buffer.writeLong( entity );
}
}
}
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_com_slave_MasterClient18.java
|
4,850
|
{
@Override
public HandshakeResult read( ChannelBuffer buffer, ByteBuffer temporaryBuffer ) throws
IOException
{
return new HandshakeResult( buffer.readInt(), buffer.readLong(), -1 );
}
}, storeId
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_com_slave_MasterClient18.java
|
4,851
|
{
public void write( ChannelBuffer buffer ) throws IOException
{
buffer.writeLong( txId );
}
}, new Deserializer<HandshakeResult>()
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_com_slave_MasterClient18.java
|
4,852
|
{
public void write( ChannelBuffer buffer ) throws IOException
{
buffer.writeByte( success ? 1 : 0 );
}
}, VOID_DESERIALIZER );
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_com_slave_MasterClient18.java
|
4,853
|
{
@Override
@SuppressWarnings("boxing")
public Long read( ChannelBuffer buffer, ByteBuffer temporaryBuffer ) throws IOException
{
return buffer.readLong();
}
}
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_com_slave_MasterClient18.java
|
4,854
|
{
public void write( ChannelBuffer buffer ) throws IOException
{
writeString( buffer, resource );
BlockLogBuffer blockLogBuffer = new BlockLogBuffer( buffer, monitor );
txGetter.extract( blockLogBuffer );
blockLogBuffer.done();
}
}, new Deserializer<Long>()
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_com_slave_MasterClient18.java
|
4,855
|
{
@Override
@SuppressWarnings("boxing")
public Integer read( ChannelBuffer buffer, ByteBuffer temporaryBuffer ) throws IOException
{
return buffer.readInt();
}
}
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_com_slave_MasterClient18.java
|
4,856
|
{
public void write( ChannelBuffer buffer ) throws IOException
{
writeString( buffer, name );
}
}, new Deserializer<Integer>()
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_com_slave_MasterClient18.java
|
4,857
|
{
@Override
public IdAllocation read( ChannelBuffer buffer, ByteBuffer temporaryBuffer ) throws IOException
{
return readIdAllocation( buffer );
}
}
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_com_slave_MasterClient18.java
|
4,858
|
{
@Override
public void write( ChannelBuffer buffer ) throws IOException
{
writeString( buffer, resourceName );
buffer.writeLong( tx );
}
}, VOID_DESERIALIZER );
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_com_slave_MasterClient18.java
|
4,859
|
{
public void write( ChannelBuffer buffer )
throws IOException
{
writeString( buffer, ds );
buffer.writeLong( startTxId );
buffer.writeLong( endTxId );
}
}, VOID_DESERIALIZER );
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_com_slave_MasterClient18.java
|
4,860
|
{
public void write( ChannelBuffer buffer ) throws IOException
{
buffer.writeByte( idType.ordinal() );
}
}, new Deserializer<IdAllocation>()
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_com_slave_MasterClient18.java
|
4,861
|
/**
 * Client side of the HA master communication protocol, version 1.8 (application
 * protocol version {@value #PROTOCOL_VERSION}). Each {@link Master} method is
 * implemented by serializing the request onto a {@link ChannelBuffer}, sending it
 * as a {@link HaRequestType18} request via {@code sendRequest}, and deserializing
 * the master's reply. Methods that are only ever invoked on the master side throw
 * {@link UnsupportedOperationException}.
 */
public class MasterClient18 extends Client<Master> implements MasterClient
{
    /* Version 1 first version
     * Version 2 since 2012-01-24
     * Version 3 since 2012-02-16
     * Version 4 since 2012-07-05 */
    public static final byte PROTOCOL_VERSION = 4;
    // Separate (typically shorter) timeout applied to lock-acquisition requests.
    private final long lockReadTimeout;
    // Counts bytes streamed to the master when committing transactions.
    private final ByteCounterMonitor monitor;
    public MasterClient18( String hostNameOrIp, int port, Logging logging, Monitors monitors, StoreId storeId,
                           long readTimeoutSeconds, long lockReadTimeout, int maxConcurrentChannels, int chunkSize )
    {
        super( hostNameOrIp, port, logging, monitors, storeId, MasterServer.FRAME_LENGTH, PROTOCOL_VERSION,
                readTimeoutSeconds, maxConcurrentChannels, chunkSize );
        this.lockReadTimeout = lockReadTimeout;
        this.monitor = monitors.newMonitor( ByteCounterMonitor.class, getClass() );
    }
    /**
     * Chooses the read timeout per request type: lock requests use the dedicated
     * lock timeout, store copies get double the normal timeout, everything else
     * uses the default.
     */
    @Override
    protected long getReadTimeout( RequestType<Master> type, long readTimeout )
    {
        HaRequestType18 specificType = (HaRequestType18) type;
        if ( specificType.isLock() )
        {
            return lockReadTimeout;
        }
        if ( specificType == HaRequestType18.COPY_STORE )
        {
            return readTimeout * 2;
        }
        return readTimeout;
    }
    // Store-id verification is skipped for COPY_STORE, since that request is the
    // very mechanism for obtaining the store in the first place.
    @Override
    protected boolean shouldCheckStoreId( RequestType<Master> type )
    {
        return type != HaRequestType18.COPY_STORE;
    }
    /** Requests a fresh id range of the given {@link IdType} from the master. */
    @Override
    public Response<IdAllocation> allocateIds( RequestContext context, final IdType idType )
    {
        return sendRequest( HaRequestType18.ALLOCATE_IDS, context, new Serializer()
        {
            public void write( ChannelBuffer buffer ) throws IOException
            {
                // Wire format: a single byte identifying the id type.
                buffer.writeByte( idType.ordinal() );
            }
        }, new Deserializer<IdAllocation>()
        {
            @Override
            public IdAllocation read( ChannelBuffer buffer, ByteBuffer temporaryBuffer ) throws IOException
            {
                return readIdAllocation( buffer );
            }
        }
        );
    }
    /** Creates a relationship type on the master, returning its token id. */
    @Override
    public Response<Integer> createRelationshipType( RequestContext context, final String name )
    {
        return sendRequest( HaRequestType18.CREATE_RELATIONSHIP_TYPE, context, new Serializer()
        {
            public void write( ChannelBuffer buffer ) throws IOException
            {
                writeString( buffer, name );
            }
        }, new Deserializer<Integer>()
        {
            @Override
            @SuppressWarnings("boxing")
            public Integer read( ChannelBuffer buffer, ByteBuffer temporaryBuffer ) throws IOException
            {
                return buffer.readInt();
            }
        }
        );
    }
    @Override
    public Response<Integer> createPropertyKey( RequestContext context, String name )
    {
        throw new UnsupportedOperationException( "Should never be called from the client side" );
    }
    @Override
    public Response<Integer> createLabel( RequestContext context, String name )
    {
        throw new UnsupportedOperationException( "Should never be called from the client side" );
    }
    /** Registers the slave transaction with the master before any lock/commit work. */
    @Override
    public Response<Void> initializeTx( RequestContext context )
    {
        return sendRequest( HaRequestType18.INITIALIZE_TX, context, EMPTY_SERIALIZER, VOID_DESERIALIZER );
    }
    // --- Lock acquisition: all variants serialize the entity ids and deserialize a LockResult. ---
    @Override
    public Response<LockResult> acquireNodeWriteLock( RequestContext context, long... nodes )
    {
        return sendRequest( HaRequestType18.ACQUIRE_NODE_WRITE_LOCK, context,
                new AcquireLockSerializer( nodes ), LOCK_RESULT_DESERIALIZER );
    }
    @Override
    public Response<LockResult> acquireNodeReadLock( RequestContext context, long... nodes )
    {
        return sendRequest( HaRequestType18.ACQUIRE_NODE_READ_LOCK, context,
                new AcquireLockSerializer( nodes ), LOCK_RESULT_DESERIALIZER );
    }
    @Override
    public Response<LockResult> acquireRelationshipWriteLock( RequestContext context,
                                                              long... relationships )
    {
        return sendRequest( HaRequestType18.ACQUIRE_RELATIONSHIP_WRITE_LOCK, context,
                new AcquireLockSerializer( relationships ), LOCK_RESULT_DESERIALIZER );
    }
    @Override
    public Response<LockResult> acquireRelationshipReadLock( RequestContext context,
                                                             long... relationships )
    {
        return sendRequest( HaRequestType18.ACQUIRE_RELATIONSHIP_READ_LOCK, context,
                new AcquireLockSerializer( relationships ), LOCK_RESULT_DESERIALIZER );
    }
    @Override
    public Response<LockResult> acquireGraphWriteLock( RequestContext context )
    {
        return sendRequest( HaRequestType18.ACQUIRE_GRAPH_WRITE_LOCK, context,
                EMPTY_SERIALIZER, LOCK_RESULT_DESERIALIZER );
    }
    @Override
    public Response<LockResult> acquireGraphReadLock( RequestContext context )
    {
        return sendRequest( HaRequestType18.ACQUIRE_GRAPH_READ_LOCK, context,
                EMPTY_SERIALIZER, LOCK_RESULT_DESERIALIZER );
    }
    @Override
    public Response<LockResult> acquireIndexReadLock( RequestContext context, String index, String key )
    {
        return sendRequest( HaRequestType18.ACQUIRE_INDEX_READ_LOCK, context,
                new AcquireIndexLockSerializer( index, key ), LOCK_RESULT_DESERIALIZER );
    }
    @Override
    public Response<LockResult> acquireIndexWriteLock( RequestContext context, String index, String key )
    {
        return sendRequest( HaRequestType18.ACQUIRE_INDEX_WRITE_LOCK, context,
                new AcquireIndexLockSerializer( index, key ), LOCK_RESULT_DESERIALIZER );
    }
    // Schema/index-entry locks did not exist in protocol 1.8 — master-side only.
    @Override
    public Response<LockResult> acquireSchemaReadLock( RequestContext context )
    {
        throw new UnsupportedOperationException( "Should never be called from the client side" );
    }
    @Override
    public Response<LockResult> acquireSchemaWriteLock( RequestContext context )
    {
        throw new UnsupportedOperationException( "Should never be called from the client side" );
    }
    @Override
    public Response<LockResult> acquireIndexEntryWriteLock( RequestContext context, long labelId, long propertyKeyId,
                                                            String propertyValue )
    {
        throw new UnsupportedOperationException( "Should never be called from the client side" );
    }
    /**
     * Streams a transaction for the named resource to the master for commit and
     * returns the transaction id the master assigned to it.
     */
    @Override
    public Response<Long> commitSingleResourceTransaction( RequestContext context,
                                                           final String resource, final TxExtractor txGetter )
    {
        return sendRequest( HaRequestType18.COMMIT, context, new Serializer()
        {
            public void write( ChannelBuffer buffer ) throws IOException
            {
                writeString( buffer, resource );
                // Transaction data is chunked through a BlockLogBuffer; done() flushes
                // the terminating block.
                BlockLogBuffer blockLogBuffer = new BlockLogBuffer( buffer, monitor );
                txGetter.extract( blockLogBuffer );
                blockLogBuffer.done();
            }
        }, new Deserializer<Long>()
        {
            @Override
            @SuppressWarnings("boxing")
            public Long read( ChannelBuffer buffer, ByteBuffer temporaryBuffer ) throws IOException
            {
                return buffer.readLong();
            }
        }
        );
    }
    /**
     * Finishes (commits or rolls back) the transaction on the master.
     * A {@link TransactionAlreadyActiveException} during an unsuccessful finish is
     * swallowed on purpose — see the inline comment for the rationale.
     */
    @Override
    public Response<Void> finishTransaction( RequestContext context, final boolean success )
    {
        try
        {
            return sendRequest( HaRequestType18.FINISH, context, new Serializer()
            {
                public void write( ChannelBuffer buffer ) throws IOException
                {
                    buffer.writeByte( success ? 1 : 0 );
                }
            }, VOID_DESERIALIZER );
        }
        catch ( TransactionAlreadyActiveException e )
        {
            if ( !success )
            {
                /* Here we are in a state where the client failed while the request
                 * was processing on the server and the tx.finish() in the usual
                 * try-finally transaction block gets called, only to find that
                 * the transaction is already active... which is totally expected.
                 * The fact that the transaction is already active here shouldn't
                 * hide the original exception on the client, the exception which
                 * cause the client to fail while the request was processing on the master.
                 * This is effectively the use case of awaiting a lock that isn't granted
                 * within the lock read timeout period.
                 */
                return new Response<>( null, getStoreId(), TransactionStream.EMPTY, ResourceReleaser.NO_OP );
            }
            throw e;
        }
    }
    @Override
    public void rollbackOngoingTransactions( RequestContext context )
    {
        throw new UnsupportedOperationException( "Should never be called from the client side" );
    }
    /** Pulls committed transactions from the master into this slave. */
    @Override
    public Response<Void> pullUpdates( RequestContext context )
    {
        return sendRequest( HaRequestType18.PULL_UPDATES, context, EMPTY_SERIALIZER, VOID_DESERIALIZER );
    }
    /**
     * Performs the initial handshake for the given last-committed tx id. The 1.8
     * wire format has no epoch, so -1 is used as a placeholder in the result.
     */
    @Override
    public Response<HandshakeResult> handshake( final long txId, StoreId storeId )
    {
        return sendRequest( HaRequestType18.HANDSHAKE, RequestContext.EMPTY, new Serializer()
        {
            public void write( ChannelBuffer buffer ) throws IOException
            {
                buffer.writeLong( txId );
            }
        }, new Deserializer<HandshakeResult>()
        {
            @Override
            public HandshakeResult read( ChannelBuffer buffer, ByteBuffer temporaryBuffer ) throws
                    IOException
            {
                return new HandshakeResult( buffer.readInt(), buffer.readLong(), -1 );
            }
        }, storeId
        );
    }
    /** Streams a full copy of the master's store files into the given writer. */
    @Override
    public Response<Void> copyStore( RequestContext context, final StoreWriter writer )
    {
        context = stripFromTransactions( context );
        return sendRequest( HaRequestType18.COPY_STORE, context, EMPTY_SERIALIZER,
                new Protocol.FileStreamsDeserializer( writer ) );
    }
    // Returns a copy of the context with its transaction list cleared, for requests
    // where transaction state must not be piggy-backed.
    private RequestContext stripFromTransactions( RequestContext context )
    {
        return new RequestContext( context.getEpoch(), context.machineId(), context.getEventIdentifier(),
                new RequestContext.Tx[0], context.getMasterId(), context.getChecksum() );
    }
    /** Copies transactions [startTxId, endTxId] for the given data source from the master. */
    @Override
    public Response<Void> copyTransactions( RequestContext context,
                                            final String ds, final long startTxId, final long endTxId )
    {
        context = stripFromTransactions( context );
        return sendRequest( HaRequestType18.COPY_TRANSACTIONS, context, new Serializer()
        {
            public void write( ChannelBuffer buffer )
                    throws IOException
            {
                writeString( buffer, ds );
                buffer.writeLong( startTxId );
                buffer.writeLong( endTxId );
            }
        }, VOID_DESERIALIZER );
    }
    /** Pushes a single already-committed transaction for the named resource. */
    @Override
    public Response<Void> pushTransaction( RequestContext context, final String resourceName, final long tx )
    {
        context = stripFromTransactions( context );
        return sendRequest( HaRequestType18.PUSH_TRANSACTION, context, new Serializer()
        {
            @Override
            public void write( ChannelBuffer buffer ) throws IOException
            {
                writeString( buffer, resourceName );
                buffer.writeLong( tx );
            }
        }, VOID_DESERIALIZER );
    }
    /**
     * Reads an {@link IdAllocation} from the wire. Format: int count of defragged
     * ids, that many longs, then range start (long), range length (int),
     * highest id in use (long) and defrag count (long).
     */
    protected static IdAllocation readIdAllocation( ChannelBuffer buffer )
    {
        int numberOfDefragIds = buffer.readInt();
        long[] defragIds = new long[numberOfDefragIds];
        for ( int i = 0; i < numberOfDefragIds; i++ )
        {
            defragIds[i] = buffer.readLong();
        }
        long rangeStart = buffer.readLong();
        int rangeLength = buffer.readInt();
        long highId = buffer.readLong();
        long defragCount = buffer.readLong();
        return new IdAllocation( new IdRange( defragIds, rangeStart, rangeLength ),
                highId, defragCount );
    }
    /** Serializes a set of entity ids for a lock request: int count, then that many longs. */
    protected static class AcquireLockSerializer implements Serializer
    {
        private final long[] entities;
        AcquireLockSerializer( long... entities )
        {
            this.entities = entities;
        }
        public void write( ChannelBuffer buffer ) throws IOException
        {
            buffer.writeInt( entities.length );
            for ( long entity : entities )
            {
                buffer.writeLong( entity );
            }
        }
    }
    /** Serializes an index name and key for an index lock request. */
    protected static class AcquireIndexLockSerializer implements Serializer
    {
        private final String index;
        private final String key;
        AcquireIndexLockSerializer( String index, String key )
        {
            this.index = index;
            this.key = key;
        }
        @Override
        public void write( ChannelBuffer buffer ) throws IOException
        {
            writeString( buffer, index );
            writeString( buffer, key );
        }
    }
    /**
     * Master-side counterpart of {@link AcquireLockSerializer}: decodes the entity
     * ids from the request and delegates to the subclass-specific lock call.
     */
    public static abstract class AquireLockCall implements TargetCaller<Master, LockResult>
    {
        @Override
        public Response<LockResult> call( Master master, RequestContext context,
                                          ChannelBuffer input, ChannelBuffer target )
        {
            long[] ids = new long[input.readInt()];
            for ( int i = 0; i < ids.length; i++ )
            {
                ids[i] = input.readLong();
            }
            return lock( master, context, ids );
        }
        protected abstract Response<LockResult> lock( Master master, RequestContext context, long... ids );
    }
}
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_com_slave_MasterClient18.java
|
4,862
|
{
public LockResult read( ChannelBuffer buffer, ByteBuffer temporaryBuffer ) throws IOException
{
LockStatus status = LockStatus.values()[buffer.readByte()];
return status.hasMessage() ? new LockResult( readString( buffer ) ) : new LockResult( status );
}
};
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_com_slave_MasterClient.java
|
4,863
|
{
public void write( LockResult responseObject, ChannelBuffer result ) throws IOException
{
result.writeByte( responseObject.getStatus().ordinal() );
if ( responseObject.getStatus().hasMessage() )
{
writeString( result, responseObject.getDeadlockMessage() );
}
}
};
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_com_slave_MasterClient.java
|
4,864
|
{
public int compare( Slave first, Slave second )
{
return first.getServerId() - second.getServerId();
}
};
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_com_master_SlavePriorities.java
|
4,865
|
{
@Override
public Iterable<Slave> prioritize( final Iterable<Slave> slaves )
{
return sortSlaves( slaves, false );
}
};
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_com_master_SlavePriorities.java
|
4,866
|
{
private int start = index.getAndIncrement()%slaveList.size();
private int count;
@Override
protected Slave fetchNextOrNull()
{
int id = count++;
return id <= slaveList.size() ? slaveList.get( (start+id)%slaveList.size() ) : null;
}
};
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_com_master_SlavePriorities.java
|
4,867
|
{
@Override
public Iterator<Slave> iterator()
{
return new PrefetchingIterator<Slave>()
{
private int start = index.getAndIncrement()%slaveList.size();
private int count;
@Override
protected Slave fetchNextOrNull()
{
int id = count++;
return id <= slaveList.size() ? slaveList.get( (start+id)%slaveList.size() ) : null;
}
};
}
};
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_com_master_SlavePriorities.java
|
4,868
|
{
final AtomicInteger index = new AtomicInteger();
@Override
public Iterable<Slave> prioritize( final Iterable<Slave> slaves )
{
final List<Slave> slaveList = sortSlaves( slaves, true );
return new Iterable<Slave>()
{
@Override
public Iterator<Slave> iterator()
{
return new PrefetchingIterator<Slave>()
{
private int start = index.getAndIncrement()%slaveList.size();
private int count;
@Override
protected Slave fetchNextOrNull()
{
int id = count++;
return id <= slaveList.size() ? slaveList.get( (start+id)%slaveList.size() ) : null;
}
};
}
};
}
};
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_com_master_SlavePriorities.java
|
4,869
|
/**
 * Serializes the identifying pair for an index lock request: first the index
 * name, then the key, both as length-prefixed strings.
 */
protected static class AcquireIndexLockSerializer implements Serializer
{
    private final String indexName;
    private final String lockKey;
    AcquireIndexLockSerializer( String indexName, String lockKey )
    {
        this.indexName = indexName;
        this.lockKey = lockKey;
    }
    @Override
    public void write( ChannelBuffer buffer ) throws IOException
    {
        // Order matters on the wire: index name before key.
        writeString( buffer, indexName );
        writeString( buffer, lockKey );
    }
}
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_com_slave_MasterClient18.java
|
4,870
|
/**
 * Decodes a lock request from the wire (an int count followed by that many long
 * entity ids) and hands the ids to the subclass-specific {@link #lock} call.
 */
public static abstract class AquireLockCall implements TargetCaller<Master, LockResult>
{
    @Override
    public Response<LockResult> call( Master master, RequestContext context,
                                      ChannelBuffer input, ChannelBuffer target )
    {
        int count = input.readInt();
        long[] entityIds = new long[count];
        for ( int slot = 0; slot < count; slot++ )
        {
            entityIds[slot] = input.readLong();
        }
        return lock( master, context, entityIds );
    }
    /** Performs the actual lock acquisition on the master for the decoded ids. */
    protected abstract Response<LockResult> lock( Master master, RequestContext context, long... ids );
}
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_com_slave_MasterClient18.java
|
4,871
|
public class LockableRelationship implements Relationship
{
private final long id;
public LockableRelationship( long id )
{
this.id = id;
}
public void delete()
{
throw new UnsupportedOperationException( "Lockable rel" );
}
public Node getEndNode()
{
throw new UnsupportedOperationException( "Lockable rel" );
}
public long getId()
{
return this.id;
}
public GraphDatabaseService getGraphDatabase()
{
throw new UnsupportedOperationException( "Lockable rel" );
}
public Node[] getNodes()
{
throw new UnsupportedOperationException( "Lockable rel" );
}
public Node getOtherNode( Node node )
{
throw new UnsupportedOperationException( "Lockable rel" );
}
public Object getProperty( String key )
{
throw new UnsupportedOperationException( "Lockable rel" );
}
public Object getProperty( String key, Object defaultValue )
{
throw new UnsupportedOperationException( "Lockable rel" );
}
public Iterable<String> getPropertyKeys()
{
throw new UnsupportedOperationException( "Lockable rel" );
}
public Node getStartNode()
{
throw new UnsupportedOperationException( "Lockable rel" );
}
public RelationshipType getType()
{
throw new UnsupportedOperationException( "Lockable rel" );
}
public boolean isType( RelationshipType type )
{
throw new UnsupportedOperationException( "Lockable rel" );
}
public boolean hasProperty( String key )
{
throw new UnsupportedOperationException( "Lockable rel" );
}
public Object removeProperty( String key )
{
throw new UnsupportedOperationException( "Lockable rel" );
}
public void setProperty( String key, Object value )
{
throw new UnsupportedOperationException( "Lockable rel" );
}
public boolean equals( Object o )
{
if ( !(o instanceof Relationship) )
{
return false;
}
return this.getId() == ((Relationship) o).getId();
}
public int hashCode()
{
return (int) (( id >>> 32 ) ^ id );
}
public String toString()
{
return "Lockable relationship #" + this.getId();
}
}
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_lock_LockableRelationship.java
|
4,872
|
/**
 * Chooses which {@link MasterClient} protocol implementation to use when talking
 * to the master. Starts from the newest supported version and, on a reported
 * version mismatch, re-resolves the factory for the version the master speaks
 * (unless downgrades have been explicitly forbidden).
 */
public class MasterClientResolver implements MasterClientFactory, MismatchingVersionHandler
{
    // Factory/version currently in effect; volatile because mismatch callbacks may
    // arrive from networking threads.
    private volatile MasterClientFactory currentFactory;
    private volatile ProtocolVersionCombo currentVersion;
    // When true, a mismatch may only switch to a newer protocol, never an older one.
    private boolean downgradeForbidden = false;
    @Override
    public MasterClient instantiate( String hostNameOrIp, int port, Monitors monitors, StoreId storeId, LifeSupport life )
    {
        if ( currentFactory == null )
        {
            assignDefaultFactory();
        }
        MasterClient result = currentFactory.instantiate( hostNameOrIp, port, monitors, storeId, life );
        // Register ourselves so a protocol mismatch reported by this client can
        // trigger re-resolution of the factory.
        result.addMismatchingVersionHandler( this );
        return result;
    }
    @Override
    public void versionMismatched( int expected, int received )
    {
        // 2 is the internal (comms-layer) protocol version used by all supported combos.
        getFor( received, 2 );
    }
    /** Value object pairing an application protocol version with an internal one. */
    private static final class ProtocolVersionCombo implements Comparable<ProtocolVersionCombo>
    {
        final int applicationProtocol;
        final int internalProtocol;
        ProtocolVersionCombo( int applicationProtocol, int internalProtocol )
        {
            this.applicationProtocol = applicationProtocol;
            this.internalProtocol = internalProtocol;
        }
        @Override
        public boolean equals( Object obj )
        {
            if ( obj == null )
            {
                return false;
            }
            if ( obj.getClass() != ProtocolVersionCombo.class )
            {
                return false;
            }
            ProtocolVersionCombo other = (ProtocolVersionCombo) obj;
            return other.applicationProtocol == applicationProtocol && other.internalProtocol == internalProtocol;
        }
        @Override
        public int hashCode()
        {
            return ( 31 * applicationProtocol ) | internalProtocol;
        }
        // NOTE(review): ordering considers only applicationProtocol, so compareTo
        // is inconsistent with equals (two combos differing only in
        // internalProtocol compare as 0 but are not equal). This appears
        // deliberate for the downgrade check in getFor(), but confirm before
        // using this type in sorted collections.
        @Override
        public int compareTo( ProtocolVersionCombo o )
        {
            return ( applicationProtocol < o.applicationProtocol ? -1
                    : ( applicationProtocol == o.applicationProtocol ? 0 : 1 ) );
        }
        /* Legacy version combos:
         * static final ProtocolVersionCombo PC_153 = new ProtocolVersionCombo( MasterClient153.PROTOCOL_VERSION, 2 );
         * static final ProtocolVersionCombo PC_17 = new ProtocolVersionCombo( MasterClient17.PROTOCOL_VERSION, 2 );
         * static final ProtocolVersionCombo PC_18 = new ProtocolVersionCombo( MasterClient18.PROTOCOL_VERSION, 2 ); */
        static final ProtocolVersionCombo PC_20 = new ProtocolVersionCombo( MasterClient20.PROTOCOL_VERSION, 2 );
        static final ProtocolVersionCombo PC_201 = new ProtocolVersionCombo( MasterClient201.PROTOCOL_VERSION, 2 );
    }
    // Immutable after construction: version combo -> factory for that protocol.
    private final Map<ProtocolVersionCombo, MasterClientFactory> protocolToFactoryMapping;
    public MasterClientResolver( Logging logging, int readTimeout, int lockReadTimeout, int channels,
                                 int chunkSize )
    {
        protocolToFactoryMapping = new HashMap<ProtocolVersionCombo, MasterClientFactory>();
        /* Legacy version combos:
         * protocolToFactoryMapping.put( ProtocolVersionCombo.PC_153, new F153( logging, readTimeout, lockReadTimeout,
         * channels, chunkSize ) );
         * protocolToFactoryMapping.put( ProtocolVersionCombo.PC_17, new F17( logging, readTimeout, lockReadTimeout,
         * channels, chunkSize ) );
         * protocolToFactoryMapping.put( ProtocolVersionCombo.PC_18, new F18( logging, readTimeout, lockReadTimeout,
         * channels, chunkSize ) ); */
        protocolToFactoryMapping.put( ProtocolVersionCombo.PC_20, new F20( logging, readTimeout, lockReadTimeout,
                channels, chunkSize ) );
        protocolToFactoryMapping.put( ProtocolVersionCombo.PC_201, new F201( logging, readTimeout, lockReadTimeout,
                channels, chunkSize ) );
    }
    /**
     * Looks up the factory for the given protocol combo and possibly makes it the
     * current one; returns the candidate factory, or null if the combo is unknown.
     */
    public MasterClientFactory getFor( int applicationProtocol, int internalProtocol )
    {
        ProtocolVersionCombo incomingCombo = new ProtocolVersionCombo( applicationProtocol, internalProtocol );
        MasterClientFactory candidate = protocolToFactoryMapping.get( incomingCombo );
        /*
         * Things that can happen here regarding replacing the current factory, in order:
         * 1. We do not know the protocol - candidate is null: We don't change the current factory
         * 2. The current factory is null: We always set it to the latest requested
         * 3. We receive a version newer than the current one: Always replace the current factory
         * 4. We receive a version older than the current: Replace if downgrades are allowed, else leave as is.
         */
        if ( ( candidate != null )
                && ( currentVersion == null || !downgradeForbidden || currentVersion.compareTo( incomingCombo ) <= 0 ) )
        {
            currentFactory = candidate;
            currentVersion = incomingCombo;
        }
        return candidate;
    }
    /** Seeds the resolver with the newest supported protocol (currently 2.0.1). */
    public MasterClientFactory assignDefaultFactory()
    {
        return getFor( ProtocolVersionCombo.PC_201.applicationProtocol, ProtocolVersionCombo.PC_201.internalProtocol );
    }
    /** Base class holding the connection parameters shared by all concrete factories. */
    protected static abstract class StaticMasterClientFactory implements MasterClientFactory
    {
        protected final Logging logging;
        protected final int readTimeoutSeconds;
        protected final int lockReadTimeout;
        protected final int maxConcurrentChannels;
        protected final int chunkSize;
        StaticMasterClientFactory( Logging logging, int readTimeoutSeconds, int lockReadTimeout,
                                   int maxConcurrentChannels, int chunkSize )
        {
            this.logging = logging;
            this.readTimeoutSeconds = readTimeoutSeconds;
            this.lockReadTimeout = lockReadTimeout;
            this.maxConcurrentChannels = maxConcurrentChannels;
            this.chunkSize = chunkSize;
        }
    }
    /** Factory for protocol 2.0 clients. */
    public static final class F20 extends StaticMasterClientFactory
    {
        public F20( Logging logging, int readTimeoutSeconds, int lockReadTimeout, int maxConcurrentChannels,
                    int chunkSize )
        {
            super( logging, readTimeoutSeconds, lockReadTimeout, maxConcurrentChannels, chunkSize );
        }
        @Override
        public MasterClient instantiate( String hostNameOrIp, int port, Monitors monitors, StoreId storeId, LifeSupport life )
        {
            return life.add( new MasterClient20( hostNameOrIp, port, logging, monitors, storeId,
                    readTimeoutSeconds, lockReadTimeout, maxConcurrentChannels, chunkSize ) );
        }
    }
    /** Factory for protocol 2.0.1 clients. */
    public static final class F201 extends StaticMasterClientFactory
    {
        public F201( Logging logging, int readTimeoutSeconds, int lockReadTimeout, int maxConcurrentChannels,
                     int chunkSize )
        {
            super( logging, readTimeoutSeconds, lockReadTimeout, maxConcurrentChannels, chunkSize );
        }
        @Override
        public MasterClient instantiate( String hostNameOrIp, int port, Monitors monitors, StoreId storeId, LifeSupport life )
        {
            return life.add( new MasterClient201( hostNameOrIp, port, logging, monitors, storeId,
                    readTimeoutSeconds, lockReadTimeout, maxConcurrentChannels, chunkSize ) );
        }
    }
    /** After this call, a version mismatch may never switch to an older protocol. */
    public void enableDowngradeBarrier()
    {
        downgradeForbidden = true;
    }
}
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_com_slave_MasterClientResolver.java
|
4,873
|
/**
 * Placeholder {@link Node} that carries only a node id, used as a lock resource.
 * Only the identity operations ({@link #getId()}, {@link #equals},
 * {@link #hashCode}, {@link #toString}) are supported; everything else throws
 * {@link UnsupportedOperationException}.
 *
 * Cleanup: every unsupported operation now goes through the already-existing
 * {@link #lockableNodeException()} factory — previously half the methods
 * constructed the exception inline, half used the helper.
 */
public class LockableNode implements Node
{
    private final long id;
    public LockableNode( long id )
    {
        this.id = id;
    }
    // Single factory for the exception thrown by every unsupported operation,
    // keeping the message consistent across methods.
    private UnsupportedOperationException lockableNodeException()
    {
        return new UnsupportedOperationException( "Lockable node" );
    }
    @Override
    public void delete()
    {
        throw lockableNodeException();
    }
    @Override
    public long getId()
    {
        return this.id;
    }
    @Override
    public GraphDatabaseService getGraphDatabase()
    {
        throw lockableNodeException();
    }
    @Override
    public Object getProperty( String key )
    {
        throw lockableNodeException();
    }
    @Override
    public Object getProperty( String key, Object defaultValue )
    {
        throw lockableNodeException();
    }
    @Override
    public Iterable<String> getPropertyKeys()
    {
        throw lockableNodeException();
    }
    @Override
    public boolean hasProperty( String key )
    {
        throw lockableNodeException();
    }
    @Override
    public Object removeProperty( String key )
    {
        throw lockableNodeException();
    }
    @Override
    public void setProperty( String key, Object value )
    {
        throw lockableNodeException();
    }
    /** Two lockable nodes are equal iff they refer to the same node id. */
    @Override
    public boolean equals( Object o )
    {
        if ( !(o instanceof Node) )
        {
            return false;
        }
        return this.getId() == ((Node) o).getId();
    }
    @Override
    public int hashCode()
    {
        // Fold the two halves of the long id into an int (same scheme as Long.hashCode).
        return (int) (( id >>> 32 ) ^ id );
    }
    @Override
    public String toString()
    {
        return "Lockable node #" + this.getId();
    }
    @Override
    public Relationship createRelationshipTo( Node otherNode,
                                              RelationshipType type )
    {
        throw lockableNodeException();
    }
    @Override
    public Iterable<Relationship> getRelationships()
    {
        throw lockableNodeException();
    }
    @Override
    public Iterable<Relationship> getRelationships( RelationshipType... types )
    {
        throw lockableNodeException();
    }
    @Override
    public Iterable<Relationship> getRelationships( Direction direction, RelationshipType... types )
    {
        throw lockableNodeException();
    }
    @Override
    public Iterable<Relationship> getRelationships( Direction dir )
    {
        throw lockableNodeException();
    }
    @Override
    public Iterable<Relationship> getRelationships( RelationshipType type,
                                                    Direction dir )
    {
        throw lockableNodeException();
    }
    @Override
    public Relationship getSingleRelationship( RelationshipType type,
                                               Direction dir )
    {
        throw lockableNodeException();
    }
    @Override
    public boolean hasRelationship()
    {
        throw lockableNodeException();
    }
    @Override
    public boolean hasRelationship( RelationshipType... types )
    {
        throw lockableNodeException();
    }
    @Override
    public boolean hasRelationship( Direction direction, RelationshipType... types )
    {
        throw lockableNodeException();
    }
    @Override
    public boolean hasRelationship( Direction dir )
    {
        throw lockableNodeException();
    }
    @Override
    public boolean hasRelationship( RelationshipType type, Direction dir )
    {
        throw lockableNodeException();
    }
    @Override
    public Traverser traverse( Order traversalOrder,
                               StopEvaluator stopEvaluator,
                               ReturnableEvaluator returnableEvaluator,
                               RelationshipType relationshipType, Direction direction )
    {
        throw lockableNodeException();
    }
    @Override
    public Traverser traverse( Order traversalOrder,
                               StopEvaluator stopEvaluator,
                               ReturnableEvaluator returnableEvaluator,
                               RelationshipType firstRelationshipType, Direction firstDirection,
                               RelationshipType secondRelationshipType, Direction secondDirection )
    {
        throw lockableNodeException();
    }
    @Override
    public Traverser traverse( Order traversalOrder,
                               StopEvaluator stopEvaluator,
                               ReturnableEvaluator returnableEvaluator,
                               Object... relationshipTypesAndDirections )
    {
        throw lockableNodeException();
    }
    @Override
    public void addLabel( Label label )
    {
        throw lockableNodeException();
    }
    @Override
    public boolean hasLabel( Label label )
    {
        throw lockableNodeException();
    }
    @Override
    public ResourceIterable<Label> getLabels()
    {
        throw lockableNodeException();
    }
    @Override
    public void removeLabel( Label label )
    {
        throw lockableNodeException();
    }
}
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_lock_LockableNode.java
|
4,874
|
DEAD_LOCKED
{
@Override
public boolean hasMessage()
{
return true;
}
};
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_lock_LockStatus.java
|
4,875
|
/**
 * Outcome of a lock request as sent over the wire: a {@link LockStatus}, plus a
 * diagnostic message when the status is {@link LockStatus#DEAD_LOCKED}.
 */
public class LockResult
{
    private final LockStatus status;
    private final String deadlockMessage;
    /** Result without a message — any non-deadlock status. */
    public LockResult( LockStatus status )
    {
        this.deadlockMessage = null;
        this.status = status;
    }
    /** Deadlock result carrying the master's diagnostic message. */
    public LockResult( String deadlockMessage )
    {
        this.deadlockMessage = deadlockMessage;
        this.status = LockStatus.DEAD_LOCKED;
    }
    public LockStatus getStatus()
    {
        return status;
    }
    /** May be null unless the status is DEAD_LOCKED. */
    public String getDeadlockMessage()
    {
        return deadlockMessage;
    }
    @Override
    public String toString()
    {
        return String.format( "LockResult[%s, %s]", status, deadlockMessage );
    }
}
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_lock_LockResult.java
|
4,876
|
{
@Override
public long getAvailabilityTimeout()
{
return config.get( HaSettings.lock_read_timeout );
}
});
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_lock_LockManagerModeSwitcher.java
|
4,877
|
/**
 * Swaps the active {@link LockManager} implementation when this instance's HA
 * role changes: a plain local lock manager when master, a {@link SlaveLockManager}
 * (which forwards lock acquisition to the master) when slave.
 */
public class LockManagerModeSwitcher extends AbstractModeSwitcher<LockManager>
{
    private final HaXaDataSourceManager xaDsm;
    private final DelegateInvocationHandler<Master> master;
    private final RequestContextFactory requestContextFactory;
    private final AbstractTransactionManager txManager;
    private final RemoteTxHook remoteTxHook;
    private final AvailabilityGuard availabilityGuard;
    private final Config config;
    public LockManagerModeSwitcher( HighAvailabilityMemberStateMachine stateMachine,
                                    DelegateInvocationHandler<LockManager> delegate,
                                    HaXaDataSourceManager xaDsm, DelegateInvocationHandler<Master> master,
                                    RequestContextFactory requestContextFactory, AbstractTransactionManager txManager,
                                    RemoteTxHook remoteTxHook, AvailabilityGuard availabilityGuard, Config config )
    {
        super( stateMachine, delegate );
        this.xaDsm = xaDsm;
        this.master = master;
        this.requestContextFactory = requestContextFactory;
        this.txManager = txManager;
        this.remoteTxHook = remoteTxHook;
        this.availabilityGuard = availabilityGuard;
        this.config = config;
    }
    /** Master role: ordinary local lock manager with deadlock detection. */
    @Override
    protected LockManager getMasterImpl()
    {
        return new LockManagerImpl( new RagManager() );
    }
    /**
     * Slave role: lock manager that acquires locks on the master first.
     * master.cement() pins the current master delegate so the slave lock manager
     * keeps talking to the same master instance.
     */
    @Override
    protected LockManager getSlaveImpl( URI serverHaUri )
    {
        return new SlaveLockManager( new RagManager(), requestContextFactory, master.cement(), xaDsm, txManager,
                remoteTxHook, availabilityGuard, new SlaveLockManager.Configuration()
        {
            // The slave waits at most lock_read_timeout for the database to become available.
            @Override
            public long getAvailabilityTimeout()
            {
                return config.get( HaSettings.lock_read_timeout );
            }
        });
    }
}
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_lock_LockManagerModeSwitcher.java
|
4,878
|
/**
 * Thrown when a transaction deadlocks while taking a lock locally that it had
 * already been granted on the master. The message lists every currently held
 * lock to aid diagnosis.
 */
public class LocalDeadlockDetectedException extends DeadlockDetectedException
{
    public LocalDeadlockDetectedException( LockManager lockManager, Transaction tx, Object resource,
                                           LockType type )
    {
        super( buildDiagnosticsMessage( lockManager, tx, resource, type ) );
    }
    /** Builds the headline plus one line per lock that is actually held. */
    private static String buildDiagnosticsMessage( LockManager lockManager,
                                                   Transaction tx, Object resource, LockType type )
    {
        StringBuilder message = new StringBuilder( format(
                "%s tried to apply local %s lock on %s after acquired on master. Currently these locks exist:%n",
                tx, type, resource ) );
        for ( LockInfo lock : lockManager.getAllLocks() )
        {
            boolean held = lock.getReadCount() > 0 || lock.getWriteCount() > 0;
            if ( !held )
            {
                continue;
            }
            // LockInfo.toString() ends in a %n directive, expanded here by format().
            message.append( format( lock.toString() ) );
        }
        return message.toString();
    }
}
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_lock_LocalDeadlockDetectedException.java
|
4,879
|
/**
 * Immutable value handed from master to slave describing a newly granted id
 * range together with the master's highest id in use and defragmented-id count.
 */
public final class IdAllocation
{
    private final IdRange idRange;
    private final long highestIdInUse;
    private final long defragCount;
    public IdAllocation( IdRange idRange, long highestIdInUse, long defragCount )
    {
        this.idRange = idRange;
        this.highestIdInUse = highestIdInUse;
        this.defragCount = defragCount;
    }
    /** The contiguous (plus defragged) ids granted to the slave. */
    public IdRange getIdRange()
    {
        return this.idRange;
    }
    /** Highest id in use on the master at allocation time. */
    public long getHighestIdInUse()
    {
        return highestIdInUse;
    }
    /** Number of defragmented ids included in the range. */
    public long getDefragCount()
    {
        return defragCount;
    }
    @Override
    public String toString()
    {
        return String.format( "IdAllocation[%s, %d, %d]", idRange, highestIdInUse, defragCount );
    }
}
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_id_IdAllocation.java
|
4,880
|
public class HaIdGeneratorFactoryTest
{
@Test
public void slaveIdGeneratorShouldReturnFromAssignedRange() throws Exception
{
// GIVEN
IdAllocation firstResult = new IdAllocation( new IdRange( new long[]{}, 42, 123 ), 123, 0 );
Response<IdAllocation> response = response( firstResult );
when( master.allocateIds( any( RequestContext.class ), any( IdType.class ) ) ).thenReturn( response );
// WHEN
IdGenerator gen = switchToSlave();
// THEN
for ( long i = firstResult.getIdRange().getRangeStart(); i < firstResult.getIdRange().getRangeLength(); i++ )
{
assertEquals(i, gen.nextId());
}
verify( master, times( 1 ) ).allocateIds( any( RequestContext.class ), eq( IdType.NODE ) );
}
@Test
public void slaveIdGeneratorShouldAskForMoreWhenRangeIsOver() throws Exception
{
// GIVEN
IdAllocation firstResult = new IdAllocation( new IdRange( new long[]{}, 42, 123 ), 42 + 123, 0 );
IdAllocation secondResult = new IdAllocation( new IdRange( new long[]{}, 1042, 223 ), 1042 + 223, 0 );
Response<IdAllocation> response = response( firstResult, secondResult );
when( master.allocateIds( any( RequestContext.class ), any( IdType.class ) ) ).thenReturn( response );
// WHEN
IdGenerator gen = switchToSlave();
// THEN
long startAt = firstResult.getIdRange().getRangeStart();
long forThatMany = firstResult.getIdRange().getRangeLength();
for ( long i =startAt ; i < startAt + forThatMany; i++ )
{
assertEquals(i, gen.nextId());
}
verify( master, times( 1 ) ).allocateIds( any( RequestContext.class ), eq( IdType.NODE ) );
startAt = secondResult.getIdRange().getRangeStart();
forThatMany = secondResult.getIdRange().getRangeLength();
for ( long i =startAt ; i < startAt + forThatMany; i++ )
{
assertEquals(i, gen.nextId());
}
verify( master, times( 2 ) ).allocateIds( any( RequestContext.class ), eq( IdType.NODE ) );
}
@Test
public void shouldUseDefraggedIfPresent() throws Exception
{
// GIVEN
long[] defragIds = {42, 27172828, 314159};
IdAllocation firstResult = new IdAllocation( new IdRange( defragIds, 0, 0 ), 0, defragIds.length );
Response<IdAllocation> response = response( firstResult );
when( master.allocateIds( any( RequestContext.class ), any( IdType.class ) ) ).thenReturn( response );
// WHEN
IdGenerator gen = switchToSlave();
// THEN
for ( int i = 0; i < defragIds.length; i++ )
{
assertEquals(defragIds[i], gen.nextId() );
}
}
@Test
public void shouldMoveFromDefraggedToRange() throws Exception
{
// GIVEN
long[] defragIds = {42, 27172828, 314159};
IdAllocation firstResult = new IdAllocation( new IdRange( defragIds, 0, 10 ), 100, defragIds.length );
Response<IdAllocation> response = response( firstResult );
when( master.allocateIds( any( RequestContext.class ), any( IdType.class ) ) ).thenReturn( response );
// WHEN
IdGenerator gen = switchToSlave();
// THEN
for ( int i = 0; i < defragIds.length; i++ )
{
assertEquals(defragIds[i], gen.nextId() );
}
}
@Test
public void slaveShouldNeverAllowReducingHighId() throws Exception
{
    // GIVEN: an allocation whose highest-id-in-use is lower than what the
    // slave will subsequently learn from an applied transaction.
    final int highIdFromAllocation = 123;
    IdAllocation firstResult = new IdAllocation( new IdRange( new long[] {}, 42, highIdFromAllocation ),
            highIdFromAllocation, 0 );
    Response<IdAllocation> response = response( firstResult );
    when( master.allocateIds( any( RequestContext.class ), any( IdType.class ) ) ).thenReturn( response );
    // WHEN
    IdGenerator gen = switchToSlave();
    final int highIdFromUpdatedRecord = highIdFromAllocation + 1;
    gen.setHighId( highIdFromUpdatedRecord ); // Assume this is from a received transaction
    gen.nextId(); // that will ask the master for an IdRange
    // THEN: the allocation's (lower) high id must not clobber the one set from
    // the transaction -- high id on a slave is monotonically non-decreasing.
    assertEquals ( highIdFromUpdatedRecord, gen.getHighId() );
}
// Fixture: a mocked master, the invocation handler that lets the factory swap
// master implementations at runtime, an in-memory filesystem, and the factory
// under test.
private Master master;
private DelegateInvocationHandler<Master> masterDelegate;
private EphemeralFileSystemAbstraction fs;
private HaIdGeneratorFactory fac;

@Before
public void before()
{
    master = mock( Master.class );
    masterDelegate = new DelegateInvocationHandler<>( Master.class );
    fs = new EphemeralFileSystemAbstraction();
    fac = new HaIdGeneratorFactory( masterDelegate, new DevNullLoggingService(),
            mock( RequestContextFactory.class ) );
}
// Builds a mocked Response whose response() returns firstValue on the first
// call and then each additionalValue in turn (Mockito consecutive stubbing).
// The unchecked suppression is required because Response is generic and
// mock( Response.class ) yields a raw mock.
@SuppressWarnings( "unchecked" )
private Response<IdAllocation> response( IdAllocation firstValue, IdAllocation... additionalValues )
{
    Response<IdAllocation> response = mock( Response.class );
    when( response.response() ).thenReturn( firstValue, additionalValues );
    return response;
}
// Puts the factory in SLAVE mode, opens a NODE id generator backed by it, and
// only then wires the mocked master into the delegate -- mirroring the order
// used during an actual role switch.
private IdGenerator switchToSlave()
{
    fac.switchToSlave();
    IdGenerator gen = fac.open( fs, new File( "someFile" ), 10, IdType.NODE, 1 );
    masterDelegate.setDelegate( master );
    return gen;
}
}
| false
|
enterprise_ha_src_test_java_org_neo4j_kernel_ha_id_HaIdGeneratorFactoryTest.java
|
4,881
|
/**
 * Slave-side id generator. Instead of managing an id file locally it asks the
 * master for batches of ids ({@link IdAllocation}) and hands them out one at a
 * time from an in-memory queue.
 */
private static class SlaveIdGenerator implements IdGenerator
{
    // volatile: written under the synchronized nextId() but read unguarded by
    // getHighId()/getNumberOfIdsInUse()/getDefragCount().
    private volatile long highestIdInUse;
    private volatile long defragCount;
    private volatile IdRangeIterator idQueue = EMPTY_ID_RANGE_ITERATOR;
    private final Master master;
    private final IdType idType;
    private final StringLogger logger;
    private final RequestContextFactory requestContextFactory;

    SlaveIdGenerator( IdType idType, long highId, Master master, StringLogger logger,
                      RequestContextFactory requestContextFactory )
    {
        this.idType = idType;
        this.highestIdInUse = highId;
        this.master = master;
        this.logger = logger;
        this.requestContextFactory = requestContextFactory;
    }

    // No local file resources to release on a slave.
    @Override
    public void close()
    {
    }

    // Freed ids are not tracked on the slave; defragmentation is the master's job.
    @Override
    public void freeId( long id )
    {
    }

    @Override
    public long getHighId()
    {
        return highestIdInUse;
    }

    @Override
    public long getNumberOfIdsInUse()
    {
        return highestIdInUse - defragCount;
    }

    /**
     * Returns the next id, fetching a fresh allocation from the master when the
     * local queue is exhausted. Synchronized so only one thread triggers the
     * round-trip and consumes from the queue at a time.
     */
    @Override
    public synchronized long nextId()
    {
        long nextId = nextLocalId();
        if ( nextId == VALUE_REPRESENTING_NULL )
        {
            // If we don't have anymore grabbed ids from master, grab a bunch
            try ( Response<IdAllocation> response =
                          master.allocateIds( requestContextFactory.newRequestContext(), idType ) )
            {
                IdAllocation allocation = response.response();
                logger.info( "Received id allocation " + allocation + " from master " + master + " for " + idType );
                nextId = storeLocally( allocation );
            }
        }
        return nextId;
    }

    // Batch allocation is a master-only operation.
    @Override
    public IdRange nextIdBatch( int size )
    {
        throw new UnsupportedOperationException( "Should never be called" );
    }

    // Installs the master's allocation as the new local queue and returns its
    // first id. setHighId() keeps the high id monotonic (see setHighId below).
    private long storeLocally( IdAllocation allocation )
    {
        setHighId( allocation.getHighestIdInUse() );
        this.defragCount = allocation.getDefragCount();
        this.idQueue = new IdRangeIterator( allocation.getIdRange() );
        return idQueue.next();
    }

    private long nextLocalId()
    {
        return this.idQueue.next();
    }

    // High id may only ever grow: an allocation must never lower a high id
    // already learned from applied transactions.
    @Override
    public void setHighId( long id )
    {
        this.highestIdInUse = Math.max( this.highestIdInUse, id );
    }

    @Override
    public long getDefragCount()
    {
        return this.defragCount;
    }

    // Nothing to delete; there is no backing file in slave mode.
    @Override
    public void delete()
    {
    }

    @Override
    public String toString()
    {
        return getClass().getSimpleName() + "[" + this.idQueue + "]";
    }
}
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_id_HaIdGeneratorFactory.java
|
4,882
|
/**
 * Iterates over the ids of one {@link IdRange}: first each defragmented
 * (reused) id, then the contiguous fresh range, and finally
 * VALUE_REPRESENTING_NULL once everything has been handed out.
 */
private static class IdRangeIterator
{
    private int position = 0;
    private final long[] defrag;
    private final long start;
    private final int length;

    IdRangeIterator( IdRange idRange )
    {
        this.defrag = idRange.getDefragIds();
        this.start = idRange.getRangeStart();
        this.length = idRange.getRangeLength();
    }

    /**
     * Returns the next id, or VALUE_REPRESENTING_NULL when exhausted. The
     * cursor always advances by one, even on the exhausted path.
     */
    long next()
    {
        int current = position++;
        if ( current < defrag.length )
        {
            return defrag[current];
        }
        int rangeOffset = current - defrag.length;
        if ( rangeOffset < length )
        {
            return start + rangeOffset;
        }
        return VALUE_REPRESENTING_NULL;
    }

    @Override
    public String toString()
    {
        return "IdRangeIterator[start:" + start + ", length:" + length + ", position:" + position + ", defrag:" + Arrays.toString( defrag ) + "]";
    }
}
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_id_HaIdGeneratorFactory.java
|
4,883
|
/**
 * Role-aware id generator wrapper. Holds a delegate that is swapped when the
 * cluster member switches role: a file-backed generator when master, a
 * SlaveIdGenerator when slave. State transitions: PENDING -> SLAVE/MASTER.
 * Non-static inner class: uses the enclosing factory's logger, localFactory
 * and requestContextFactory.
 */
private class HaIdGenerator implements IdGenerator
{
    private volatile IdGenerator delegate;
    private final FileSystemAbstraction fs;
    private final File fileName;
    private final int grabSize;
    private final IdType idType;
    private volatile IdGeneratorState state;

    HaIdGenerator( IdGenerator initialDelegate, FileSystemAbstraction fs, File fileName, int grabSize,
                   IdType idType, IdGeneratorState initialState )
    {
        delegate = initialDelegate;
        this.fs = fs;
        this.fileName = fileName;
        this.grabSize = grabSize;
        this.idType = idType;
        this.state = initialState;
        logger.debug( "Instantiated HaIdGenerator for " + initialDelegate + " " + idType + ", " + initialState );
    }

    // Replaces the delegate with a master-backed SlaveIdGenerator, carrying the
    // current high id over so no already-used id is handed out again.
    private void switchToSlave( Master master )
    {
        long highId = delegate.getHighId();
        delegate.close();
        delegate = new SlaveIdGenerator( idType, highId, master, logger, requestContextFactory );
        logger.debug( "Instantiated slave delegate " + delegate + " of type " + idType + " with highid " + highId );
        state = IdGeneratorState.SLAVE;
    }

    // When coming from SLAVE the local id file is stale (it was not maintained
    // while slave), so it is deleted and recreated from the current high id.
    // When already MASTER (or PENDING) the existing delegate is kept.
    private void switchToMaster()
    {
        if ( state == IdGeneratorState.SLAVE )
        {
            long highId = delegate.getHighId();
            delegate.close();
            if ( fs.fileExists( fileName ) )
            {
                fs.deleteFile( fileName );
            }
            localFactory.create( fs, fileName, highId );
            delegate = localFactory.open( fs, fileName, grabSize, idType, highId );
            logger.debug( "Instantiated master delegate " + delegate + " of type " + idType + " with highid " + highId );
        }
        else
        {
            logger.debug( "Keeps " + delegate );
        }
        state = IdGeneratorState.MASTER;
    }

    @Override
    public String toString()
    {
        return delegate.toString();
    }

    // NOTE(review): delegating equals/hashCode makes equality asymmetric
    // (this.equals(other) compares the delegate against other, not wrapper
    // against wrapper) -- presumably intentional; confirm against callers.
    @Override
    public final boolean equals( Object other )
    {
        return delegate.equals( other );
    }

    @Override
    public final int hashCode()
    {
        return delegate.hashCode();
    }

    // Id handout is only legal after a role has been assumed.
    @Override
    public long nextId()
    {
        if ( state == IdGeneratorState.PENDING )
        {
            throw new IllegalStateException( state.name() );
        }
        long result = delegate.nextId();
        return result;
    }

    @Override
    public IdRange nextIdBatch( int size )
    {
        if ( state == IdGeneratorState.PENDING )
        {
            throw new IllegalStateException( state.name() );
        }
        return delegate.nextIdBatch( size );
    }

    @Override
    public void setHighId( long id )
    {
        delegate.setHighId( id );
    }

    @Override
    public long getHighId()
    {
        return delegate.getHighId();
    }

    @Override
    public void freeId( long id )
    {
        delegate.freeId( id );
    }

    @Override
    public void close()
    {
        delegate.close();
    }

    @Override
    public long getNumberOfIdsInUse()
    {
        return delegate.getNumberOfIdsInUse();
    }

    @Override
    public long getDefragCount()
    {
        return delegate.getDefragCount();
    }

    @Override
    public void delete()
    {
        delegate.delete();
    }
}
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_id_HaIdGeneratorFactory.java
|
4,884
|
{
    // Sentinel iterator: always reports exhaustion, so the first nextId() on a
    // freshly created slave generator immediately asks the master for ids.
    @Override
    long next()
    {
        return VALUE_REPRESENTING_NULL;
    }
    ;
};
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_id_HaIdGeneratorFactory.java
|
4,885
|
/**
 * {@link IdGeneratorFactory} for HA clusters. Every generator handed out is a
 * {@link HaIdGenerator} wrapper whose delegate is swapped on role change: a
 * local, file-backed generator when this member is master, a
 * {@link SlaveIdGenerator} (fetching id batches from the master) when slave.
 * State starts as PENDING; opening generators before a role is assumed fails.
 */
public class HaIdGeneratorFactory implements IdGeneratorFactory
{
    private final Map<IdType, HaIdGenerator> generators =
            new EnumMap<>( IdType.class ); // diamond: file already uses Java 7 features
    private final IdGeneratorFactory localFactory = new DefaultIdGeneratorFactory();
    private final DelegateInvocationHandler<Master> master;
    private final StringLogger logger;
    private final RequestContextFactory requestContextFactory;
    private IdGeneratorState globalState = IdGeneratorState.PENDING;

    public HaIdGeneratorFactory( DelegateInvocationHandler<Master> master, Logging logging,
                                 RequestContextFactory requestContextFactory )
    {
        this.master = master;
        this.logger = logging.getMessagesLog( getClass() );
        this.requestContextFactory = requestContextFactory;
    }

    /**
     * Opens (or reopens) the generator for the given id type, matching the
     * current global role. Any previously open generator for that type is
     * closed first.
     *
     * @throws IllegalStateException if no role has been assumed yet (PENDING).
     */
    @Override
    public IdGenerator open( FileSystemAbstraction fs, File fileName, int grabSize, IdType idType, long highId )
    {
        HaIdGenerator previous = generators.remove( idType );
        if ( previous != null )
        {
            previous.close();
        }
        IdGenerator initialIdGenerator;
        switch ( globalState )
        {
        case MASTER:
            initialIdGenerator = localFactory.open( fs, fileName, grabSize, idType, highId );
            break;
        case SLAVE:
            initialIdGenerator = new SlaveIdGenerator( idType, highId, master.cement(), logger, requestContextFactory );
            break;
        default:
            throw new IllegalStateException( globalState.name() );
        }
        HaIdGenerator haIdGenerator = new HaIdGenerator( initialIdGenerator, fs, fileName, grabSize, idType, globalState );
        generators.put( idType, haIdGenerator );
        return haIdGenerator;
    }

    @Override
    public void create( FileSystemAbstraction fs, File fileName, long highId )
    {
        localFactory.create( fs, fileName, highId );
    }

    @Override
    public IdGenerator get( IdType idType )
    {
        return generators.get( idType );
    }

    /** Switches all open generators (and future opens) to master mode. */
    public void switchToMaster()
    {
        globalState = IdGeneratorState.MASTER;
        for ( HaIdGenerator generator : generators.values() )
        {
            generator.switchToMaster();
        }
    }

    /** Switches all open generators (and future opens) to slave mode. */
    public void switchToSlave()
    {
        globalState = IdGeneratorState.SLAVE;
        for ( HaIdGenerator generator : generators.values() )
        {
            generator.switchToSlave( master.cement() );
        }
    }

    // Sentinel returned by IdRangeIterator.next() when exhausted.
    private static final long VALUE_REPRESENTING_NULL = -1;

    private enum IdGeneratorState
    {
        PENDING, SLAVE, MASTER;
    }

    /**
     * Role-aware id generator wrapper delegating to a file-backed generator
     * (master) or a {@link SlaveIdGenerator} (slave).
     */
    private class HaIdGenerator implements IdGenerator
    {
        private volatile IdGenerator delegate;
        private final FileSystemAbstraction fs;
        private final File fileName;
        private final int grabSize;
        private final IdType idType;
        private volatile IdGeneratorState state;

        HaIdGenerator( IdGenerator initialDelegate, FileSystemAbstraction fs, File fileName, int grabSize,
                       IdType idType, IdGeneratorState initialState )
        {
            delegate = initialDelegate;
            this.fs = fs;
            this.fileName = fileName;
            this.grabSize = grabSize;
            this.idType = idType;
            this.state = initialState;
            logger.debug( "Instantiated HaIdGenerator for " + initialDelegate + " " + idType + ", " + initialState );
        }

        // Carries the current high id over so no already-used id is reissued.
        private void switchToSlave( Master master )
        {
            long highId = delegate.getHighId();
            delegate.close();
            delegate = new SlaveIdGenerator( idType, highId, master, logger, requestContextFactory );
            logger.debug( "Instantiated slave delegate " + delegate + " of type " + idType + " with highid " + highId );
            state = IdGeneratorState.SLAVE;
        }

        // Coming from SLAVE the local id file is stale, so it is deleted and
        // recreated from the current high id; otherwise the delegate is kept.
        private void switchToMaster()
        {
            if ( state == IdGeneratorState.SLAVE )
            {
                long highId = delegate.getHighId();
                delegate.close();
                if ( fs.fileExists( fileName ) )
                {
                    fs.deleteFile( fileName );
                }
                localFactory.create( fs, fileName, highId );
                delegate = localFactory.open( fs, fileName, grabSize, idType, highId );
                logger.debug( "Instantiated master delegate " + delegate + " of type " + idType + " with highid " + highId );
            }
            else
            {
                logger.debug( "Keeps " + delegate );
            }
            state = IdGeneratorState.MASTER;
        }

        @Override
        public String toString()
        {
            return delegate.toString();
        }

        @Override
        public final boolean equals( Object other )
        {
            return delegate.equals( other );
        }

        @Override
        public final int hashCode()
        {
            return delegate.hashCode();
        }

        @Override
        public long nextId()
        {
            if ( state == IdGeneratorState.PENDING )
            {
                throw new IllegalStateException( state.name() );
            }
            return delegate.nextId(); // redundant local removed
        }

        @Override
        public IdRange nextIdBatch( int size )
        {
            if ( state == IdGeneratorState.PENDING )
            {
                throw new IllegalStateException( state.name() );
            }
            return delegate.nextIdBatch( size );
        }

        @Override
        public void setHighId( long id )
        {
            delegate.setHighId( id );
        }

        @Override
        public long getHighId()
        {
            return delegate.getHighId();
        }

        @Override
        public void freeId( long id )
        {
            delegate.freeId( id );
        }

        @Override
        public void close()
        {
            delegate.close();
        }

        @Override
        public long getNumberOfIdsInUse()
        {
            return delegate.getNumberOfIdsInUse();
        }

        @Override
        public long getDefragCount()
        {
            return delegate.getDefragCount();
        }

        @Override
        public void delete()
        {
            delegate.delete();
        }
    }

    /**
     * Slave-side generator: fetches id batches from the master and hands them
     * out from an in-memory queue.
     */
    private static class SlaveIdGenerator implements IdGenerator
    {
        private volatile long highestIdInUse;
        private volatile long defragCount;
        private volatile IdRangeIterator idQueue = EMPTY_ID_RANGE_ITERATOR;
        private final Master master;
        private final IdType idType;
        private final StringLogger logger;
        private final RequestContextFactory requestContextFactory;

        SlaveIdGenerator( IdType idType, long highId, Master master, StringLogger logger,
                          RequestContextFactory requestContextFactory )
        {
            this.idType = idType;
            this.highestIdInUse = highId;
            this.master = master;
            this.logger = logger;
            this.requestContextFactory = requestContextFactory;
        }

        @Override
        public void close()
        {
        }

        // Freed ids are not tracked on the slave.
        @Override
        public void freeId( long id )
        {
        }

        @Override
        public long getHighId()
        {
            return highestIdInUse;
        }

        @Override
        public long getNumberOfIdsInUse()
        {
            return highestIdInUse - defragCount;
        }

        @Override
        public synchronized long nextId()
        {
            long nextId = nextLocalId();
            if ( nextId == VALUE_REPRESENTING_NULL )
            {
                // If we don't have anymore grabbed ids from master, grab a bunch
                try ( Response<IdAllocation> response =
                              master.allocateIds( requestContextFactory.newRequestContext(), idType ) )
                {
                    IdAllocation allocation = response.response();
                    logger.info( "Received id allocation " + allocation + " from master " + master + " for " + idType );
                    nextId = storeLocally( allocation );
                }
            }
            return nextId;
        }

        @Override
        public IdRange nextIdBatch( int size )
        {
            throw new UnsupportedOperationException( "Should never be called" );
        }

        private long storeLocally( IdAllocation allocation )
        {
            setHighId( allocation.getHighestIdInUse() );
            this.defragCount = allocation.getDefragCount();
            this.idQueue = new IdRangeIterator( allocation.getIdRange() );
            return idQueue.next();
        }

        private long nextLocalId()
        {
            return this.idQueue.next();
        }

        // High id may only ever grow on a slave.
        @Override
        public void setHighId( long id )
        {
            this.highestIdInUse = Math.max( this.highestIdInUse, id );
        }

        @Override
        public long getDefragCount()
        {
            return this.defragCount;
        }

        @Override
        public void delete()
        {
        }

        @Override
        public String toString()
        {
            return getClass().getSimpleName() + "[" + this.idQueue + "]";
        }
    }

    /** Serves defrag ids first, then the fresh range, then the null sentinel. */
    private static class IdRangeIterator
    {
        private int position = 0;
        private final long[] defrag;
        private final long start;
        private final int length;

        IdRangeIterator( IdRange idRange )
        {
            this.defrag = idRange.getDefragIds();
            this.start = idRange.getRangeStart();
            this.length = idRange.getRangeLength();
        }

        long next()
        {
            try
            {
                if ( position < defrag.length )
                {
                    return defrag[position];
                }
                else
                {
                    int offset = position - defrag.length;
                    return (offset < length) ? (start + offset) : VALUE_REPRESENTING_NULL;
                }
            }
            finally
            {
                ++position;
            }
        }

        @Override
        public String toString()
        {
            return "IdRangeIterator[start:" + start + ", length:" + length + ", position:" + position + ", defrag:" + Arrays.toString( defrag ) + "]";
        }
    }

    // Constant shared by all slave generators; now final (it was mutable).
    private static final IdRangeIterator EMPTY_ID_RANGE_ITERATOR =
            new IdRangeIterator( new IdRange( new long[0], 0, 0 ) )
            {
                @Override
                long next()
                {
                    return VALUE_REPRESENTING_NULL;
                }
            };
}
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_id_HaIdGeneratorFactory.java
|
4,886
|
/**
 * Network server exposing the {@link Slave} interface to other cluster members,
 * using the generic com.Server machinery with the slave application protocol.
 */
public class SlaveServer extends Server<Slave, Void>
{
    public static final byte APPLICATION_PROTOCOL_VERSION = 1;

    public SlaveServer( Slave requestTarget, Configuration config, Logging logging, Monitors monitors )
    {
        super( requestTarget, config, logging, DEFAULT_FRAME_LENGTH, APPLICATION_PROTOCOL_VERSION, ALWAYS_MATCH,
                SYSTEM_CLOCK, monitors );
    }

    // Maps a wire-format request id to its request type; despite the inherited
    // name this resolves a RequestType, not a RequestContext.
    @Override
    protected RequestType<Slave> getRequestContext( byte id )
    {
        return SlaveRequestType.values()[id];
    }

    // No per-channel state is kept, so nothing to clean up when a channel ends.
    @Override
    protected void finishOffChannel( Channel channel, RequestContext context )
    {
    }
}
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_com_slave_SlaveServer.java
|
4,887
|
/**
 * Server-side implementation of {@link Slave}: lets the master tell this slave
 * to pull and apply outstanding transactions.
 */
public class SlaveImpl implements Slave
{
    private final Master master;
    private final RequestContextFactory requestContextFactory;
    private final StoreId storeId;
    private final HaXaDataSourceManager xaDsm;

    public SlaveImpl( StoreId storeId, Master master, RequestContextFactory requestContextFactory,
                      HaXaDataSourceManager xaDsm )
    {
        this.storeId = storeId;
        this.master = master;
        this.requestContextFactory = requestContextFactory;
        this.xaDsm = xaDsm;
    }

    // NOTE(review): both parameters are ignored -- the call always pulls all
    // available updates from the master regardless of resource/txId. Presumably
    // intentional (pulling more than asked is harmless); confirm with callers.
    @Override
    public Response<Void> pullUpdates( String resource, long upToAndIncludingTxId )
    {
        xaDsm.applyTransactions( master.pullUpdates( requestContextFactory.newRequestContext( 0 ) ), ServerUtil.NO_ACTION );
        return ServerUtil.packResponseWithoutTransactionStream( storeId, null );
    }

    @Override
    public int getServerId()
    {
        return 0;
    }
}
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_com_slave_SlaveImpl.java
|
4,888
|
/**
 * Base class for {@link MasterClientFactory} implementations that are bound to
 * one fixed protocol version. Holds the connection and timeout configuration
 * shared by all concrete factories; fields are protected so subclasses can
 * forward them to the MasterClient constructor.
 */
protected static abstract class StaticMasterClientFactory implements MasterClientFactory
{
    protected final Logging logging;
    protected final int readTimeoutSeconds;
    protected final int lockReadTimeout;
    protected final int maxConcurrentChannels;
    protected final int chunkSize;

    StaticMasterClientFactory( Logging logging, int readTimeout, int lockTimeout,
                               int channelLimit, int chunk )
    {
        this.chunkSize = chunk;
        this.maxConcurrentChannels = channelLimit;
        this.lockReadTimeout = lockTimeout;
        this.readTimeoutSeconds = readTimeout;
        this.logging = logging;
    }
}
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_com_slave_MasterClientResolver.java
|
4,889
|
/**
 * Value object pairing an application protocol version with an internal
 * (com-layer) protocol version, used to pick a matching MasterClient.
 */
private static final class ProtocolVersionCombo implements Comparable<ProtocolVersionCombo>
{
    final int applicationProtocol;
    final int internalProtocol;

    ProtocolVersionCombo( int applicationProtocol, int internalProtocol )
    {
        this.applicationProtocol = applicationProtocol;
        this.internalProtocol = internalProtocol;
    }

    @Override
    public boolean equals( Object obj )
    {
        if ( obj == null )
        {
            return false;
        }
        if ( obj.getClass() != ProtocolVersionCombo.class )
        {
            return false;
        }
        ProtocolVersionCombo other = (ProtocolVersionCombo) obj;
        return other.applicationProtocol == applicationProtocol && other.internalProtocol == internalProtocol;
    }

    @Override
    public int hashCode()
    {
        // Was "(31 * applicationProtocol) | internalProtocol": bitwise OR
        // discards internalProtocol bits that overlap the first term, which is
        // almost certainly a typo for '+'. Equal objects still hash equally.
        return 31 * applicationProtocol + internalProtocol;
    }

    // NOTE(review): ordering considers applicationProtocol only, so compareTo
    // is inconsistent with equals (two combos differing only in
    // internalProtocol compare as 0). Kept as-is to preserve existing ordering
    // semantics; confirm no sorted-set usage relies on this before tightening.
    @Override
    public int compareTo( ProtocolVersionCombo o )
    {
        return Integer.compare( applicationProtocol, o.applicationProtocol );
    }

    /* Legacy version combos:
     * static final ProtocolVersionCombo PC_153 = new ProtocolVersionCombo( MasterClient153.PROTOCOL_VERSION, 2 );
     * static final ProtocolVersionCombo PC_17 = new ProtocolVersionCombo( MasterClient17.PROTOCOL_VERSION, 2 );
     * static final ProtocolVersionCombo PC_18 = new ProtocolVersionCombo( MasterClient18.PROTOCOL_VERSION, 2 ); */
    static final ProtocolVersionCombo PC_20 = new ProtocolVersionCombo( MasterClient20.PROTOCOL_VERSION, 2 );
    static final ProtocolVersionCombo PC_201 = new ProtocolVersionCombo( MasterClient201.PROTOCOL_VERSION, 2 );
}
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_com_slave_MasterClientResolver.java
|
4,890
|
/** Factory producing protocol-2.0.1 {@link MasterClient201} instances. */
public static final class F201 extends StaticMasterClientFactory
{
    public F201( Logging logging, int readTimeoutSeconds, int lockReadTimeout, int maxConcurrentChannels,
                 int chunkSize )
    {
        super( logging, readTimeoutSeconds, lockReadTimeout, maxConcurrentChannels, chunkSize );
    }

    @Override
    public MasterClient instantiate( String hostNameOrIp, int port, Monitors monitors, StoreId storeId, LifeSupport life )
    {
        // Register the client with the life support so its lifecycle is managed.
        MasterClient201 client = new MasterClient201( hostNameOrIp, port, logging, monitors, storeId,
                readTimeoutSeconds, lockReadTimeout, maxConcurrentChannels, chunkSize );
        return life.add( client );
    }
}
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_com_slave_MasterClientResolver.java
|
4,891
|
/** Factory producing protocol-2.0 {@link MasterClient20} instances. */
public static final class F20 extends StaticMasterClientFactory
{
    public F20( Logging logging, int readTimeoutSeconds, int lockReadTimeout, int maxConcurrentChannels,
                int chunkSize )
    {
        super( logging, readTimeoutSeconds, lockReadTimeout, maxConcurrentChannels, chunkSize );
    }

    @Override
    public MasterClient instantiate( String hostNameOrIp, int port, Monitors monitors, StoreId storeId, LifeSupport life )
    {
        // Register the client with the life support so its lifecycle is managed.
        MasterClient20 client = new MasterClient20( hostNameOrIp, port, logging, monitors, storeId,
                readTimeoutSeconds, lockReadTimeout, maxConcurrentChannels, chunkSize );
        return life.add( client );
    }
}
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_com_slave_MasterClientResolver.java
|
4,892
|
{
    // Wraps the captured function: creates a schema index inside the same
    // transaction first, then applies the wrapped function -- provoking the
    // data-vs-schema transaction separation under test.
    @Override
    public Void apply( GraphDatabaseService graphDb )
    {
        // given
        graphDb.schema().indexFor( label( "Label1" ) ).on( "key1" ).create();
        // when/then
        function.apply( graphDb );
        return null;
    }
};
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_api_DataAndSchemaTransactionSeparationIT.java
|
4,893
|
{
    // Simple data-write function: creates and returns one node.
    @Override
    public Node apply( GraphDatabaseService graphDb )
    {
        return graphDb.createNode();
    }
};
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_api_DataAndSchemaTransactionSeparationIT.java
|
4,894
|
{
    // Reads the captured property key from the captured entity.
    @Override
    Object perform( GraphDatabaseService graphDb )
    {
        return entity.getProperty( key );
    }
};
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_api_DataAndSchemaTransactionSeparationIT.java
|
4,895
|
{
    // Pass-through override: delegates straight to the adapter's default
    // updater (no blocking here; blocking happens in the test's run()).
    @Override
    public IndexUpdater newUpdater( IndexUpdateMode mode )
    {
        return super.newUpdater( mode );
    }
};
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_api_index_ContractCheckingIndexProxyTest.java
|
4,896
|
{
    // Blocks inside start() until the test releases the latch, simulating a
    // long-running index creation.
    @Override
    public void start()
    {
        latch.startAndAwaitFinish();
    }
};
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_api_index_ContractCheckingIndexProxyTest.java
|
4,897
|
{
    // Runs the (blocking) start() on a separate thread so the test thread can
    // race it with close()/drop().
    @Override
    public void run() throws IOException
    {
        outer.start();
    }
} );
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_api_index_ContractCheckingIndexProxyTest.java
|
4,898
|
{
    // Blocks inside start() until the test releases the latch, simulating a
    // long-running index creation.
    @Override
    public void start()
    {
        latch.startAndAwaitFinish();
    }
};
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_api_index_ContractCheckingIndexProxyTest.java
|
4,899
|
/**
 * Verifies the lifecycle contract enforced by ContractCheckingIndexProxy:
 * start/close/drop may each happen at most once and in a legal order, updates
 * and force are only allowed on a started, not-yet-closed proxy, and
 * close()/drop() must fail while a start/update/force is still in flight on
 * another thread (exercised via DoubleLatch).
 */
public class ContractCheckingIndexProxyTest
{
    @Test( expected = /* THEN */ IllegalStateException.class )
    public void shouldNotCreateIndexTwice() throws IOException
    {
        // GIVEN
        IndexProxy inner = mockIndexProxy();
        IndexProxy outer = newContractCheckingIndexProxy( inner );
        // WHEN: second start() must be rejected
        outer.start();
        outer.start();
    }

    @Test( expected = /* THEN */ IllegalStateException.class )
    public void shouldNotCloseIndexTwice() throws IOException
    {
        // GIVEN
        IndexProxy inner = mockIndexProxy();
        IndexProxy outer = newContractCheckingIndexProxy( inner );
        // WHEN: second close() must be rejected
        outer.close();
        outer.close();
    }

    @Test( expected = /* THEN */ IllegalStateException.class )
    public void shouldNotDropIndexTwice() throws IOException
    {
        // GIVEN
        IndexProxy inner = mockIndexProxy();
        IndexProxy outer = newContractCheckingIndexProxy( inner );
        // WHEN: second drop() must be rejected
        outer.drop();
        outer.drop();
    }

    @Test( expected = /* THEN */ IllegalStateException.class )
    public void shouldNotDropAfterClose() throws IOException
    {
        // GIVEN
        IndexProxy inner = mockIndexProxy();
        IndexProxy outer = newContractCheckingIndexProxy( inner );
        // WHEN: drop() on a closed proxy must be rejected
        outer.close();
        outer.drop();
    }

    @Test
    public void shouldDropAfterCreate() throws IOException
    {
        // GIVEN
        IndexProxy inner = mockIndexProxy();
        IndexProxy outer = newContractCheckingIndexProxy( inner );
        // WHEN
        outer.start();
        // PASS: start -> drop is a legal transition
        outer.drop();
    }

    @Test
    public void shouldCloseAfterCreate() throws IOException
    {
        // GIVEN
        IndexProxy inner = mockIndexProxy();
        IndexProxy outer = newContractCheckingIndexProxy( inner );
        // WHEN
        outer.start();
        // PASS: start -> close is a legal transition
        outer.close();
    }

    @Test(expected = IllegalStateException.class)
    public void shouldNotUpdateBeforeCreate() throws IOException, IndexEntryConflictException
    {
        // GIVEN
        IndexProxy inner = mockIndexProxy();
        IndexProxy outer = newContractCheckingIndexProxy( inner );
        // WHEN: updating a never-started proxy must be rejected
        try (IndexUpdater updater = outer.newUpdater( IndexUpdateMode.ONLINE ))
        {
            updater.process( null );
        }
    }

    @Test(expected = IllegalStateException.class)
    public void shouldNotUpdateAfterClose() throws IOException, IndexEntryConflictException
    {
        // GIVEN
        IndexProxy inner = mockIndexProxy();
        IndexProxy outer = newContractCheckingIndexProxy( inner );
        // WHEN: updating a closed proxy must be rejected
        outer.start();
        outer.close();
        try (IndexUpdater updater = outer.newUpdater( IndexUpdateMode.ONLINE ))
        {
            updater.process( null );
        }
    }

    @Test(expected = IllegalStateException.class)
    public void shouldNotForceBeforeCreate() throws IOException
    {
        // GIVEN
        IndexProxy inner = mockIndexProxy();
        IndexProxy outer = newContractCheckingIndexProxy( inner );
        // WHEN: force() before start() must be rejected
        outer.force();
    }

    @Test(expected = IllegalStateException.class)
    public void shouldNotForceAfterClose() throws IOException
    {
        // GIVEN
        IndexProxy inner = mockIndexProxy();
        IndexProxy outer = newContractCheckingIndexProxy( inner );
        // WHEN: force() after close() must be rejected
        outer.start();
        outer.close();
        outer.force();
    }

    @Test( expected = /* THEN */ IllegalStateException.class )
    public void shouldNotCloseWhileCreating() throws IOException
    {
        // GIVEN: an inner proxy whose start() blocks on a latch
        final DoubleLatch latch = new DoubleLatch();
        final IndexProxy inner = new IndexProxyAdapter()
        {
            @Override
            public void start()
            {
                latch.startAndAwaitFinish();
            }
        };
        final IndexProxy outer = newContractCheckingIndexProxy( inner );
        // WHEN: start() runs on a background thread...
        runInSeparateThread( new ThrowingRunnable()
        {
            @Override
            public void run() throws IOException
            {
                outer.start();
            }
        } );
        try
        {
            // ...and close() races it while start is mid-flight
            latch.awaitStart();
            outer.close();
        }
        finally
        {
            // Always release the background thread, even when the expected
            // exception has already been thrown.
            latch.finish();
        }
    }

    @Test( expected = /* THEN */ IllegalStateException.class )
    public void shouldNotDropWhileCreating() throws IOException
    {
        // GIVEN: an inner proxy whose start() blocks on a latch
        final DoubleLatch latch = new DoubleLatch();
        final IndexProxy inner = new IndexProxyAdapter()
        {
            @Override
            public void start()
            {
                latch.startAndAwaitFinish();
            }
        };
        final IndexProxy outer = newContractCheckingIndexProxy( inner );
        // WHEN: drop() races a still-running start()
        runInSeparateThread( new ThrowingRunnable()
        {
            @Override
            public void run() throws IOException
            {
                outer.start();
            }
        } );
        try
        {
            latch.awaitStart();
            outer.drop();
        }
        finally
        {
            latch.finish();
        }
    }

    @Test( expected = /* THEN */ IllegalStateException.class )
    public void shouldNotCloseWhileUpdating() throws IOException
    {
        // GIVEN: an updater that blocks after processing, keeping the update
        // "in flight" while the test thread attempts close()
        final DoubleLatch latch = new DoubleLatch();
        final IndexProxy inner = new IndexProxyAdapter()
        {
            @Override
            public IndexUpdater newUpdater( IndexUpdateMode mode )
            {
                return super.newUpdater( mode );
            }
        };
        final IndexProxy outer = newContractCheckingIndexProxy( inner );
        outer.start();
        // WHEN
        runInSeparateThread( new ThrowingRunnable()
        {
            @Override
            public void run() throws IOException
            {
                try (IndexUpdater updater = outer.newUpdater( IndexUpdateMode.ONLINE ))
                {
                    updater.process( null );
                    latch.startAndAwaitFinish();
                }
                catch ( IndexEntryConflictException e )
                {
                    throw new RuntimeException( e );
                }
            }
        } );
        try
        {
            latch.awaitStart();
            outer.close();
        }
        finally
        {
            latch.finish();
        }
    }

    @Test( expected = /* THEN */ IllegalStateException.class )
    public void shouldNotCloseWhileForcing() throws IOException
    {
        // GIVEN: an inner proxy whose force() blocks on a latch
        final DoubleLatch latch = new DoubleLatch();
        final IndexProxy inner = new IndexProxyAdapter()
        {
            @Override
            public void force()
            {
                latch.startAndAwaitFinish();
            }
        };
        final IndexProxy outer = newContractCheckingIndexProxy( inner );
        outer.start();
        // WHEN: close() races a still-running force()
        runInSeparateThread( new ThrowingRunnable()
        {
            @Override
            public void run() throws IOException
            {
                outer.force();
            }
        } );
        try
        {
            latch.awaitStart();
            outer.close();
        }
        finally
        {
            latch.finish();
        }
    }

    // Runnable variant that may throw IOException, for the background actions.
    private interface ThrowingRunnable
    {
        void run() throws IOException;
    }

    // Fire-and-forget background execution; IOExceptions are rethrown
    // unchecked on the background thread.
    private void runInSeparateThread( final ThrowingRunnable action )
    {
        new Thread( new Runnable()
        {
            @Override
            public void run()
            {
                try
                {
                    action.run();
                }
                catch ( IOException e )
                {
                    throw new RuntimeException( e );
                }
            }
        } ).start();
    }

    private ContractCheckingIndexProxy newContractCheckingIndexProxy( IndexProxy inner )
    {
        return new ContractCheckingIndexProxy( inner, false );
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_impl_api_index_ContractCheckingIndexProxyTest.java
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.