Unnamed: 0
int64 0
6.7k
| func
stringlengths 12
89.6k
| target
bool 2
classes | project
stringlengths 45
151
|
|---|---|---|---|
5,000
|
/**
 * Serializes an acquire-lock request: writes the number of entity ids as an
 * int, followed by each entity id as a long, in the order given.
 */
protected static class AcquireLockSerializer implements Serializer
{
    private final long[] entities;

    AcquireLockSerializer( long... entities )
    {
        this.entities = entities;
    }

    @Override
    public void write( ChannelBuffer buffer ) throws IOException
    {
        int count = entities.length;
        buffer.writeInt( count );
        for ( int i = 0; i < count; i++ )
        {
            buffer.writeLong( entities[i] );
        }
    }
}
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_MasterClient201.java
|
5,001
|
/**
 * Serializes an acquire-index-lock request: the index name followed by the
 * key, both written via {@code writeString}.
 */
protected static class AcquireIndexLockSerializer implements Serializer
{
    private final String index;
    private final String key;

    AcquireIndexLockSerializer( String index, String key )
    {
        this.index = index;
        this.key = key;
    }

    @Override
    public void write( ChannelBuffer buffer ) throws IOException
    {
        // Order matters: the deserializer on the master reads index first, then key.
        writeString( buffer, this.index );
        writeString( buffer, this.key );
    }
}
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_MasterClient201.java
|
5,002
|
/**
 * Serializes an acquire-index-entry-lock request: label id and property key id
 * as longs, then the property value as a string.
 */
protected static class AcquireIndexEntryLockSerializer implements Serializer
{
    private final long labelId;
    private final long propertyKeyId;
    private final String value;

    AcquireIndexEntryLockSerializer( long labelId, long propertyKeyId, String value )
    {
        this.labelId = labelId;
        this.propertyKeyId = propertyKeyId;
        this.value = value;
    }

    @Override
    public void write( ChannelBuffer target ) throws IOException
    {
        target.writeLong( labelId );
        target.writeLong( propertyKeyId );
        writeString( target, value );
    }
}
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_MasterClient201.java
|
5,003
|
{
@Override
public void write( ChannelBuffer buffer ) throws IOException
{
writeString( buffer, resource );
BlockLogBuffer blockLogBuffer = new BlockLogBuffer( buffer, monitor );
txGetter.extract( blockLogBuffer );
blockLogBuffer.done();
}
}, new Deserializer<Long>()
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_MasterClient201.java
|
5,004
|
{
@Override
@SuppressWarnings("boxing")
public Integer read( ChannelBuffer buffer, ByteBuffer temporaryBuffer ) throws IOException
{
return buffer.readInt();
}
});
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_MasterClient201.java
|
5,005
|
{
@Override
public void write( ChannelBuffer buffer ) throws IOException
{
writeString( buffer, name );
}
}, new Deserializer<Integer>()
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_MasterClient201.java
|
5,006
|
{
@Override
public Iterable<Slave> getSlaves()
{
return slaves;
}
}, new CommitPusher( scheduler ) );
| false
|
enterprise_ha_src_test_java_org_neo4j_kernel_ha_TestMasterCommittingAtSlave.java
|
5,007
|
/**
 * Test double for {@link Slave}: records the tx ids it is asked to pull, or
 * throws {@link ComException} when configured as failing.
 * <p>
 * Fix: the queue was a {@code volatile Queue} backed by a plain
 * {@link java.util.LinkedList}. {@code volatile} only guarantees visibility of
 * the reference, not safe concurrent mutation of the list — and the queue is
 * written from the push threads and drained from the test thread. Use a
 * {@link java.util.concurrent.ConcurrentLinkedQueue} instead.
 */
private static class FakeSlave implements Slave
{
    // Thread-safe: appended to by pullUpdates() callers, drained by the test thread.
    private final Queue<Long> calledWithTxId = new java.util.concurrent.ConcurrentLinkedQueue<Long>();
    private final boolean failing;
    private final int serverId;

    FakeSlave( boolean failing, int serverId )
    {
        this.failing = failing;
        this.serverId = serverId;
    }

    @Override
    public Response<Void> pullUpdates( String resource, long txId )
    {
        if ( failing )
        {
            throw new ComException( "Told to fail" );
        }
        calledWithTxId.add( txId );
        return new Response<Void>( null, new StoreId(), TransactionStream.EMPTY, ResourceReleaser.NO_OP );
    }

    /** Returns and removes the oldest recorded tx id, or null if none. */
    Long popCalledTx()
    {
        return calledWithTxId.poll();
    }

    /** True if there are still recorded tx ids that have not been popped. */
    boolean moreTxs()
    {
        return !calledWithTxId.isEmpty();
    }

    @Override
    public int getServerId()
    {
        return serverId;
    }

    @Override
    public String toString()
    {
        return "FakeSlave[" + serverId + "]";
    }
}
| false
|
enterprise_ha_src_test_java_org_neo4j_kernel_ha_TestMasterCommittingAtSlave.java
|
5,008
|
{
@Override
public void write( ChannelBuffer buffer ) throws IOException
{
writeString( buffer, name );
}
}, new Deserializer<Integer>()
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_MasterClient201.java
|
5,009
|
/**
 * Test double for {@link StringLogger} that accumulates every logged message
 * into a comma-separated buffer and flags any message not containing the word
 * "communication" as an unexpected exception.
 */
private static class FakeStringLogger extends StringLogger
{
    private volatile boolean unexpectedExceptionLogged;
    private final StringBuilder errors = new StringBuilder();

    @Override
    public void logLongMessage( String msg, Visitor<LineLogger, RuntimeException> source, boolean flush )
    {
        addError( msg );
    }

    /** Records a message; anything not mentioning "communication" is unexpected. */
    private void addError( String message )
    {
        if ( !message.contains( "communication" ) )
        {
            unexpectedExceptionLogged = true;
        }
        if ( errors.length() > 0 )
        {
            errors.append( "," );
        }
        errors.append( message );
    }

    @Override
    public void logMessage( String msg, boolean flush )
    {
        addError( msg );
    }

    @Override
    public void logMessage( String msg, LogMarker marker )
    {
        addError( msg );
    }

    @Override
    public void logMessage( String msg, Throwable cause, boolean flush )
    {
        addError( msg );
    }

    @Override
    public void addRotationListener( Runnable listener )
    {
        // No rotation in the fake.
    }

    @Override
    public void flush()
    {
        // Nothing buffered externally.
    }

    @Override
    public void close()
    {
        // Nothing to release.
    }

    @Override
    protected void logLine( String line )
    {
        addError( line );
    }
}
| false
|
enterprise_ha_src_test_java_org_neo4j_kernel_ha_TestMasterCommittingAtSlave.java
|
5,010
|
/**
 * Tests for {@link HANewSnapshotFunction}: folding a stream of
 * {@link MemberIsAvailable} events into a snapshot must keep at most one role
 * entry per instance, and a new MASTER must displace earlier MASTER/BACKUP
 * entries.
 * <p>
 * Improvement: the identical "fold events into an empty snapshot" loop was
 * duplicated in every test; it is extracted into {@link #applyEvents(List)}.
 */
public class HANewSnapshotFunctionTest
{
    @Test
    public void normalClusterCreationShouldBePassedUnchanged() throws Exception
    {
        // GIVEN a well-formed event sequence (also the expected end result)
        List<MemberIsAvailable> events = new LinkedList<MemberIsAvailable>();
        events.add( roleForId( MASTER, 1 ) );
        events.add( roleForId( BACKUP, 1 ) );
        events.add( roleForId( SLAVE, 2 ) );
        events.add( roleForId( SLAVE, 3 ) );
        // WHEN the events are folded in, THEN the result is unchanged
        eventsMatch( applyEvents( events ), events );
    }

    @Test
    public void duplicateSlaveEventsShouldBeFilteredOut() throws Exception
    {
        // GIVEN repeated SLAVE events for the same instances
        List<MemberIsAvailable> events = new LinkedList<MemberIsAvailable>();
        events.add( roleForId( MASTER, 1 ) );
        events.add( roleForId( BACKUP, 1 ) );
        events.add( roleForId( SLAVE, 2 ) );
        events.add( roleForId( SLAVE, 3 ) );
        events.add( roleForId( SLAVE, 2 ) );
        events.add( roleForId( SLAVE, 3 ) );
        // and the deduplicated expectation
        List<MemberIsAvailable> expected = new LinkedList<MemberIsAvailable>();
        expected.add( roleForId( MASTER, 1 ) );
        expected.add( roleForId( BACKUP, 1 ) );
        expected.add( roleForId( SLAVE, 2 ) );
        expected.add( roleForId( SLAVE, 3 ) );
        // WHEN/THEN duplicates are filtered out
        eventsMatch( applyEvents( events ), expected );
    }

    @Test
    public void instanceBeingMasterReappearsAsSlaveShouldBeTreatedAsSlave() throws Exception
    {
        // GIVEN instance 1 first appears as MASTER/BACKUP, later as SLAVE
        List<MemberIsAvailable> events = new LinkedList<MemberIsAvailable>();
        events.add( roleForId( MASTER, 1 ) );
        events.add( roleForId( BACKUP, 1 ) );
        events.add( roleForId( SLAVE, 2 ) );
        events.add( roleForId( SLAVE, 1 ) );
        events.add( roleForId( SLAVE, 3 ) );
        // and the expected outcome keeps only the SLAVE role for instance 1
        List<MemberIsAvailable> expected = new LinkedList<MemberIsAvailable>();
        expected.add( roleForId( SLAVE, 2 ) );
        expected.add( roleForId( SLAVE, 1 ) );
        expected.add( roleForId( SLAVE, 3 ) );
        // WHEN/THEN
        eventsMatch( applyEvents( events ), expected );
    }

    @Test
    public void instanceBeingSlaveReappearsAsMasterShouldBeTreatedAsMaster() throws Exception
    {
        // GIVEN instance 1 first appears as SLAVE, later as MASTER
        List<MemberIsAvailable> events = new LinkedList<MemberIsAvailable>();
        events.add( roleForId( SLAVE, 2 ) );
        events.add( roleForId( SLAVE, 1 ) );
        events.add( roleForId( MASTER, 1 ) );
        events.add( roleForId( SLAVE, 3 ) );
        // and the expected outcome keeps only the MASTER role for instance 1
        List<MemberIsAvailable> expected = new LinkedList<MemberIsAvailable>();
        expected.add( roleForId( SLAVE, 2 ) );
        expected.add( roleForId( MASTER, 1 ) );
        expected.add( roleForId( SLAVE, 3 ) );
        // WHEN/THEN
        eventsMatch( applyEvents( events ), expected );
    }

    @Test
    public void instanceBeingMasterReplacedByAnotherInstanceShouldNotRemainMaster() throws Exception
    {
        // GIVEN a second MASTER event from a different instance
        List<MemberIsAvailable> events = new LinkedList<MemberIsAvailable>();
        events.add( roleForId( MASTER, 1 ) );
        events.add( roleForId( BACKUP, 1 ) );
        events.add( roleForId( MASTER, 2 ) );
        events.add( roleForId( SLAVE, 3 ) );
        // and the expected outcome drops instance 1 entirely
        List<MemberIsAvailable> expected = new LinkedList<MemberIsAvailable>();
        expected.add( roleForId( MASTER, 2 ) );
        expected.add( roleForId( SLAVE, 3 ) );
        // WHEN/THEN
        eventsMatch( applyEvents( events ), expected );
    }

    /** Folds each event into an initially-empty snapshot, as the cluster client would. */
    private Iterable<MemberIsAvailable> applyEvents( List<MemberIsAvailable> events )
    {
        Iterable<MemberIsAvailable> result = new LinkedList<MemberIsAvailable>();
        for ( MemberIsAvailable event : events )
        {
            result = new HANewSnapshotFunction().apply( result, event );
        }
        return result;
    }

    /** Builds a MemberIsAvailable event for the given role and instance id. */
    private MemberIsAvailable roleForId( String role, int id )
    {
        return new MemberIsAvailable( role, new InstanceId( id ),
                URI.create( "cluster://" + id ), URI.create( "ha://" + id ) );
    }

    /** Asserts that result contains exactly the expected events, in order. */
    private void eventsMatch( Iterable<MemberIsAvailable> result, List<MemberIsAvailable> expected )
    {
        Iterator<MemberIsAvailable> iter = result.iterator();
        for ( int i = 0; i < expected.size(); i++ )
        {
            assertEquals( expected.get( i ), iter.next() );
        }
        assertFalse( iter.hasNext() );
    }
}
| false
|
enterprise_ha_src_test_java_org_neo4j_kernel_ha_cluster_HANewSnapshotFunctionTest.java
|
5,011
|
/**
 * Folds a new {@link MemberIsAvailable} event into an existing snapshot of
 * availability events, keeping the snapshot consistent:
 * <ul>
 * <li>A MASTER event removes the SLAVE entry for the same instance and every
 * existing MASTER and BACKUP entry (for any instance).</li>
 * <li>A SLAVE event removes every existing entry for the same instance.</li>
 * <li>Any other role is simply appended.</li>
 * </ul>
 */
public class HANewSnapshotFunction
        implements Function2<Iterable<MemberIsAvailable>, MemberIsAvailable, Iterable<MemberIsAvailable>>, Serializable
{
    @Override
    public Iterable<MemberIsAvailable> apply( Iterable<MemberIsAvailable> previousSnapshot, final MemberIsAvailable newMessage )
    {
        String newRole = newMessage.getRole();
        if ( newRole.equals( HighAvailabilityModeSwitcher.MASTER ) )
        {
            // Drop the SLAVE entry for this instance plus all MASTER/BACKUP entries.
            List<MemberIsAvailable> snapshot = new LinkedList<MemberIsAvailable>();
            for ( MemberIsAvailable event : previousSnapshot )
            {
                boolean slaveOfSameInstance = event.getInstanceId().equals( newMessage.getInstanceId() )
                        && event.getRole().equals( HighAvailabilityModeSwitcher.SLAVE );
                boolean masterOrBackup = event.getRole().equals( HighAvailabilityModeSwitcher.MASTER )
                        || event.getRole().equals( OnlineBackupKernelExtension.BACKUP );
                if ( !slaveOfSameInstance && !masterOrBackup )
                {
                    snapshot.add( event );
                }
            }
            snapshot.add( newMessage );
            return snapshot;
        }
        if ( newRole.equals( HighAvailabilityModeSwitcher.SLAVE ) )
        {
            // Drop every existing entry for this instance, whatever its role.
            List<MemberIsAvailable> snapshot = new LinkedList<MemberIsAvailable>();
            for ( MemberIsAvailable event : previousSnapshot )
            {
                if ( !event.getInstanceId().equals( newMessage.getInstanceId() ) )
                {
                    snapshot.add( event );
                }
            }
            snapshot.add( newMessage );
            return snapshot;
        }
        // Unknown role: keep everything and append the new event.
        return Iterables.append( newMessage, previousSnapshot );
    }
}
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_cluster_HANewSnapshotFunction.java
|
5,012
|
/**
 * Default implementation of {@link MasterImpl.SPI}: adapts master-side HA
 * requests (locks, transactions, token creation, id allocation, store
 * streaming) onto the services registered in the database's
 * {@link DependencyResolver}. Dependencies are looked up lazily per call via
 * {@link #resolve(Class)} rather than cached at construction.
 */
class DefaultMasterImplSPI implements MasterImpl.SPI
{
    // Number of ids handed out per allocation request from a slave.
    private static final int ID_GRAB_SIZE = 1000;
    private final DependencyResolver dependencyResolver;
    private final GraphDatabaseAPI graphDb;
    private final Logging logging;
    private final TransactionManager txManager;
    private final Monitors monitors;

    public DefaultMasterImplSPI( GraphDatabaseAPI graphDb, Logging logging,
                                 TransactionManager txManager, Monitors monitors )
    {
        this.graphDb = graphDb;
        this.logging = logging;
        this.txManager = txManager;
        this.dependencyResolver = graphDb.getDependencyResolver();
        this.monitors = monitors;
    }

    @Override
    public boolean isAccessible()
    {
        // Wait for 5s for the database to become available, if not already so
        return graphDb.isAvailable( 5000 );
    }

    @Override
    public void acquireLock( MasterImpl.LockGrabber grabber, Object... entities )
    {
        // Grabs each lock within the transaction state of the current server-side tx.
        LockManager lockManager = resolve( LockManager.class );
        AbstractTransactionManager dbTxManager = resolve( AbstractTransactionManager.class );
        TransactionState state = dbTxManager.getTransactionState();
        for ( Object entity : entities )
        {
            grabber.grab( lockManager, state, entity );
        }
    }

    @Override
    public Transaction beginTx() throws SystemException, NotSupportedException
    {
        txManager.begin();
        return txManager.getTransaction();
    }

    @Override
    public void finishTransaction( boolean success )
    {
        // Commits or rolls back the current transaction; any JTA exception is
        // rethrown as an unchecked exception.
        try
        {
            if ( success )
            {
                txManager.commit();
            }
            else
            {
                txManager.rollback();
            }
        }
        catch ( Exception e )
        {
            throw Exceptions.launderedException( e );
        }
    }

    @Override
    public void suspendTransaction() throws SystemException
    {
        txManager.suspend();
    }

    @Override
    public void resumeTransaction( Transaction transaction )
    {
        try
        {
            txManager.resume( transaction );
        }
        catch ( Exception e )
        {
            throw Exceptions.launderedException( e );
        }
    }

    @Override
    public GraphProperties graphProperties()
    {
        return resolve( NodeManager.class ).getGraphProperties();
    }

    @Override
    public int getOrCreateLabel( String name )
    {
        LabelTokenHolder labels = resolve( LabelTokenHolder.class );
        return labels.getOrCreateId( name );
    }

    @Override
    public int getOrCreateProperty( String name )
    {
        PropertyKeyTokenHolder propertyKeyHolder = resolve( PropertyKeyTokenHolder.class );
        return propertyKeyHolder.getOrCreateId( name );
    }

    @Override
    public IdAllocation allocateIds( IdType idType )
    {
        // Hands a slave a batch of ID_GRAB_SIZE ids plus the generator's current state.
        IdGenerator generator = resolve( IdGeneratorFactory.class ).get(idType);
        return new IdAllocation( generator.nextIdBatch( ID_GRAB_SIZE ), generator.getHighId(),
                generator.getDefragCount() );
    }

    @Override
    public StoreId storeId()
    {
        return graphDb.storeId();
    }

    @Override
    public long applyPreparedTransaction( String resource, ReadableByteChannel preparedTransaction ) throws IOException
    {
        XaDataSource dataSource = resolve( XaDataSourceManager.class ).getXaDataSource( resource );
        return dataSource.applyPreparedTransaction( preparedTransaction );
    }

    @Override
    public Integer createRelationshipType( String name )
    {
        return resolve(RelationshipTypeTokenHolder.class).getOrCreateId( name );
    }

    @Override
    public Pair<Integer, Long> getMasterIdForCommittedTx( long txId ) throws IOException
    {
        XaDataSource nioneoDataSource = resolve(XaDataSourceManager.class).getNeoStoreDataSource();
        return nioneoDataSource.getMasterForCommittedTx( txId );
    }

    @Override
    public RequestContext rotateLogsAndStreamStoreFiles( StoreWriter writer )
    {
        // Used for backup/store-copy: rotates logs and streams store files to the writer.
        XaDataSourceManager xaDataSourceManager = resolve( XaDataSourceManager.class );
        KernelPanicEventGenerator kernelPanicEventGenerator = resolve( KernelPanicEventGenerator.class );
        return ServerUtil.rotateLogsAndStreamStoreFiles(
                graphDb.getStoreDir(),
                xaDataSourceManager,
                kernelPanicEventGenerator,
                logging.getMessagesLog( MasterImpl.class ),
                true,
                writer,
                new DefaultFileSystemAbstraction(),
                monitors.newMonitor( BackupMonitor.class, getClass() ) );
    }

    @Override
    public Response<Void> copyTransactions( String dsName, long startTxId, long endTxId )
    {
        return ServerUtil.getTransactions( graphDb, dsName, startTxId, endTxId );
    }

    @Override
    public <T> Response<T> packResponse( RequestContext context, T response, Predicate<Long> filter )
    {
        XaDataSourceManager xaDataSourceManager = resolve( XaDataSourceManager.class );
        return ServerUtil.packResponse( storeId(), xaDataSourceManager, context, response, filter );
    }

    @Override
    public void pushTransaction( String resourceName, int eventIdentifier, long tx, int machineId )
    {
        // Notifies the tx id generator that a slave-originated tx was committed here.
        XaDataSourceManager xaDataSourceManager = resolve( XaDataSourceManager.class );
        TxIdGenerator txIdGenerator = resolve( TxIdGenerator.class );
        txIdGenerator.committed(
                xaDataSourceManager.getXaDataSource(resourceName),
                eventIdentifier,
                tx,
                machineId);
    }

    // Convenience: lazy dependency lookup from the database's resolver.
    private <T> T resolve( Class<T> dependencyType )
    {
        return dependencyResolver.resolveDependency( dependencyType );
    }
}
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_cluster_DefaultMasterImplSPI.java
|
5,013
|
/**
 * Produces election credentials for this cluster member: eligible members get
 * credentials built from their server id, last committed tx id, and whether
 * they currently are (or are becoming) master; ineligible members get
 * {@link NotElectableElectionCredentials}.
 * <p>
 * Fix: the member state was fetched three times per call (once for the
 * eligibility check, twice in the master check), so a concurrent state
 * transition could yield credentials built from inconsistent observations.
 * The state is now read exactly once per invocation.
 */
public class DefaultElectionCredentialsProvider
        implements ElectionCredentialsProvider
{
    private final int serverId;
    private final LastTxIdGetter lastTxIdGetter;
    private final HighAvailabilityMemberInfoProvider masterInfo;

    public DefaultElectionCredentialsProvider( int serverId, LastTxIdGetter lastTxIdGetter,
                                               HighAvailabilityMemberInfoProvider masterInfo )
    {
        this.serverId = serverId;
        this.lastTxIdGetter = lastTxIdGetter;
        this.masterInfo = masterInfo;
    }

    @Override
    public ElectionCredentials getCredentials( String role )
    {
        // Snapshot the state once so eligibility and the master flag agree.
        HighAvailabilityMemberState state = masterInfo.getHighAvailabilityMemberState();
        if ( state.isEligibleForElection() )
        {
            return new DefaultElectionCredentials( serverId, lastTxIdGetter.getLastTxId(),
                    isMasterOrToMaster( state ) );
        }
        return new NotElectableElectionCredentials();
    }

    /** True if the given state means this member is, or is switching to, master. */
    private boolean isMasterOrToMaster( HighAvailabilityMemberState state )
    {
        return state == HighAvailabilityMemberState.MASTER ||
                state == HighAvailabilityMemberState.TO_MASTER;
    }
}
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_cluster_DefaultElectionCredentialsProvider.java
|
5,014
|
/**
 * Election credentials ordered primarily by last committed tx id (higher wins),
 * then by being the current winner, then by server id (smaller id wins).
 * Externalizable so credentials can be shipped between cluster members.
 * <p>
 * Fix: {@code compareTo} used {@code Long.signum(a - b)} and
 * {@code Integer.signum(a - b)}, which overflow when the operands are far
 * apart and then report the wrong ordering, violating the {@link Comparable}
 * contract. Comparisons are now performed without subtraction.
 */
public final class DefaultElectionCredentials implements ElectionCredentials, Externalizable
{
    private int serverId;
    private long latestTxId;
    private boolean currentWinner;

    // For Externalizable
    public DefaultElectionCredentials()
    {}

    public DefaultElectionCredentials( int serverId, long latestTxId, boolean currentWinner )
    {
        this.serverId = serverId;
        this.latestTxId = latestTxId;
        this.currentWinner = currentWinner;
    }

    @Override
    public int compareTo( Object o )
    {
        DefaultElectionCredentials other = (DefaultElectionCredentials) o;
        if ( this.latestTxId != other.latestTxId )
        {
            // Higher tx id means higher priority. Overflow-safe comparison.
            return this.latestTxId < other.latestTxId ? -1 : 1;
        }
        if ( this.currentWinner != other.currentWinner )
        {
            // The incumbent winner loses ties deliberately? No: the incumbent
            // ranks *lower* here, matching the original semantics
            // (other.currentWinner ? -1 : 1).
            return other.currentWinner ? -1 : 1;
        }
        // Smaller id means higher priority. Overflow-safe comparison.
        if ( this.serverId == other.serverId )
        {
            return 0;
        }
        return other.serverId > this.serverId ? 1 : -1;
    }

    @Override
    public boolean equals( Object obj )
    {
        if ( obj == null )
        {
            return false;
        }
        if ( !(obj instanceof DefaultElectionCredentials ) )
        {
            return false;
        }
        DefaultElectionCredentials other = (DefaultElectionCredentials) obj;
        return other.serverId == this.serverId &&
                other.latestTxId == this.latestTxId &&
                other.currentWinner == this.currentWinner;
    }

    @Override
    public int hashCode()
    {
        return (int) ( 17 * serverId + latestTxId );
    }

    @Override
    public void writeExternal( ObjectOutput out ) throws IOException
    {
        out.writeInt( serverId );
        out.writeLong( latestTxId );
        out.writeBoolean( currentWinner );
    }

    @Override
    public void readExternal( ObjectInput in ) throws IOException, ClassNotFoundException
    {
        serverId = in.readInt();
        latestTxId = in.readLong();
        currentWinner = in.readBoolean();
    }

    @Override
    public String toString()
    {
        return "DefaultElectionCredentials[serverId="+serverId +
                ", latestTxId=" + latestTxId +
                ", currentWinner=" + currentWinner+"]";
    }
}
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_cluster_DefaultElectionCredentials.java
|
5,015
|
/**
 * Listens for HA member state changes and swaps the active delegate
 * implementation accordingly: a master implementation on TO_MASTER, a slave
 * implementation on TO_SLAVE, and nothing while PENDING. The previous
 * implementation's lifecycle is shut down before the new one is installed.
 * Relies on the enclosing switcher's {@code delegate}, {@code life},
 * {@code getMasterImpl()} and {@code getSlaveImpl(URI)}.
 */
private class DelegateStateSwitcher implements HighAvailabilityMemberListener
{
    // The implementation currently installed in the delegate, or null if none yet.
    private T current = null;

    @Override
    public void masterIsElected( HighAvailabilityMemberChangeEvent event )
    {
        stateChanged( event );
    }

    @Override
    public void masterIsAvailable( HighAvailabilityMemberChangeEvent event )
    {
        stateChanged( event );
    }

    @Override
    public void slaveIsAvailable( HighAvailabilityMemberChangeEvent event )
    {
        // Intentionally a no-op: slave availability does not change the delegate.
    }

    @Override
    public void instanceStops( HighAvailabilityMemberChangeEvent event )
    {
        stateChanged( event );
    }

    private void stateChanged( HighAvailabilityMemberChangeEvent event )
    {
        // Ignore events that do not actually change the state.
        if ( event.getNewState() == event.getOldState() )
        {
            return;
        }
        switch ( event.getNewState() )
        {
            case TO_MASTER:
                shutdownCurrent();
                delegate.setDelegate( current = life.add( getMasterImpl() ) );
                life.start();
                break;
            case TO_SLAVE:
                shutdownCurrent();
                delegate.setDelegate( current = life.add( getSlaveImpl( event.getServerHaUri() ) ) );
                life.start();
                break;
            case PENDING:
                shutdownCurrent();
                break;
            // NOTE(review): other states (e.g. MASTER/SLAVE) fall through with no
            // action — presumably intentional, but there is no default branch; confirm.
        }
    }

    private void shutdownCurrent()
    {
        if ( current != null )
        {
            // Shut down the whole LifeSupport and replace it, discarding the old instance.
            life.shutdown();
            life = new LifeSupport();
        }
    }
}
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_cluster_AbstractModeSwitcher.java
|
5,016
|
/**
 * Base class for components that swap their active implementation when this
 * HA member changes role: subclasses supply a master implementation via
 * {@link #getMasterImpl()} and a slave implementation via
 * {@link #getSlaveImpl(URI)}, and this class installs the right one into the
 * {@link DelegateInvocationHandler} as membership events arrive.
 * <p>
 * NOTE(review): the constructor registers a listener that captures
 * {@code this} before construction completes — events delivered during
 * construction would see a partially initialized object. Appears benign under
 * current usage, but worth confirming.
 */
public abstract class AbstractModeSwitcher<T> implements Lifecycle
{
    private final DelegateInvocationHandler<T> delegate;
    // Replaced wholesale on each role switch (see shutdownCurrent below).
    private LifeSupport life;
    private final HighAvailability highAvailability;
    private DelegateStateSwitcher delegateStateSwitcher;

    protected AbstractModeSwitcher( HighAvailability highAvailability, DelegateInvocationHandler<T> delegate )
    {
        this.delegate = delegate;
        this.life = new LifeSupport();
        this.highAvailability = highAvailability;
        highAvailability.addHighAvailabilityMemberListener( delegateStateSwitcher = new DelegateStateSwitcher() );
    }

    @Override
    public void init() throws Throwable
    {
        life.init();
    }

    @Override
    public void start() throws Throwable
    {
        life.start();
    }

    @Override
    public void stop() throws Throwable
    {
        // Deregister first-class: no further role switches after stop.
        life.stop();
        highAvailability.removeHighAvailabilityMemberListener( delegateStateSwitcher );
    }

    @Override
    public void shutdown() throws Throwable
    {
        life.shutdown();
    }

    /** Creates the implementation used while this member is a slave of the given master URI. */
    protected abstract T getSlaveImpl( URI serverHaUri );

    /** Creates the implementation used while this member is the master. */
    protected abstract T getMasterImpl();

    /**
     * Reacts to membership changes by shutting down the current implementation
     * and installing the one matching the new state.
     */
    private class DelegateStateSwitcher implements HighAvailabilityMemberListener
    {
        // The implementation currently installed in the delegate, or null if none yet.
        private T current = null;

        @Override
        public void masterIsElected( HighAvailabilityMemberChangeEvent event )
        {
            stateChanged( event );
        }

        @Override
        public void masterIsAvailable( HighAvailabilityMemberChangeEvent event )
        {
            stateChanged( event );
        }

        @Override
        public void slaveIsAvailable( HighAvailabilityMemberChangeEvent event )
        {
            // Intentionally a no-op: slave availability does not change the delegate.
        }

        @Override
        public void instanceStops( HighAvailabilityMemberChangeEvent event )
        {
            stateChanged( event );
        }

        private void stateChanged( HighAvailabilityMemberChangeEvent event )
        {
            // Ignore events that do not actually change the state.
            if ( event.getNewState() == event.getOldState() )
            {
                return;
            }
            switch ( event.getNewState() )
            {
                case TO_MASTER:
                    shutdownCurrent();
                    delegate.setDelegate( current = life.add( getMasterImpl() ) );
                    life.start();
                    break;
                case TO_SLAVE:
                    shutdownCurrent();
                    delegate.setDelegate( current = life.add( getSlaveImpl( event.getServerHaUri() ) ) );
                    life.start();
                    break;
                case PENDING:
                    shutdownCurrent();
                    break;
                // NOTE(review): remaining states intentionally take no action; no default branch.
            }
        }

        private void shutdownCurrent()
        {
            if ( current != null )
            {
                // Shut down the whole LifeSupport and replace it, discarding the old instance.
                life.shutdown();
                life = new LifeSupport();
            }
        }
    }
}
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_cluster_AbstractModeSwitcher.java
|
5,017
|
{
Map<InstanceId, URI> backupUris = new HashMap<InstanceId, URI>();
InstanceId master = null;
@Override
public void memberIsAvailable( String role, InstanceId clusterUri, URI roleUri )
{
if ( OnlineBackupKernelExtension.BACKUP.equals( role ) )
{
backupUris.put( clusterUri, roleUri );
}
else if ( HighAvailabilityModeSwitcher.MASTER.equals( role ) )
{
master = clusterUri;
}
if ( master != null && backupUris.containsKey( master ) )
{
backupUri.set( backupUris.get( master ) );
infoReceivedLatch.release();
}
}
/**
* Called when new master has been elected. The new master may not be available a.t.m.
* A call to {@link #memberIsAvailable} will confirm that the master given in
* the most recent {@link #coordinatorIsElected(org.neo4j.cluster.InstanceId)} call is up and running as
* master.
*
* @param coordinatorId the connection information to the master.
*/
@Override
public void coordinatorIsElected( InstanceId coordinatorId )
{
}
} );
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_backup_HaBackupProvider.java
|
5,018
|
/**
 * log4j {@link Configurator} used by the backup tool: silences everything
 * below ERROR on the root logger and sends what remains to System.err via a
 * plain {@link SimpleLayout} console appender. The URL parameter is ignored.
 */
public class BackupLoggerConfigurator implements Configurator
{
    // TODO: We've removed ZK. Is this still needed?
    @Override
    public void doConfigure( URL url, LoggerRepository repository )
    {
        repository.getRootLogger().setLevel( Level.ERROR );
        repository.getRootLogger().addAppender(
                new ConsoleAppender( new SimpleLayout(), "System.err" ) );
    }
}
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_backup_BackupLoggerConfigurator.java
|
5,019
|
/**
 * Verifies that purely read-only operations performed on a slave never cause
 * a transaction to be initialized on the master: a mock {@link MasterImpl.Monitor}
 * is attached to the master and must see no interactions while the slave reads
 * nodes, labels, relationships and properties.
 * <p>
 * Fix: {@code 0l} (lowercase long suffix, easily misread as {@code 01})
 * replaced with {@code 0L}.
 */
public class WhenToInitializeTransactionOnMasterFromSlaveIT
{
    @Rule
    public ClusterRule clusterRule = new ClusterRule( getClass() );

    private GraphDatabaseService slave;
    private ClusterManager.ManagedCluster cluster;
    private MasterImpl.Monitor masterMonitor = mock( MasterImpl.Monitor.class );

    @Before
    public void setUp() throws Exception
    {
        cluster = clusterRule.startCluster();
        slave = cluster.getAnySlave();
        // Create some basic data
        try ( Transaction tx = slave.beginTx() )
        {
            Node node = slave.createNode( DynamicLabel.label( "Person" ) );
            node.setProperty( "name", "Bob" );
            node.createRelationshipTo( slave.createNode(), DynamicRelationshipType.withName( "KNOWS" ) );
            tx.success();
        }
        // And now monitor the master for incoming calls
        cluster.getMaster().getDependencyResolver().resolveDependency( Monitors.class ).addMonitorListener( masterMonitor );
    }

    @Test
    public void shouldNotInitializeTxOnReadOnlyOpsOnNeoXaDS() throws Exception
    {
        long nodeId = 0L;
        try ( Transaction transaction = slave.beginTx() )
        {
            // When
            Node node = slave.getNodeById( nodeId );
            // Then
            assertDidntStartMasterTx();

            // When
            count( node.getLabels() );
            // Then
            assertDidntStartMasterTx();

            // When
            readAllRels( node );
            // Then
            assertDidntStartMasterTx();

            // When
            readEachProperty( node );
            // Then
            assertDidntStartMasterTx();

            transaction.success();
        }
        // Finally, committing a read-only tx must not have touched the master either.
        assertDidntStartMasterTx();
    }

    /** Asserts the master monitor saw no calls at all since the last check. */
    private void assertDidntStartMasterTx()
    {
        verifyNoMoreInteractions( masterMonitor );
    }

    /** Reads every relationship of the node, including all their properties. */
    private void readAllRels( Node node )
    {
        for ( Relationship relationship : node.getRelationships() )
        {
            readEachProperty( relationship );
        }
    }

    /** Reads every property of the entity, forcing property loading. */
    private void readEachProperty( PropertyContainer entity )
    {
        for ( String k : entity.getPropertyKeys() )
        {
            entity.getProperty( k );
        }
    }
}
| false
|
enterprise_ha_src_test_java_org_neo4j_kernel_ha_WhenToInitializeTransactionOnMasterFromSlaveIT.java
|
5,020
|
{
@Override
public void run()
{
if ( !pullUpdates )
{
return;
}
try
{
pullUpdates();
}
catch ( ComException e )
{
// Ignore
}
catch ( Exception e )
{
logger.logMessage( "Pull updates failed", e );
}
}
}, pullInterval, pullInterval, TimeUnit.MILLISECONDS );
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_UpdatePuller.java
|
5,021
|
/**
 * Periodically pulls transaction updates from the master while this instance
 * runs as a slave. The pull interval comes from {@link HaSettings#pull_interval};
 * an interval of 0 disables scheduled pulling. Pulling is gated by
 * {@link #start()}/{@link #stop()} via the {@code pullUpdates} flag.
 * <p>
 * Fixes:
 * <ul>
 * <li>{@code pullUpdates} is written by lifecycle threads and read by the
 * scheduler thread; it is now {@code volatile} so stop() is guaranteed to be
 * observed.</li>
 * <li>{@code shutdown()} now restores the interrupt flag when interrupted
 * while awaiting executor termination.</li>
 * </ul>
 */
public class UpdatePuller implements Lifecycle
{
    private final HaXaDataSourceManager xaDataSourceManager;
    private final Master master;
    private final RequestContextFactory requestContextFactory;
    private final AbstractTransactionManager txManager;
    private final AvailabilityGuard availabilityGuard;
    private final LastUpdateTime lastUpdateTime;
    private final Config config;
    private final StringLogger logger;
    // Volatile: toggled by lifecycle threads, read by the scheduled pull thread.
    private volatile boolean pullUpdates = false;
    private ScheduledThreadPoolExecutor updatePuller;

    public UpdatePuller( HaXaDataSourceManager xaDataSourceManager, Master master,
                         RequestContextFactory requestContextFactory, AbstractTransactionManager txManager,
                         AvailabilityGuard availabilityGuard, LastUpdateTime lastUpdateTime, Config config,
                         StringLogger logger )
    {
        this.xaDataSourceManager = xaDataSourceManager;
        this.master = master;
        this.requestContextFactory = requestContextFactory;
        this.txManager = txManager;
        this.availabilityGuard = availabilityGuard;
        this.lastUpdateTime = lastUpdateTime;
        this.config = config;
        this.logger = logger;
    }

    /**
     * Pulls and applies updates from the master, provided the database becomes
     * available within 5s. Always records the attempt time.
     */
    public void pullUpdates()
    {
        if ( availabilityGuard.isAvailable( 5000 ) )
        {
            xaDataSourceManager.applyTransactions(
                    master.pullUpdates( requestContextFactory.newRequestContext( txManager.getEventIdentifier() ) ) );
        }
        lastUpdateTime.setLastUpdateTime( System.currentTimeMillis() );
    }

    @Override
    public void init() throws Throwable
    {
        long pullInterval = config.get( HaSettings.pull_interval );
        // Schedule once: re-init must not stack a second task onto the executor.
        if ( pullInterval > 0 && updatePuller == null )
        {
            updatePuller = new ScheduledThreadPoolExecutor( 1 );
            updatePuller.scheduleWithFixedDelay( new Runnable()
            {
                @Override
                public void run()
                {
                    if ( !pullUpdates )
                    {
                        return;
                    }
                    try
                    {
                        pullUpdates();
                    }
                    catch ( ComException e )
                    {
                        // Ignore: transient communication failures are expected; retry next tick.
                    }
                    catch ( Exception e )
                    {
                        logger.logMessage( "Pull updates failed", e );
                    }
                }
            }, pullInterval, pullInterval, TimeUnit.MILLISECONDS );
        }
        // Pulling stays disabled until start().
        this.pullUpdates = false;
    }

    @Override
    public void start() throws Throwable
    {
        this.pullUpdates = true;
    }

    @Override
    public void stop() throws Throwable
    {
        this.pullUpdates = false;
    }

    @Override
    public void shutdown() throws Throwable
    {
        if ( updatePuller != null )
        {
            try
            {
                /*
                 * Be gentle, interrupting running threads could leave the
                 * file channels in a bad shape.
                 */
                this.updatePuller.shutdown();
                this.updatePuller.awaitTermination( 5, TimeUnit.SECONDS );
            }
            catch ( InterruptedException e )
            {
                // Restore the interrupt status so callers can observe the interruption.
                Thread.currentThread().interrupt();
                logger.logMessage(
                        "Got exception while waiting for update puller termination", e, true );
            }
        }
    }
}
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_UpdatePuller.java
|
5,022
|
/**
 * Expected last-transaction-id for one server. {@link #format} appends a
 * human-readable mismatch description to the failures buffer when the actual
 * tx id differs from the expected one.
 */
private static class LastTxMapping
{
    private final int serverId;
    private final long txId;

    public LastTxMapping( int serverId, long txId )
    {
        this.serverId = serverId;
        this.txId = txId;
    }

    /** Appends a mismatch message (comma-separated) if txId differs from the expectation. */
    public void format( StringBuilder failures, long txId )
    {
        if ( txId == this.txId )
        {
            return;
        }
        if ( failures.length() > 0 )
        {
            failures.append( ", " );
        }
        failures.append( String.format( "tx id on server:%d, expected [%d] but was [%d]",
                serverId, this.txId, txId ) );
    }
}
| false
|
enterprise_ha_src_test_java_org_neo4j_kernel_ha_TxPushStrategyConfigIT.java
|
5,023
|
{
@Override
public int compare( HighlyAvailableGraphDatabase o1, HighlyAvailableGraphDatabase o2 )
{
return cluster.getServerId( o1 ) - cluster.getServerId( o2 );
}
} );
| false
|
enterprise_ha_src_test_java_org_neo4j_kernel_ha_TxPushStrategyConfigIT.java
|
5,024
|
{
@Override
protected void config( GraphDatabaseBuilder builder, String clusterName, int serverId )
{
builder.setConfig( HaSettings.tx_push_factor, "" + pushFactor );
builder.setConfig( HaSettings.tx_push_strategy, pushStrategy );
}
} );
| false
|
enterprise_ha_src_test_java_org_neo4j_kernel_ha_TxPushStrategyConfigIT.java
|
5,025
|
public class TxPushStrategyConfigIT
{
@Test
public void shouldPushToSlavesInDescendingOrder() throws Exception
{
startCluster( 4, 2, "fixed" );
for ( int i = 0; i < 5; i++ )
{
createTransactionOnMaster();
assertLastTransactions( lastTx( THIRD_SLAVE, 2 + i ) );
assertLastTransactions( lastTx( SECOND_SLAVE, 2 + i ) );
assertLastTransactions( lastTx( FIRST_SLAVE, 1 ) );
}
}
@Test
public void twoRoundRobin() throws Exception
{
startCluster( 5, 2, "round_robin" );
createTransactionOnMaster();
assertLastTransactions( lastTx( FIRST_SLAVE, 2 ), lastTx( SECOND_SLAVE, 2 ), lastTx( THIRD_SLAVE, 1 ),
lastTx( FOURTH_SLAVE, 1 ) );
createTransactionOnMaster();
assertLastTransactions( lastTx( FIRST_SLAVE, 2 ), lastTx( SECOND_SLAVE, 3 ), lastTx( THIRD_SLAVE, 3 ), lastTx( FOURTH_SLAVE, 1 ) );
createTransactionOnMaster();
assertLastTransactions( lastTx( FIRST_SLAVE, 2 ), lastTx( SECOND_SLAVE, 3 ), lastTx( THIRD_SLAVE, 4 ), lastTx( FOURTH_SLAVE, 4 ) );
createTransactionOnMaster();
assertLastTransactions( lastTx( FIRST_SLAVE, 5 ), lastTx( SECOND_SLAVE, 3 ), lastTx( THIRD_SLAVE, 4 ), lastTx( FOURTH_SLAVE, 5 ) );
}
@Test
public void shouldPushToOneLessSlaveOnSlaveCommit() throws Exception
{
startCluster( 4, 2, "fixed" );
createTransactionOn( FIRST_SLAVE );
assertLastTransactions( lastTx( MASTER, 2 ), lastTx( FIRST_SLAVE, 2 ), lastTx( SECOND_SLAVE, 1 ), lastTx( THIRD_SLAVE, 2 ) );
createTransactionOn( SECOND_SLAVE );
assertLastTransactions( lastTx( MASTER, 3 ), lastTx( FIRST_SLAVE, 2 ), lastTx( SECOND_SLAVE, 3 ), lastTx( THIRD_SLAVE, 3 ) );
createTransactionOn( THIRD_SLAVE );
assertLastTransactions( lastTx( MASTER, 4 ), lastTx( FIRST_SLAVE, 2 ), lastTx( SECOND_SLAVE, 4 ), lastTx( THIRD_SLAVE, 4 ) );
}
@Test
public void slavesListGetsUpdatedWhenSlaveLeavesNicely() throws Exception
{
startCluster( 3, 1, "fixed" );
cluster.shutdown( cluster.getAnySlave() );
cluster.await( masterSeesSlavesAsAvailable( 1 ) );
}
@Test
public void slaveListIsCorrectAfterMasterSwitch() throws Exception
{
startCluster( 3, 1, "fixed" );
cluster.shutdown( cluster.getMaster() );
cluster.await( masterAvailable() );
HighlyAvailableGraphDatabase newMaster = cluster.getMaster();
cluster.await( masterSeesSlavesAsAvailable( 1 ) );
createTransaction( newMaster );
assertLastTransactions( lastTx( FIRST_SLAVE, 2 ), lastTx( SECOND_SLAVE, 2 ) );
}
@Test
public void slavesListGetsUpdatedWhenSlaveRageQuits() throws Throwable
{
startCluster( 3, 1, "fixed" );
cluster.fail( cluster.getAnySlave() );
cluster.await( masterSeesSlavesAsAvailable( 1 ) );
}
private final LifeSupport life = new LifeSupport();
private ManagedCluster cluster;
private TargetDirectory dir;
@Rule
public TestName name = new TestName();
/**
* These are _indexes_ of cluster members in machineIds
*/
private static int MASTER = 1;
private static int FIRST_SLAVE = 2;
private static int SECOND_SLAVE = 3;
private static int THIRD_SLAVE = 4;
private static int FOURTH_SLAVE = 5;
private int[] machineIds;
@Before
public void before() throws Exception
{
dir = TargetDirectory.forTest( getClass() );
}
@After
public void after() throws Exception
{
life.shutdown();
}
private void startCluster( int memberCount, final int pushFactor, final String pushStrategy )
{
ClusterManager clusterManager = life.add( new ClusterManager( clusterOfSize( memberCount ), dir.cleanDirectory(
name.getMethodName() ), stringMap() )
{
@Override
protected void config( GraphDatabaseBuilder builder, String clusterName, int serverId )
{
builder.setConfig( HaSettings.tx_push_factor, "" + pushFactor );
builder.setConfig( HaSettings.tx_push_strategy, pushStrategy );
}
} );
life.start();
cluster = clusterManager.getDefaultCluster();
cluster.await( allSeesAllAsAvailable() );
mapMachineIds();
}
private void mapMachineIds()
{
machineIds = new int[cluster.size()];
machineIds[0] = cluster.getServerId( cluster.getMaster() );
List<HighlyAvailableGraphDatabase> slaves = new ArrayList<HighlyAvailableGraphDatabase>();
for ( HighlyAvailableGraphDatabase hadb : cluster.getAllMembers() )
{
if ( !hadb.isMaster() )
{
slaves.add( hadb );
}
}
Collections.sort( slaves, new Comparator<HighlyAvailableGraphDatabase>()
{
@Override
public int compare( HighlyAvailableGraphDatabase o1, HighlyAvailableGraphDatabase o2 )
{
return cluster.getServerId( o1 ) - cluster.getServerId( o2 );
}
} );
Iterator<HighlyAvailableGraphDatabase> iter = slaves.iterator();
for ( int i = 1; iter.hasNext(); i++ )
{
machineIds[i] = cluster.getServerId( iter.next() );
}
}
private void assertLastTransactions( LastTxMapping... transactionMappings )
{
StringBuilder failures = new StringBuilder();
for ( LastTxMapping mapping : transactionMappings )
{
GraphDatabaseAPI db = cluster.getMemberByServerId( mapping.serverId );
mapping.format( failures, getLastTx( db ) );
}
assertTrue( failures.toString(), failures.length() == 0 );
}
private long getLastTx( GraphDatabaseAPI db )
{
return db.getDependencyResolver().resolveDependency( XaDataSourceManager.class )
.getXaDataSource( DEFAULT_DATA_SOURCE_NAME ).getLastCommittedTxId();
}
private LastTxMapping lastTx( int serverIndex, long txId )
{
int serverId = machineIds[serverIndex - 1];
return new LastTxMapping( serverId, txId );
}
private static class LastTxMapping
{
private final int serverId;
private final long txId;
public LastTxMapping( int serverId, long txId )
{
this.serverId = serverId;
this.txId = txId;
}
public void format( StringBuilder failures, long txId )
{
if ( txId != this.txId )
{
if ( failures.length() > 0 )
failures.append( ", " );
failures.append( String.format( "tx id on server:%d, expected [%d] but was [%d]",
serverId, this.txId, txId ) );
}
}
}
private void createTransactionOnMaster()
{
createTransaction( cluster.getMaster() );
}
private void createTransactionOn( int serverIndex )
{
int serverId = machineIds[serverIndex-1];
createTransaction( cluster.getMemberByServerId( serverId ) );
}
private void createTransaction( GraphDatabaseAPI db )
{
try (Transaction tx = db.beginTx())
{
db.createNode();
tx.success();
}
catch ( RuntimeException e )
{
e.printStackTrace();
throw e;
}
}
}
| false
|
enterprise_ha_src_test_java_org_neo4j_kernel_ha_TxPushStrategyConfigIT.java
|
5,026
|
{
@Override
public void run()
{
try
{
latch.await();
}
catch ( InterruptedException e )
{
throw new RuntimeException( e );
}
Transaction tx = db.beginTx();
try
{
db.createNode().setProperty( key, "yes" );
tx.success();
}
finally
{
tx.finish();
}
}
};
| false
|
enterprise_ha_src_test_java_org_neo4j_kernel_ha_TestUniqueKeys.java
|
5,027
|
{
@Override
public void run()
{
try
{
latch.await();
}
catch ( InterruptedException e )
{
throw new RuntimeException( e );
}
Transaction tx = db.beginTx();
try
{
db.createNode().createRelationshipTo( db.createNode(), relType );
tx.success();
}
finally
{
tx.finish();
}
}
};
| false
|
enterprise_ha_src_test_java_org_neo4j_kernel_ha_TestUniqueKeys.java
|
5,028
|
public class TestUniqueKeys extends AbstractClusterTest
{
@Test
public void bruteForceCreateSameRelationshipTypeOnDifferentSlaveAtTheSameTimeShouldYieldSameId() throws Exception
{
// Get a hold of all the slaves in there
List<HighlyAvailableGraphDatabase> slaves = new ArrayList<HighlyAvailableGraphDatabase>();
for ( int i = 0; i < cluster.size()-1; i++ )
slaves.add( cluster.getAnySlave( slaves.toArray( new HighlyAvailableGraphDatabase[0] ) ) );
for ( int i = 0; i < 10; i++ )
{
final RelationshipType relType = DynamicRelationshipType.withName( "Rel" + i );
final CountDownLatch latch = new CountDownLatch( 1 );
List<Thread> threads = new ArrayList<Thread>();
for ( HighlyAvailableGraphDatabase slave : slaves )
{
final GraphDatabaseAPI db = slave;
Thread thread = new Thread()
{
@Override
public void run()
{
try
{
latch.await();
}
catch ( InterruptedException e )
{
throw new RuntimeException( e );
}
Transaction tx = db.beginTx();
try
{
db.createNode().createRelationshipTo( db.createNode(), relType );
tx.success();
}
finally
{
tx.finish();
}
}
};
thread.start();
threads.add( thread );
}
latch.countDown();
for ( Thread thread : threads )
{
thread.join();
}
// Verify so that the relationship type on all the machines has got the same id
int highestId = 0;
for ( GraphDatabaseAPI db : cluster.getAllMembers() )
{
RelationshipTypeTokenHolder holder = db.getDependencyResolver()
.resolveDependency( RelationshipTypeTokenHolder.class );
highestId = highestIdOf( holder, highestId );
Set<String> types = new HashSet<>();
for ( int j = 0; j <= highestId; j++ )
{
RelationshipType type = holder.getTokenById( j );
if ( type != null )
{
assertTrue( type.name() + " already existed for " + db, types.add( type.name() ) );
}
}
}
}
}
@Test
public void bruteForceCreateSamePropertyKeyOnDifferentSlaveAtTheSameTimeShouldYieldSameId() throws Exception
{
// Get a hold of all the slaves in there
List<HighlyAvailableGraphDatabase> slaves = new ArrayList<>();
for ( int i = 0; i < cluster.size()-1; i++ )
slaves.add( cluster.getAnySlave( slaves.toArray( new HighlyAvailableGraphDatabase[0] ) ) );
for ( int i = 0; i < 10; i++ )
{
final String key = "Key" + i;
final CountDownLatch latch = new CountDownLatch( 1 );
List<Thread> threads = new ArrayList<Thread>();
for ( HighlyAvailableGraphDatabase slave : slaves )
{
final GraphDatabaseAPI db = slave;
Thread thread = new Thread()
{
@Override
public void run()
{
try
{
latch.await();
}
catch ( InterruptedException e )
{
throw new RuntimeException( e );
}
Transaction tx = db.beginTx();
try
{
db.createNode().setProperty( key, "yes" );
tx.success();
}
finally
{
tx.finish();
}
}
};
thread.start();
threads.add( thread );
}
latch.countDown();
for ( Thread thread : threads )
{
thread.join();
}
// Verify so that the property keys on all the machines has got the same id
int highestId = 0;
for ( GraphDatabaseAPI db : cluster.getAllMembers() )
{
TokenHolder<Token> holder = db.getDependencyResolver().resolveDependency( PropertyKeyTokenHolder.class );
highestId = highestIdOf( holder, highestId );
Set<String> types = new HashSet<String>();
for ( int j = 0; j <= highestId; j++ )
{
Token type = holder.getTokenById( j );
if ( type != null )
{
assertTrue( type.name() + " already existed for " + db, types.add( type.name() ) );
}
}
}
}
}
private ManagedCluster cluster;
@Before
public void getCluster() throws Exception
{
cluster = clusterManager.getDefaultCluster();
cluster.await( masterAvailable() );
}
private <KEY extends Token> int highestIdOf( TokenHolder<KEY> holder, int high ) throws TokenNotFoundException
{
for ( Token type : holder.getAllTokens() )
high = Math.max( type.id(), high );
return high;
}
}
| false
|
enterprise_ha_src_test_java_org_neo4j_kernel_ha_TestUniqueKeys.java
|
5,029
|
{
@Override
@SuppressWarnings("boxing")
public Integer read( ChannelBuffer buffer, ByteBuffer temporaryBuffer ) throws IOException
{
return buffer.readInt();
}
} );
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_MasterClient201.java
|
5,030
|
{
@Override
@SuppressWarnings("boxing")
public Integer read( ChannelBuffer buffer, ByteBuffer temporaryBuffer ) throws IOException
{
return buffer.readInt();
}
} );
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_MasterClient201.java
|
5,031
|
{
@Override
public void write( ChannelBuffer buffer ) throws IOException
{
writeString( buffer, resource );
buffer.writeLong( upToAndIncludingTxId );
}
}, Protocol.VOID_DESERIALIZER );
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_com_master_SlaveClient.java
|
5,032
|
{
@Override
public void write( ChannelBuffer buffer )
throws IOException
{
writeString( buffer, ds );
buffer.writeLong( startTxId );
buffer.writeLong( endTxId );
}
}, VOID_DESERIALIZER );
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_MasterClient20.java
|
5,033
|
{
@Override
public void write( ChannelBuffer buffer ) throws IOException
{
buffer.writeLong( txId );
}
}, new Deserializer<HandshakeResult>()
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_MasterClient20.java
|
5,034
|
{
@Override
public void write( ChannelBuffer buffer ) throws IOException
{
buffer.writeByte( success ? 1 : 0 );
}
}, VOID_DESERIALIZER );
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_MasterClient20.java
|
5,035
|
{
@Override
@SuppressWarnings("boxing")
public Long read( ChannelBuffer buffer, ByteBuffer temporaryBuffer ) throws IOException
{
return buffer.readLong();
}
}
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_MasterClient20.java
|
5,036
|
{
@Override
public void write( ChannelBuffer buffer ) throws IOException
{
buffer.writeByte( idType.ordinal() );
}
}, new Deserializer<IdAllocation>()
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_MasterClient20.java
|
5,037
|
public class MasterClient20 extends Client<Master> implements MasterClient
{
/* Version 1 first version
* Version 2 since 2012-01-24
* Version 3 since 2012-02-16
* Version 4 since 2012-07-05 */
public static final byte PROTOCOL_VERSION = 5;
private final long lockReadTimeout;
private final ByteCounterMonitor monitor;
public MasterClient20( String hostNameOrIp, int port, Logging logging, Monitors monitors, StoreId storeId,
long readTimeoutSeconds, long lockReadTimeout, int maxConcurrentChannels, int chunkSize )
{
super( hostNameOrIp, port, logging, monitors, storeId, MasterServer.FRAME_LENGTH, PROTOCOL_VERSION,
readTimeoutSeconds, maxConcurrentChannels, chunkSize );
this.lockReadTimeout = lockReadTimeout;
this.monitor = monitors.newMonitor( ByteCounterMonitor.class, getClass() );
}
public MasterClient20( URI masterUri, Logging logging, Monitors monitors, StoreId storeId, Config config )
{
this( masterUri.getHost(), masterUri.getPort(), logging, monitors, storeId,
config.get( HaSettings.read_timeout ),
config.get( HaSettings.lock_read_timeout ),
config.get( HaSettings.max_concurrent_channels_per_slave ),
config.get( HaSettings.com_chunk_size ).intValue() );
}
@Override
protected long getReadTimeout( RequestType<Master> type, long readTimeout )
{
HaRequestType20 specificType = (HaRequestType20) type;
if ( specificType.isLock() )
{
return lockReadTimeout;
}
if ( specificType == HaRequestType20.COPY_STORE )
{
return readTimeout * 2;
}
return readTimeout;
}
@Override
protected boolean shouldCheckStoreId( RequestType<Master> type )
{
return type != HaRequestType20.COPY_STORE;
}
@Override
public Response<IdAllocation> allocateIds( RequestContext context, final IdType idType )
{
return sendRequest( HaRequestType20.ALLOCATE_IDS, context, new Serializer()
{
@Override
public void write( ChannelBuffer buffer ) throws IOException
{
buffer.writeByte( idType.ordinal() );
}
}, new Deserializer<IdAllocation>()
{
@Override
public IdAllocation read( ChannelBuffer buffer, ByteBuffer temporaryBuffer )
{
return readIdAllocation( buffer );
}
}
);
}
@Override
public Response<Integer> createRelationshipType( RequestContext context, final String name )
{
return sendRequest( HaRequestType20.CREATE_RELATIONSHIP_TYPE, context, new Serializer()
{
@Override
public void write( ChannelBuffer buffer ) throws IOException
{
writeString( buffer, name );
}
}, new Deserializer<Integer>()
{
@Override
@SuppressWarnings("boxing")
public Integer read( ChannelBuffer buffer, ByteBuffer temporaryBuffer ) throws IOException
{
return buffer.readInt();
}
} );
}
@Override
public Response<Integer> createPropertyKey( RequestContext context, final String name )
{
return sendRequest( HaRequestType20.CREATE_PROPERTY_KEY, context, new Serializer()
{
@Override
public void write( ChannelBuffer buffer ) throws IOException
{
writeString( buffer, name );
}
}, new Deserializer<Integer>()
{
@Override
@SuppressWarnings("boxing")
public Integer read( ChannelBuffer buffer, ByteBuffer temporaryBuffer ) throws IOException
{
return buffer.readInt();
}
} );
}
@Override
public Response<Integer> createLabel( RequestContext context, final String name )
{
return sendRequest( HaRequestType20.CREATE_LABEL, context, new Serializer()
{
@Override
public void write( ChannelBuffer buffer ) throws IOException
{
writeString( buffer, name );
}
}, new Deserializer<Integer>()
{
@Override
@SuppressWarnings("boxing")
public Integer read( ChannelBuffer buffer, ByteBuffer temporaryBuffer ) throws IOException
{
return buffer.readInt();
}
});
}
@Override
public Response<Void> initializeTx( RequestContext context )
{
return sendRequest( HaRequestType20.INITIALIZE_TX, context, EMPTY_SERIALIZER, VOID_DESERIALIZER );
}
@Override
public Response<LockResult> acquireNodeWriteLock( RequestContext context, long... nodes )
{
return sendRequest( HaRequestType20.ACQUIRE_NODE_WRITE_LOCK, context,
new AcquireLockSerializer( nodes ), LOCK_RESULT_DESERIALIZER );
}
@Override
public Response<LockResult> acquireNodeReadLock( RequestContext context, long... nodes )
{
return sendRequest( HaRequestType20.ACQUIRE_NODE_READ_LOCK, context,
new AcquireLockSerializer( nodes ), LOCK_RESULT_DESERIALIZER );
}
@Override
public Response<LockResult> acquireRelationshipWriteLock( RequestContext context,
long... relationships )
{
return sendRequest( HaRequestType20.ACQUIRE_RELATIONSHIP_WRITE_LOCK, context,
new AcquireLockSerializer( relationships ), LOCK_RESULT_DESERIALIZER );
}
@Override
public Response<LockResult> acquireRelationshipReadLock( RequestContext context,
long... relationships )
{
return sendRequest( HaRequestType20.ACQUIRE_RELATIONSHIP_READ_LOCK, context,
new AcquireLockSerializer( relationships ), LOCK_RESULT_DESERIALIZER );
}
@Override
public Response<LockResult> acquireGraphWriteLock( RequestContext context )
{
return sendRequest( HaRequestType20.ACQUIRE_GRAPH_WRITE_LOCK, context,
EMPTY_SERIALIZER, LOCK_RESULT_DESERIALIZER );
}
@Override
public Response<LockResult> acquireGraphReadLock( RequestContext context )
{
return sendRequest( HaRequestType20.ACQUIRE_GRAPH_READ_LOCK, context,
EMPTY_SERIALIZER, LOCK_RESULT_DESERIALIZER );
}
@Override
public Response<LockResult> acquireIndexReadLock( RequestContext context, String index, String key )
{
return sendRequest( HaRequestType20.ACQUIRE_INDEX_READ_LOCK, context,
new AcquireIndexLockSerializer( index, key ), LOCK_RESULT_DESERIALIZER );
}
@Override
public Response<LockResult> acquireIndexWriteLock( RequestContext context, String index, String key )
{
return sendRequest( HaRequestType20.ACQUIRE_INDEX_WRITE_LOCK, context,
new AcquireIndexLockSerializer( index, key ), LOCK_RESULT_DESERIALIZER );
}
@Override
public Response<LockResult> acquireSchemaReadLock( RequestContext context )
{
return sendRequest( HaRequestType20.ACQUIRE_SCHEMA_READ_LOCK, context,
EMPTY_SERIALIZER, LOCK_RESULT_DESERIALIZER );
}
@Override
public Response<LockResult> acquireSchemaWriteLock( RequestContext context )
{
return sendRequest( HaRequestType20.ACQUIRE_SCHEMA_WRITE_LOCK, context,
EMPTY_SERIALIZER, LOCK_RESULT_DESERIALIZER );
}
@Override
public Response<LockResult> acquireIndexEntryWriteLock( RequestContext context, long labelId, long propertyKeyId,
String propertyValue )
{
return sendRequest( HaRequestType20.ACQUIRE_INDEX_ENTRY_WRITE_LOCK, context,
new AcquireIndexEntryLockSerializer( labelId, propertyKeyId, propertyValue ),
LOCK_RESULT_DESERIALIZER );
}
@Override
public Response<Long> commitSingleResourceTransaction( RequestContext context,
final String resource, final TxExtractor txGetter )
{
return sendRequest( HaRequestType20.COMMIT, context, new Serializer()
{
@Override
public void write( ChannelBuffer buffer ) throws IOException
{
writeString( buffer, resource );
BlockLogBuffer blockLogBuffer = new BlockLogBuffer( buffer, monitor );
txGetter.extract( blockLogBuffer );
blockLogBuffer.done();
}
}, new Deserializer<Long>()
{
@Override
@SuppressWarnings("boxing")
public Long read( ChannelBuffer buffer, ByteBuffer temporaryBuffer ) throws IOException
{
return buffer.readLong();
}
}
);
}
@Override
public Response<Void> finishTransaction( RequestContext context, final boolean success )
{
try
{
return sendRequest( HaRequestType20.FINISH, context, new Serializer()
{
@Override
public void write( ChannelBuffer buffer ) throws IOException
{
buffer.writeByte( success ? 1 : 0 );
}
}, VOID_DESERIALIZER );
}
catch ( TransactionAlreadyActiveException e )
{
if ( !success )
{
/* Here we are in a state where the client failed while the request
* was processing on the server and the tx.finish() in the usual
* try-finally transaction block gets called, only to find that
* the transaction is already active... which is totally expected.
* The fact that the transaction is already active here shouldn't
* hide the original exception on the client, the exception which
* cause the client to fail while the request was processing on the master.
* This is effectively the use case of awaiting a lock that isn't granted
* within the lock read timeout period.
*/
return new Response<>( null, getStoreId(), TransactionStream.EMPTY, ResourceReleaser.NO_OP );
}
throw e;
}
}
@Override
public void rollbackOngoingTransactions( RequestContext context )
{
throw new UnsupportedOperationException( "Should never be called from the client side" );
}
@Override
public Response<Void> pullUpdates( RequestContext context )
{
return sendRequest( HaRequestType20.PULL_UPDATES, context, EMPTY_SERIALIZER, VOID_DESERIALIZER );
}
@Override
public Response<HandshakeResult> handshake( final long txId, StoreId storeId )
{
return sendRequest( HaRequestType20.HANDSHAKE, RequestContext.EMPTY, new Serializer()
{
@Override
public void write( ChannelBuffer buffer ) throws IOException
{
buffer.writeLong( txId );
}
}, new Deserializer<HandshakeResult>()
{
@Override
public HandshakeResult read( ChannelBuffer buffer, ByteBuffer temporaryBuffer ) throws
IOException
{
return new HandshakeResult( buffer.readInt(), buffer.readLong(), -1 );
}
}, storeId
);
}
@Override
public Response<Void> copyStore( RequestContext context, final StoreWriter writer )
{
context = stripFromTransactions( context );
return sendRequest( HaRequestType20.COPY_STORE, context, EMPTY_SERIALIZER,
new Protocol.FileStreamsDeserializer( writer ) );
}
private RequestContext stripFromTransactions( RequestContext context )
{
return new RequestContext( context.getEpoch(), context.machineId(), context.getEventIdentifier(),
new RequestContext.Tx[0], context.getMasterId(), context.getChecksum() );
}
@Override
public Response<Void> copyTransactions( RequestContext context,
final String ds, final long startTxId, final long endTxId )
{
context = stripFromTransactions( context );
return sendRequest( HaRequestType20.COPY_TRANSACTIONS, context, new Serializer()
{
@Override
public void write( ChannelBuffer buffer )
throws IOException
{
writeString( buffer, ds );
buffer.writeLong( startTxId );
buffer.writeLong( endTxId );
}
}, VOID_DESERIALIZER );
}
@Override
public Response<Void> pushTransaction( RequestContext context, final String resourceName, final long tx )
{
context = stripFromTransactions( context );
return sendRequest( HaRequestType20.PUSH_TRANSACTION, context, new Serializer()
{
@Override
public void write( ChannelBuffer buffer ) throws IOException
{
writeString( buffer, resourceName );
buffer.writeLong( tx );
}
}, VOID_DESERIALIZER );
}
protected static IdAllocation readIdAllocation( ChannelBuffer buffer )
{
int numberOfDefragIds = buffer.readInt();
long[] defragIds = new long[numberOfDefragIds];
for ( int i = 0; i < numberOfDefragIds; i++ )
{
defragIds[i] = buffer.readLong();
}
long rangeStart = buffer.readLong();
int rangeLength = buffer.readInt();
long highId = buffer.readLong();
long defragCount = buffer.readLong();
return new IdAllocation( new IdRange( defragIds, rangeStart, rangeLength ),
highId, defragCount );
}
protected static class AcquireLockSerializer implements Serializer
{
private final long[] entities;
AcquireLockSerializer( long... entities )
{
this.entities = entities;
}
@Override
public void write( ChannelBuffer buffer ) throws IOException
{
buffer.writeInt( entities.length );
for ( long entity : entities )
{
buffer.writeLong( entity );
}
}
}
protected static class AcquireIndexLockSerializer implements Serializer
{
private final String index;
private final String key;
AcquireIndexLockSerializer( String index, String key )
{
this.index = index;
this.key = key;
}
@Override
public void write( ChannelBuffer buffer ) throws IOException
{
writeString( buffer, index );
writeString( buffer, key );
}
}
protected static class AcquireIndexEntryLockSerializer implements Serializer
{
private final long labelId;
private final long propertyKeyId;
private final String value;
AcquireIndexEntryLockSerializer( long labelId, long propertyKeyId, String value )
{
this.labelId = labelId;
this.propertyKeyId = propertyKeyId;
this.value = value;
}
@Override
public void write( ChannelBuffer buffer ) throws IOException
{
buffer.writeLong( labelId );
buffer.writeLong( propertyKeyId );
writeString( buffer, value );
}
}
}
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_MasterClient20.java
|
5,038
|
public class LastUpdateTime
{
private long lastUpdateTime;
public LastUpdateTime()
{
lastUpdateTime = 0;
}
public long getLastUpdateTime()
{
return lastUpdateTime;
}
public void setLastUpdateTime( long lastUpdateTime )
{
this.lastUpdateTime = lastUpdateTime;
}
}
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_LastUpdateTime.java
|
5,039
|
public class LabelTokenCreatorModeSwitcher extends AbstractModeSwitcher<TokenCreator>
{
private final HaXaDataSourceManager xaDsm;
private final DelegateInvocationHandler<Master> master;
private final RequestContextFactory requestContextFactory;
private final Logging logging;
public LabelTokenCreatorModeSwitcher( HighAvailabilityMemberStateMachine stateMachine,
DelegateInvocationHandler<TokenCreator> delegate,
HaXaDataSourceManager xaDsm,
DelegateInvocationHandler<Master> master,
RequestContextFactory requestContextFactory, Logging logging
)
{
super( stateMachine, delegate );
this.xaDsm = xaDsm;
this.master = master;
this.requestContextFactory = requestContextFactory;
this.logging = logging;
}
@Override
protected TokenCreator getMasterImpl()
{
return new DefaultLabelIdCreator( logging );
}
@Override
protected TokenCreator getSlaveImpl( URI serverHaUri )
{
return new SlaveLabelTokenCreator( master.cement(), requestContextFactory, xaDsm );
}
}
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_LabelTokenCreatorModeSwitcher.java
|
5,040
|
private class StartupWaiter extends LifecycleAdapter
{
@Override
public void start() throws Throwable
{
availabilityGuard.isAvailable( stateSwitchTimeoutMillis );
}
}
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_HighlyAvailableGraphDatabase.java
|
5,041
|
{
@Override
public <T> T resolveDependency( Class<T> type, SelectionStrategy selector )
{
T result;
try
{
result = dependencyResolver.resolveDependency( type, selector );
}
catch ( IllegalArgumentException e )
{
if ( ClusterMemberEvents.class.isAssignableFrom( type ) )
{
result = type.cast( clusterEvents );
}
else if ( ClusterMemberAvailability.class.isAssignableFrom( type ) )
{
result = type.cast( clusterMemberAvailability );
}
else if ( UpdatePuller.class.isAssignableFrom( type ) )
{
result = type.cast( updatePuller );
}
else if ( Slaves.class.isAssignableFrom( type ) )
{
result = type.cast( slaves );
}
else if ( ClusterClient.class.isAssignableFrom( type ) )
{
result = type.cast( clusterClient );
}
else if ( BindingNotifier.class.isAssignableFrom( type ) )
{
result = type.cast( clusterClient );
}
else if ( ClusterMembers.class.isAssignableFrom( type ) )
{
result = type.cast( members );
}
else if ( RequestContextFactory.class.isAssignableFrom( type ) )
{
result = type.cast( requestContextFactory );
}
else
{
throw e;
}
}
return selector.select( type, option( result ) );
}
};
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_HighlyAvailableGraphDatabase.java
|
5,042
|
{
@Override
public byte[] newInstance()
{
return getNewGlobalId( DEFAULT_SEED, serverId );
}
};
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_HighlyAvailableGraphDatabase.java
|
5,043
|
{
@Override
public RequestContextFactory get()
{
return requestContextFactory;
}
}, logging.getMessagesLog( TxHookModeSwitcher.class ), dependencyResolver );
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_HighlyAvailableGraphDatabase.java
|
5,044
|
{
boolean hasRequestedElection = true; // This ensures that the election result is (at least) from our
// request or thereafter
@Override
public void enteredCluster( ClusterConfiguration clusterConfiguration )
{
clusterClient.performRoleElections();
}
@Override
public void elected( String role, InstanceId instanceId, URI electedMember )
{
if ( hasRequestedElection && role.equals( ClusterConfiguration.COORDINATOR ) )
{
clusterClient.removeClusterListener( this );
}
}
} );
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_HighlyAvailableGraphDatabase.java
|
5,045
|
{
@Override
public boolean accept( PaxosClusterMemberEvents.ClusterMembersSnapshot item )
{
for ( MemberIsAvailable member : item.getCurrentAvailableMembers() )
{
if ( member.getRoleUri().getScheme().equals( "ha" ) )
{
if ( HighAvailabilityModeSwitcher.getServerId( member.getRoleUri() ) ==
config.get( ClusterSettings.server_id ) )
{
msgLog.error( String.format( "Instance %s has the same serverId as ours (%d) - will not " +
"join this cluster",
member.getRoleUri(), config.get( ClusterSettings.server_id ) ) );
return true;
}
}
}
return true;
}
}, new HANewSnapshotFunction(), objectStreamFactory, objectStreamFactory );
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_HighlyAvailableGraphDatabase.java
|
5,046
|
{
@Override
public TransactionState create( Transaction tx )
{
return new WritableTransactionState( snapshot( lockManager ),
nodeManager, logging, tx, snapshot( txHook ),
snapshot( txIdGenerator ) );
}
};
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_HighlyAvailableGraphDatabase.java
|
5,047
|
public class HighAvailabilityDiagnostics
implements DiagnosticsProvider
{
private final HighAvailabilityMemberStateMachine memberStateMachine;
private final ClusterClient clusterClient;
public HighAvailabilityDiagnostics( HighAvailabilityMemberStateMachine memberStateMachine,
ClusterClient clusterClient )
{
this.memberStateMachine = memberStateMachine;
this.clusterClient = clusterClient;
}
@Override
public String getDiagnosticsIdentifier()
{
return getClass().getSimpleName();
}
@Override
public void acceptDiagnosticsVisitor( Object visitor )
{
}
@Override
public void dump( DiagnosticsPhase phase, StringLogger log )
{
StringBuilder builder = new StringBuilder();
builder.append( "High Availability diagnostics\n" ).
append( "Member state:" ).append( memberStateMachine.getCurrentState().name() ).append( "\n" ).
append( "State machines:\n" );
clusterClient.dumpDiagnostics( builder );
log.logMessage( builder.toString() );
}
}
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_HighAvailabilityDiagnostics.java
|
5,048
|
{
@Override
public void receive( Payload value )
{
try
{
Object event = new AtomicBroadcastSerializer(new ObjectStreamFactory(), new ObjectStreamFactory()).receive( value );
if ( event instanceof MemberIsAvailable )
{
if ( HighAvailabilityModeSwitcher.MASTER.equals( ((MemberIsAvailable) event).getRole() ) )
{
newMasterAvailableLatch.countDown();
}
}
}
catch ( Exception e )
{
fail( e.toString() );
}
}
} );
| false
|
enterprise_ha_src_test_java_org_neo4j_kernel_ha_HardKillIT.java
|
5,049
|
public class HardKillIT
{
private static final File path = TargetDirectory.forTest( HardKillIT.class ).makeGraphDbDir();
private ProcessStreamHandler processHandler;
@Test
public void testMasterSwitchHappensOnKillMinus9() throws Exception
{
Process proc = null;
HighlyAvailableGraphDatabase dbWithId2 = null, dbWithId3 = null, oldMaster = null;
try
{
proc = run( "1" );
Thread.sleep( 12000 );
dbWithId2 = startDb( 2 );
dbWithId3 = startDb( 3 );
assertTrue( !dbWithId2.isMaster() );
assertTrue( !dbWithId3.isMaster() );
final CountDownLatch newMasterAvailableLatch = new CountDownLatch( 1 );
dbWithId2.getDependencyResolver().resolveDependency( ClusterClient.class ).addAtomicBroadcastListener(
new AtomicBroadcastListener()
{
@Override
public void receive( Payload value )
{
try
{
Object event = new AtomicBroadcastSerializer(new ObjectStreamFactory(), new ObjectStreamFactory()).receive( value );
if ( event instanceof MemberIsAvailable )
{
if ( HighAvailabilityModeSwitcher.MASTER.equals( ((MemberIsAvailable) event).getRole() ) )
{
newMasterAvailableLatch.countDown();
}
}
}
catch ( Exception e )
{
fail( e.toString() );
}
}
} );
proc.destroy();
proc = null;
newMasterAvailableLatch.await( 60, SECONDS );
assertTrue( dbWithId2.isMaster() );
assertTrue( !dbWithId3.isMaster() );
// Ensure that everyone has marked the killed instance as failed, otherwise it cannot rejoin
Thread.sleep(15000);
oldMaster = startDb( 1 );
long oldMasterNode = createNamedNode( oldMaster, "Old master" );
assertEquals( oldMasterNode, getNamedNode( dbWithId2, "Old master" ) );
}
finally
{
if ( proc != null )
{
proc.destroy();
}
if ( oldMaster != null )
{
oldMaster.shutdown();
}
dbWithId2.shutdown();
dbWithId3.shutdown();
}
}
private long getNamedNode( HighlyAvailableGraphDatabase db, String name )
{
Transaction transaction = db.beginTx();
try
{
for ( Node node : GlobalGraphOperations.at( db ).getAllNodes() )
{
if ( name.equals( node.getProperty( "name", null ) ) )
{
return node.getId();
}
}
fail( "Couldn't find named node '" + name + "' at " + db );
// The lone above will prevent this return from happening
return -1;
}
finally
{
transaction.finish();
}
}
private long createNamedNode( HighlyAvailableGraphDatabase db, String name )
{
Transaction tx = db.beginTx();
try
{
Node node = db.createNode();
node.setProperty( "name", name );
tx.success();
return node.getId();
}
finally
{
tx.finish();
}
}
private Process run( String machineId ) throws IOException
{
List<String> allArgs = new ArrayList<String>( Arrays.asList( "java", "-cp",
System.getProperty( "java.class.path" ), HardKillIT.class.getName() ) );
allArgs.add( machineId );
Process process = Runtime.getRuntime().exec( allArgs.toArray( new String[allArgs.size()] ) );
processHandler = new ProcessStreamHandler( process, false );
processHandler.launch();
return process;
}
/*
* Used to launch the master instance
*/
public static void main( String[] args )
{
int machineId = Integer.parseInt( args[0] );
HighlyAvailableGraphDatabase db = startDb( machineId );
}
private static HighlyAvailableGraphDatabase startDb( int serverId )
{
GraphDatabaseBuilder builder = new HighlyAvailableGraphDatabaseFactory()
.newHighlyAvailableDatabaseBuilder( path( serverId ) )
.setConfig( ClusterSettings.initial_hosts, "127.0.0.1:5002,127.0.0.1:5003" )
.setConfig( ClusterSettings.cluster_server, "127.0.0.1:" + (5001 + serverId) )
.setConfig( ClusterSettings.server_id, "" + serverId )
.setConfig( HaSettings.ha_server, ":" + (8001 + serverId) )
.setConfig( HaSettings.tx_push_factor, "0" );
HighlyAvailableGraphDatabase db = (HighlyAvailableGraphDatabase) builder.newGraphDatabase();
Transaction tx = db.beginTx();
tx.finish();
try
{
Thread.sleep( 2000 );
}
catch ( InterruptedException e )
{
throw new RuntimeException( e );
}
return db;
}
private static String path( int i )
{
return new File( path, "" + i ).getAbsolutePath();
}
}
| false
|
enterprise_ha_src_test_java_org_neo4j_kernel_ha_HardKillIT.java
|
5,050
|
public class HaXaDataSourceManager extends XaDataSourceManager
{
public HaXaDataSourceManager( StringLogger msgLog )
{
super( msgLog );
}
public <T> T applyTransactions( Response<T> response )
{
return applyTransactions( response, ServerUtil.NO_ACTION );
}
public <T> T applyTransactions( Response<T> response, ServerUtil.TxHandler txHandler )
{
try
{
for ( Triplet<String, Long, TxExtractor> tx : IteratorUtil.asIterable( response.transactions() ) )
{
String resourceName = tx.first();
XaDataSource dataSource = getXaDataSource( resourceName );
txHandler.accept( tx, dataSource );
ReadableByteChannel txStream = tx.third().extract();
try
{
dataSource.applyCommittedTransaction( tx.second(), txStream );
}
finally
{
txStream.close();
}
}
txHandler.done();
}
catch (Exception e)
{
throw new RuntimeException( e );
}
finally
{
response.close();
}
return response.response();
}
}
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_HaXaDataSourceManager.java
|
5,051
|
public class HaSettings
{
@Migrator
public static final ConfigurationMigrator migrator = new EnterpriseConfigurationMigrator();
@Description( "How long a slave will wait for response from master before giving up." )
public static final Setting<Long> read_timeout = setting( "ha.read_timeout", DURATION, "20s" );
@Description( "Timeout for waiting for instance to become master or slave." )
public static final Setting<Long> state_switch_timeout = setting( "ha.state_switch_timeout", DURATION, "120s" );
@Description( "Timeout for taking remote (write) locks on slaves. Defaults to ha.read_timeout." )
public static final Setting<Long> lock_read_timeout = setting( "ha.lock_read_timeout", DURATION, read_timeout );
@Description( "Maximum number of connections a slave can have to the master." )
public static final Setting<Integer> max_concurrent_channels_per_slave =
setting( "ha.max_concurrent_channels_per_slave", INTEGER, "20", min( 1 ) );
@Description( "Hostname and port to bind the HA server." )
public static final Setting<HostnamePort> ha_server = setting( "ha.server", HOSTNAME_PORT, "0.0.0.0:6001-6011" );
@Description("Whether this instance should only participate as slave in cluster. If set to true, it will never be elected as master.")
public static final Setting<Boolean> slave_only = setting( "ha.slave_only", BOOLEAN, Settings.FALSE );
@Description( "Policy for how to handle branched data." )
public static final Setting<BranchedDataPolicy> branched_data_policy = setting( "ha.branched_data_policy",
options( BranchedDataPolicy.class ), "keep_all" );
@Description( "Max size of the data chunks that flows between master and slaves in HA. Bigger size may increase " +
"throughput, but may be more sensitive to variations in bandwidth, whereas lower size increases tolerance" +
" for bandwidth variations." )
public static final Setting<Long> com_chunk_size =
setting( "ha.com_chunk_size", BYTES, "2M", min( 1024L ) );
@Description( "Interval of pulling updates from master." )
public static final Setting<Long> pull_interval = setting( "ha.pull_interval", DURATION, "0s" );
@Description( "The amount of slaves the master will ask to replicate a committed transaction. " )
public static final Setting<Integer> tx_push_factor = setting( "ha.tx_push_factor", INTEGER, "1", min( 0 ) );
@Description( "Push strategy of a transaction to a slave during commit." )
public static final Setting<TxPushStrategy> tx_push_strategy = setting( "ha.tx_push_strategy", options(
TxPushStrategy.class ), "fixed" );
public static enum TxPushStrategy
{
@Description("Round robin")
round_robin,
@Description("Fixed")
fixed
}
}
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_HaSettings.java
|
5,052
|
{
@Override
public HandshakeResult read( ChannelBuffer buffer, ByteBuffer temporaryBuffer ) throws
IOException
{
return new HandshakeResult( buffer.readInt(), buffer.readLong(), -1 );
}
}, storeId
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_MasterClient20.java
|
5,053
|
{
@Override
public void write( ChannelBuffer buffer ) throws IOException
{
writeString( buffer, resourceName );
buffer.writeLong( tx );
}
}, VOID_DESERIALIZER );
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_MasterClient20.java
|
5,054
|
{
@Override
public void write( ChannelBuffer buffer ) throws IOException
{
writeString( buffer, name );
}
}, new Deserializer<Integer>()
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_MasterClient201.java
|
5,055
|
{
@Override
public IdAllocation read( ChannelBuffer buffer, ByteBuffer temporaryBuffer )
{
return readIdAllocation( buffer );
}
}
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_MasterClient20.java
|
5,056
|
{
@Override
public IdAllocation read( ChannelBuffer buffer, ByteBuffer temporaryBuffer )
{
return readIdAllocation( buffer );
}
}
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_MasterClient201.java
|
5,057
|
{
@Override
public void write( ChannelBuffer buffer ) throws IOException
{
writeString( buffer, resourceName );
buffer.writeLong( tx );
}
}, VOID_DESERIALIZER );
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_MasterClient201.java
|
5,058
|
{
@Override
public void write( ChannelBuffer buffer )
throws IOException
{
writeString( buffer, ds );
buffer.writeLong( startTxId );
buffer.writeLong( endTxId );
}
}, VOID_DESERIALIZER );
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_MasterClient201.java
|
5,059
|
{
@Override
public HandshakeResult read( ChannelBuffer buffer, ByteBuffer temporaryBuffer ) throws
IOException
{
return new HandshakeResult( buffer.readInt(), buffer.readLong(), buffer.readLong() );
}
}, storeId
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_MasterClient201.java
|
5,060
|
{
@Override
public void write( ChannelBuffer buffer ) throws IOException
{
buffer.writeLong( txId );
}
}, new Deserializer<HandshakeResult>()
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_MasterClient201.java
|
5,061
|
{
@Override
public void write( ChannelBuffer buffer ) throws IOException
{
buffer.writeByte( success ? 1 : 0 );
}
}, VOID_DESERIALIZER );
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_MasterClient201.java
|
5,062
|
{
@Override
@SuppressWarnings("boxing")
public Long read( ChannelBuffer buffer, ByteBuffer temporaryBuffer ) throws IOException
{
return buffer.readLong();
}
}
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_MasterClient201.java
|
5,063
|
{
@Override
public void write( ChannelBuffer buffer ) throws IOException
{
buffer.writeByte( idType.ordinal() );
}
}, new Deserializer<IdAllocation>()
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_MasterClient201.java
|
5,064
|
public class MasterClient201 extends Client<Master> implements MasterClient
{
/* Version 1 first version
* Version 2 since 2012-01-24
* Version 3 since 2012-02-16
* Version 4 since 2012-07-05
* Version 5 since ?
* Version 6 since 2014-01-07
*/
public static final byte PROTOCOL_VERSION = 6;
private final long lockReadTimeout;
private final ByteCounterMonitor monitor;
public MasterClient201( String hostNameOrIp, int port, Logging logging, Monitors monitors, StoreId storeId,
long readTimeoutSeconds, long lockReadTimeout, int maxConcurrentChannels, int chunkSize )
{
super( hostNameOrIp, port, logging, monitors, storeId, MasterServer.FRAME_LENGTH, PROTOCOL_VERSION,
readTimeoutSeconds, maxConcurrentChannels, chunkSize );
this.lockReadTimeout = lockReadTimeout;
this.monitor = monitors.newMonitor( ByteCounterMonitor.class, getClass() );
}
public MasterClient201( URI masterUri, Logging logging, Monitors monitors, StoreId storeId, Config config )
{
this( masterUri.getHost(), masterUri.getPort(), logging, monitors, storeId,
config.get( HaSettings.read_timeout ),
config.get( HaSettings.lock_read_timeout ),
config.get( HaSettings.max_concurrent_channels_per_slave ),
config.get( HaSettings.com_chunk_size ).intValue() );
}
@Override
protected long getReadTimeout( RequestType<Master> type, long readTimeout )
{
HaRequestType201 specificType = (HaRequestType201) type;
if ( specificType.isLock() )
{
return lockReadTimeout;
}
if ( specificType == HaRequestType201.COPY_STORE )
{
return readTimeout * 2;
}
return readTimeout;
}
@Override
protected boolean shouldCheckStoreId( RequestType<Master> type )
{
return type != HaRequestType201.COPY_STORE;
}
@Override
public Response<IdAllocation> allocateIds( RequestContext context, final IdType idType )
{
return sendRequest( HaRequestType201.ALLOCATE_IDS, context, new Serializer()
{
@Override
public void write( ChannelBuffer buffer ) throws IOException
{
buffer.writeByte( idType.ordinal() );
}
}, new Deserializer<IdAllocation>()
{
@Override
public IdAllocation read( ChannelBuffer buffer, ByteBuffer temporaryBuffer )
{
return readIdAllocation( buffer );
}
}
);
}
@Override
public Response<Integer> createRelationshipType( RequestContext context, final String name )
{
return sendRequest( HaRequestType201.CREATE_RELATIONSHIP_TYPE, context, new Serializer()
{
@Override
public void write( ChannelBuffer buffer ) throws IOException
{
writeString( buffer, name );
}
}, new Deserializer<Integer>()
{
@Override
@SuppressWarnings("boxing")
public Integer read( ChannelBuffer buffer, ByteBuffer temporaryBuffer ) throws IOException
{
return buffer.readInt();
}
} );
}
@Override
public Response<Integer> createPropertyKey( RequestContext context, final String name )
{
return sendRequest( HaRequestType201.CREATE_PROPERTY_KEY, context, new Serializer()
{
@Override
public void write( ChannelBuffer buffer ) throws IOException
{
writeString( buffer, name );
}
}, new Deserializer<Integer>()
{
@Override
@SuppressWarnings("boxing")
public Integer read( ChannelBuffer buffer, ByteBuffer temporaryBuffer ) throws IOException
{
return buffer.readInt();
}
} );
}
@Override
public Response<Integer> createLabel( RequestContext context, final String name )
{
return sendRequest( HaRequestType201.CREATE_LABEL, context, new Serializer()
{
@Override
public void write( ChannelBuffer buffer ) throws IOException
{
writeString( buffer, name );
}
}, new Deserializer<Integer>()
{
@Override
@SuppressWarnings("boxing")
public Integer read( ChannelBuffer buffer, ByteBuffer temporaryBuffer ) throws IOException
{
return buffer.readInt();
}
});
}
@Override
public Response<Void> initializeTx( RequestContext context )
{
return sendRequest( HaRequestType201.INITIALIZE_TX, context, EMPTY_SERIALIZER, VOID_DESERIALIZER );
}
@Override
public Response<LockResult> acquireNodeWriteLock( RequestContext context, long... nodes )
{
return sendRequest( HaRequestType201.ACQUIRE_NODE_WRITE_LOCK, context,
new AcquireLockSerializer( nodes ), LOCK_RESULT_DESERIALIZER );
}
@Override
public Response<LockResult> acquireNodeReadLock( RequestContext context, long... nodes )
{
return sendRequest( HaRequestType201.ACQUIRE_NODE_READ_LOCK, context,
new AcquireLockSerializer( nodes ), LOCK_RESULT_DESERIALIZER );
}
@Override
public Response<LockResult> acquireRelationshipWriteLock( RequestContext context,
long... relationships )
{
return sendRequest( HaRequestType201.ACQUIRE_RELATIONSHIP_WRITE_LOCK, context,
new AcquireLockSerializer( relationships ), LOCK_RESULT_DESERIALIZER );
}
@Override
public Response<LockResult> acquireRelationshipReadLock( RequestContext context,
long... relationships )
{
return sendRequest( HaRequestType201.ACQUIRE_RELATIONSHIP_READ_LOCK, context,
new AcquireLockSerializer( relationships ), LOCK_RESULT_DESERIALIZER );
}
@Override
public Response<LockResult> acquireGraphWriteLock( RequestContext context )
{
return sendRequest( HaRequestType201.ACQUIRE_GRAPH_WRITE_LOCK, context,
EMPTY_SERIALIZER, LOCK_RESULT_DESERIALIZER );
}
@Override
public Response<LockResult> acquireGraphReadLock( RequestContext context )
{
return sendRequest( HaRequestType201.ACQUIRE_GRAPH_READ_LOCK, context,
EMPTY_SERIALIZER, LOCK_RESULT_DESERIALIZER );
}
@Override
public Response<LockResult> acquireIndexReadLock( RequestContext context, String index, String key )
{
return sendRequest( HaRequestType201.ACQUIRE_INDEX_READ_LOCK, context,
new AcquireIndexLockSerializer( index, key ), LOCK_RESULT_DESERIALIZER );
}
@Override
public Response<LockResult> acquireIndexWriteLock( RequestContext context, String index, String key )
{
return sendRequest( HaRequestType201.ACQUIRE_INDEX_WRITE_LOCK, context,
new AcquireIndexLockSerializer( index, key ), LOCK_RESULT_DESERIALIZER );
}
@Override
public Response<LockResult> acquireSchemaReadLock( RequestContext context )
{
return sendRequest( HaRequestType201.ACQUIRE_SCHEMA_READ_LOCK, context,
EMPTY_SERIALIZER, LOCK_RESULT_DESERIALIZER );
}
@Override
public Response<LockResult> acquireSchemaWriteLock( RequestContext context )
{
return sendRequest( HaRequestType201.ACQUIRE_SCHEMA_WRITE_LOCK, context,
EMPTY_SERIALIZER, LOCK_RESULT_DESERIALIZER );
}
@Override
public Response<LockResult> acquireIndexEntryWriteLock( RequestContext context, long labelId, long propertyKeyId,
String propertyValue )
{
return sendRequest( HaRequestType201.ACQUIRE_INDEX_ENTRY_WRITE_LOCK, context,
new AcquireIndexEntryLockSerializer( labelId, propertyKeyId, propertyValue ),
LOCK_RESULT_DESERIALIZER );
}
@Override
public Response<Long> commitSingleResourceTransaction( RequestContext context,
final String resource, final TxExtractor txGetter )
{
return sendRequest( HaRequestType201.COMMIT, context, new Serializer()
{
@Override
public void write( ChannelBuffer buffer ) throws IOException
{
writeString( buffer, resource );
BlockLogBuffer blockLogBuffer = new BlockLogBuffer( buffer, monitor );
txGetter.extract( blockLogBuffer );
blockLogBuffer.done();
}
}, new Deserializer<Long>()
{
@Override
@SuppressWarnings("boxing")
public Long read( ChannelBuffer buffer, ByteBuffer temporaryBuffer ) throws IOException
{
return buffer.readLong();
}
}
);
}
@Override
public Response<Void> finishTransaction( RequestContext context, final boolean success )
{
try
{
return sendRequest( HaRequestType201.FINISH, context, new Serializer()
{
@Override
public void write( ChannelBuffer buffer ) throws IOException
{
buffer.writeByte( success ? 1 : 0 );
}
}, VOID_DESERIALIZER );
}
catch ( TransactionAlreadyActiveException e )
{
if ( !success )
{
/* Here we are in a state where the client failed while the request
* was processing on the server and the tx.finish() in the usual
* try-finally transaction block gets called, only to find that
* the transaction is already active... which is totally expected.
* The fact that the transaction is already active here shouldn't
* hide the original exception on the client, the exception which
* cause the client to fail while the request was processing on the master.
* This is effectively the use case of awaiting a lock that isn't granted
* within the lock read timeout period.
*/
return new Response<>( null, getStoreId(), TransactionStream.EMPTY, ResourceReleaser.NO_OP );
}
throw e;
}
}
@Override
public void rollbackOngoingTransactions( RequestContext context )
{
throw new UnsupportedOperationException( "Should never be called from the client side" );
}
@Override
public Response<Void> pullUpdates( RequestContext context )
{
return sendRequest( HaRequestType201.PULL_UPDATES, context, EMPTY_SERIALIZER, VOID_DESERIALIZER );
}
@Override
public Response<HandshakeResult> handshake( final long txId, StoreId storeId )
{
return sendRequest( HaRequestType201.HANDSHAKE, RequestContext.EMPTY, new Serializer()
{
@Override
public void write( ChannelBuffer buffer ) throws IOException
{
buffer.writeLong( txId );
}
}, new Deserializer<HandshakeResult>()
{
@Override
public HandshakeResult read( ChannelBuffer buffer, ByteBuffer temporaryBuffer ) throws
IOException
{
return new HandshakeResult( buffer.readInt(), buffer.readLong(), buffer.readLong() );
}
}, storeId
);
}
@Override
public Response<Void> copyStore( RequestContext context, final StoreWriter writer )
{
context = stripFromTransactions( context );
return sendRequest( HaRequestType201.COPY_STORE, context, EMPTY_SERIALIZER,
new Protocol.FileStreamsDeserializer( writer ) );
}
private RequestContext stripFromTransactions( RequestContext context )
{
return new RequestContext( context.getEpoch(), context.machineId(), context.getEventIdentifier(),
new RequestContext.Tx[0], context.getMasterId(), context.getChecksum() );
}
@Override
public Response<Void> copyTransactions( RequestContext context,
final String ds, final long startTxId, final long endTxId )
{
context = stripFromTransactions( context );
return sendRequest( HaRequestType201.COPY_TRANSACTIONS, context, new Serializer()
{
@Override
public void write( ChannelBuffer buffer )
throws IOException
{
writeString( buffer, ds );
buffer.writeLong( startTxId );
buffer.writeLong( endTxId );
}
}, VOID_DESERIALIZER );
}
@Override
public Response<Void> pushTransaction( RequestContext context, final String resourceName, final long tx )
{
context = stripFromTransactions( context );
return sendRequest( HaRequestType201.PUSH_TRANSACTION, context, new Serializer()
{
@Override
public void write( ChannelBuffer buffer ) throws IOException
{
writeString( buffer, resourceName );
buffer.writeLong( tx );
}
}, VOID_DESERIALIZER );
}
protected static IdAllocation readIdAllocation( ChannelBuffer buffer )
{
int numberOfDefragIds = buffer.readInt();
long[] defragIds = new long[numberOfDefragIds];
for ( int i = 0; i < numberOfDefragIds; i++ )
{
defragIds[i] = buffer.readLong();
}
long rangeStart = buffer.readLong();
int rangeLength = buffer.readInt();
long highId = buffer.readLong();
long defragCount = buffer.readLong();
return new IdAllocation( new IdRange( defragIds, rangeStart, rangeLength ),
highId, defragCount );
}
protected static class AcquireLockSerializer implements Serializer
{
private final long[] entities;
AcquireLockSerializer( long... entities )
{
this.entities = entities;
}
@Override
public void write( ChannelBuffer buffer ) throws IOException
{
buffer.writeInt( entities.length );
for ( long entity : entities )
{
buffer.writeLong( entity );
}
}
}
protected static class AcquireIndexLockSerializer implements Serializer
{
private final String index;
private final String key;
AcquireIndexLockSerializer( String index, String key )
{
this.index = index;
this.key = key;
}
@Override
public void write( ChannelBuffer buffer ) throws IOException
{
writeString( buffer, index );
writeString( buffer, key );
}
}
protected static class AcquireIndexEntryLockSerializer implements Serializer
{
private final long labelId;
private final long propertyKeyId;
private final String value;
AcquireIndexEntryLockSerializer( long labelId, long propertyKeyId, String value )
{
this.labelId = labelId;
this.propertyKeyId = propertyKeyId;
this.value = value;
}
@Override
public void write( ChannelBuffer buffer ) throws IOException
{
buffer.writeLong( labelId );
buffer.writeLong( propertyKeyId );
writeString( buffer, value );
}
}
}
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_MasterClient201.java
|
5,065
|
protected static class AcquireLockSerializer implements Serializer
{
private final long[] entities;
AcquireLockSerializer( long... entities )
{
this.entities = entities;
}
@Override
public void write( ChannelBuffer buffer ) throws IOException
{
buffer.writeInt( entities.length );
for ( long entity : entities )
{
buffer.writeLong( entity );
}
}
}
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_MasterClient20.java
|
5,066
|
protected static class AcquireIndexLockSerializer implements Serializer
{
private final String index;
private final String key;
AcquireIndexLockSerializer( String index, String key )
{
this.index = index;
this.key = key;
}
@Override
public void write( ChannelBuffer buffer ) throws IOException
{
writeString( buffer, index );
writeString( buffer, key );
}
}
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_MasterClient20.java
|
5,067
|
protected static class AcquireIndexEntryLockSerializer implements Serializer
{
private final long labelId;
private final long propertyKeyId;
private final String value;
AcquireIndexEntryLockSerializer( long labelId, long propertyKeyId, String value )
{
this.labelId = labelId;
this.propertyKeyId = propertyKeyId;
this.value = value;
}
@Override
public void write( ChannelBuffer buffer ) throws IOException
{
buffer.writeLong( labelId );
buffer.writeLong( propertyKeyId );
writeString( buffer, value );
}
}
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_MasterClient20.java
|
5,068
|
{
@Override
public void write( ChannelBuffer buffer ) throws IOException
{
writeString( buffer, resource );
BlockLogBuffer blockLogBuffer = new BlockLogBuffer( buffer, monitor );
txGetter.extract( blockLogBuffer );
blockLogBuffer.done();
}
}, new Deserializer<Long>()
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_MasterClient20.java
|
5,069
|
{
@Override
@SuppressWarnings("boxing")
public Integer read( ChannelBuffer buffer, ByteBuffer temporaryBuffer ) throws IOException
{
return buffer.readInt();
}
});
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_MasterClient20.java
|
5,070
|
{
@Override
public void write( ChannelBuffer buffer ) throws IOException
{
writeString( buffer, name );
}
}, new Deserializer<Integer>()
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_MasterClient20.java
|
5,071
|
{
@Override
@SuppressWarnings("boxing")
public Integer read( ChannelBuffer buffer, ByteBuffer temporaryBuffer ) throws IOException
{
return buffer.readInt();
}
} );
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_MasterClient20.java
|
5,072
|
{
@Override
public void write( ChannelBuffer buffer ) throws IOException
{
writeString( buffer, name );
}
}, new Deserializer<Integer>()
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_MasterClient20.java
|
5,073
|
{
@Override
@SuppressWarnings("boxing")
public Integer read( ChannelBuffer buffer, ByteBuffer temporaryBuffer ) throws IOException
{
return buffer.readInt();
}
} );
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_MasterClient20.java
|
5,074
|
{
@Override
public void write( ChannelBuffer buffer ) throws IOException
{
writeString( buffer, name );
}
}, new Deserializer<Integer>()
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_MasterClient20.java
|
5,075
|
public class HighAvailabilityMemberChangeEvent
{
private final HighAvailabilityMemberState oldState;
private final HighAvailabilityMemberState newState;
private final InstanceId instanceId;
private final URI serverHaUri;
public HighAvailabilityMemberChangeEvent( HighAvailabilityMemberState oldState,
HighAvailabilityMemberState newState,
InstanceId instanceId, URI serverHaUri )
{
this.oldState = oldState;
this.newState = newState;
this.instanceId = instanceId;
this.serverHaUri = serverHaUri;
}
public HighAvailabilityMemberState getOldState()
{
return oldState;
}
public HighAvailabilityMemberState getNewState()
{
return newState;
}
public InstanceId getInstanceId()
{
return instanceId;
}
public URI getServerHaUri()
{
return serverHaUri;
}
@Override
public String toString()
{
return "HA Member State Event[ old state: "+oldState+", new state: "+newState+", server cluster URI: "+
instanceId +", server HA URI: "+serverHaUri+"]";
}
}
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_cluster_HighAvailabilityMemberChangeEvent.java
|
5,076
|
public static class Adapter implements HighAvailabilityMemberListener
{
@Override
public void masterIsElected( HighAvailabilityMemberChangeEvent event )
{
}
@Override
public void masterIsAvailable( HighAvailabilityMemberChangeEvent event )
{
}
@Override
public void slaveIsAvailable( HighAvailabilityMemberChangeEvent event )
{
}
@Override
public void instanceStops( HighAvailabilityMemberChangeEvent event )
{
}
}
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_cluster_HighAvailabilityMemberListener.java
|
5,077
|
PENDING
{
@Override
public HighAvailabilityMemberState masterIsElected( HighAvailabilityMemberContext context,
InstanceId masterId )
{
assert context.getAvailableHaMaster() == null;
if ( masterId.equals( context.getMyId() ) )
{
return TO_MASTER;
}
return PENDING;
}
@Override
public HighAvailabilityMemberState masterIsAvailable( HighAvailabilityMemberContext context,
InstanceId masterId, URI masterHaURI )
{
// assert context.getAvailableMaster() == null;
if ( masterId.equals( context.getMyId() ) )
{
throw new RuntimeException( "Received a MasterIsAvailable event for my InstanceId while in" +
" PENDING state" );
}
return TO_SLAVE;
}
@Override
public HighAvailabilityMemberState slaveIsAvailable( HighAvailabilityMemberContext context,
InstanceId slaveId,
URI slaveUri )
{
if ( slaveId.equals( context.getMyId() ) )
{
throw new RuntimeException( "Cannot go from pending to slave" );
}
return this;
}
@Override
public boolean isEligibleForElection()
{
return true;
}
@Override
public boolean isAccessAllowed()
{
return false;
}
},
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_cluster_HighAvailabilityMemberState.java
|
5,078
|
{
@Override
public Response<Void> call( Master master, RequestContext context, ChannelBuffer input,
ChannelBuffer target )
{
return master.pushTransaction( context, readString( input ), input.readLong() );
}
}, VOID_SERIALIZER, true );
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_com_HaRequestType18.java
|
5,079
|
{
@Override
public Response<LockResult> call( Master master, RequestContext context, ChannelBuffer input,
ChannelBuffer target )
{
return master.acquireIndexWriteLock( context, readString( input ), readString( input ) );
}
}, LOCK_SERIALIZER, true )
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_com_HaRequestType18.java
|
5,080
|
ACQUIRE_INDEX_READ_LOCK( new TargetCaller<Master, LockResult>()
{
@Override
public Response<LockResult> call( Master master, RequestContext context, ChannelBuffer input,
ChannelBuffer target )
{
return master.acquireIndexReadLock( context, readString( input ), readString( input ) );
}
}, LOCK_SERIALIZER, true )
{
@Override
public boolean isLock()
{
return true;
}
},
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_com_HaRequestType18.java
|
5,081
|
{
@Override
public Response<LockResult> call( Master master, RequestContext context, ChannelBuffer input,
ChannelBuffer target )
{
return master.acquireIndexReadLock( context, readString( input ), readString( input ) );
}
}, LOCK_SERIALIZER, true )
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_com_HaRequestType18.java
|
5,082
|
ACQUIRE_GRAPH_READ_LOCK( new TargetCaller<Master, LockResult>()
{
@Override
public Response<LockResult> call( Master master, RequestContext context, ChannelBuffer input,
ChannelBuffer target )
{
return master.acquireGraphReadLock( context );
}
}, LOCK_SERIALIZER, true )
{
@Override
public boolean isLock()
{
return true;
}
},
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_com_HaRequestType18.java
|
5,083
|
{
@Override
public Response<LockResult> call( Master master, RequestContext context, ChannelBuffer input,
ChannelBuffer target )
{
return master.acquireGraphReadLock( context );
}
}, LOCK_SERIALIZER, true )
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_com_HaRequestType18.java
|
5,084
|
ACQUIRE_GRAPH_WRITE_LOCK( new TargetCaller<Master, LockResult>()
{
@Override
public Response<LockResult> call( Master master, RequestContext context, ChannelBuffer input,
ChannelBuffer target )
{
return master.acquireGraphWriteLock( context );
}
}, LOCK_SERIALIZER, true )
{
@Override
public boolean isLock()
{
return true;
}
},
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_com_HaRequestType18.java
|
5,085
|
{
@Override
public Response<LockResult> call( Master master, RequestContext context, ChannelBuffer input,
ChannelBuffer target )
{
return master.acquireGraphWriteLock( context );
}
}, LOCK_SERIALIZER, true )
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_com_HaRequestType18.java
|
5,086
|
{
@Override
public void write( IdAllocation idAllocation, ChannelBuffer result ) throws IOException
{
IdRange idRange = idAllocation.getIdRange();
result.writeInt( idRange.getDefragIds().length );
for ( long id : idRange.getDefragIds() )
{
result.writeLong( id );
}
result.writeLong( idRange.getRangeStart() );
result.writeInt( idRange.getRangeLength() );
result.writeLong( idAllocation.getHighestIdInUse() );
result.writeLong( idAllocation.getDefragCount() );
}
}, false ),
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_com_HaRequestType18.java
|
5,087
|
{
@Override
public Response<Void> call( Master master, RequestContext context, ChannelBuffer input,
ChannelBuffer target )
{
return master.initializeTx( context );
}
}, VOID_SERIALIZER, true ),
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_com_HaRequestType18.java
|
5,088
|
{
@Override
public Response<Void> call( Master master, RequestContext context, ChannelBuffer input,
final ChannelBuffer target )
{
return master.copyTransactions( context, readString( input ), input.readLong(), input.readLong() );
}
}, VOID_SERIALIZER, true ),
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_com_HaRequestType18.java
|
5,089
|
{
@Override
public Response<Void> call( Master master, RequestContext context, ChannelBuffer input,
final ChannelBuffer target )
{
return master.copyStore( context, new ToNetworkStoreWriter( target, new Monitors() ) );
}
}, VOID_SERIALIZER, true ),
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_com_HaRequestType18.java
|
5,090
|
{
@Override
public void write( HandshakeResult responseObject, ChannelBuffer result ) throws IOException
{
result.writeInt( responseObject.txAuthor() );
result.writeLong( responseObject.txChecksum() );
}
}, false ),
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_com_HaRequestType18.java
|
5,091
|
{
@Override
public Response<HandshakeResult> call( Master master, RequestContext context, ChannelBuffer input,
ChannelBuffer target )
{
return master.handshake( input.readLong(), null );
}
}, new ObjectSerializer<HandshakeResult>()
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_com_HaRequestType18.java
|
5,092
|
{
@Override
public Response<Void> call( Master master, RequestContext context, ChannelBuffer input,
ChannelBuffer target )
{
return master.finishTransaction( context, readBoolean( input ) );
}
}, VOID_SERIALIZER, true ),
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_com_HaRequestType18.java
|
5,093
|
{
@Override
public Response<Void> call( Master master, RequestContext context, ChannelBuffer input,
ChannelBuffer target )
{
return master.pullUpdates( context );
}
}, VOID_SERIALIZER, true ),
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_com_HaRequestType18.java
|
5,094
|
{
@Override
public Response<Long> call( Master master, RequestContext context, ChannelBuffer input,
ChannelBuffer target )
{
String resource = readString( input );
final ReadableByteChannel reader = new BlockLogReader( input );
return master.commitSingleResourceTransaction( context, resource, TxExtractor.create( reader ) );
}
}, LONG_SERIALIZER, true ),
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_com_HaRequestType18.java
|
5,095
|
ACQUIRE_RELATIONSHIP_READ_LOCK( new AquireLockCall()
{
@Override
protected Response<LockResult> lock( Master master, RequestContext context, long... ids )
{
return master.acquireRelationshipReadLock( context, ids );
}
}, LOCK_SERIALIZER, true )
{
@Override
public boolean isLock()
{
return true;
}
},
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_com_HaRequestType18.java
|
5,096
|
{
@Override
protected Response<LockResult> lock( Master master, RequestContext context, long... ids )
{
return master.acquireRelationshipReadLock( context, ids );
}
}, LOCK_SERIALIZER, true )
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_com_HaRequestType18.java
|
5,097
|
{
@Override
public Response<IdAllocation> call( Master master, RequestContext context, ChannelBuffer input,
ChannelBuffer target )
{
IdType idType = IdType.values()[input.readByte()];
return master.allocateIds( context, idType );
}
}, new ObjectSerializer<IdAllocation>()
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_com_HaRequestType18.java
|
5,098
|
ACQUIRE_INDEX_WRITE_LOCK( new TargetCaller<Master, LockResult>()
{
@Override
public Response<LockResult> call( Master master, RequestContext context, ChannelBuffer input,
ChannelBuffer target )
{
return master.acquireIndexWriteLock( context, readString( input ), readString( input ) );
}
}, LOCK_SERIALIZER, true )
{
@Override
public boolean isLock()
{
return true;
}
},
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_com_HaRequestType18.java
|
5,099
|
{
@Override
public Response<Integer> call( Master master, RequestContext context, ChannelBuffer input,
ChannelBuffer target )
{
return master.createRelationshipType( context, readString( input ) );
}
}, INTEGER_SERIALIZER, true ),
| false
|
enterprise_ha_src_main_java_org_neo4j_kernel_ha_com_HaRequestType18.java
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.