Unnamed: 0
int64 0
6.7k
| func
stringlengths 12
89.6k
| target
bool 2
classes | project
stringlengths 45
151
|
|---|---|---|---|
3,800
|
{
@Override
public void run()
{
try
{
ClusterConfiguration clusterConfiguration = result.get();
logger.getLogger().debug( "**** Cluster configuration:" +
clusterConfiguration );
}
catch ( Exception e )
{
logger.getLogger().debug( "**** Node "+joinServer+" could not join cluster:" + e
.getMessage() );
out.add( cluster );
}
}
} );
| false
|
enterprise_cluster_src_test_java_org_neo4j_cluster_protocol_cluster_ClusterNetworkTest.java
|
3,801
|
{
@Override
public void run()
{
Cluster joinCluster = servers.get( joinServer - 1 );
for ( final Cluster cluster : out )
{
if ( cluster.equals( joinCluster ) )
{
out.remove( cluster );
logger.getLogger().debug( "Join:" + cluster.toString() );
if (joinServers.length == 0)
{
if ( in.isEmpty() )
{
cluster.create( "default" );
} else
{
// Use test info to figure out who to join
final Future<ClusterConfiguration> result = cluster.join( "default", URI.create( in.get( 0 ).toString() ) );
executor.submit( new Runnable()
{
@Override
public void run()
{
try
{
ClusterConfiguration clusterConfiguration = result.get();
logger.getLogger().debug( "**** Cluster configuration:" +
clusterConfiguration );
}
catch ( Exception e )
{
logger.getLogger().debug( "**** Node "+joinServer+" could not join cluster:" + e
.getMessage() );
out.add( cluster );
}
}
} );
}
} else
{
// List of servers to join was explicitly specified, so use that
URI[] instanceUris = new URI[joinServers.length];
for ( int i = 0; i < joinServers.length; i++ )
{
int server = joinServers[i];
instanceUris[i] = URI.create( servers.get( server - 1 ).toString() );
}
final Future<ClusterConfiguration> result = cluster.join( "default", instanceUris );
executor.submit( new Runnable()
{
@Override
public void run()
{
try
{
ClusterConfiguration clusterConfiguration = result.get();
logger.getLogger().debug( "**** Cluster configuration:" +
clusterConfiguration );
}
catch ( Exception e )
{
if ( !(e.getCause() instanceof IllegalStateException ))
{
cluster.create( "default" );
}
else
{
logger.getLogger().debug( "*** Incorrectly configured cluster? "
+ e.getCause().getMessage() );
}
}
}
} );
}
/*
if ( in.isEmpty() )
{
cluster.create( "default" );
}
else
{
try
{
cluster.join( "default", new URI( in.get( 0 ).toString() ) );
}
catch ( URISyntaxException e )
{
e.printStackTrace();
}
}*/
break;
}
}
}
};
| false
|
enterprise_cluster_src_test_java_org_neo4j_cluster_protocol_cluster_ClusterNetworkTest.java
|
3,802
|
public static class ClusterTestScriptDSL
implements ClusterTestScript
{
public abstract static class ClusterAction
implements Runnable
{
public long time;
}
private Queue<ClusterAction> actions = new LinkedList<ClusterAction>();
private long now = 0;
public ClusterTestScriptDSL join( long time, final int joinServer, final int... joinServers )
{
ClusterAction joinAction = new ClusterAction()
{
@Override
public void run()
{
Cluster joinCluster = servers.get( joinServer - 1 );
for ( final Cluster cluster : out )
{
if ( cluster.equals( joinCluster ) )
{
out.remove( cluster );
logger.getLogger().debug( "Join:" + cluster.toString() );
if (joinServers.length == 0)
{
if ( in.isEmpty() )
{
cluster.create( "default" );
} else
{
// Use test info to figure out who to join
final Future<ClusterConfiguration> result = cluster.join( "default", URI.create( in.get( 0 ).toString() ) );
executor.submit( new Runnable()
{
@Override
public void run()
{
try
{
ClusterConfiguration clusterConfiguration = result.get();
logger.getLogger().debug( "**** Cluster configuration:" +
clusterConfiguration );
}
catch ( Exception e )
{
logger.getLogger().debug( "**** Node "+joinServer+" could not join cluster:" + e
.getMessage() );
out.add( cluster );
}
}
} );
}
} else
{
// List of servers to join was explicitly specified, so use that
URI[] instanceUris = new URI[joinServers.length];
for ( int i = 0; i < joinServers.length; i++ )
{
int server = joinServers[i];
instanceUris[i] = URI.create( servers.get( server - 1 ).toString() );
}
final Future<ClusterConfiguration> result = cluster.join( "default", instanceUris );
executor.submit( new Runnable()
{
@Override
public void run()
{
try
{
ClusterConfiguration clusterConfiguration = result.get();
logger.getLogger().debug( "**** Cluster configuration:" +
clusterConfiguration );
}
catch ( Exception e )
{
if ( !(e.getCause() instanceof IllegalStateException ))
{
cluster.create( "default" );
}
else
{
logger.getLogger().debug( "*** Incorrectly configured cluster? "
+ e.getCause().getMessage() );
}
}
}
} );
}
/*
if ( in.isEmpty() )
{
cluster.create( "default" );
}
else
{
try
{
cluster.join( "default", new URI( in.get( 0 ).toString() ) );
}
catch ( URISyntaxException e )
{
e.printStackTrace();
}
}*/
break;
}
}
}
};
joinAction.time = now + time;
actions.offer( joinAction );
now += time;
return this;
}
public ClusterTestScriptDSL leave( long time, final int leaveServer )
{
ClusterAction leaveAction = new ClusterAction()
{
@Override
public void run()
{
Cluster leaveCluster = servers.get( leaveServer - 1 );
for ( Cluster cluster : in )
{
if ( cluster.equals( leaveCluster ) )
{
in.remove( cluster );
cluster.leave();
logger.getLogger().debug( "Leave:" + cluster.toString() );
break;
}
}
}
};
leaveAction.time = now + time;
actions.offer( leaveAction );
now += time;
return this;
}
@Override
public void tick( long time )
{
// logger.getLogger().debug( actions.size()+" actions remaining" );
while ( !actions.isEmpty() && actions.peek().time <= time )
{
actions.poll().run();
}
}
@Override
public long getLength()
{
return now;
}
}
| false
|
enterprise_cluster_src_test_java_org_neo4j_cluster_protocol_cluster_ClusterNetworkTest.java
|
3,803
|
{
@Override
public void enteredCluster( ClusterConfiguration clusterConfiguration )
{
logger.getLogger().debug( uri + " entered cluster:" + clusterConfiguration.getMemberURIs() );
config.set( new ClusterConfiguration( clusterConfiguration ) );
in.add( cluster );
}
@Override
public void joinedCluster( InstanceId instanceId, URI member )
{
logger.getLogger().debug( uri + " sees a join from " + instanceId + " at URI " + member.toString() );
config.get().joined( instanceId, member );
}
@Override
public void leftCluster( InstanceId instanceId )
{
logger.getLogger().debug( uri + " sees a leave:" + instanceId );
config.get().left( instanceId );
}
@Override
public void leftCluster()
{
out.add( cluster );
config.set( null );
}
@Override
public void elected( String role, InstanceId instanceId, URI electedMember )
{
logger.getLogger().debug( uri + " sees an election:" + instanceId +
"was elected as " + role + " on URI " + electedMember );
}
@Override
public void unelected( String role, InstanceId instanceId, URI electedMember )
{
logger.getLogger().debug( uri + " sees an unelection:" + instanceId +
"was removed from " + role + " on URI " + electedMember );
}
} );
| false
|
enterprise_cluster_src_test_java_org_neo4j_cluster_protocol_cluster_ClusterNetworkTest.java
|
3,804
|
{
int i = 0;
@Override
public void run()
{
long now = System.currentTimeMillis() - start;
logger.getLogger().debug( "Round " + i + ", time:" + now );
script.tick( now );
if ( ++i == 1000 )
{
timer.cancel();
}
}
}, 0, 10 );
| false
|
enterprise_cluster_src_test_java_org_neo4j_cluster_protocol_cluster_ClusterNetworkTest.java
|
3,805
|
@RunWith(value = Parameterized.class)
public class ClusterNetworkTest
{
@Parameterized.Parameters
public static Collection<Object[]> data()
{
return Arrays.asList( new Object[][]
{
{
3, new ClusterTestScriptDSL().
join( 10L, 1, 2, 3 ).
join( 0L, 2, 1, 3 ).
join( 0L, 3, 1, 2 ).
leave( 10000L, 3 ).
leave( 100L, 2 ).
leave( 100L, 1 )
},
/* {
// 3 nodes join and then leaves
3, new ClusterTestScriptDSL().
join( 10L, 1 ).
join( 10L, 2 ).
join( 100L, 3 ).
leave( 100L, 3 ).
leave( 100L, 2 ).
leave( 100L, 1 )
},
{
// 7 nodes join and then leaves
3, new ClusterTestScriptDSL().
join( 100L, 1 ).
join( 100L, 2 ).
join( 100L, 3 ).
join( 100L, 4 ).
join( 100L, 5 ).
join( 100L, 6 ).
join( 100L, 7 ).
leave( 100L, 7 ).
leave( 100L, 6 ).
leave( 100L, 5 ).
leave( 100L, 4 ).
leave( 100L, 3 ).
leave( 100L, 2 ).
leave( 100L, 1 )
},
{
// 1 node join, then 3 nodes try to join at roughly the same time
4, new ClusterTestScriptDSL().
join( 100L, 1 ).
join( 100L, 2 ).
join( 10L, 3 ).
leave( 500L, 3 ).
leave( 100L, 2 ).
leave( 100L, 1 )
},
{
// 2 nodes join, and then one leaves as the third joins
3, new ClusterTestScriptDSL().
join( 100L, 1 ).
join( 100L, 2 ).
leave( 90L, 2 ).
join( 20L, 3 )
},
{
3, new ClusterTestScriptRandom( 1337830212532839000L )
}*/
} );
}
static List<Cluster> servers = new ArrayList<Cluster>();
static List<Cluster> out = new ArrayList<Cluster>();
static List<Cluster> in = new ArrayList<Cluster>();
@ClassRule
public static LoggerRule logger = new LoggerRule();
List<AtomicReference<ClusterConfiguration>> configurations = new ArrayList<AtomicReference<ClusterConfiguration>>();
ClusterTestScript script;
Timer timer = new Timer();
LifeSupport life = new LifeSupport();
static ExecutorService executor;
public ClusterNetworkTest( int nrOfServers, ClusterTestScript script )
throws URISyntaxException
{
this.script = script;
out.clear();
in.clear();
LogbackService logbackService = new LogbackService( new Config( MapUtil.stringMap() ), new LoggerContext() );
for ( int i = 0; i < nrOfServers; i++ )
{
final URI uri = new URI( "neo4j://localhost:800" + (i + 1) );
NetworkedServerFactory factory = new NetworkedServerFactory( life,
new MultiPaxosServerFactory( new ClusterConfiguration( "default", StringLogger.SYSTEM ),
new LogbackService( null,
(LoggerContext) LoggerFactory.getILoggerFactory() ) ),
new FixedTimeoutStrategy( 1000 ),
logbackService, new ObjectStreamFactory(), new ObjectStreamFactory() );
ServerIdElectionCredentialsProvider electionCredentialsProvider = new ServerIdElectionCredentialsProvider();
ProtocolServer server = factory.newNetworkedServer(
new Config( MapUtil.stringMap( ClusterSettings.cluster_server.name(),
uri.getHost() + ":" + uri.getPort(),
ClusterSettings.server_id.name(), ""+i ) ),
new InMemoryAcceptorInstanceStore(),
electionCredentialsProvider );
server.addBindingListener( electionCredentialsProvider );
final Cluster cluster2 = server.newClient( Cluster.class );
final AtomicReference<ClusterConfiguration> config2 = clusterStateListener( uri, cluster2 );
servers.add( cluster2 );
out.add( cluster2 );
configurations.add( config2 );
}
life.start();
}
@Before
public void setup()
{
executor = Executors.newSingleThreadExecutor( new NamedThreadFactory( "Threaded actions" ) );
}
@After
public void tearDown()
{
executor.shutdownNow();
}
@Test
public void testCluster()
throws ExecutionException, InterruptedException, URISyntaxException, TimeoutException
{
final long start = System.currentTimeMillis();
timer.scheduleAtFixedRate( new TimerTask()
{
int i = 0;
@Override
public void run()
{
long now = System.currentTimeMillis() - start;
logger.getLogger().debug( "Round " + i + ", time:" + now );
script.tick( now );
if ( ++i == 1000 )
{
timer.cancel();
}
}
}, 0, 10 );
// Let messages settle
Thread.sleep( script.getLength() + 1000 );
logger.getLogger().debug( "All nodes leave" );
// All leave
for ( Cluster cluster : new ArrayList<Cluster>( in ) )
{
logger.getLogger().debug( "Leaving:" + cluster );
cluster.leave();
Thread.sleep( 100 );
}
}
@After
public void shutdown()
{
life.shutdown();
}
private AtomicReference<ClusterConfiguration> clusterStateListener( final URI uri, final Cluster cluster )
{
final AtomicReference<ClusterConfiguration> config = new AtomicReference<ClusterConfiguration>();
cluster.addClusterListener( new ClusterListener()
{
@Override
public void enteredCluster( ClusterConfiguration clusterConfiguration )
{
logger.getLogger().debug( uri + " entered cluster:" + clusterConfiguration.getMemberURIs() );
config.set( new ClusterConfiguration( clusterConfiguration ) );
in.add( cluster );
}
@Override
public void joinedCluster( InstanceId instanceId, URI member )
{
logger.getLogger().debug( uri + " sees a join from " + instanceId + " at URI " + member.toString() );
config.get().joined( instanceId, member );
}
@Override
public void leftCluster( InstanceId instanceId )
{
logger.getLogger().debug( uri + " sees a leave:" + instanceId );
config.get().left( instanceId );
}
@Override
public void leftCluster()
{
out.add( cluster );
config.set( null );
}
@Override
public void elected( String role, InstanceId instanceId, URI electedMember )
{
logger.getLogger().debug( uri + " sees an election:" + instanceId +
"was elected as " + role + " on URI " + electedMember );
}
@Override
public void unelected( String role, InstanceId instanceId, URI electedMember )
{
logger.getLogger().debug( uri + " sees an unelection:" + instanceId +
"was removed from " + role + " on URI " + electedMember );
}
} );
return config;
}
private void verifyConfigurations()
{
List<URI> nodes = null;
for ( int j = 0; j < configurations.size(); j++ )
{
AtomicReference<ClusterConfiguration> configurationAtomicReference = configurations.get( j );
if ( configurationAtomicReference.get() != null )
{
if ( nodes == null )
{
nodes = configurationAtomicReference.get().getMemberURIs();
}
else
{
assertEquals( "Config for server" + (j + 1) + " is wrong", nodes,
configurationAtomicReference.get().getMemberURIs() );
}
}
}
}
public interface ClusterTestScript
{
void tick( long time );
long getLength();
}
public static class ClusterTestScriptDSL
implements ClusterTestScript
{
public abstract static class ClusterAction
implements Runnable
{
public long time;
}
private Queue<ClusterAction> actions = new LinkedList<ClusterAction>();
private long now = 0;
public ClusterTestScriptDSL join( long time, final int joinServer, final int... joinServers )
{
ClusterAction joinAction = new ClusterAction()
{
@Override
public void run()
{
Cluster joinCluster = servers.get( joinServer - 1 );
for ( final Cluster cluster : out )
{
if ( cluster.equals( joinCluster ) )
{
out.remove( cluster );
logger.getLogger().debug( "Join:" + cluster.toString() );
if (joinServers.length == 0)
{
if ( in.isEmpty() )
{
cluster.create( "default" );
} else
{
// Use test info to figure out who to join
final Future<ClusterConfiguration> result = cluster.join( "default", URI.create( in.get( 0 ).toString() ) );
executor.submit( new Runnable()
{
@Override
public void run()
{
try
{
ClusterConfiguration clusterConfiguration = result.get();
logger.getLogger().debug( "**** Cluster configuration:" +
clusterConfiguration );
}
catch ( Exception e )
{
logger.getLogger().debug( "**** Node "+joinServer+" could not join cluster:" + e
.getMessage() );
out.add( cluster );
}
}
} );
}
} else
{
// List of servers to join was explicitly specified, so use that
URI[] instanceUris = new URI[joinServers.length];
for ( int i = 0; i < joinServers.length; i++ )
{
int server = joinServers[i];
instanceUris[i] = URI.create( servers.get( server - 1 ).toString() );
}
final Future<ClusterConfiguration> result = cluster.join( "default", instanceUris );
executor.submit( new Runnable()
{
@Override
public void run()
{
try
{
ClusterConfiguration clusterConfiguration = result.get();
logger.getLogger().debug( "**** Cluster configuration:" +
clusterConfiguration );
}
catch ( Exception e )
{
if ( !(e.getCause() instanceof IllegalStateException ))
{
cluster.create( "default" );
}
else
{
logger.getLogger().debug( "*** Incorrectly configured cluster? "
+ e.getCause().getMessage() );
}
}
}
} );
}
/*
if ( in.isEmpty() )
{
cluster.create( "default" );
}
else
{
try
{
cluster.join( "default", new URI( in.get( 0 ).toString() ) );
}
catch ( URISyntaxException e )
{
e.printStackTrace();
}
}*/
break;
}
}
}
};
joinAction.time = now + time;
actions.offer( joinAction );
now += time;
return this;
}
public ClusterTestScriptDSL leave( long time, final int leaveServer )
{
ClusterAction leaveAction = new ClusterAction()
{
@Override
public void run()
{
Cluster leaveCluster = servers.get( leaveServer - 1 );
for ( Cluster cluster : in )
{
if ( cluster.equals( leaveCluster ) )
{
in.remove( cluster );
cluster.leave();
logger.getLogger().debug( "Leave:" + cluster.toString() );
break;
}
}
}
};
leaveAction.time = now + time;
actions.offer( leaveAction );
now += time;
return this;
}
@Override
public void tick( long time )
{
// logger.getLogger().debug( actions.size()+" actions remaining" );
while ( !actions.isEmpty() && actions.peek().time <= time )
{
actions.poll().run();
}
}
@Override
public long getLength()
{
return now;
}
}
public static class ClusterTestScriptRandom
implements ClusterTestScript
{
private final long seed;
private final Random random;
public ClusterTestScriptRandom( long seed )
{
if ( seed == -1 )
{
seed = System.nanoTime();
}
this.seed = seed;
random = new Random( seed );
}
@Override
public void tick( long time )
{
if ( time == 0 )
{
logger.getLogger().debug( "Random seed:" + seed );
}
if ( random.nextDouble() >= 0.9 )
{
if ( random.nextDouble() > 0.5 && !out.isEmpty() )
{
int idx = random.nextInt( out.size() );
Cluster cluster = out.remove( idx );
if ( in.isEmpty() )
{
cluster.create( "default" );
}
else
{
try
{
cluster.join( "default", new URI( in.get( 0 ).toString() ) );
}
catch ( URISyntaxException e )
{
e.printStackTrace();
}
}
logger.getLogger().debug( "Enter cluster:" + cluster.toString() );
}
else if ( !in.isEmpty() )
{
int idx = random.nextInt( in.size() );
Cluster cluster = in.remove( idx );
cluster.leave();
logger.getLogger().debug( "Leave cluster:" + cluster.toString() );
}
}
}
@Override
public long getLength()
{
return 5000;
}
}
}
| false
|
enterprise_cluster_src_test_java_org_neo4j_cluster_protocol_cluster_ClusterNetworkTest.java
|
3,806
|
{
@Override
public void run()
{
try
{
ClusterConfiguration clusterConfiguration = result.get();
logger.getLogger().debug( "**** Cluster configuration:" +
clusterConfiguration );
}
catch ( Exception e )
{
logger.getLogger().debug( "**** Node could not join cluster:" + e
.getMessage() );
out.add( cluster );
}
}
};
| false
|
enterprise_cluster_src_test_java_org_neo4j_cluster_protocol_cluster_ClusterMockTest.java
|
3,807
|
public class ClusterTestScriptRandom
implements ClusterTestScript
{
private final long seed;
private final Random random;
public ClusterTestScriptRandom( long seed )
{
if ( seed == -1 )
{
seed = System.nanoTime();
}
this.seed = seed;
random = new Random( seed );
}
@Override
public int rounds()
{
return 300;
}
@Override
public void tick( long time )
{
if ( time >= (rounds() - 100) * 10 )
{
return;
}
if ( time == 0 )
{
logger.getLogger().debug( "Random seed:" + seed + "L" );
}
if ( random.nextDouble() >= 0.8 )
{
double inOrOut = (in.size() - out.size()) / ((double) servers.size());
double whatToDo = random.nextDouble() + inOrOut;
logger.getLogger().debug( "What to do:" + whatToDo );
if ( whatToDo < 0.5 && !out.isEmpty() )
{
int idx = random.nextInt( out.size() );
final Cluster cluster = out.remove( idx );
if ( in.isEmpty() )
{
cluster.create( "default" );
}
else
{
final Future<ClusterConfiguration> result = cluster.join( "default",
URI.create( in.get( 0 ).toString() ) );
Runnable joiner = new Runnable()
{
@Override
public void run()
{
try
{
ClusterConfiguration clusterConfiguration = result.get();
logger.getLogger().debug( "**** Cluster configuration:" +
clusterConfiguration );
}
catch ( Exception e )
{
logger.getLogger().debug( "**** Node could not join cluster:" + e
.getMessage() );
out.add( cluster );
}
}
};
network.addFutureWaiter( result, joiner );
}
logger.getLogger().debug( "Enter cluster:" + cluster.toString() );
}
else if ( !in.isEmpty() )
{
int idx = random.nextInt( in.size() );
Cluster cluster = in.remove( idx );
cluster.leave();
logger.getLogger().debug( "Leave cluster:" + cluster.toString() );
}
}
}
}
| false
|
enterprise_cluster_src_test_java_org_neo4j_cluster_protocol_cluster_ClusterMockTest.java
|
3,808
|
public abstract class ClusterAction
implements Runnable
{
public long time;
}
| false
|
enterprise_cluster_src_test_java_org_neo4j_cluster_protocol_cluster_ClusterMockTest.java
|
3,809
|
{
@Override
public void run()
{
ClusterMockTest.this.getRoles( roles );
}
}, 0 );
| false
|
enterprise_cluster_src_test_java_org_neo4j_cluster_protocol_cluster_ClusterMockTest.java
|
3,810
|
{
@Override
public void run()
{
ClusterMockTest.this.verifyConfigurations();
}
}, time );
| false
|
enterprise_cluster_src_test_java_org_neo4j_cluster_protocol_cluster_ClusterMockTest.java
|
3,811
|
{
@Override
public void run()
{
logger.getLogger().debug( msg );
}
}, time );
| false
|
enterprise_cluster_src_test_java_org_neo4j_cluster_protocol_cluster_ClusterMockTest.java
|
3,812
|
{
@Override
public void run()
{
logger.getLogger().debug( "Slept for " + sleepTime );
}
}, sleepTime );
| false
|
enterprise_cluster_src_test_java_org_neo4j_cluster_protocol_cluster_ClusterMockTest.java
|
3,813
|
{
@Override
public void run()
{
AtomicBroadcast broadcast = servers.get( server - 1 ).newClient( AtomicBroadcast.class );
try
{
broadcast.broadcast( serializer.broadcast( value ) );
}
catch ( IOException e )
{
e.printStackTrace();
}
}
}, time );
| false
|
enterprise_cluster_src_test_java_org_neo4j_cluster_protocol_cluster_ClusterMockTest.java
|
3,814
|
{
@Override
public void run()
{
Cluster server = servers.get( serverUp - 1 ).newClient( Cluster.class );
network.getNetworkLatencyStrategy()
.getStrategy( ScriptableNetworkFailureLatencyStrategy.class )
.nodeIsUp( "server"+server.toString() );
logger.getLogger().debug( server + " is up" );
}
}, time );
| false
|
enterprise_cluster_src_test_java_org_neo4j_cluster_protocol_cluster_ClusterMockTest.java
|
3,815
|
{
@Override
public void run()
{
Cluster server = servers.get( serverDown - 1 ).newClient( Cluster.class );
network.getNetworkLatencyStrategy().getStrategy( ScriptableNetworkFailureLatencyStrategy.class )
.nodeIsDown( "server"+server.toString() );
logger.getLogger().debug( server + " is down" );
}
}, time );
| false
|
enterprise_cluster_src_test_java_org_neo4j_cluster_protocol_cluster_ClusterMockTest.java
|
3,816
|
{
@Override
public void run()
{
Cluster leaveCluster = servers.get( leaveServer - 1 ).newClient( Cluster.class );
for ( Cluster cluster : in )
{
if ( cluster.equals( leaveCluster ) )
{
in.remove( cluster );
cluster.leave();
logger.getLogger().debug( "Leave:" + cluster.toString() );
break;
}
}
}
}, time );
| false
|
enterprise_cluster_src_test_java_org_neo4j_cluster_protocol_cluster_ClusterMockTest.java
|
3,817
|
public abstract static class ClusterAction
implements Runnable
{
public long time;
}
| false
|
enterprise_cluster_src_test_java_org_neo4j_cluster_protocol_cluster_ClusterNetworkTest.java
|
3,818
|
@Ignore
@RunWith(Parameterized.class)
public class ClusterRandomTest
extends ClusterMockTest
{
@Parameterized.Parameters
public static Collection<Object[]> data()
{
return seeds( 1338272692543769000L, 1338272692343957000L, 1338272692188718000L, 1338272692117545000L,
1338272692020413000L, 1338272691938947000L, 1338272691895131000L, 1338272691832332000L,
1338272691540039000L,
1338272632660010000L, 1337830212532839000L );
// return seeds( 1349765117306363000L );
// return seeds( -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 );
}
private static Collection<Object[]> seeds( long... s )
{
List<Object[]> seedList = new ArrayList<Object[]>();
for ( long seed : s )
{
seedList.add( new Object[]{seed} );
}
return seedList;
}
final long seed;
public ClusterRandomTest( long s )
{
seed = s;
}
@Test
public void randomTest()
throws URISyntaxException, ExecutionException, TimeoutException, InterruptedException
{
testCluster( 7, DEFAULT_NETWORK(), new ClusterTestScriptRandom( seed ) );
}
}
| false
|
enterprise_cluster_src_test_java_org_neo4j_cluster_protocol_cluster_ClusterRandomTest.java
|
3,819
|
class ClusterContextImpl
extends AbstractContextImpl
implements ClusterContext
{
// ClusterContext
private Iterable<ClusterListener> clusterListeners = Listeners.newListeners();
private final List<ClusterMessage.ConfigurationRequestState> discoveredInstances = new ArrayList<ClusterMessage
.ConfigurationRequestState>();
private Iterable<URI> joiningInstances;
private ClusterMessage.ConfigurationResponseState joinDeniedConfigurationResponseState;
private final Map<InstanceId, URI> currentlyJoiningInstances =
new HashMap<InstanceId, URI>();
private final Executor executor;
private final ObjectOutputStreamFactory objectOutputStreamFactory;
private final ObjectInputStreamFactory objectInputStreamFactory;
private final LearnerContext learnerContext;
private final HeartbeatContext heartbeatContext;
ClusterContextImpl( InstanceId me, CommonContextState commonState, Logging logging,
Timeouts timeouts, Executor executor,
ObjectOutputStreamFactory objectOutputStreamFactory,
ObjectInputStreamFactory objectInputStreamFactory,
LearnerContext learnerContext, HeartbeatContext heartbeatContext )
{
super( me, commonState, logging, timeouts );
this.executor = executor;
this.objectOutputStreamFactory = objectOutputStreamFactory;
this.objectInputStreamFactory = objectInputStreamFactory;
this.learnerContext = learnerContext;
this.heartbeatContext = heartbeatContext;
}
private ClusterContextImpl( InstanceId me, CommonContextState commonState, Logging logging, Timeouts timeouts,
Iterable<URI> joiningInstances, ClusterMessage.ConfigurationResponseState
joinDeniedConfigurationResponseState, Executor executor,
ObjectOutputStreamFactory objectOutputStreamFactory,
ObjectInputStreamFactory objectInputStreamFactory, LearnerContext learnerContext,
HeartbeatContext heartbeatContext )
{
super( me, commonState, logging, timeouts );
this.joiningInstances = joiningInstances;
this.joinDeniedConfigurationResponseState = joinDeniedConfigurationResponseState;
this.executor = executor;
this.objectOutputStreamFactory = objectOutputStreamFactory;
this.objectInputStreamFactory = objectInputStreamFactory;
this.learnerContext = learnerContext;
this.heartbeatContext = heartbeatContext;
}
// Cluster API
@Override
public void addClusterListener( ClusterListener listener )
{
clusterListeners = Listeners.addListener( listener, clusterListeners );
}
@Override
public void removeClusterListener( ClusterListener listener )
{
clusterListeners = Listeners.removeListener( listener, clusterListeners );
}
// Implementation
@Override
public void created( String name )
{
commonState.setConfiguration(
new ClusterConfiguration( name, logging.getMessagesLog( ClusterConfiguration.class ),
Collections.singleton( commonState.boundAt() ) ));
joined();
}
@Override
public void joining( String name, Iterable<URI> instanceList )
{
joiningInstances = instanceList;
discoveredInstances.clear();
joinDeniedConfigurationResponseState = null;
}
@Override
public void acquiredConfiguration( final Map<InstanceId, URI> memberList, final Map<String,
InstanceId> roles )
{
commonState.configuration().setMembers( memberList );
commonState.configuration().setRoles( roles );
}
@Override
public void joined()
{
commonState.configuration().joined( me, commonState.boundAt() );
Listeners.notifyListeners( clusterListeners, executor, new Listeners.Notification<ClusterListener>()
{
@Override
public void notify( ClusterListener listener )
{
listener.enteredCluster( commonState.configuration() );
}
});
}
@Override
public void left()
{
timeouts.cancelAllTimeouts();
commonState.configuration().left();
Listeners.notifyListeners( clusterListeners, executor, new Listeners.Notification<ClusterListener>()
{
@Override
public void notify( ClusterListener listener )
{
listener.leftCluster();
}
} );
}
@Override
public void joined( final InstanceId instanceId, final URI atURI )
{
commonState.configuration().joined( instanceId, atURI );
if ( commonState.configuration().getMembers().containsKey( me ) )
{
// Make sure this node is in cluster before notifying of others joining and leaving
Listeners.notifyListeners( clusterListeners, executor, new Listeners.Notification<ClusterListener>()
{
@Override
public void notify( ClusterListener listener )
{
listener.joinedCluster( instanceId, atURI );
}
} );
}
// else:
// This typically happens in situations when several nodes join at once, and the ordering
// of join messages is a little out of whack.
currentlyJoiningInstances.remove( instanceId );
}
@Override
public void left( final InstanceId node )
{
commonState.configuration().left( node );
Listeners.notifyListeners( clusterListeners, executor, new Listeners.Notification<ClusterListener>()
{
@Override
public void notify( ClusterListener listener )
{
listener.leftCluster( node );
}
} );
}
@Override
public void elected( final String roleName, final InstanceId instanceId )
{
commonState.configuration().elected( roleName, instanceId );
Listeners.notifyListeners( clusterListeners, executor, new Listeners.Notification<ClusterListener>()
{
@Override
public void notify( ClusterListener listener )
{
listener.elected( roleName, instanceId,
commonState.configuration().getUriForId( instanceId ) );
}
} );
}
@Override
public void unelected( final String roleName, final InstanceId instanceId )
{
commonState.configuration().unelected( roleName );
Listeners.notifyListeners( clusterListeners, executor, new Listeners.Notification<ClusterListener>()
{
@Override
public void notify( ClusterListener listener )
{
listener.unelected( roleName, instanceId, commonState.configuration().getUriForId( instanceId ) );
}
} );
}
@Override
public ClusterConfiguration getConfiguration()
{
return commonState.configuration();
}
@Override
public boolean isElectedAs( String roleName )
{
return me.equals( commonState.configuration().getElected( roleName ) );
}
@Override
public boolean isInCluster()
{
return Iterables.count( commonState.configuration().getMemberURIs() ) != 0;
}
@Override
public Iterable<URI> getJoiningInstances()
{
return joiningInstances;
}
@Override
public ObjectOutputStreamFactory getObjectOutputStreamFactory()
{
return objectOutputStreamFactory;
}
@Override
public ObjectInputStreamFactory getObjectInputStreamFactory()
{
return objectInputStreamFactory;
}
@Override
public List<ClusterMessage.ConfigurationRequestState> getDiscoveredInstances()
{
return discoveredInstances;
}
@Override
public String toString()
{
return "Me: " + me + " Bound at: " + commonState.boundAt() + " Config:" + commonState.configuration();
}
@Override
public void setBoundAt( URI boundAt )
{
commonState.setBoundAt( boundAt );
}
@Override
public void joinDenied( ClusterMessage.ConfigurationResponseState configurationResponseState )
{
if ( configurationResponseState == null )
{
throw new IllegalArgumentException( "Join denied configuration response state was null" );
}
this.joinDeniedConfigurationResponseState = configurationResponseState;
}
@Override
public boolean hasJoinBeenDenied()
{
return joinDeniedConfigurationResponseState != null;
}
@Override
public ClusterMessage.ConfigurationResponseState getJoinDeniedConfigurationResponseState()
{
if ( !hasJoinBeenDenied() )
{
throw new IllegalStateException( "Join has not been denied" );
}
return joinDeniedConfigurationResponseState;
}
@Override
public Iterable<InstanceId> getOtherInstances()
{
return Iterables.filter( not( in( me ) ), commonState.configuration().getMemberIds() );
}
/** Used to ensure that no other instance is trying to join with the same id from a different machine */
@Override
public boolean isInstanceJoiningFromDifferentUri( InstanceId joiningId, URI uri )
{
return currentlyJoiningInstances.containsKey( joiningId )
&& !currentlyJoiningInstances.get( joiningId ).equals(uri);
}
@Override
public void instanceIsJoining( InstanceId joiningId, URI uri )
{
currentlyJoiningInstances.put( joiningId, uri );
}
@Override
public String myName()
{
String name = parameter( "name" ).apply( commonState.boundAt() );
if ( name != null )
{
return name;
}
else
{
return me.toString();
}
}
@Override
public void discoveredLastReceivedInstanceId( long id )
{
learnerContext.setLastDeliveredInstanceId( id );
learnerContext.learnedInstanceId( id );
learnerContext.setNextInstanceId( id + 1 );
}
@Override
public boolean isCurrentlyAlive( InstanceId joiningId )
{
return !heartbeatContext.getFailed().contains( joiningId );
}
    @Override
    public long getLastDeliveredInstanceId()
    {
        // Delegates to the learner, which owns delivery bookkeeping.
        return learnerContext.getLastDeliveredInstanceId();
    }
    // Creates a snapshot copy of this context: the joining-instances list and the denial
    // response are copied defensively, while the supplied snapshot sub-contexts
    // (common state, learner, heartbeat) replace the live ones.
    public ClusterContextImpl snapshot( CommonContextState commonStateSnapshot, Logging logging, Timeouts timeouts,
                                        Executor executor, ObjectOutputStreamFactory objectOutputStreamFactory,
                                        ObjectInputStreamFactory objectInputStreamFactory,
                                        LearnerContextImpl snapshotLearnerContext,
                                        HeartbeatContextImpl snapshotHeartbeatContext )
    {
        return new ClusterContextImpl( me, commonStateSnapshot, logging, timeouts,
                joiningInstances == null ? null : new ArrayList<>(toList(joiningInstances)),
                joinDeniedConfigurationResponseState == null ? null : joinDeniedConfigurationResponseState.snapshot(),
                executor, objectOutputStreamFactory, objectInputStreamFactory, snapshotLearnerContext,
                snapshotHeartbeatContext);
    }
@Override
public boolean equals( Object o )
{
if ( this == o )
{
return true;
}
if ( o == null || getClass() != o.getClass() )
{
return false;
}
ClusterContextImpl that = (ClusterContextImpl) o;
if ( currentlyJoiningInstances != null ? !currentlyJoiningInstances.equals( that.currentlyJoiningInstances )
: that.currentlyJoiningInstances != null )
{
return false;
}
if ( discoveredInstances != null ? !discoveredInstances.equals( that.discoveredInstances ) : that
.discoveredInstances != null )
{
return false;
}
if ( heartbeatContext != null ? !heartbeatContext.equals( that.heartbeatContext ) : that.heartbeatContext !=
null )
{
return false;
}
if ( joinDeniedConfigurationResponseState != null ? !joinDeniedConfigurationResponseState.equals( that
.joinDeniedConfigurationResponseState ) : that.joinDeniedConfigurationResponseState != null )
{
return false;
}
if ( joiningInstances != null ? !joiningInstances.equals( that.joiningInstances ) : that.joiningInstances !=
null )
{
return false;
}
if ( learnerContext != null ? !learnerContext.equals( that.learnerContext ) : that.learnerContext != null )
{
return false;
}
return true;
}
@Override
public int hashCode()
{
int result = 0;
result = 31 * result + (discoveredInstances != null ? discoveredInstances.hashCode() : 0);
result = 31 * result + (joiningInstances != null ? joiningInstances.hashCode() : 0);
result = 31 * result + (joinDeniedConfigurationResponseState != null ? joinDeniedConfigurationResponseState
.hashCode() : 0);
result = 31 * result + (currentlyJoiningInstances != null ? currentlyJoiningInstances.hashCode() : 0);
result = 31 * result + (learnerContext != null ? learnerContext.hashCode() : 0);
result = 31 * result + (heartbeatContext != null ? heartbeatContext.hashCode() : 0);
return result;
}
}
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_protocol_atomicbroadcast_multipaxos_context_ClusterContextImpl.java
|
3,820
|
start
{
@Override
public State<?, ?> handle( ClusterContext context, Message<ClusterMessage> message,
MessageHolder outgoing ) throws Throwable
{
switch ( message.getMessageType() )
{
case addClusterListener:
{
context.addClusterListener( message.<ClusterListener>getPayload() );
break;
}
case removeClusterListener:
{
context.removeClusterListener( message.<ClusterListener>getPayload() );
break;
}
case create:
{
String name = message.getPayload();
context.getLogger( ClusterState.class ).info( "Creating cluster: " + name );
context.created( name );
return entered;
}
case join:
{
// Send configuration request to all instances
Object[] args = message.<Object[]>getPayload();
String name = ( String ) args[0];
URI[] clusterInstanceUris = ( URI[] ) args[1];
context.joining( name, Iterables.<URI,URI>iterable( clusterInstanceUris ) );
for ( URI potentialClusterInstanceUri : clusterInstanceUris )
{
outgoing.offer( to( ClusterMessage.configurationRequest,
potentialClusterInstanceUri,
new ClusterMessage.ConfigurationRequestState( context.getMyId(), context.boundAt() ) ) );
}
context.setTimeout( "discovery",
timeout( ClusterMessage.configurationTimeout, message,
new ClusterMessage.ConfigurationTimeoutState(
/*
* The time when this becomes relevant is if indeed there are
* other instances present in the configuration. If there aren't
* we won't wait for them anyway and only this delay prevents us
* from going ahead and creating the cluster. We still wait a bit
* though because even if we don't have them configured they still
* might contact us.
* If, on the other hand, we have some configured, then we won't
* startup anyway until half are available. So this delay doesn't
* enter into it anyway.
* In summary, this offers no upside if there are configured
* instances
* and causes unnecessary delay if we are supposed to go ahead and
* create the cluster.
*/
1 ) ) );
return discovery;
}
}
return this;
}
},
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_protocol_cluster_ClusterState.java
|
3,821
|
/**
 * Election credentials based on the URI an instance is bound at: the alphabetically
 * lowest URI wins (higher priority). Serializable so votes can cross the wire.
 */
public class ServerIdElectionCredentials implements ElectionCredentials, Serializable
{
    // The bound URI; its textual form is the vote credential.
    private final URI credentials;

    public ServerIdElectionCredentials( URI credentials )
    {
        this.credentials = credentials;
    }

    @Override
    public int compareTo( Object o )
    {
        // Alphabetically lower URI means higher prio. Compare with swapped operands
        // instead of negating compareTo()'s result: negation is the classic anti-pattern
        // that breaks on Integer.MIN_VALUE and obscures intent.
        ServerIdElectionCredentials other = (ServerIdElectionCredentials) o;
        return other.credentials.toString().compareTo( credentials.toString() );
    }
}
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_protocol_election_ServerIdElectionCredentials.java
|
3,822
|
/**
 * Credentials provider for instances that must never win an election: every role
 * gets credentials that always lose against any other candidate.
 */
public class NotElectableElectionCredentialsProvider
        implements ElectionCredentialsProvider
{
    @Override
    public ElectionCredentials getCredentials( String role )
    {
        // Same answer for every role: never-electable credentials.
        ElectionCredentials neverElectable = new NotElectableElectionCredentials();
        return neverElectable;
    }
}
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_protocol_election_NotElectableElectionCredentialsProvider.java
|
3,823
|
/**
 * Stateless credentials that always lose an election, marking an instance as not
 * electable for any role. All instances are interchangeable.
 */
public final class NotElectableElectionCredentials implements ElectionCredentials, Externalizable
{
    // For Externalizable
    public NotElectableElectionCredentials()
    {}

    @Override
    public int compareTo( Object o )
    {
        // Always reports itself smaller, so it can never win.
        // NOTE(review): deliberately breaks the Comparable antisymmetry contract when two
        // of these are compared (both claim to be smaller) - confirm this is intended.
        return -1;
    }

    @Override
    public boolean equals( Object obj )
    {
        // instanceof is null-safe, so the explicit null check is folded in.
        return obj instanceof NotElectableElectionCredentials;
    }

    @Override
    public int hashCode()
    {
        return 0;
    }

    @Override
    public void writeExternal( ObjectOutput out ) throws IOException
    {
        // Stateless: nothing to serialize.
    }

    @Override
    public void readExternal( ObjectInput in ) throws IOException, ClassNotFoundException
    {
        // Stateless: nothing to read.
    }
}
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_protocol_election_NotElectableElectionCredentials.java
|
3,824
|
/**
 * Test credentials with a fixed integer vote weight; a higher value wins.
 */
public class IntegerElectionCredentials implements ElectionCredentials
{
    private final int credential;

    public IntegerElectionCredentials( int credential )
    {
        this.credential = credential;
    }

    @Override
    public int compareTo( Object o )
    {
        // Only comparable against its own type; anything else ties at 0.
        // Integer.compare avoids the boxing of Integer.valueOf(..).compareTo(..).
        return o instanceof IntegerElectionCredentials
                ? Integer.compare( credential, ((IntegerElectionCredentials) o).credential ) : 0;
    }
}
| false
|
enterprise_cluster_src_test_java_org_neo4j_cluster_protocol_election_IntegerElectionCredentials.java
|
3,825
|
/**
 * Bridges heartbeat events to the election protocol: an instance reported failed is
 * demoted (triggering re-election of its roles), and an instance coming back alive
 * triggers fresh role elections.
 */
public class HeartbeatReelectionListener
    implements HeartbeatListener
{
    private final Election election;
    private final StringLogger messagesLog;
    public HeartbeatReelectionListener( Election election, StringLogger messagesLog )
    {
        this.election = election;
        this.messagesLog = messagesLog;
    }
    @Override
    public void failed( InstanceId server )
    {
        // Suggest reelection for all roles of this node
        messagesLog.warn( " instance " + server +" is being demoted since it failed" );
        election.demote( server );
    }
    @Override
    public void alive( InstanceId server )
    {
        // A recovered node may hold stale roles; re-run elections to settle them.
        election.performRoleElections();
    }
}
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_protocol_election_HeartbeatReelectionListener.java
|
3,826
|
/**
 * Unit tests for the election state machine.
 * NOTE(review): 'election' below is presumably the ElectionState.election enum constant,
 * brought in via a static import at the top of the file - confirm against full source.
 */
public class ElectionStateTest
{
    // An election request must be dropped (no outgoing messages) when quorum is lost.
    @Test
    public void testElectionRequestIsRejectedIfNoQuorum() throws Throwable
    {
        ElectionContext context = mock( ElectionContext.class );
        ClusterContext clusterContextMock = mock( ClusterContext.class );
        when( context.electionOk() ).thenReturn( false );
        when( clusterContextMock.getLogger( Matchers.<Class>any() ) ).thenReturn( mock( StringLogger.class ) );
//        when( context.getClusterContext() ).thenReturn( clusterContextMock );
        MessageHolder holder = mock( MessageHolder.class );
        election.handle( context,
                Message.<ElectionMessage>internal( performRoleElections ), holder );
        verifyZeroInteractions( holder );
    }
    // Same quorum guard applies to elections triggered by a demote.
    @Test
    public void testElectionFromDemoteIsRejectedIfNoQuorum() throws Throwable
    {
        ElectionContext context = mock( ElectionContext.class );
        ClusterContext clusterContextMock = mock( ClusterContext.class );
        when( context.electionOk() ).thenReturn( false );
        when( clusterContextMock.getLogger( Matchers.<Class>any() ) ).thenReturn( mock( StringLogger.class ) );
        when( context.getLogger( Matchers.<Class>any() ) ).thenReturn( mock( StringLogger.class ) );
        MessageHolder holder = mock( MessageHolder.class );
        election.handle( context,
                Message.<ElectionMessage>internal( demote ), holder );
        verifyZeroInteractions( holder );
    }
    @Test
    public void electionShouldRemainLocalIfStartedBySingleInstanceWhichIsTheRoleHolder() throws Throwable
    {
        /*
         * Ensures that when an instance is alone in the cluster, elections for roles that it holds do not set
         * timeouts or try to reach other instances.
         */
        // Given
        ElectionContext context = mock( ElectionContext.class );
        ClusterContext clusterContextMock = mock( ClusterContext.class );
        when( clusterContextMock.getLogger( Matchers.<Class>any() ) ).thenReturn( mock( StringLogger.class ) );
//        when( context.getClusterContext() ).thenReturn( clusterContextMock );
        MessageHolder holder = mock( MessageHolder.class );
        // These mean the election can proceed normally, by us
        when( context.electionOk() ).thenReturn( true );
        when( context.isInCluster() ).thenReturn( true );
        when( context.isElector() ).thenReturn( true );
        // Like it says on the box, we are the only instance
        final InstanceId myInstanceId = new InstanceId( 1 );
        Map<InstanceId, URI> members = new HashMap<InstanceId, URI>();
        members.put( myInstanceId, URI.create( "ha://me" ) );
        when( context.getMembers() ).thenReturn( members );
        // Any role would do, just make sure we have it
        final String role = "master";
        when( context.getPossibleRoles() ).thenReturn(
                Collections.<ElectionRole>singletonList( new ElectionRole( role ) ) );
        when( context.getElected( role ) ).thenReturn( myInstanceId );
        // Required for logging
        when( context.getLogger( Mockito.<Class>any()) ).thenReturn( mock( StringLogger.class ) );
        // When
        election.handle( context,
                Message.<ElectionMessage>internal( performRoleElections ), holder );
        // Then
        // Make sure that we asked ourselves to vote for that role and that no timer was set
        verify( holder, times(1) ).offer( Matchers.argThat( new MessageArgumentMatcher<ElectionMessage>()
                .onMessageType( ElectionMessage.vote ).withPayload( role ) ) );
        verify( context, times( 0 ) ).setTimeout( Matchers.<String>any(), Matchers.<Message>any() );
    }
}
| false
|
enterprise_cluster_src_test_java_org_neo4j_cluster_protocol_election_ElectionStateTest.java
|
3,827
|
private static class ElectionTimeoutData
{
private final String role;
private final Message message;
private ElectionTimeoutData( String role, Message message )
{
this.role = role;
this.message = message;
}
public String getRole()
{
return role;
}
public Message getMessage()
{
return message;
}
}
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_protocol_election_ElectionState.java
|
3,828
|
    election
    {
        // Main election state: drives demote/promote-triggered elections, casts and
        // tallies votes, and broadcasts the winner (or the unelection of the previous
        // role holder) via the proposer.
        @Override
        public State<?, ?> handle( ElectionContext context,
                                   Message<ElectionMessage> message,
                                   MessageHolder outgoing
        )
                throws Throwable
        {
            StringLogger logger = context.getLogger( ElectionState.class );
            switch ( message.getMessageType() )
            {
                case demote:
                {
                    if ( !context.electionOk() )
                    {
                        logger.warn( "Context says election is not OK to proceed. " +
                                "Failed instances are: " +
                                context.getFailed() +
                                ", cluster members are: " +
                                context.getMembers() );
                        break;
                    }
                    InstanceId demoteNode = message.getPayload();
                    // TODO Could perhaps be done better?
                    context.nodeFailed( demoteNode );
                    if ( context.isInCluster() )
                    {
                        // Only the first alive server should try elections. Everyone else waits
                        List<InstanceId> aliveInstances = Iterables.toList(context.getAlive());
                        Collections.sort( aliveInstances );
                        boolean isElector = aliveInstances.indexOf( context.getMyId() ) == 0;
                        if ( isElector )
                        {
                            logger.debug( "I (" + context.getMyId() + ") am the elector, " +
                                    "executing the election" );
                            // Start election process for all roles that are currently unassigned
                            Iterable<String> rolesRequiringElection = context.getRolesRequiringElection();
                            for ( String role : rolesRequiringElection )
                            {
                                if ( !context.isElectionProcessInProgress( role ) )
                                {
                                    logger.debug(
                                            "Starting election process for role " + role );
                                    context.startDemotionProcess( role, demoteNode );
                                    // Allow other live nodes to vote which one should take over
                                    for ( Map.Entry<InstanceId, URI> server : context.getMembers().entrySet() )
                                    {
                                        if ( !context.getFailed().contains( server.getKey() ) )
                                        {
                                            // This is a candidate - allow it to vote itself for promotion
                                            outgoing.offer( Message.to( ElectionMessage.vote, server.getValue(), role ) );
                                        }
                                    }
                                    // Retry the election if votes don't arrive in time.
                                    context.setTimeout( "election-" + role,
                                            Message.timeout( ElectionMessage.electionTimeout, message,
                                                    new ElectionTimeoutData( role, message ) ) );
                                }
                                else
                                {
                                    logger.debug(
                                            "Election already in progress for role " + role );
                                }
                            }
                        }
                    }
                    break;
                }
                case performRoleElections:
                {
                    if ( !context.electionOk() )
                    {
                        break;
                    }
                    if ( context.isInCluster() )
                    {
                        boolean isElector = context.isElector();
                        if ( isElector )
                        {
                            // Start election process for all roles
                            Iterable<ElectionRole> rolesRequiringElection = context.getPossibleRoles();
                            for ( ElectionRole role : rolesRequiringElection )
                            {
                                String roleName = role.getName();
                                if ( !context.isElectionProcessInProgress( roleName ) )
                                {
                                    context.getLogger(ElectionState.class).debug(
                                            "Starting election process for role " + roleName );
                                    context.startElectionProcess( roleName );
                                    boolean sentSome = false;
                                    // Allow other live nodes to vote which one should take over
                                    for ( Map.Entry<InstanceId, URI> server : context.getMembers().entrySet() )
                                    {
                                        /*
                                         * Skip dead nodes and the current role holder. Dead nodes are not
                                         * candidates anyway and the current role holder will be asked last,
                                         * after everyone else has cast votes.
                                         */
                                        if ( !context.isFailed( server.getKey() ) &&
                                                !server.getKey().equals( context.getElected( roleName ) ) )
                                        {
                                            // This is a candidate - allow it to vote itself for promotion
                                            outgoing.offer( Message.to( ElectionMessage.vote, server.getValue(), roleName ) );
                                            sentSome = true;
                                        }
                                    }
                                    if ( !sentSome )
                                    {
                                        /*
                                         * If we didn't send any messages, we are the only non-failed cluster
                                         * member and probably (not necessarily) hold the role, though that
                                         * doesn't matter. So we ask ourselves to vote, if we didn't above.
                                         * In this case, no timeout is required, because no messages are
                                         * expected. If we are indeed the role holder, then we'll cast our
                                         * vote as a response to this message, which will complete the election.
                                         */
                                        outgoing.offer( Message.internal( ElectionMessage.vote, roleName ) );
                                    }
                                    else
                                    {
                                        context.setTimeout( "election-" + roleName,
                                                Message.timeout( ElectionMessage.electionTimeout, message,
                                                        new ElectionTimeoutData( roleName, message ) ) );
                                    }
                                }
                                else
                                {
                                    logger.debug(
                                            "Election already in progress for role " + roleName );
                                }
                            }
                        }
                        else
                        {
                            // Not the elector: forward the request to the lowest-id alive instance.
                            List<InstanceId> aliveInstances = Iterables.toList( context.getAlive() );
                            Collections.sort( aliveInstances );
                            outgoing.offer( message.setHeader( Message.TO,
                                    context.getUriForId( first( aliveInstances ) ).toString() ) );
                        }
                    }
                    break;
                }
                case promote:
                {
                    Object[] args = message.getPayload();
                    InstanceId promoteNode = (InstanceId) args[0];
                    String role = (String) args[1];
                    // Start election process for coordinator role
                    if ( context.isInCluster() && !context.isElectionProcessInProgress( role ) )
                    {
                        context.startPromotionProcess( role, promoteNode );
                        // Allow other live nodes to vote which one should take over
                        for ( Map.Entry<InstanceId, URI> server : context.getMembers().entrySet() )
                        {
                            if ( !context.getFailed().contains( server.getKey() ) )
                            {
                                // This is a candidate - allow it to vote itself for promotion
                                outgoing.offer( Message.to( ElectionMessage.vote, server.getValue(), role ) );
                            }
                        }
                        context.setTimeout( "election-" + role, Message.timeout( ElectionMessage
                                .electionTimeout, message, new ElectionTimeoutData( role, message ) ) );
                    }
                    break;
                }
                case vote:
                {
                    // Cast our own vote for the requested role back to the requester.
                    String role = message.getPayload();
                    outgoing.offer( Message.respond( ElectionMessage.voted, message,
                            new ElectionMessage.VotedData( role, context.getMyId(),
                                    context.getCredentialsForRole( role ) ) ) );
                    break;
                }
                case voted:
                {
                    ElectionMessage.VotedData data = message.getPayload();
                    context.voted( data.getRole(), data.getInstanceId(), data.getVoteCredentials() );
                    String voter = message.hasHeader( Message.FROM ) ? message.getHeader( Message.FROM ) : "I";
                    logger.debug( voter + " voted " + data );
                    /*
                     * This is the URI of the current role holder and, yes, it could very well be null. However
                     * we don't really care. If it is null then the election would not have sent one vote
                     * request less than needed (i.e. ask the master last) since, well, it doesn't exist. So
                     * the immediate effect is that the else (which checks for null) will never be called.
                     */
                    InstanceId currentElected = context.getElected( data.getRole() );
                    if ( context.getVoteCount( data.getRole() ) == context.getNeededVoteCount() )
                    {
                        // We have all votes now
                        InstanceId winner = context.getElectionWinner( data.getRole() );
                        if ( winner != null )
                        {
                            logger.debug( "Elected " + winner + " as " + data.getRole() );
                            // Broadcast this
                            ClusterMessage.ConfigurationChangeState configurationChangeState = new
                                    ClusterMessage.ConfigurationChangeState();
                            configurationChangeState.elected( data.getRole(), winner );
                            outgoing.offer( Message.internal( ProposerMessage.propose,
                                    configurationChangeState ) );
                        }
                        else
                        {
                            logger.warn( "Election could not pick a winner" );
                            if ( currentElected != null )
                            {
                                // Someone had the role and doesn't anymore. Broadcast this
                                ClusterMessage.ConfigurationChangeState configurationChangeState = new
                                        ClusterMessage.ConfigurationChangeState();
                                configurationChangeState.unelected( data.getRole(), currentElected );
                                outgoing.offer( Message.internal( ProposerMessage.propose,
                                        configurationChangeState ) );
                            }
                        }
                        context.cancelTimeout( "election-" + data.getRole() );
                    }
                    else if ( context.getVoteCount( data.getRole() ) == context.getNeededVoteCount() - 1 &&
                            currentElected != null && !context.hasCurrentlyElectedVoted(data.getRole(), currentElected))
                    {
                        // Missing one vote, the one from the current role holder
                        outgoing.offer( Message.to( ElectionMessage.vote,
                                context.getUriForId( currentElected ),
                                data.getRole() ) );
                    }
                    break;
                }
                case electionTimeout:
                {
                    // Election failed - try again
                    ElectionTimeoutData electionTimeoutData = message.getPayload();
                    logger.warn( String.format(
                            "Election timed out for '%s'- trying again", electionTimeoutData.getRole() ) );
                    context.cancelElection( electionTimeoutData.getRole() );
                    outgoing.offer( electionTimeoutData.getMessage() );
                    break;
                }
                case leave:
                {
                    return start;
                }
            }
            return this;
        }
    };
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_protocol_election_ElectionState.java
|
3,829
|
start
{
@Override
public State<?, ?> handle( ElectionContext context,
Message<ElectionMessage> message,
MessageHolder outgoing
)
throws Throwable
{
switch ( message.getMessageType() )
{
case created:
{
context.created();
return election;
}
case join:
{
return election;
}
}
return this;
}
},
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_protocol_election_ElectionState.java
|
3,830
|
/**
 * Value object naming a role that can be won through an election, e.g. "coordinator".
 */
public class ElectionRole
{
    // Immutable by design: the role's identity never changes after construction.
    private final String name;

    public ElectionRole( String name )
    {
        this.name = name;
    }

    public String getName()
    {
        return name;
    }
}
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_protocol_election_ElectionRole.java
|
3,831
|
    // Payload of a 'voted' message: which role the vote is for, which instance cast it,
    // and the credentials backing the vote. Serializable so it can cross the wire.
    public static class VotedData
        implements Serializable
    {
        private final String role;
        private final InstanceId instanceId;
        private final Comparable<Object> voteCredentials;
        public VotedData( String role, InstanceId instanceId, Comparable<Object> voteCredentials )
        {
            this.role = role;
            this.instanceId = instanceId;
            this.voteCredentials = voteCredentials;
        }
        public String getRole()
        {
            return role;
        }
        public InstanceId getInstanceId()
        {
            return instanceId;
        }
        public Comparable<Object> getVoteCredentials()
        {
            return voteCredentials;
        }
        @Override
        public String toString()
        {
            return getClass().getSimpleName() + "[role:" + role + ", instance:" + instanceId + ", credentials:" +
                    voteCredentials + "]";
        }
    }
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_protocol_election_ElectionMessage.java
|
3,832
|
/**
 * Tests for the election context: elections must be vetoed once quorum is lost, and
 * duplicate votes from the same instance for the same role must be consolidated.
 */
public class ElectionContextTest
{
    @Test
    public void testElectionOkNoFailed()
    {
        baseTestForElectionOk( new HashSet<InstanceId>(), false );
    }

    @Test
    public void testElectionOkLessThanQuorumFailed()
    {
        Set<InstanceId> failed = new HashSet<InstanceId>();
        failed.add( new InstanceId( 1 ) );
        baseTestForElectionOk( failed, false );
    }

    @Test
    public void testElectionNotOkMoreThanQuorumFailed()
    {
        Set<InstanceId> failed = new HashSet<InstanceId>();
        failed.add( new InstanceId( 1 ) );
        failed.add( new InstanceId( 2 ) );
        baseTestForElectionOk( failed, true );
    }

    @Test
    public void testElectionNotOkQuorumFailedTwoInstances()
    {
        assertElectionNotOkWithFailed( 2, new InstanceId( 2 ) );
    }

    @Test
    public void testElectionNotOkQuorumFailedFourInstances()
    {
        assertElectionNotOkWithFailed( 4, new InstanceId( 2 ), new InstanceId( 3 ) );
    }

    @Test
    public void testElectionNotOkQuorumFailedFiveInstances()
    {
        assertElectionNotOkWithFailed( 5, new InstanceId( 2 ), new InstanceId( 3 ), new InstanceId( 4 ) );
    }

    @Test
    public void twoVotesFromSameInstanceForSameRoleShouldBeConsolidated() throws Exception
    {
        // Given
        final String coordinatorRole = "coordinator";
        HeartbeatContext heartbeatContext = mock( HeartbeatContext.class );
        when( heartbeatContext.getFailed() ).thenReturn( Collections.<InstanceId>emptySet() );
        Logging logging = Mockito.mock( Logging.class );
        when( logging.getMessagesLog( Matchers.<Class>any() ) ).thenReturn( mock( StringLogger.class ) );
        ElectionContext toTest = newMultiPaxosContext( membersOfSize( 3 ), logging ).getElectionContext();

        // When
        toTest.startElectionProcess( coordinatorRole );
        toTest.voted( coordinatorRole, new InstanceId( 1 ), new IntegerElectionCredentials( 100 ) );
        toTest.voted( coordinatorRole, new InstanceId( 2 ), new IntegerElectionCredentials( 100 ) );
        toTest.voted( coordinatorRole, new InstanceId( 2 ), new IntegerElectionCredentials( 101 ) );

        // Then: instance 2's second vote replaces (not adds to) its first, so the
        // election cannot complete yet and no winner exists.
        assertNull( toTest.getElectionWinner( coordinatorRole ) );
        assertEquals( 2, toTest.getVoteCount( coordinatorRole ) );
    }

    // In a 3-member cluster, asserts whether losing the given members kills the election.
    private void baseTestForElectionOk( Set<InstanceId> failed, boolean moreThanQuorum )
    {
        MultiPaxosContext context = newMultiPaxosContext( membersOfSize( 3 ), Mockito.mock( Logging.class ) );
        context.getHeartbeatContext().getFailed().addAll( failed );
        assertEquals( moreThanQuorum, !context.getElectionContext().electionOk() );
    }

    // Quorum-loss scenario: a cluster of the given size with the given members failed
    // must refuse to hold an election.
    private void assertElectionNotOkWithFailed( int clusterSize, InstanceId... failedInstances )
    {
        Set<InstanceId> failed = new HashSet<InstanceId>();
        for ( InstanceId failedInstance : failedInstances )
        {
            failed.add( failedInstance );
        }
        MultiPaxosContext context = newMultiPaxosContext( membersOfSize( clusterSize ),
                Mockito.mock( Logging.class ) );
        context.getHeartbeatContext().getFailed().addAll( failed );
        assertFalse( context.getElectionContext().electionOk() );
    }

    // Builds a member map server1..serverN with instance ids 1..N.
    private static Map<InstanceId, URI> membersOfSize( int n )
    {
        Map<InstanceId, URI> members = new HashMap<InstanceId, URI>();
        for ( int i = 1; i <= n; i++ )
        {
            members.put( new InstanceId( i ), URI.create( "server" + i ) );
        }
        return members;
    }

    // Creates a MultiPaxosContext over a mocked cluster configuration with the given members,
    // a single "coordinator" role, and mocked collaborators.
    private static MultiPaxosContext newMultiPaxosContext( Map<InstanceId, URI> members, Logging logging )
    {
        ClusterConfiguration clusterConfiguration = mock( ClusterConfiguration.class );
        when( clusterConfiguration.getMembers() ).thenReturn( members );
        return new MultiPaxosContext( new InstanceId( 1 ),
                Iterables.<ElectionRole, ElectionRole>iterable( new ElectionRole( "coordinator" ) ),
                clusterConfiguration,
                Mockito.mock( Executor.class ), logging,
                Mockito.mock( ObjectInputStreamFactory.class ), Mockito.mock( ObjectOutputStreamFactory.class ),
                Mockito.mock( AcceptorInstanceStore.class ), Mockito.mock( Timeouts.class ),
                mock( ElectionCredentialsProvider.class ) );
    }
}
| false
|
enterprise_cluster_src_test_java_org_neo4j_cluster_protocol_election_ElectionContextTest.java
|
3,833
|
/**
 * Scenario tests around server-id conflicts and substitution of failed members,
 * driven through the ClusterMockTest scripting DSL.
 */
public class InstanceIdTest
    extends ClusterMockTest
{
    // Two nodes configured with the same server id try to form a cluster; both must
    // end up with an empty (failed) configuration.
    @Test
    public void nodeTriesToJoinAnotherNodeWithSameServerId() throws InterruptedException, ExecutionException,
            TimeoutException, URISyntaxException
    {
        testCluster( new int[] { 1, 1 }, new VerifyInstanceConfiguration[]
                {
                        new VerifyInstanceConfiguration( Collections.<URI>emptyList(), Collections.<String, InstanceId>emptyMap(),
                                Collections.<InstanceId>emptySet() ),
                        new VerifyInstanceConfiguration( Collections.<URI>emptyList(), Collections.<String, InstanceId>emptyMap(),
                                Collections.<InstanceId>emptySet() )
                },
                DEFAULT_NETWORK(), new ClusterTestScriptDSL().
                rounds( 600 ).
                join( 100, 1, 1, 2 ).
                join( 100, 2, 1, 2 ).
                message( 500, "*** All nodes tried to start, should be in failed mode" )
        );
    }
    // A fourth node reusing id 3 joins a running 3-member cluster: the cluster keeps its
    // original membership and the conflicting node ends up with nothing.
    @Test
    public void nodeTriesToJoinRunningClusterWithExistingServerId() throws InterruptedException, ExecutionException,
            TimeoutException, URISyntaxException
    {
        List<URI> correctMembers = new ArrayList<URI>();
        correctMembers.add( URI.create( "server1" ) );
        correctMembers.add( URI.create( "server2" ) );
        correctMembers.add( URI.create( "server3" ) );
        Map<String, InstanceId> roles = new HashMap<String, InstanceId>();
        roles.put( "coordinator", new InstanceId( 1 ) );
        testCluster( new int[] {1, 2, 3, 3},
                new VerifyInstanceConfiguration[]{
                        new VerifyInstanceConfiguration( correctMembers, roles, Collections.<InstanceId>emptySet() ),
                        new VerifyInstanceConfiguration( correctMembers, roles, Collections.<InstanceId>emptySet() ),
                        new VerifyInstanceConfiguration( correctMembers, roles, Collections.<InstanceId>emptySet() ),
                        new VerifyInstanceConfiguration( Collections.<URI>emptyList(), Collections.<String, InstanceId>emptyMap(),
                                Collections.<InstanceId>emptySet() )}, DEFAULT_NETWORK(), new ClusterTestScriptDSL().
                rounds( 600 ).
                join( 100, 1, 1 ).
                join( 100, 2, 1 ).
                join( 100, 3, 1 ).
                join( 5000, 4, 1 ).
                message( 0, "*** Conflicting node tried to join" )
        );
    }
    // Node 3 dies and node 4 (configured with the same id 3) takes its place; the dead
    // node still believes in the old membership, everyone else sees the substitute.
    @Test
    public void substituteFailedNode() throws InterruptedException, ExecutionException, TimeoutException,
            URISyntaxException
    {
        ClusterAssertion assertion = ClusterAssertion.basedOn( new int[]{1, 2, 3, 3} );
        assertion = assertion.joins( 1, 2, 3);
        assertion = assertion.elected( 1, "coordinator" );
        assertion = assertion.failed( 3 );
        assertion = assertion.joins( 4 );
        List<URI> correctMembers = new ArrayList<URI>();
        correctMembers.add( URI.create( "server1" ) );
        correctMembers.add( URI.create( "server2" ) );
        correctMembers.add( URI.create( "server4" ) );
        List<URI> wrongMembers = new ArrayList<URI>();
        wrongMembers.add( URI.create( "server1" ) );
        wrongMembers.add( URI.create( "server2" ) );
        wrongMembers.add( URI.create( "server3" ) );
        Map<String, InstanceId> roles = new HashMap<String, InstanceId>();
        roles.put( "coordinator", new InstanceId( 1 ) );
        Set<InstanceId> failed = new HashSet<InstanceId>();
        testCluster( new int[]{ 1, 2, 3, 3 },
                new VerifyInstanceConfiguration[]{
                        new VerifyInstanceConfiguration( correctMembers, roles, failed ),
                        new VerifyInstanceConfiguration( correctMembers, roles, failed ),
                        new VerifyInstanceConfiguration( wrongMembers, roles, Collections.<InstanceId>emptySet() ),
                        new VerifyInstanceConfiguration( correctMembers, roles, failed )},
                DEFAULT_NETWORK(),
                new ClusterTestScriptDSL().
                        rounds( 8000 ).
                        join( 100, 1, 1 ).
                        join( 100, 2, 1 ).
                        join( 100, 3, 1 ).
//                        assertThat(electionHappened(1, "coordinator")).
                        down( 3000, 3 ).
                        join( 1000, 4, 1, 2, 3 )
        );
    }
    // Same as above, but the original node 3 comes back up after its id has been taken
    // over; it must not be able to rejoin with the now-occupied id.
    @Test
    public void substituteFailedNodeAndFailedComesOnlineAgain() throws InterruptedException, ExecutionException, TimeoutException,
            URISyntaxException
    {
        List<URI> correctMembers = new ArrayList<URI>();
        correctMembers.add( URI.create( "server1" ) );
        correctMembers.add( URI.create( "server2" ) );
        correctMembers.add( URI.create( "server4" ) );
        List<URI> badMembers = new ArrayList<URI>();
        badMembers.add( URI.create( "server1" ) );
        badMembers.add( URI.create( "server2" ) );
        badMembers.add( URI.create( "server3" ) );
        Map<String, InstanceId> roles = new HashMap<String, InstanceId>();
        roles.put( "coordinator", new InstanceId( 1 ) );
        Set<InstanceId> failed = new HashSet<InstanceId>();
        testCluster( new int[]{1, 2, 3, 3},
                new VerifyInstanceConfiguration[]{
                        new VerifyInstanceConfiguration( correctMembers, roles, failed ),
                        new VerifyInstanceConfiguration( correctMembers, roles, failed ),
                        new VerifyInstanceConfiguration( badMembers, roles, failed ),
                        new VerifyInstanceConfiguration( correctMembers, roles, failed )},
                DEFAULT_NETWORK(),
                new ClusterTestScriptDSL().
                        rounds( 800 ).
                        join( 100, 1, 1 ).
                        join( 100, 2, 1 ).
                        join( 100, 3, 1 ).
                        down( 3000, 3 ).
                        join( 1000, 4, 1, 2, 3 ).
                        up( 1000, 3 )
        );
    }
}
| false
|
enterprise_cluster_src_test_java_org_neo4j_cluster_protocol_cluster_InstanceIdTest.java
|
3,834
|
/**
 * Mockito argument matcher that accepts a {@link ConfigurationResponseState} whose member
 * map equals the one supplied via {@link #withMembers(Map)}. Used with
 * {@code verify(context).joinDenied(argThat(...))} style verifications.
 */
private static class ConfigurationResponseStateMatcher extends ArgumentMatcher<ConfigurationResponseState>
{
// Expected instance-id -> URI member map; set via the fluent withMembers() call.
private Map<InstanceId, URI> members;
public ConfigurationResponseStateMatcher withMembers( Map<InstanceId, URI> members )
{
this.members = members;
return this;
}
@Override
public boolean matches( Object argument )
{
// Matches purely on the member map; roles and other response fields are ignored.
ConfigurationResponseState arg = (ConfigurationResponseState) argument;
return arg.getMembers().equals( this.members );
}
}
| false
|
enterprise_cluster_src_test_java_org_neo4j_cluster_protocol_cluster_ClusterStateTest.java
|
3,835
|
/**
 * Unit tests for the {@code ClusterState} state machine, focused on the join-denial paths:
 * what configuration an existing member sends back when it refuses a join, and how the
 * joining side propagates that denied-configuration information.
 */
public class ClusterStateTest
{
@Test
public void joinDeniedResponseShouldContainRespondersConfiguration() throws Throwable
{
// GIVEN
ClusterContext context = mock( ClusterContext.class );
Map<InstanceId, URI> existingMembers = members( 1, 2 );
when( context.isCurrentlyAlive( any( InstanceId.class ) ) ).thenReturn( true );
when( context.getMembers() ).thenReturn( existingMembers );
when( context.getConfiguration() ).thenReturn( clusterConfiguration( existingMembers ) );
when( context.getLogger( any( Class.class ) ) ).thenReturn( StringLogger.DEV_NULL );
TrackingMessageHolder outgoing = new TrackingMessageHolder();
Message<ClusterMessage> message = to( configurationRequest, uri( 1 ), configuration( 2 ) )
.setHeader( Message.FROM, uri( 2 ).toString() );
// WHEN an instance responds to a join request, responding that the joining instance cannot join
ClusterState.entered.handle( context, message, outgoing );
// THEN assert that the responding instance sends its configuration along with the response
Message<ClusterMessage> response = outgoing.single();
assertTrue( response.getPayload() instanceof ConfigurationResponseState );
ConfigurationResponseState responseState = response.getPayload();
assertEquals( existingMembers, responseState.getMembers() );
}
@Test
public void joinDeniedHandlingShouldKeepResponseConfiguration() throws Throwable
{
// GIVEN
ClusterContext context = mock( ClusterContext.class );
when( context.getLogger( any( Class.class ) ) ).thenReturn( StringLogger.DEV_NULL );
TrackingMessageHolder outgoing = new TrackingMessageHolder();
Map<InstanceId, URI> members = members( 1, 2 );
// WHEN a joining instance receives a denial to join
ClusterState.discovery.handle( context, to( joinDenied, uri( 2 ),
configurationResponseState( members ) ), outgoing );
// THEN assert that the response contains the configuration
// (verified via a matcher that compares only the member map)
verify( context ).joinDenied( argThat(
new ConfigurationResponseStateMatcher().withMembers( members ) ) );
}
@Test
public void joinDeniedTimeoutShouldBeHandledWithExceptionIncludingConfiguration() throws Throwable
{
// GIVEN
ClusterContext context = mock( ClusterContext.class );
Map<InstanceId, URI> existingMembers = members( 1, 2 );
when( context.getLogger( any( Class.class ) ) ).thenReturn( StringLogger.DEV_NULL );
when( context.getJoiningInstances() ).thenReturn( Collections.<URI>emptyList() );
when( context.hasJoinBeenDenied() ).thenReturn( true );
when( context.getJoinDeniedConfigurationResponseState() )
.thenReturn( configurationResponseState( existingMembers ) );
TrackingMessageHolder outgoing = new TrackingMessageHolder();
// WHEN the join denial actually takes effect (signaled by a join timeout locally)
ClusterState.joining.handle( context, to( ClusterMessage.joiningTimeout, uri( 2 ) )
.setHeader( Message.CONVERSATION_ID, "bla" ), outgoing );
// THEN assert that the failure contains the received configuration
Message<? extends MessageType> response = outgoing.single();
ClusterEntryDeniedException deniedException = response.getPayload();
assertEquals( existingMembers, deniedException.getConfigurationResponseState().getMembers() );
}
@Test
public void shouldNotDenyJoinToInstanceThatRejoinsBeforeTimingOut() throws Throwable
{
// GIVEN: instance 2 is already a member and rejoining from the same URI we know it by
ClusterContext context = mock( ClusterContext.class );
Map<InstanceId, URI> existingMembers = members( 1, 2 );
when( context.isCurrentlyAlive( id( 2 ) ) ).thenReturn( true );
when( context.getMembers() ).thenReturn( existingMembers );
when( context.getConfiguration() ).thenReturn( clusterConfiguration( existingMembers ) );
when( context.getLogger( any( Class.class ) ) ).thenReturn( StringLogger.DEV_NULL );
when( context.getUriForId( id( 2 ) ) ).thenReturn( uri( 2 ) );
TrackingMessageHolder outgoing = new TrackingMessageHolder();
Message<ClusterMessage> message = to( configurationRequest, uri( 1 ), configuration( 2 ) )
.setHeader( Message.FROM, uri( 2 ).toString() );
// WHEN the join denial actually takes effect (signaled by a join timeout locally)
ClusterState.entered.handle( context, message, outgoing );
// THEN the response is a normal configurationResponse, not a joinDenied
Message<? extends MessageType> response = outgoing.single();
assertEquals( ClusterMessage.configurationResponse, response.getMessageType() );
}
// Builds a response state with the given members, no roles, and this test's cluster name.
private ConfigurationResponseState configurationResponseState( Map<InstanceId, URI> existingMembers )
{
return new ConfigurationResponseState( Collections.<String,InstanceId>emptyMap(),
existingMembers, null, "ClusterStateTest" );
}
private ClusterConfiguration clusterConfiguration( Map<InstanceId, URI> members )
{
ClusterConfiguration config = new ClusterConfiguration( "ClusterStateTest", StringLogger.DEV_NULL );
config.setMembers( members );
return config;
}
// Maps each given id to a matching localhost URI (see uri()).
private Map<InstanceId,URI> members( int... memberIds )
{
Map<InstanceId,URI> members = new HashMap<>();
for ( int memberId : memberIds )
{
members.put( new InstanceId( memberId ), uri( memberId ) );
}
return members;
}
private ConfigurationRequestState configuration( int joiningInstance )
{
return new ConfigurationRequestState( new InstanceId( joiningInstance ), uri( joiningInstance ) );
}
// Deterministic URI per instance id: port 6000+i with a serverId query parameter.
private URI uri( int i )
{
return URI.create( "http://localhost:" + (6000+i) + "?serverId=" + i );
}
private InstanceId id( int i )
{
return new InstanceId( i );
}
/**
 * Mockito argument matcher that accepts a {@link ConfigurationResponseState} whose
 * member map equals the expected one; all other response fields are ignored.
 */
private static class ConfigurationResponseStateMatcher extends ArgumentMatcher<ConfigurationResponseState>
{
private Map<InstanceId, URI> members;
public ConfigurationResponseStateMatcher withMembers( Map<InstanceId, URI> members )
{
this.members = members;
return this;
}
@Override
public boolean matches( Object argument )
{
ConfigurationResponseState arg = (ConfigurationResponseState) argument;
return arg.getMembers().equals( this.members );
}
}
}
| false
|
enterprise_cluster_src_test_java_org_neo4j_cluster_protocol_cluster_ClusterStateTest.java
|
3,836
|
// State entered after this instance has broadcast its intent to leave the cluster.
// Waits for the resulting configurationChanged to come back, or gives up on leaveTimedout.
leaving
{
@Override
public State<?, ?> handle( ClusterContext context,
Message<ClusterMessage> message,
MessageHolder outgoing
)
throws Throwable
{
switch ( message.getMessageType() )
{
case configurationChanged:
{
ClusterMessage.ConfigurationChangeState state = message.getPayload();
if ( state.isLeaving( context.getMyId() ) )
{
// Our own leave was agreed upon: stop waiting and go back to start.
context.cancelTimeout( "leave" );
context.left();
return start;
}
else
{
// Some other membership change happened while we wait; apply it and keep waiting.
state.apply( context );
return leaving;
}
}
case leaveTimedout:
{
// Leave was never acknowledged — exit anyway; the cluster may still list us.
context.getLogger( ClusterState.class ).warn( "Failed to leave. Cluster may consider this" +
" instance still a member" );
context.left();
return start;
}
}
return this;
}
}
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_protocol_cluster_ClusterState.java
|
3,837
|
// Steady state: this instance is a full cluster member. Handles join requests from
// others (granting or denying entry), applies membership changes, and initiates leave.
entered
{
@Override
public State<?, ?> handle( ClusterContext context, Message<ClusterMessage> message,
MessageHolder outgoing ) throws Throwable
{
switch ( message.getMessageType() )
{
case addClusterListener:
{
context.addClusterListener( message.<ClusterListener>getPayload() );
break;
}
case removeClusterListener:
{
context.removeClusterListener( message.<ClusterListener>getPayload() );
break;
}
case configurationRequest:
{
// Rebuild the request using the FROM header as the joining URI, since that is
// the address the requester is actually reachable at.
ClusterMessage.ConfigurationRequestState request = message.getPayload();
request = new ClusterMessage.ConfigurationRequestState( request.getJoiningId(), URI.create(message.getHeader( Message.FROM ) ));
InstanceId joiningId = request.getJoiningId();
URI joiningUri = request.getJoiningUri();
boolean isInCluster = context.getMembers().containsKey( joiningId );
boolean isCurrentlyAlive = context.isCurrentlyAlive(joiningId);
boolean messageComesFromSameHost = request.getJoiningId().equals( context.getMyId() );
boolean otherInstanceJoiningWithSameId = context.isInstanceJoiningFromDifferentUri(
joiningId, joiningUri );
boolean isFromSameURIAsTheOneWeAlreadyKnow = context.getUriForId( joiningId ) != null &&
context.getUriForId( joiningId ).equals( joiningUri );
// Deny entry if a live, differently-addressed instance already owns this id,
// or if another instance is concurrently joining with the same id.
boolean somethingIsWrong =
( isInCluster && !messageComesFromSameHost && isCurrentlyAlive && !isFromSameURIAsTheOneWeAlreadyKnow )
|| otherInstanceJoiningWithSameId ;
if ( somethingIsWrong )
{
if(otherInstanceJoiningWithSameId)
{
context.getLogger( ClusterState.class ).info( "Denying entry to instance " + joiningId + " because another instance is currently joining with the same id.");
}
else
{
context.getLogger( ClusterState.class ).info( "Denying entry to instance " + joiningId + " because that instance is already in the cluster.");
}
// The denial carries our full configuration so the joiner can report it.
outgoing.offer( message.copyHeadersTo( respond( ClusterMessage.joinDenied, message,
new ClusterMessage.ConfigurationResponseState( context.getConfiguration()
.getRoles(), context.getConfiguration().getMembers(),
new org.neo4j.cluster.protocol.atomicbroadcast.multipaxos.InstanceId( context.getLastDeliveredInstanceId() ),
context.getConfiguration().getName() ) ) ) );
}
else
{
// Accept: record that this id is joining from this URI and send back our config.
context.instanceIsJoining(joiningId, joiningUri );
outgoing.offer( message.copyHeadersTo( respond( ClusterMessage.configurationResponse, message,
new ClusterMessage.ConfigurationResponseState( context.getConfiguration()
.getRoles(), context.getConfiguration().getMembers(),
new org.neo4j.cluster.protocol.atomicbroadcast.multipaxos.InstanceId( context.getLastDeliveredInstanceId() ),
context.getConfiguration().getName() ) ) ) );
}
break;
}
case configurationChanged:
{
ClusterMessage.ConfigurationChangeState state = message.getPayload();
state.apply( context );
break;
}
case leave:
{
List<URI> nodeList = new ArrayList<URI>( context.getConfiguration().getMemberURIs() );
if ( nodeList.size() == 1 )
{
// We are the last member: leaving shuts the cluster down, no broadcast needed.
context.getLogger( ClusterState.class ).info( "Shutting down cluster: " + context
.getConfiguration().getName() );
context.left();
return start;
}
else
{
// Broadcast our departure and wait for it to be agreed upon in the leaving state.
context.getLogger( ClusterState.class ).info( "Leaving:" + nodeList );
ClusterMessage.ConfigurationChangeState newState = new ClusterMessage
.ConfigurationChangeState();
newState.leave( context.getMyId() );
outgoing.offer( internal( AtomicBroadcastMessage.broadcast, newState ) );
context.setTimeout( "leave", timeout( ClusterMessage.leaveTimedout,
message ) );
return leaving;
}
}
}
return this;
}
},
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_protocol_cluster_ClusterState.java
|
3,838
|
// State while waiting for the cluster to agree on our join proposal. Either our join is
// confirmed (configurationChanged naming us), or we time out and fall back to discovery.
joining
{
@Override
public State<?, ?> handle( ClusterContext context,
Message<ClusterMessage> message,
MessageHolder outgoing
)
throws Throwable
{
switch ( message.getMessageType() )
{
case configurationChanged:
{
ClusterMessage.ConfigurationChangeState state = message.getPayload();
if ( context.getMyId().equals( state.getJoin() ) )
{
// It's us that joined: cancel the join timeout and enter the cluster.
context.cancelTimeout( "join" );
context.joined();
outgoing.offer( message.copyHeadersTo( internal( ClusterMessage.joinResponse, context.getConfiguration() ) ) );
return entered;
}
else
{
// A different membership change; apply it and keep waiting for our own.
state.apply( context );
return this;
}
}
case joiningTimeout:
{
context.getLogger( ClusterState.class ).info( "Join timeout for " + message.getHeader(
Message.CONVERSATION_ID ) );
if ( context.hasJoinBeenDenied() )
{
// We were explicitly denied earlier; surface that as the join failure,
// including the denier's configuration.
outgoing.offer( internal( ClusterMessage.joinFailure,
new ClusterEntryDeniedException( context.getMyId(),
context.getJoinDeniedConfigurationResponseState() ) ) );
return start;
}
// Go back to requesting configurations from potential members
for ( URI potentialClusterInstanceUri : context.getJoiningInstances() )
{
outgoing.offer( to( ClusterMessage.configurationRequest,
potentialClusterInstanceUri,
new ClusterMessage.ConfigurationRequestState( context.getMyId(), context.boundAt() ) ) );
}
context.setTimeout( "discovery",
timeout( ClusterMessage.configurationTimeout, message,
new ClusterMessage.ConfigurationTimeoutState( 4 ) ) );
return discovery;
}
case joinFailure:
{
// This causes an exception from the join() method
return start;
}
}
return this;
}
},
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_protocol_cluster_ClusterState.java
|
3,839
|
// Discovery state: broadcast configuration requests to potential members, collect
// responses and peer requests, and decide whether to join an existing cluster, create a
// new one, or keep retrying. The "who creates the cluster" tie-break relies on every
// instance sorting the discovered requests the same way and picking the lowest id.
discovery
{
@Override
public State<?, ?> handle( ClusterContext context, Message<ClusterMessage> message,
MessageHolder outgoing ) throws Throwable
{
List<ClusterMessage.ConfigurationRequestState> discoveredInstances = context.getDiscoveredInstances();
switch ( message.getMessageType() )
{
case configurationResponse:
{
// An existing member answered: adopt its configuration and propose our join.
context.cancelTimeout( "discovery" );
ClusterMessage.ConfigurationResponseState state = message.getPayload();
context.getLogger( ClusterState.class ).info( "Joining cluster " + state.getClusterName() );
if ( !context.getConfiguration().getName().equals( state.getClusterName() ) )
{
context.getLogger( ClusterState.class ).warn( "Joined cluster name is different than " +
"the one configured. Expected " + context.getConfiguration().getName() +
", got " + state.getClusterName() + "." );
}
HashMap<InstanceId, URI> memberList = new HashMap<InstanceId, URI>( state.getMembers() );
context.discoveredLastReceivedInstanceId( state.getLatestReceivedInstanceId().getId() );
context.acquiredConfiguration( memberList, state.getRoles() );
if ( !memberList.containsKey( context.getMyId() ) ||
!memberList.get( context.getMyId() ).equals( context.boundAt() ) )
{
// We are not (correctly) listed as a member yet: propose our own join.
context.getLogger( ClusterState.class ).info( String.format( "%s joining:%s, " +
"last delivered:%d", context.getMyId().toString(),
context.getConfiguration().toString(),
state.getLatestReceivedInstanceId().getId() ) );
ClusterMessage.ConfigurationChangeState newState = new ClusterMessage
.ConfigurationChangeState();
newState.join(context.getMyId(), context.boundAt());
// Let the coordinator propose this if possible
InstanceId coordinator = state.getRoles().get( ClusterConfiguration.COORDINATOR );
if ( coordinator != null )
{
URI coordinatorUri = context.getConfiguration().getUriForId( coordinator );
outgoing.offer( to( ProposerMessage.propose, coordinatorUri, newState ) );
}
else
{
// No known coordinator: fall back to whoever sent us the response.
outgoing.offer( to( ProposerMessage.propose, new URI( message.getHeader(
Message.FROM ) ), newState ) );
}
context.getLogger( ClusterState.class ).debug( "Setup join timeout for " + message
.getHeader( Message.CONVERSATION_ID ) );
context.setTimeout( "join", timeout( ClusterMessage.joiningTimeout, message,
new URI( message.getHeader( Message.FROM ) ) ) );
return joining;
}
else
{
// Already in (probably due to crash of this server previously), go to entered state
context.joined();
outgoing.offer( internal( ClusterMessage.joinResponse, context.getConfiguration() ) );
return entered;
}
}
case configurationTimeout:
{
if ( context.hasJoinBeenDenied() )
{
// A member denied us earlier during this discovery round: fail the join,
// carrying the denier's configuration for diagnostics.
outgoing.offer( internal( ClusterMessage.joinFailure,
new ClusterEntryDeniedException( context.getMyId(),
context.getJoinDeniedConfigurationResponseState() ) ) );
return start;
}
ClusterMessage.ConfigurationTimeoutState state = message.getPayload();
if ( state.getRemainingPings() > 0 )
{
// Send out requests again
for ( URI potentialClusterInstanceUri : context.getJoiningInstances() )
{
outgoing.offer( to( ClusterMessage.configurationRequest,
potentialClusterInstanceUri,
new ClusterMessage.ConfigurationRequestState(
context.getMyId(), context.boundAt() ) ) );
}
context.setTimeout( "join",
timeout( ClusterMessage.configurationTimeout, message,
new ClusterMessage.ConfigurationTimeoutState(
state.getRemainingPings() - 1 ) ) );
}
else
{
/*
* No configuration responses. Check if we picked up any other instances' requests during this phase.
* If we did, or we are the only instance in the configuration we can go ahead and try to start the
* cluster.
*/
if ( !discoveredInstances.isEmpty() || count( context.getJoiningInstances() ) == 1 )
{
Collections.sort( discoveredInstances );
/*
* The assumption here is that the lowest in the list of discovered instances
* will create the cluster. Keep in mind that this is run on all instances so
* everyone will pick the same one.
* If the one picked up is configured to not init a cluster then the timeout
* set in else{} will take care of that.
* We also start the cluster if we are the only configured instance. joiningInstances
* does not contain us, ever.
*/
ClusterMessage.ConfigurationRequestState ourRequestState =
new ClusterMessage.ConfigurationRequestState(context.getMyId(), context.boundAt());
// No one to join with
boolean imAlone =
count(context.getJoiningInstances()) == 1
&& discoveredInstances.contains(ourRequestState)
&& discoveredInstances.size() == 1;
// Enough instances discovered (half or more - i don't count myself here)
boolean haveDiscoveredMajority =
discoveredInstances.size() > count(context.getJoiningInstances()) / 2;
// I am supposed to create the cluster (i am before the first in the list of the discovered instances)
boolean wantToStartCluster =
!discoveredInstances.isEmpty()
&& discoveredInstances.get( 0 ).getJoiningId().compareTo(context.getMyId() ) >= 0;
if ( imAlone || haveDiscoveredMajority && wantToStartCluster )
{
discoveredInstances.clear();
// I'm supposed to create the cluster - fail the join
outgoing.offer( internal( ClusterMessage.joinFailure,
new TimeoutException(
"Join failed, timeout waiting for configuration" ) ) );
return start;
}
else
{
discoveredInstances.clear();
// Someone else is supposed to create the cluster - restart the join discovery
for ( URI potentialClusterInstanceUri : context.getJoiningInstances() )
{
outgoing.offer( to( ClusterMessage.configurationRequest,
potentialClusterInstanceUri,
new ClusterMessage.ConfigurationRequestState( context.getMyId(),
context.boundAt() ) ) );
}
context.setTimeout( "discovery",
timeout( ClusterMessage.configurationTimeout, message,
new ClusterMessage.ConfigurationTimeoutState( 4 ) ) );
}
}
else
{
// Nothing discovered yet and we're not alone: arm another timeout round.
context.setTimeout( "join",
timeout( ClusterMessage.configurationTimeout, message,
new ClusterMessage.ConfigurationTimeoutState( 4 ) ) );
}
}
return this;
}
case configurationRequest:
{
// We're listening for existing clusters, but if all instances start up at the same time
// and look for each other, this allows us to pick that up
ClusterMessage.ConfigurationRequestState configurationRequested = message.getPayload();
configurationRequested = new ClusterMessage.ConfigurationRequestState( configurationRequested.getJoiningId(), URI.create(message.getHeader( Message.FROM ) ));
if ( !discoveredInstances.contains( configurationRequested ))
{
// Guard against two distinct hosts announcing the same server id — that is a
// misconfiguration, and joining such a cluster must fail.
for ( ClusterMessage.ConfigurationRequestState discoveredInstance :
discoveredInstances )
{
if ( discoveredInstance.getJoiningId().equals( configurationRequested.getJoiningId() ) )
{
// we are done
StringBuffer errorMessage = new StringBuffer( "Failed to join cluster because I saw two instances with the same ServerId" );
errorMessage.append( "One is " ).append( discoveredInstance.getJoiningId() );
errorMessage.append( " The other is " ).append( configurationRequested );
outgoing.offer( internal( ClusterMessage.joinFailure,
new IllegalStateException( errorMessage.toString() ) ) );
return start;
}
}
discoveredInstances.add( configurationRequested );
}
break;
}
case joinDenied:
{
// outgoing.offer( internal( ClusterMessage.joinFailure,
// new ClusterEntryDeniedException( context.me, context.configuration ) ) );
// return start;
// Record the denial; it takes effect on the next configurationTimeout.
context.joinDenied( (ClusterMessage.ConfigurationResponseState) message.getPayload() );
return this;
}
}
return this;
}
},
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_protocol_cluster_ClusterState.java
|
3,840
|
{
@Override
public void run()
{
// Verifies that after the previous coordinator failed, a *different* instance
// has been elected coordinator. 'comparedTo' holds the pre-failure role map.
HashMap<String, InstanceId> roles = new HashMap<String, InstanceId>();
ClusterMockTest.this.getRoles( roles );
InstanceId oldCoordinator = comparedTo.get( ClusterConfiguration.COORDINATOR );
InstanceId newCoordinator = roles.get( ClusterConfiguration.COORDINATOR );
assertNotNull( "Should have had a coordinator before bringing it down", oldCoordinator );
assertNotNull( "Should have a new coordinator after the previous failed", newCoordinator );
assertTrue( "Should have elected a new coordinator", !oldCoordinator.equals( newCoordinator ) );
}
}, 0 );
| false
|
enterprise_cluster_src_test_java_org_neo4j_cluster_protocol_cluster_ClusterMockTest.java
|
3,841
|
{
@Override
public void run()
{
try
{
// Block on the join future; success means we are in the cluster.
ClusterConfiguration clusterConfiguration = result.get();
logger.getLogger().debug( "**** Cluster configuration:" +
clusterConfiguration );
}
catch ( Exception e )
{
logger.getLogger().debug(
"**** Node " + joinServer + " could not join cluster:" + e
.getMessage() );
// IllegalStateException signals a misconfiguration (e.g. duplicate server id):
// don't create a cluster in that case, just log it. Any other failure means
// no cluster exists to join, so this node creates one.
if ( !(e.getCause() instanceof IllegalStateException) )
{
cluster.create( "default" );
}
else
{
logger.getLogger().debug( "*** Incorrectly configured cluster? "
+ e.getCause().getMessage() );
}
}
}
};
| false
|
enterprise_cluster_src_test_java_org_neo4j_cluster_protocol_cluster_ClusterMockTest.java
|
3,842
|
{
@Override
public void run()
{
try
{
// Block on the join future; success means the node is in the cluster.
ClusterConfiguration clusterConfiguration = result.get();
logger.getLogger().debug( "**** Cluster configuration:" +
clusterConfiguration );
}
catch ( Exception e )
{
logger.getLogger().debug( "**** Node could not join cluster:" + e
.getMessage() );
// Put the cluster client back in the pending queue so a later script step
// can retry the join.
out.add( cluster );
}
}
};
| false
|
enterprise_cluster_src_test_java_org_neo4j_cluster_protocol_cluster_ClusterMockTest.java
|
3,843
|
{
@Override
public void run()
{
// Script step: make server 'joinServer' join the cluster. 'out' holds clients that
// have not yet joined; 'joinServers' optionally names the instances to contact.
Cluster joinCluster = servers.get( joinServer - 1 ).newClient( Cluster.class );
for ( final Cluster cluster : out )
{
if ( cluster.equals( joinCluster ) )
{
out.remove( cluster );
logger.getLogger().debug( "Join:" + cluster.toString() );
if ( joinServers.length == 0 )
{
if ( in.isEmpty() )
{
// No cluster exists yet: this node creates it.
cluster.create( "default" );
}
else
{
// Use test info to figure out who to join
URI[] toJoin = new URI[servers.size()];
for ( int i = 0; i < servers.size(); i++ )
{
toJoin[i] = servers.get( i ).getServer().boundAt();
}
final Future<ClusterConfiguration> result = cluster.join( "default", toJoin );
Runnable joiner = new Runnable()
{
@Override
public void run()
{
try
{
ClusterConfiguration clusterConfiguration = result.get();
logger.getLogger().debug( "**** Cluster configuration:" +
clusterConfiguration );
}
catch ( Exception e )
{
// Join failed: requeue the client so the join can be retried later.
logger.getLogger().debug( "**** Node could not join cluster:" + e
.getMessage() );
out.add( cluster );
}
}
};
// Defer handling until the future completes, driven by the mock network.
network.addFutureWaiter( result, joiner );
}
}
else
{
// List of servers to join was explicitly specified, so use that
URI[] instanceUris = new URI[joinServers.length];
for ( int i = 0; i < joinServers.length; i++ )
{
int server = joinServers[i];
instanceUris[i] = URI.create( "server" + server );
}
final Future<ClusterConfiguration> result = cluster.join( "default", instanceUris );
Runnable joiner = new Runnable()
{
@Override
public void run()
{
try
{
ClusterConfiguration clusterConfiguration = result.get();
logger.getLogger().debug( "**** Cluster configuration:" +
clusterConfiguration );
}
catch ( Exception e )
{
logger.getLogger().debug(
"**** Node " + joinServer + " could not join cluster:" + e
.getMessage() );
// IllegalStateException marks a misconfigured cluster (e.g. duplicate
// server id) — log it; otherwise create a fresh cluster instead.
if ( !(e.getCause() instanceof IllegalStateException) )
{
cluster.create( "default" );
}
else
{
logger.getLogger().debug( "*** Incorrectly configured cluster? "
+ e.getCause().getMessage() );
}
}
}
};
network.addFutureWaiter( result, joiner );
}
break;
}
}
}
}, time );
| false
|
enterprise_cluster_src_test_java_org_neo4j_cluster_protocol_cluster_ClusterMockTest.java
|
3,844
|
{
// Notifies each registered heartbeat listener that 'node' is considered failed.
@Override
public void notify( HeartbeatListener listener )
{
listener.failed( node );
}
} );
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_protocol_atomicbroadcast_multipaxos_context_HeartbeatContextImpl.java
|
3,845
|
{
// Notifies each registered heartbeat listener that 'node' is considered failed.
@Override
public void notify( HeartbeatListener listener )
{
listener.failed( node );
}
} );
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_protocol_atomicbroadcast_multipaxos_context_HeartbeatContextImpl.java
|
3,846
|
{
// Notifies each registered heartbeat listener that 'node' is alive again.
@Override
public void notify( HeartbeatListener listener )
{
listener.alive( node );
}
} );
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_protocol_atomicbroadcast_multipaxos_context_HeartbeatContextImpl.java
|
3,847
|
class HeartbeatContextImpl
extends AbstractContextImpl
implements HeartbeatContext
{
// HeartbeatContext
private Set<InstanceId> failed = new HashSet<InstanceId>();
private Map<InstanceId, Set<InstanceId>> nodeSuspicions = new HashMap<InstanceId, Set<InstanceId>>();
private Iterable<HeartbeatListener> heartBeatListeners = Listeners.newListeners();
private final Executor executor;
private ClusterContext clusterContext;
private LearnerContext learnerContext;
HeartbeatContextImpl( InstanceId me, CommonContextState commonState, Logging logging,
Timeouts timeouts, Executor executor )
{
super( me, commonState, logging, timeouts );
this.executor = executor;
}
private HeartbeatContextImpl( InstanceId me, CommonContextState commonState, Logging logging, Timeouts timeouts,
Set<InstanceId> failed, Map<InstanceId, Set<InstanceId>> nodeSuspicions,
Iterable<HeartbeatListener> heartBeatListeners, Executor executor)
{
super( me, commonState, logging, timeouts );
this.failed = failed;
this.nodeSuspicions = nodeSuspicions;
this.heartBeatListeners = heartBeatListeners;
this.executor = executor;
}
public void setCircularDependencies( ClusterContext clusterContext, LearnerContext learnerContext )
{
this.clusterContext = clusterContext;
this.learnerContext = learnerContext;
}
@Override
public void started()
{
failed.clear();
}
/**
* @return True iff the node was suspected
*/
@Override
public boolean alive( final InstanceId node )
{
Set<InstanceId> serverSuspicions = suspicionsFor( getMyId() );
boolean suspected = serverSuspicions.remove( node );
if ( !isFailed( node ) && failed.remove( node ) )
{
getLogger( HeartbeatContext.class ).info( "Notifying listeners that instance " + node + " is alive" );
Listeners.notifyListeners( heartBeatListeners, new Listeners.Notification<HeartbeatListener>()
{
@Override
public void notify( HeartbeatListener listener )
{
listener.alive( node );
}
} );
}
return suspected;
}
@Override
public void suspect( final InstanceId node )
{
Set<InstanceId> serverSuspicions = suspicionsFor( getMyId() );
if ( !serverSuspicions.contains( node ) )
{
serverSuspicions.add( node );
getLogger( HeartbeatContext.class ).info( getMyId() + "(me) is now suspecting " + node );
}
if ( isFailed( node ) && !failed.contains( node ) )
{
getLogger( HeartbeatContext.class ).info( "Notifying listeners that instance " + node + " is failed" );
failed.add( node );
Listeners.notifyListeners( heartBeatListeners, executor, new Listeners.Notification<HeartbeatListener>()
{
@Override
public void notify( HeartbeatListener listener )
{
listener.failed( node );
}
} );
}
}
@Override
public void suspicions( InstanceId from, Set<InstanceId> suspicions )
{
Set<InstanceId> serverSuspicions = suspicionsFor( from );
// Check removals
Iterator<InstanceId> suspicionsIterator = serverSuspicions.iterator();
while ( suspicionsIterator.hasNext() )
{
InstanceId currentSuspicion = suspicionsIterator.next();
if ( !suspicions.contains( currentSuspicion ) )
{
getLogger( HeartbeatContext.class ).info( from + " is no longer suspecting " + currentSuspicion );
suspicionsIterator.remove();
}
}
// Check additions
for ( InstanceId suspicion : suspicions )
{
if ( !serverSuspicions.contains( suspicion ) )
{
getLogger( HeartbeatContext.class ).info( from + " is now suspecting " + suspicion );
serverSuspicions.add( suspicion );
}
}
// Check if anyone is considered failed
for ( final InstanceId node : suspicions )
{
if ( isFailed( node ) && !failed.contains( node ) )
{
failed.add( node );
Listeners.notifyListeners( heartBeatListeners, executor, new Listeners.Notification<HeartbeatListener>()
{
@Override
public void notify( HeartbeatListener listener )
{
listener.failed( node );
}
} );
}
}
}
@Override
public Set<InstanceId> getFailed()
{
return failed;
}
@Override
public Iterable<InstanceId> getAlive()
{
return Iterables.filter( new Predicate<InstanceId>()
{
@Override
public boolean accept( InstanceId item )
{
return !isFailed( item );
}
}, commonState.configuration().getMemberIds() );
}
@Override
public void addHeartbeatListener( HeartbeatListener listener )
{
heartBeatListeners = Listeners.addListener( listener, heartBeatListeners );
}
@Override
public void removeHeartbeatListener( HeartbeatListener listener )
{
heartBeatListeners = Listeners.removeListener( listener, heartBeatListeners );
}
@Override
public void serverLeftCluster( InstanceId node )
{
failed.remove( node );
for ( Set<InstanceId> uris : nodeSuspicions.values() )
{
uris.remove( node );
}
}
@Override
public boolean isFailed( InstanceId node )
{
List<InstanceId> suspicions = getSuspicionsOf( node );
/*
* This looks weird but trust me, there is a reason for it.
* See below in the test, where we subtract the failed size() from the total cluster size? If the instance
* under question is already in the failed set then that's it, as expected. But if it is not in the failed set
* then we must not take it's opinion under consideration (which we implicitly don't for every member of the
* failed set). That's what the adjust represents - the node's opinion on whether it is alive or not. Run a
* 3 cluster simulation in your head with 2 instances failed and one coming back online and you'll see why.
*/
int adjust = failed.contains( node ) ? 0 : 1;
// If more than half suspect this node, fail it
return suspicions.size() >
(commonState.configuration().getMembers().size() - failed.size() - adjust) / 2;
}
@Override
public List<InstanceId> getSuspicionsOf( InstanceId server )
{
List<InstanceId> suspicions = new ArrayList<InstanceId>();
for ( InstanceId member : commonState.configuration().getMemberIds() )
{
Set<InstanceId> memberSuspicions = nodeSuspicions.get( member );
if ( memberSuspicions != null && !failed.contains( member )
&& memberSuspicions.contains( server ) )
{
suspicions.add( member );
}
}
return suspicions;
}
@Override
public Set<InstanceId> getSuspicionsFor( InstanceId uri )
{
Set<org.neo4j.cluster.InstanceId> suspicions = suspicionsFor( uri );
return new HashSet<org.neo4j.cluster.InstanceId>( suspicions );
}
private Set<InstanceId> suspicionsFor( InstanceId uri )
{
Set<InstanceId> serverSuspicions = nodeSuspicions.get( uri );
if ( serverSuspicions == null )
{
serverSuspicions = new HashSet<InstanceId>();
nodeSuspicions.put( uri, serverSuspicions );
}
return serverSuspicions;
}
@Override
public Iterable<InstanceId> getOtherInstances()
{
return clusterContext.getOtherInstances();
}
@Override
public long getLastKnownLearnedInstanceInCluster()
{
return learnerContext.getLastKnownLearnedInstanceInCluster();
}
@Override
public long getLastLearnedInstanceId()
{
return learnerContext.getLastLearnedInstanceId();
}
/**
 * Creates a snapshot copy of this heartbeat context. The failed set, the
 * suspicion map and the listener list are shallow-copied so later mutations of
 * this context do not leak into the snapshot.
 * NOTE(review): the Set values inside nodeSuspicions are shared between the
 * original and the snapshot, not copied — presumably acceptable for how
 * snapshots are consumed; confirm against callers.
 */
public HeartbeatContextImpl snapshot( CommonContextState commonStateSnapshot, Logging logging, Timeouts timeouts,
                                      Executor executor )
{
    return new HeartbeatContextImpl( me, commonStateSnapshot, logging, timeouts, new HashSet<>(failed),
            new HashMap<>(nodeSuspicions), new ArrayList<>(toList(heartBeatListeners)), executor );
}
@Override
public boolean equals( Object o )
{
    // Identity check first, then a strict same-class comparison (a subclass
    // instance is never equal to a HeartbeatContextImpl).
    if ( o == this )
    {
        return true;
    }
    if ( o == null || o.getClass() != getClass() )
    {
        return false;
    }
    HeartbeatContextImpl other = (HeartbeatContextImpl) o;
    // Equality is defined by the failed set and the suspicion map only.
    boolean sameFailed = failed == null ? other.failed == null : failed.equals( other.failed );
    if ( !sameFailed )
    {
        return false;
    }
    return nodeSuspicions == null ? other.nodeSuspicions == null
                                  : nodeSuspicions.equals( other.nodeSuspicions );
}
@Override
public int hashCode()
{
    // Mirrors equals(): combines failed and nodeSuspicions with the usual
    // 31-multiplier scheme, preserving the exact values of the original formula.
    int hash = failed == null ? 0 : failed.hashCode();
    hash = 31 * hash + (nodeSuspicions == null ? 0 : nodeSuspicions.hashCode());
    return hash;
}
}
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_protocol_atomicbroadcast_multipaxos_context_HeartbeatContextImpl.java
|
3,848
|
/**
 * An immutable (suggested node, credentials) pair cast during an election.
 * Votes are ordered by their credentials so that sorting a vote list ranks
 * candidates by suitability.
 *
 * NOTE(review): compareTo() compares only the credentials while equals()
 * also compares the suggested node, so ordering is not consistent with
 * equals. Ties between different nodes with equal credentials are resolved
 * by list order, not by this class — confirm before using Vote in sorted
 * sets/maps.
 */
private static class Vote
        implements Comparable<Vote>
{
    private final org.neo4j.cluster.InstanceId suggestedNode;
    private final Comparable<Object> voteCredentials;

    private Vote( org.neo4j.cluster.InstanceId suggestedNode, Comparable<Object> voteCredentials )
    {
        this.suggestedNode = suggestedNode;
        this.voteCredentials = voteCredentials;
    }

    // The instance this vote proposes for the role.
    public org.neo4j.cluster.InstanceId getSuggestedNode()
    {
        return suggestedNode;
    }

    // The credentials backing the suggestion; higher compares as more suitable.
    public Comparable<Object> getCredentials()
    {
        return voteCredentials;
    }

    @Override
    public String toString()
    {
        return suggestedNode + ":" + voteCredentials;
    }

    @Override
    public int compareTo( Vote o )
    {
        // Credentials only — see class note about inconsistency with equals().
        return this.voteCredentials.compareTo( o.voteCredentials );
    }

    @Override
    public boolean equals( Object o )
    {
        if ( this == o )
        {
            return true;
        }
        if ( o == null || getClass() != o.getClass() )
        {
            return false;
        }
        Vote vote = (Vote) o;
        if ( !suggestedNode.equals( vote.suggestedNode ) )
        {
            return false;
        }
        if ( !voteCredentials.equals( vote.voteCredentials ) )
        {
            return false;
        }
        return true;
    }

    @Override
    public int hashCode()
    {
        int result = suggestedNode.hashCode();
        result = 31 * result + voteCredentials.hashCode();
        return result;
    }
}
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_protocol_atomicbroadcast_multipaxos_context_ElectionContextImpl.java
|
3,849
|
/**
 * Bookkeeping for one in-flight election: the votes gathered so far
 * (keyed by voter) and the strategy used to pick a winner from them.
 */
static class Election
{
    private final WinnerStrategy winnerStrategy;
    private final Map<org.neo4j.cluster.InstanceId, Vote> votes;

    private Election( WinnerStrategy winnerStrategy )
    {
        // Delegate to the full constructor with an empty vote map.
        this( winnerStrategy, new HashMap<InstanceId, Vote>() );
    }

    private Election( WinnerStrategy winnerStrategy, HashMap<InstanceId, Vote> votes )
    {
        this.winnerStrategy = winnerStrategy;
        this.votes = votes;
    }

    // Live view of the votes; callers mutate this to register votes.
    public Map<org.neo4j.cluster.InstanceId, Vote> getVotes()
    {
        return votes;
    }

    // Apply the configured strategy to the votes collected so far.
    public org.neo4j.cluster.InstanceId pickWinner()
    {
        return winnerStrategy.pickWinner( votes.values() );
    }

    // Copy of this election with an independent vote map (strategy is shared).
    public Election snapshot()
    {
        return new Election( winnerStrategy, new HashMap<InstanceId, Vote>( votes ) );
    }
}
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_protocol_atomicbroadcast_multipaxos_context_ElectionContextImpl.java
|
3,850
|
/**
 * Winner strategy biased for or against one particular instance.
 * With positiveSuggestion == true (promotion) it returns the highest-ranked
 * vote that suggests the biased node; with false (demotion) it returns the
 * highest-ranked vote that suggests anyone BUT the biased node.
 */
private class BiasedWinnerStrategy implements WinnerStrategy
{
    private final org.neo4j.cluster.InstanceId biasedNode;
    // true = bias toward electing biasedNode, false = bias toward avoiding it
    private final boolean positiveSuggestion;

    public BiasedWinnerStrategy( org.neo4j.cluster.InstanceId biasedNode, boolean positiveSuggestion )
    {
        this.biasedNode = biasedNode;
        this.positiveSuggestion = positiveSuggestion;
    }

    @Override
    public org.neo4j.cluster.InstanceId pickWinner( Collection<Vote> voteList )
    {
        // Remove blank votes
        List<Vote> filteredVoteList = removeBlankVotes( voteList );
        // Sort based on credentials
        // The most suited candidate should come out on top
        Collections.sort( filteredVoteList );
        Collections.reverse( filteredVoteList );
        clusterContext.getLogger( getClass() ).debug( "Election started with " + voteList +
                ", ended up with " + filteredVoteList + " where " + biasedNode + " is biased for " +
                (positiveSuggestion ? "promotion" : "demotion") );
        for ( Vote vote : filteredVoteList )
        {
            // Elect the biased instance biased as winner
            // equals(...) == positiveSuggestion keeps only matching suggestions
            // when promoting, and only non-matching ones when demoting.
            if ( vote.getSuggestedNode().equals( biasedNode ) == positiveSuggestion )
            {
                return vote.getSuggestedNode();
            }
        }
        // No possible winner
        return null;
    }
}
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_protocol_atomicbroadcast_multipaxos_context_ElectionContextImpl.java
|
3,851
|
{
@Override
public boolean accept( Vote item )
{
return !(item.getCredentials() instanceof NotElectableElectionCredentials);
}
}, voteList ) );
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_protocol_atomicbroadcast_multipaxos_context_ElectionContextImpl.java
|
3,852
|
{
@Override
public String apply( ElectionRole role )
{
return role.getName();
}
}, roles ) );
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_protocol_atomicbroadcast_multipaxos_context_ElectionContextImpl.java
|
3,853
|
{
@Override
public boolean accept( String role )
{
return clusterContext.getConfiguration().getElected( role ) == null;
}
}, map( new Function<ElectionRole, String>() // Convert ElectionRole to String
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_protocol_atomicbroadcast_multipaxos_context_ElectionContextImpl.java
|
3,854
|
{
@Override
public org.neo4j.cluster.InstanceId pickWinner( Collection<Vote> voteList )
{
// Remove blank votes
List<Vote> filteredVoteList = removeBlankVotes( voteList );
// Sort based on credentials
// The most suited candidate should come out on top
Collections.sort( filteredVoteList );
Collections.reverse( filteredVoteList );
clusterContext.getLogger( getClass() ).debug( "Election started with " + voteList +
", ended up with " + filteredVoteList );
// Elect this highest voted instance
for ( Vote vote : filteredVoteList )
{
return vote.getSuggestedNode();
}
// No possible winner
return null;
}
} ) );
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_protocol_atomicbroadcast_multipaxos_context_ElectionContextImpl.java
|
3,855
|
/**
 * Election bookkeeping for the paxos cluster protocol: tracks which roles can
 * be elected, which elections are currently in progress, gathers votes and
 * picks winners via pluggable {@link WinnerStrategy} implementations.
 * Delegates membership/failure knowledge to the cluster and heartbeat contexts.
 */
class ElectionContextImpl
        extends AbstractContextImpl
        implements ElectionContext
{
    private final ClusterContext clusterContext;
    private final HeartbeatContext heartbeatContext;
    // Roles this cluster can elect instances for (e.g. coordinator/master).
    private final List<ElectionRole> roles;
    // Role name -> in-flight election. Absence of a key means no election running.
    private final Map<String, Election> elections;
    private final ElectionCredentialsProvider electionCredentialsProvider;

    ElectionContextImpl( org.neo4j.cluster.InstanceId me, CommonContextState commonState,
                         Logging logging,
                         Timeouts timeouts, Iterable<ElectionRole> roles, ClusterContext clusterContext,
                         HeartbeatContext heartbeatContext, ElectionCredentialsProvider electionCredentialsProvider )
    {
        super( me, commonState, logging, timeouts );
        this.electionCredentialsProvider = electionCredentialsProvider;
        // Copy the roles so later mutation of the caller's iterable has no effect.
        this.roles = new ArrayList<>(toList(roles));
        this.elections = new HashMap<>();
        this.clusterContext = clusterContext;
        this.heartbeatContext = heartbeatContext;
    }

    // Snapshot constructor: takes pre-built role/election collections as-is.
    ElectionContextImpl( InstanceId me, CommonContextState commonState, Logging logging, Timeouts timeouts,
                         ClusterContext clusterContext, HeartbeatContext heartbeatContext, List<ElectionRole> roles,
                         Map<String, Election> elections, ElectionCredentialsProvider electionCredentialsProvider )
    {
        super( me, commonState, logging, timeouts );
        this.clusterContext = clusterContext;
        this.heartbeatContext = heartbeatContext;
        this.roles = roles;
        this.elections = elections;
        this.electionCredentialsProvider = electionCredentialsProvider;
    }

    @Override
    public void created()
    {
        for ( ElectionRole role : roles )
        {
            // Elect myself for all roles
            // (we just created the cluster, so we are the only member)
            clusterContext.elected( role.getName(), clusterContext.getMyId() );
        }
    }

    @Override
    public List<ElectionRole> getPossibleRoles()
    {
        return roles;
    }

    /*
     * Removes all roles from the provided node. This is expected to be the first call when receiving a demote
     * message for a node, since it is the way to ensure that election will happen for each role that node had
     */
    @Override
    public void nodeFailed( org.neo4j.cluster.InstanceId node )
    {
        Iterable<String> rolesToDemote = getRoles( node );
        for ( String role : rolesToDemote )
        {
            clusterContext.getConfiguration().removeElected( role );
        }
    }

    @Override
    public Iterable<String> getRoles( org.neo4j.cluster.InstanceId server )
    {
        return clusterContext.getConfiguration().getRolesOf( server );
    }

    @Override
    public void unelect( String roleName )
    {
        clusterContext.getConfiguration().removeElected( roleName );
    }

    @Override
    public boolean isElectionProcessInProgress( String role )
    {
        return elections.containsKey( role );
    }

    @Override
    public void startDemotionProcess( String role, final org.neo4j.cluster.InstanceId demoteNode )
    {
        // Biased strategy with positiveSuggestion=false: anyone but demoteNode wins.
        elections.put( role, new Election( new BiasedWinnerStrategy( demoteNode, false /*demotion*/ ) ) );
    }

    @Override
    public void startElectionProcess( String role )
    {
        clusterContext.getLogger( getClass() ).info( "Doing elections for role " + role );
        // Unbiased strategy: the highest-credentialed non-blank vote wins.
        elections.put( role, new Election( new WinnerStrategy()
        {
            @Override
            public org.neo4j.cluster.InstanceId pickWinner( Collection<Vote> voteList )
            {
                // Remove blank votes
                List<Vote> filteredVoteList = removeBlankVotes( voteList );
                // Sort based on credentials
                // The most suited candidate should come out on top
                Collections.sort( filteredVoteList );
                Collections.reverse( filteredVoteList );
                clusterContext.getLogger( getClass() ).debug( "Election started with " + voteList +
                        ", ended up with " + filteredVoteList );
                // Elect this highest voted instance
                for ( Vote vote : filteredVoteList )
                {
                    return vote.getSuggestedNode();
                }
                // No possible winner
                return null;
            }
        } ) );
    }

    @Override
    public void startPromotionProcess( String role, final org.neo4j.cluster.InstanceId promoteNode )
    {
        // Biased strategy with positiveSuggestion=true: only promoteNode can win.
        elections.put( role, new Election( new BiasedWinnerStrategy( promoteNode, true /*promotion*/ ) ) );
    }

    @Override
    public void voted( String role, org.neo4j.cluster.InstanceId suggestedNode, Comparable<Object> suggestionCredentials )
    {
        // Votes are keyed by suggested node, so a later vote for the same node
        // replaces the earlier one. Votes are ignored when no election is running.
        if ( isElectionProcessInProgress( role ) )
        {
            Map<org.neo4j.cluster.InstanceId, Vote> votes = elections.get( role ).getVotes();
            votes.put( suggestedNode, new Vote( suggestedNode, suggestionCredentials ) );
        }
    }

    @Override
    public org.neo4j.cluster.InstanceId getElectionWinner( String role )
    {
        Election election = elections.get( role );
        // Only conclude once every needed vote is in; otherwise keep waiting.
        if ( election == null || election.getVotes().size() != getNeededVoteCount() )
        {
            return null;
        }
        // Concluding the election removes it — a subsequent call returns null.
        elections.remove( role );
        return election.pickWinner();
    }

    @Override
    public Comparable<Object> getCredentialsForRole( String role )
    {
        return electionCredentialsProvider.getCredentials( role );
    }

    @Override
    public int getVoteCount( String role )
    {
        Election election = elections.get( role );
        if ( election != null )
        {
            Map<org.neo4j.cluster.InstanceId, Vote> voteList = election.getVotes();
            if ( voteList == null )
            {
                return 0;
            }
            return voteList.size();
        }
        else
        {
            return 0;
        }
    }

    @Override
    public int getNeededVoteCount()
    {
        // One vote from every member that is not currently considered failed.
        return clusterContext.getConfiguration().getMembers().size() - heartbeatContext.getFailed().size();
    }

    @Override
    public void cancelElection( String role )
    {
        elections.remove( role );
    }

    @Override
    public Iterable<String> getRolesRequiringElection()
    {
        return Iterables.filter( new Predicate<String>() // Only include roles that are not elected
        {
            @Override
            public boolean accept( String role )
            {
                return clusterContext.getConfiguration().getElected( role ) == null;
            }
        }, map( new Function<ElectionRole, String>() // Convert ElectionRole to String
        {
            @Override
            public String apply( ElectionRole role )
            {
                return role.getName();
            }
        }, roles ) );
    }

    @Override
    public boolean electionOk()
    {
        // Elections are only meaningful while a quorum of members is available.
        int total = clusterContext.getConfiguration().getMembers().size();
        int available = total - heartbeatContext.getFailed().size();
        return isQuorum(available, total);
    }

    @Override
    public boolean isInCluster()
    {
        return clusterContext.isInCluster();
    }

    @Override
    public Iterable<org.neo4j.cluster.InstanceId> getAlive()
    {
        return heartbeatContext.getAlive();
    }

    @Override
    public org.neo4j.cluster.InstanceId getMyId()
    {
        return clusterContext.getMyId();
    }

    @Override
    public boolean isElector()
    {
        // Only the first alive server should try elections. Everyone else waits
        List<org.neo4j.cluster.InstanceId> aliveInstances = toList( getAlive() );
        Collections.sort( aliveInstances );
        return aliveInstances.indexOf( getMyId() ) == 0;
    }

    @Override
    public boolean isFailed( org.neo4j.cluster.InstanceId key )
    {
        return heartbeatContext.getFailed().contains( key );
    }

    @Override
    public org.neo4j.cluster.InstanceId getElected( String roleName )
    {
        return clusterContext.getConfiguration().getElected( roleName );
    }

    @Override
    public boolean hasCurrentlyElectedVoted( String role, org.neo4j.cluster.InstanceId currentElected )
    {
        return elections.containsKey( role ) && elections.get(role).getVotes().containsKey( currentElected );
    }

    @Override
    public Set<InstanceId> getFailed()
    {
        return heartbeatContext.getFailed();
    }

    /**
     * Snapshot copy: roles list and each Election are copied so the snapshot is
     * isolated from further mutation of this context.
     */
    public ElectionContextImpl snapshot( CommonContextState commonStateSnapshot, Logging logging, Timeouts timeouts,
                                         ClusterContextImpl snapshotClusterContext,
                                         HeartbeatContextImpl snapshotHeartbeatContext,
                                         ElectionCredentialsProvider credentialsProvider )
    {
        Map<String, Election> electionsSnapshot = new HashMap<>();
        for ( Map.Entry<String, Election> election : elections.entrySet() )
        {
            electionsSnapshot.put( election.getKey(), election.getValue().snapshot() );
        }
        return new ElectionContextImpl( me, commonStateSnapshot, logging, timeouts, snapshotClusterContext,
                snapshotHeartbeatContext, new ArrayList<>(roles), electionsSnapshot, credentialsProvider );
    }

    /**
     * An immutable (suggested node, credentials) pair cast during an election.
     * NOTE(review): compareTo() orders by credentials only, while equals() also
     * compares the suggested node — ordering is not consistent with equals.
     */
    private static class Vote
            implements Comparable<Vote>
    {
        private final org.neo4j.cluster.InstanceId suggestedNode;
        private final Comparable<Object> voteCredentials;

        private Vote( org.neo4j.cluster.InstanceId suggestedNode, Comparable<Object> voteCredentials )
        {
            this.suggestedNode = suggestedNode;
            this.voteCredentials = voteCredentials;
        }

        public org.neo4j.cluster.InstanceId getSuggestedNode()
        {
            return suggestedNode;
        }

        public Comparable<Object> getCredentials()
        {
            return voteCredentials;
        }

        @Override
        public String toString()
        {
            return suggestedNode + ":" + voteCredentials;
        }

        @Override
        public int compareTo( Vote o )
        {
            return this.voteCredentials.compareTo( o.voteCredentials );
        }

        @Override
        public boolean equals( Object o )
        {
            if ( this == o )
            {
                return true;
            }
            if ( o == null || getClass() != o.getClass() )
            {
                return false;
            }
            Vote vote = (Vote) o;
            if ( !suggestedNode.equals( vote.suggestedNode ) )
            {
                return false;
            }
            if ( !voteCredentials.equals( vote.voteCredentials ) )
            {
                return false;
            }
            return true;
        }

        @Override
        public int hashCode()
        {
            int result = suggestedNode.hashCode();
            result = 31 * result + voteCredentials.hashCode();
            return result;
        }
    }

    /**
     * One in-flight election: collected votes (keyed by voter) plus the
     * strategy used to pick a winner from them.
     */
    static class Election
    {
        private final WinnerStrategy winnerStrategy;
        private final Map<org.neo4j.cluster.InstanceId, Vote> votes;

        private Election( WinnerStrategy winnerStrategy )
        {
            this.winnerStrategy = winnerStrategy;
            this.votes = new HashMap<org.neo4j.cluster.InstanceId, Vote>();
        }

        private Election( WinnerStrategy winnerStrategy, HashMap<InstanceId, Vote> votes )
        {
            this.votes = votes;
            this.winnerStrategy = winnerStrategy;
        }

        public Map<org.neo4j.cluster.InstanceId, Vote> getVotes()
        {
            return votes;
        }

        public org.neo4j.cluster.InstanceId pickWinner()
        {
            return winnerStrategy.pickWinner( votes.values() );
        }

        // Copy with an independent vote map; the strategy is shared.
        public Election snapshot()
        {
            return new Election( winnerStrategy, new HashMap<>(votes));
        }
    }

    // Strategy for selecting a winning instance from a collection of votes.
    interface WinnerStrategy
    {
        org.neo4j.cluster.InstanceId pickWinner( Collection<Vote> votes );
    }

    @Override
    public boolean equals( Object o )
    {
        if ( this == o )
        {
            return true;
        }
        if ( o == null || getClass() != o.getClass() )
        {
            return false;
        }
        ElectionContextImpl that = (ElectionContextImpl) o;
        // Equality is defined by the election state only (roles + elections),
        // not by the delegated cluster/heartbeat contexts.
        if ( elections != null ? !elections.equals( that.elections ) : that.elections != null )
        {
            return false;
        }
        if ( roles != null ? !roles.equals( that.roles ) : that.roles != null )
        {
            return false;
        }
        return true;
    }

    @Override
    public int hashCode()
    {
        int result = roles != null ? roles.hashCode() : 0;
        result = 31 * result + (elections != null ? elections.hashCode() : 0);
        return result;
    }

    /**
     * Winner strategy biased for (promotion) or against (demotion) one
     * particular instance; see pickWinner for the filtering rule.
     */
    private class BiasedWinnerStrategy implements WinnerStrategy
    {
        private final org.neo4j.cluster.InstanceId biasedNode;
        private final boolean positiveSuggestion;

        public BiasedWinnerStrategy( org.neo4j.cluster.InstanceId biasedNode, boolean positiveSuggestion )
        {
            this.biasedNode = biasedNode;
            this.positiveSuggestion = positiveSuggestion;
        }

        @Override
        public org.neo4j.cluster.InstanceId pickWinner( Collection<Vote> voteList )
        {
            // Remove blank votes
            List<Vote> filteredVoteList = removeBlankVotes( voteList );
            // Sort based on credentials
            // The most suited candidate should come out on top
            Collections.sort( filteredVoteList );
            Collections.reverse( filteredVoteList );
            clusterContext.getLogger( getClass() ).debug( "Election started with " + voteList +
                    ", ended up with " + filteredVoteList + " where " + biasedNode + " is biased for " +
                    (positiveSuggestion ? "promotion" : "demotion") );
            for ( Vote vote : filteredVoteList )
            {
                // Elect the biased instance biased as winner
                // (promotion keeps only votes FOR the biased node, demotion
                // keeps only votes for anyone else)
                if ( vote.getSuggestedNode().equals( biasedNode ) == positiveSuggestion )
                {
                    return vote.getSuggestedNode();
                }
            }
            // No possible winner
            return null;
        }
    }

    // Drops votes whose credentials mark the voter as not electable.
    private static List<Vote> removeBlankVotes( Collection<Vote> voteList )
    {
        return toList( Iterables.filter( new Predicate<Vote>()
        {
            @Override
            public boolean accept( Vote item )
            {
                return !(item.getCredentials() instanceof NotElectableElectionCredentials);
            }
        }, voteList ) );
    }
}
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_protocol_atomicbroadcast_multipaxos_context_ElectionContextImpl.java
|
3,856
|
/**
 * State shared between the various paxos context implementations: the URI this
 * instance is bound at, cluster-wide learning progress, the next paxos
 * instance id to hand out, and the current cluster configuration.
 */
class CommonContextState
{
    private URI boundAt;
    // Highest paxos instance known to be learned anywhere in the cluster; -1 = none.
    private long lastKnownLearnedInstanceInCluster = -1;
    // The instance that reported that highest learned value, if any.
    private org.neo4j.cluster.InstanceId lastKnownAliveUpToDateInstance;
    private long nextInstanceId = 0;
    private ClusterConfiguration configuration;

    public CommonContextState( ClusterConfiguration configuration )
    {
        this.configuration = configuration;
    }

    // Snapshot constructor; lastKnownAliveUpToDateInstance is intentionally not
    // carried over — presumably re-learned after restore; TODO confirm.
    private CommonContextState( URI boundAt, long lastKnownLearnedInstanceInCluster, long nextInstanceId,
                                ClusterConfiguration configuration )
    {
        this.boundAt = boundAt;
        this.lastKnownLearnedInstanceInCluster = lastKnownLearnedInstanceInCluster;
        this.nextInstanceId = nextInstanceId;
        this.configuration = configuration;
    }

    public URI boundAt()
    {
        return boundAt;
    }

    public void setBoundAt( URI boundAt )
    {
        this.boundAt = boundAt;
    }

    public long lastKnownLearnedInstanceInCluster()
    {
        return lastKnownLearnedInstanceInCluster;
    }

    /**
     * Monotonically advances the cluster-wide learned instance marker.
     * Values lower than the current one are ignored, EXCEPT the special value
     * -1 which resets the marker (used when leaving the cluster).
     *
     * @param instanceId the member that reported the value; when non-null it is
     *                   remembered as the most up-to-date alive instance
     */
    public void setLastKnownLearnedInstanceInCluster( long lastKnownLearnedInstanceInCluster, InstanceId instanceId )
    {
        if(this.lastKnownLearnedInstanceInCluster <= lastKnownLearnedInstanceInCluster)
        {
            this.lastKnownLearnedInstanceInCluster = lastKnownLearnedInstanceInCluster;
            if ( instanceId != null )
            {
                this.lastKnownAliveUpToDateInstance = instanceId;
            }
        }
        else if(lastKnownLearnedInstanceInCluster == -1)
        {
            // Special case for clearing the state
            this.lastKnownLearnedInstanceInCluster = -1;
        }
    }

    public org.neo4j.cluster.InstanceId getLastKnownAliveUpToDateInstance()
    {
        return this.lastKnownAliveUpToDateInstance;
    }

    public long nextInstanceId()
    {
        return nextInstanceId;
    }

    public void setNextInstanceId( long nextInstanceId )
    {
        this.nextInstanceId = nextInstanceId;
    }

    // Returns the current id and advances the counter (post-increment).
    public long getAndIncrementInstanceId()
    {
        return nextInstanceId++;
    }

    public ClusterConfiguration configuration()
    {
        return configuration;
    }

    public void setConfiguration( ClusterConfiguration configuration )
    {
        this.configuration = configuration;
    }

    public CommonContextState snapshot( StringLogger logger )
    {
        return new CommonContextState( boundAt, lastKnownLearnedInstanceInCluster, nextInstanceId,
                configuration.snapshot(logger) );
    }

    @Override
    public boolean equals( Object o )
    {
        if ( this == o )
        {
            return true;
        }
        if ( o == null || getClass() != o.getClass() )
        {
            return false;
        }
        CommonContextState that = (CommonContextState) o;
        if ( lastKnownLearnedInstanceInCluster != that.lastKnownLearnedInstanceInCluster )
        {
            return false;
        }
        if ( nextInstanceId != that.nextInstanceId )
        {
            return false;
        }
        if ( boundAt != null ? !boundAt.equals( that.boundAt ) : that.boundAt != null )
        {
            return false;
        }
        if ( configuration != null ? !configuration.equals( that.configuration ) : that.configuration != null )
        {
            return false;
        }
        return true;
    }

    @Override
    public int hashCode()
    {
        int result = boundAt != null ? boundAt.hashCode() : 0;
        result = 31 * result + (int) (lastKnownLearnedInstanceInCluster ^ (lastKnownLearnedInstanceInCluster >>> 32));
        result = 31 * result + (int) (nextInstanceId ^ (nextInstanceId >>> 32));
        result = 31 * result + (configuration != null ? configuration.hashCode() : 0);
        return result;
    }
}
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_protocol_atomicbroadcast_multipaxos_context_CommonContextState.java
|
3,857
|
{
@Override
public void notify( ClusterListener listener )
{
listener.unelected( roleName, instanceId, commonState.configuration().getUriForId( instanceId ) );
}
} );
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_protocol_atomicbroadcast_multipaxos_context_ClusterContextImpl.java
|
3,858
|
{
@Override
public void notify( ClusterListener listener )
{
listener.elected( roleName, instanceId,
commonState.configuration().getUriForId( instanceId ) );
}
} );
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_protocol_atomicbroadcast_multipaxos_context_ClusterContextImpl.java
|
3,859
|
{
@Override
public void notify( ClusterListener listener )
{
listener.leftCluster( node );
}
} );
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_protocol_atomicbroadcast_multipaxos_context_ClusterContextImpl.java
|
3,860
|
{
@Override
public void notify( ClusterListener listener )
{
listener.joinedCluster( instanceId, atURI );
}
} );
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_protocol_atomicbroadcast_multipaxos_context_ClusterContextImpl.java
|
3,861
|
{
@Override
public void notify( ClusterListener listener )
{
listener.leftCluster();
}
} );
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_protocol_atomicbroadcast_multipaxos_context_ClusterContextImpl.java
|
3,862
|
{
@Override
public void notify( ClusterListener listener )
{
listener.enteredCluster( commonState.configuration() );
}
});
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_protocol_atomicbroadcast_multipaxos_context_ClusterContextImpl.java
|
3,863
|
{
@Override
public boolean accept( InstanceId item )
{
return !isFailed( item );
}
}, commonState.configuration().getMemberIds() );
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_protocol_atomicbroadcast_multipaxos_context_HeartbeatContextImpl.java
|
3,864
|
/**
 * Learner-role context for the paxos protocol: tracks which instances have
 * been learned and delivered locally, and how far the rest of the cluster is
 * known to have progressed (via CommonContextState).
 */
class LearnerContextImpl
        extends AbstractContextImpl
        implements LearnerContext
{
    // LearnerContext
    // Highest instance id delivered to the application locally; -1 = none.
    private long lastDeliveredInstanceId = -1;
    // Highest instance id learned (value known) locally; -1 = none.
    private long lastLearnedInstanceId = -1;

    /** To minimize logging, keep track of the latest learn miss, only log when it changes. */
    private final CappedOperation<org.neo4j.cluster.protocol.atomicbroadcast.multipaxos.InstanceId> learnMissLogging =
            new CappedOperation<org.neo4j.cluster.protocol.atomicbroadcast.multipaxos.InstanceId>(
                    CappedOperation.differentItems() )
            {
                @Override
                protected void triggered( InstanceId instanceId )
                {
                    getLogger( LearnerState.class ).debug( "Did not have learned value for instance " + instanceId );
                }
            };

    private final HeartbeatContext heartbeatContext;
    private final AcceptorInstanceStore instanceStore;
    private final ObjectInputStreamFactory objectInputStreamFactory;
    private final ObjectOutputStreamFactory objectOutputStreamFactory;
    private final PaxosInstanceStore paxosInstances;

    LearnerContextImpl( org.neo4j.cluster.InstanceId me, CommonContextState commonState,
                        Logging logging,
                        Timeouts timeouts, PaxosInstanceStore paxosInstances,
                        AcceptorInstanceStore instanceStore,
                        ObjectInputStreamFactory objectInputStreamFactory,
                        ObjectOutputStreamFactory objectOutputStreamFactory,
                        HeartbeatContext heartbeatContext )
    {
        super( me, commonState, logging, timeouts );
        this.heartbeatContext = heartbeatContext;
        this.instanceStore = instanceStore;
        this.objectInputStreamFactory = objectInputStreamFactory;
        this.objectOutputStreamFactory = objectOutputStreamFactory;
        this.paxosInstances = paxosInstances;
    }

    // Snapshot constructor: restores the learned/delivered watermarks as well.
    private LearnerContextImpl( org.neo4j.cluster.InstanceId me, CommonContextState commonState, Logging logging,
                                Timeouts timeouts, long lastDeliveredInstanceId, long lastLearnedInstanceId,
                                HeartbeatContext heartbeatContext,
                                AcceptorInstanceStore instanceStore, ObjectInputStreamFactory objectInputStreamFactory,
                                ObjectOutputStreamFactory objectOutputStreamFactory, PaxosInstanceStore paxosInstances )
    {
        super( me, commonState, logging, timeouts );
        this.lastDeliveredInstanceId = lastDeliveredInstanceId;
        this.lastLearnedInstanceId = lastLearnedInstanceId;
        this.heartbeatContext = heartbeatContext;
        this.instanceStore = instanceStore;
        this.objectInputStreamFactory = objectInputStreamFactory;
        this.objectOutputStreamFactory = objectOutputStreamFactory;
        this.paxosInstances = paxosInstances;
    }

    @Override
    public long getLastDeliveredInstanceId()
    {
        return lastDeliveredInstanceId;
    }

    @Override
    public void setLastDeliveredInstanceId( long lastDeliveredInstanceId )
    {
        // Persist the delivery watermark alongside the in-memory copy.
        this.lastDeliveredInstanceId = lastDeliveredInstanceId;
        instanceStore.lastDelivered( new InstanceId( lastDeliveredInstanceId ) );
    }

    @Override
    public long getLastLearnedInstanceId()
    {
        return lastLearnedInstanceId;
    }

    @Override
    public long getLastKnownLearnedInstanceInCluster()
    {
        return commonState.lastKnownLearnedInstanceInCluster();
    }

    @Override
    public void setLastKnownLearnedInstanceInCluster( long lastKnownLearnedInstanceInCluster,
                                                      org.neo4j.cluster.InstanceId instanceId )
    {
        commonState.setLastKnownLearnedInstanceInCluster( lastKnownLearnedInstanceInCluster, instanceId );
    }

    @Override
    public org.neo4j.cluster.InstanceId getLastKnownAliveUpToDateInstance()
    {
        return commonState.getLastKnownAliveUpToDateInstance();
    }

    @Override
    public void learnedInstanceId( long instanceId )
    {
        // Watermarks only move forward; also propagate to the cluster-wide view.
        this.lastLearnedInstanceId = Math.max( lastLearnedInstanceId, instanceId );
        if ( lastLearnedInstanceId > commonState.lastKnownLearnedInstanceInCluster() )
        {
            commonState.setLastKnownLearnedInstanceInCluster( lastLearnedInstanceId, null );
        }
    }

    @Override
    public boolean hasDeliveredAllKnownInstances()
    {
        return lastDeliveredInstanceId == commonState.lastKnownLearnedInstanceInCluster();
    }

    @Override
    public void leave()
    {
        // Reset all watermarks to their initial "nothing learned" state.
        lastDeliveredInstanceId = -1;
        lastLearnedInstanceId = -1;
        commonState.setLastKnownLearnedInstanceInCluster( -1, null );
    }

    @Override
    public PaxosInstance getPaxosInstance( InstanceId instanceId )
    {
        return paxosInstances.getPaxosInstance( instanceId );
    }

    @Override
    public AtomicBroadcastSerializer newSerializer()
    {
        return new AtomicBroadcastSerializer( objectInputStreamFactory, objectOutputStreamFactory );
    }

    @Override
    public Iterable<org.neo4j.cluster.InstanceId> getAlive()
    {
        return heartbeatContext.getAlive();
    }

    @Override
    public void setNextInstanceId( long id )
    {
        commonState.setNextInstanceId( id );
    }

    @Override
    public void notifyLearnMiss( InstanceId instanceId )
    {
        // Rate-limited logging: only logs when the missed instance changes.
        learnMissLogging.event( instanceId );
    }

    public LearnerContextImpl snapshot( CommonContextState commonStateSnapshot, Logging logging, Timeouts timeouts,
                                        PaxosInstanceStore paxosInstancesSnapshot, AcceptorInstanceStore instanceStore,
                                        ObjectInputStreamFactory objectInputStreamFactory, ObjectOutputStreamFactory
                                                objectOutputStreamFactory, HeartbeatContextImpl snapshotHeartbeatContext )
    {
        return new LearnerContextImpl( me, commonStateSnapshot, logging, timeouts, lastDeliveredInstanceId,
                lastLearnedInstanceId, snapshotHeartbeatContext, instanceStore, objectInputStreamFactory,
                objectOutputStreamFactory, paxosInstancesSnapshot );
    }

    @Override
    public boolean equals( Object o )
    {
        if ( this == o )
        {
            return true;
        }
        if ( o == null || getClass() != o.getClass() )
        {
            return false;
        }
        LearnerContextImpl that = (LearnerContextImpl) o;
        if ( lastDeliveredInstanceId != that.lastDeliveredInstanceId )
        {
            return false;
        }
        if ( lastLearnedInstanceId != that.lastLearnedInstanceId )
        {
            return false;
        }
        if ( heartbeatContext != null ? !heartbeatContext.equals( that.heartbeatContext ) : that.heartbeatContext !=
                null )
        {
            return false;
        }
        if ( instanceStore != null ? !instanceStore.equals( that.instanceStore ) : that.instanceStore != null )
        {
            return false;
        }
        if ( paxosInstances != null ? !paxosInstances.equals( that.paxosInstances ) : that.paxosInstances != null )
        {
            return false;
        }
        return true;
    }

    @Override
    public int hashCode()
    {
        int result = (int) (lastDeliveredInstanceId ^ (lastDeliveredInstanceId >>> 32));
        result = 31 * result + (int) (lastLearnedInstanceId ^ (lastLearnedInstanceId >>> 32));
        result = 31 * result + (heartbeatContext != null ? heartbeatContext.hashCode() : 0);
        result = 31 * result + (instanceStore != null ? instanceStore.hashCode() : 0);
        result = 31 * result + (paxosInstances != null ? paxosInstances.hashCode() : 0);
        return result;
    }
}
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_protocol_atomicbroadcast_multipaxos_context_LearnerContextImpl.java
|
3,865
|
{
@Override
protected void triggered( InstanceId instanceId )
{
getLogger( LearnerState.class ).debug( "Did not have learned value for instance " + instanceId );
}
};
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_protocol_atomicbroadcast_multipaxos_context_LearnerContextImpl.java
|
3,866
|
/**
 * Serializable payload describing a single cluster-configuration change:
 * a member joining, a member leaving, a role being won, or a role being lost.
 * At most one "kind" of change is expected per instance; apply() replays the
 * change onto a ClusterContext.
 */
public static class ConfigurationChangeState
        implements Serializable
{
    private InstanceId join;
    private URI joinUri;
    private InstanceId leave;
    private String roleWon;
    private InstanceId winner;
    private String roleLost;
    private InstanceId loser;

    public void join( InstanceId join, URI joinUri )
    {
        this.join = join;
        this.joinUri = joinUri;
    }

    public void leave( InstanceId uri )
    {
        this.leave = uri;
    }

    public void elected( String role, InstanceId winner )
    {
        this.roleWon = role;
        this.winner = winner;
    }

    public void unelected( String role, InstanceId unelected )
    {
        roleLost = role;
        loser = unelected;
    }

    public InstanceId getJoin()
    {
        return join;
    }

    public URI getJoinUri()
    {
        return joinUri;
    }

    public InstanceId getLeave()
    {
        return leave;
    }

    /**
     * Replays whichever change(s) this state carries onto the given context.
     */
    public void apply( ClusterContext context )
    {
        if ( join != null )
        {
            context.joined( join, joinUri );
        }
        if ( leave != null )
        {
            context.left( leave );
        }
        if ( roleWon != null )
        {
            context.elected( roleWon, winner );
        }
        if ( roleLost != null )
        {
            context.unelected( roleLost, loser );
        }
    }

    public boolean isLeaving( InstanceId me )
    {
        return me.equals( leave );
    }

    @Override
    public String toString()
    {
        if ( join != null )
        {
            return "Change cluster config, join:" + join;
        }
        if ( leave != null )
        {
            return "Change cluster config, leave:" + leave;
        }
        if ( roleWon != null )
        {
            return "Change cluster config, elected:" + winner + " as " + roleWon;
        }
        else
        {
            // BUG FIX: previously printed roleWon here, which is always null in
            // the unelected case — the lost role is roleLost.
            return "Change cluster config, unelected:" + loser + " as " + roleLost;
        }
    }

    @Override
    public boolean equals( Object o )
    {
        if ( this == o )
        {
            return true;
        }
        if ( o == null || getClass() != o.getClass() )
        {
            return false;
        }
        ConfigurationChangeState that = (ConfigurationChangeState) o;
        if ( join != null ? !join.equals( that.join ) : that.join != null )
        {
            return false;
        }
        if ( joinUri != null ? !joinUri.equals( that.joinUri ) : that.joinUri != null )
        {
            return false;
        }
        if ( leave != null ? !leave.equals( that.leave ) : that.leave != null )
        {
            return false;
        }
        if ( loser != null ? !loser.equals( that.loser ) : that.loser != null )
        {
            return false;
        }
        if ( roleLost != null ? !roleLost.equals( that.roleLost ) : that.roleLost != null )
        {
            return false;
        }
        if ( roleWon != null ? !roleWon.equals( that.roleWon ) : that.roleWon != null )
        {
            return false;
        }
        if ( winner != null ? !winner.equals( that.winner ) : that.winner != null )
        {
            return false;
        }
        return true;
    }

    @Override
    public int hashCode()
    {
        int result = join != null ? join.hashCode() : 0;
        result = 31 * result + (joinUri != null ? joinUri.hashCode() : 0);
        result = 31 * result + (leave != null ? leave.hashCode() : 0);
        result = 31 * result + (roleWon != null ? roleWon.hashCode() : 0);
        result = 31 * result + (winner != null ? winner.hashCode() : 0);
        result = 31 * result + (roleLost != null ? roleLost.hashCode() : 0);
        result = 31 * result + (loser != null ? loser.hashCode() : 0);
        return result;
    }
}
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_protocol_cluster_ClusterMessage.java
|
3,867
|
public class ClusterTestScriptDSL
implements ClusterTestScript
{
public abstract class ClusterAction
implements Runnable
{
public long time;
}
private final Queue<ClusterAction> actions = new LinkedList<ClusterAction>();
private final AtomicBroadcastSerializer serializer = new AtomicBroadcastSerializer(new ObjectStreamFactory(), new ObjectStreamFactory());
private int rounds = 100;
private long now = 0;
// Sets how many simulation rounds this script runs for; returns this for chaining.
public ClusterTestScriptDSL rounds( int n )
{
rounds = n;
return this;
}
public ClusterTestScriptDSL join( int time, final int joinServer, final int... joinServers )
{
return addAction( new ClusterAction()
{
@Override
public void run()
{
Cluster joinCluster = servers.get( joinServer - 1 ).newClient( Cluster.class );
for ( final Cluster cluster : out )
{
if ( cluster.equals( joinCluster ) )
{
out.remove( cluster );
logger.getLogger().debug( "Join:" + cluster.toString() );
if ( joinServers.length == 0 )
{
if ( in.isEmpty() )
{
cluster.create( "default" );
}
else
{
// Use test info to figure out who to join
URI[] toJoin = new URI[servers.size()];
for ( int i = 0; i < servers.size(); i++ )
{
toJoin[i] = servers.get( i ).getServer().boundAt();
}
final Future<ClusterConfiguration> result = cluster.join( "default", toJoin );
Runnable joiner = new Runnable()
{
@Override
public void run()
{
try
{
ClusterConfiguration clusterConfiguration = result.get();
logger.getLogger().debug( "**** Cluster configuration:" +
clusterConfiguration );
}
catch ( Exception e )
{
logger.getLogger().debug( "**** Node could not join cluster:" + e
.getMessage() );
out.add( cluster );
}
}
};
network.addFutureWaiter( result, joiner );
}
}
else
{
// List of servers to join was explicitly specified, so use that
URI[] instanceUris = new URI[joinServers.length];
for ( int i = 0; i < joinServers.length; i++ )
{
int server = joinServers[i];
instanceUris[i] = URI.create( "server" + server );
}
final Future<ClusterConfiguration> result = cluster.join( "default", instanceUris );
Runnable joiner = new Runnable()
{
@Override
public void run()
{
try
{
ClusterConfiguration clusterConfiguration = result.get();
logger.getLogger().debug( "**** Cluster configuration:" +
clusterConfiguration );
}
catch ( Exception e )
{
logger.getLogger().debug(
"**** Node " + joinServer + " could not join cluster:" + e
.getMessage() );
if ( !(e.getCause() instanceof IllegalStateException) )
{
cluster.create( "default" );
}
else
{
logger.getLogger().debug( "*** Incorrectly configured cluster? "
+ e.getCause().getMessage() );
}
}
}
};
network.addFutureWaiter( result, joiner );
}
break;
}
}
}
}, time );
}
public ClusterTestScriptDSL leave( long time, final int leaveServer )
{
return addAction( new ClusterAction()
{
@Override
public void run()
{
Cluster leaveCluster = servers.get( leaveServer - 1 ).newClient( Cluster.class );
for ( Cluster cluster : in )
{
if ( cluster.equals( leaveCluster ) )
{
in.remove( cluster );
cluster.leave();
logger.getLogger().debug( "Leave:" + cluster.toString() );
break;
}
}
}
}, time );
}
// Schedules a simulated network failure of the given server (1-based index)
// 'time' ticks after the previously scheduled action.
public ClusterTestScriptDSL down( int time, final int serverDown )
{
return addAction( new ClusterAction()
{
@Override
public void run()
{
Cluster server = servers.get( serverDown - 1 ).newClient( Cluster.class );
// NOTE(review): "server" is prepended to server.toString(); if toString()
// already yields "serverN" this produces "serverserverN" — confirm the id
// format expected by ScriptableNetworkFailureLatencyStrategy.
network.getNetworkLatencyStrategy().getStrategy( ScriptableNetworkFailureLatencyStrategy.class )
.nodeIsDown( "server"+server.toString() );
logger.getLogger().debug( server + " is down" );
}
}, time );
}
// Schedules the given server (1-based index) coming back onto the simulated
// network 'time' ticks after the previously scheduled action.
public ClusterTestScriptDSL up( int time, final int serverUp )
{
return addAction( new ClusterAction()
{
@Override
public void run()
{
Cluster server = servers.get( serverUp - 1 ).newClient( Cluster.class );
// NOTE(review): same "server" + toString() concatenation as in down();
// verify the resulting node id matches what nodeIsDown() used.
network.getNetworkLatencyStrategy()
.getStrategy( ScriptableNetworkFailureLatencyStrategy.class )
.nodeIsUp( "server"+server.toString() );
logger.getLogger().debug( server + " is up" );
}
}, time );
}
public ClusterTestScriptDSL broadcast( int time, final int server, final Object value )
{
return addAction( new ClusterAction()
{
@Override
public void run()
{
AtomicBroadcast broadcast = servers.get( server - 1 ).newClient( AtomicBroadcast.class );
try
{
broadcast.broadcast( serializer.broadcast( value ) );
}
catch ( IOException e )
{
e.printStackTrace();
}
}
}, time );
}
public ClusterTestScriptDSL sleep( final int sleepTime )
{
return addAction( new ClusterAction()
{
@Override
public void run()
{
logger.getLogger().debug( "Slept for " + sleepTime );
}
}, sleepTime );
}
public ClusterTestScriptDSL message( int time, final String msg )
{
return addAction( new ClusterAction()
{
@Override
public void run()
{
logger.getLogger().debug( msg );
}
}, time );
}
public ClusterTestScriptDSL verifyConfigurations( long time )
{
return addAction( new ClusterAction()
{
@Override
public void run()
{
ClusterMockTest.this.verifyConfigurations();
}
}, time );
}
// Appends an action scheduled 'delay' ticks after the previously added one,
// advancing the script clock accordingly. Returns this for call chaining.
private ClusterTestScriptDSL addAction( ClusterAction action, long delay )
{
    now += delay;
    action.time = now;
    actions.offer( action );
    return this;
}
@Override
public int rounds()
{
return rounds;
}
// Executes, in insertion order, every queued action whose scheduled time
// equals the current simulation time; later actions stay queued.
@Override
public void tick( long time )
{
    for ( ClusterAction head = actions.peek(); head != null && head.time == time; head = actions.peek() )
    {
        actions.poll().run();
    }
}
public ClusterTestScriptDSL getRoles( final Map<String, InstanceId> roles )
{
return addAction( new ClusterAction()
{
@Override
public void run()
{
ClusterMockTest.this.getRoles( roles );
}
}, 0 );
}
public ClusterTestScriptDSL verifyCoordinatorRoleSwitched( final Map<String, InstanceId> comparedTo )
{
return addAction( new ClusterAction()
{
@Override
public void run()
{
HashMap<String, InstanceId> roles = new HashMap<String, InstanceId>();
ClusterMockTest.this.getRoles( roles );
InstanceId oldCoordinator = comparedTo.get( ClusterConfiguration.COORDINATOR );
InstanceId newCoordinator = roles.get( ClusterConfiguration.COORDINATOR );
assertNotNull( "Should have had a coordinator before bringing it down", oldCoordinator );
assertNotNull( "Should have a new coordinator after the previous failed", newCoordinator );
assertTrue( "Should have elected a new coordinator", !oldCoordinator.equals( newCoordinator ) );
}
}, 0 );
}
}
| false
|
enterprise_cluster_src_test_java_org_neo4j_cluster_protocol_cluster_ClusterMockTest.java
|
3,868
|
{
@Override
public void enteredCluster( ClusterConfiguration clusterConfiguration )
{
logger.getLogger().debug( uri + " entered cluster:" + clusterConfiguration.getMemberURIs() );
in.add( cluster );
}
@Override
public void joinedCluster( InstanceId id, URI member )
{
logger.getLogger().debug( uri + " sees a join from " + id + " at URI " + member );
}
@Override
public void leftCluster( InstanceId id )
{
logger.getLogger().debug( uri + " sees a leave from " + id );
}
@Override
public void leftCluster()
{
logger.getLogger().debug( uri + " left cluster" );
out.add( cluster );
}
@Override
public void elected( String role, InstanceId id, URI electedMember )
{
logger.getLogger().debug(
uri + " sees an election: " + id + " elected as " + role + " at URI " + electedMember );
}
@Override
public void unelected( String role, InstanceId instanceId, URI electedMember )
{
logger.getLogger().debug(
uri + " sees an unelection: " + instanceId + " removed from " + role + " at URI " + electedMember );
}
} );
| false
|
enterprise_cluster_src_test_java_org_neo4j_cluster_protocol_cluster_ClusterMockTest.java
|
3,869
|
{
AtomicBroadcastSerializer serializer = new AtomicBroadcastSerializer(new ObjectStreamFactory(), new ObjectStreamFactory());
@Override
public void receive( Payload value )
{
try
{
logger.getLogger().debug( uri + " received: " + serializer.receive( value ) );
}
catch ( IOException e )
{
e.printStackTrace();
}
catch ( ClassNotFoundException e )
{
e.printStackTrace();
}
}
} );
| false
|
enterprise_cluster_src_test_java_org_neo4j_cluster_protocol_cluster_ClusterMockTest.java
|
3,870
|
{
@Override
public void failed( InstanceId server )
{
logger.getLogger().warn( uri + ": Failed:" + server );
}
@Override
public void alive( InstanceId server )
{
logger.getLogger().debug( uri + ": Alive:" + server );
}
} );
| false
|
enterprise_cluster_src_test_java_org_neo4j_cluster_protocol_cluster_ClusterMockTest.java
|
3,871
|
public class ClusterMockTest
{
// Builds the default simulated network: a fixed message latency of 10 plus a
// scriptable failure overlay, a 500-unit message timeout, and a 200-unit
// timeout for heartbeat sends. (Units appear to be simulation ticks — confirm
// against NetworkMock.)
public static NetworkMock DEFAULT_NETWORK()
{
return new NetworkMock( 10,
new MultipleFailureLatencyStrategy( new FixedNetworkLatencyStrategy( 10 ),
new ScriptableNetworkFailureLatencyStrategy() ),
new MessageTimeoutStrategy( new FixedTimeoutStrategy( 500 ) )
.timeout( HeartbeatMessage.sendHeartbeat, 200 ) );
}
List<TestProtocolServer> servers = new ArrayList<TestProtocolServer>();
List<Cluster> out = new ArrayList<Cluster>();
List<Cluster> in = new ArrayList<Cluster>();
Map<Integer, URI> members = new HashMap<Integer, URI>();
@Rule
public LoggerRule logger = new LoggerRule();
public NetworkMock network;
ClusterTestScript script;
@After
public void tearDown()
{
logger.getLogger().info( "Current threads" );
for ( Map.Entry<Thread, StackTraceElement[]> threadEntry : Thread.getAllStackTraces().entrySet() )
{
logger.getLogger().info( threadEntry.getKey().getName() );
for ( StackTraceElement stackTraceElement : threadEntry.getValue() )
{
logger.getLogger().info( " " + stackTraceElement.toString() );
}
}
}
protected void testCluster( int nrOfServers, NetworkMock mock,
ClusterTestScript script )
throws ExecutionException, InterruptedException, URISyntaxException, TimeoutException
{
int[] serverIds = new int[nrOfServers];
for ( int i = 1; i <= nrOfServers; i++ )
{
serverIds[i - 1] = i;
}
testCluster( serverIds, null, mock, script );
}
protected void testCluster( int[] serverIds, VerifyInstanceConfiguration[] finalConfig, NetworkMock mock, ClusterTestScript script )
throws ExecutionException, InterruptedException, URISyntaxException, TimeoutException
{
this.script = script;
network = mock;
servers.clear();
out.clear();
in.clear();
for ( int i = 0; i < serverIds.length; i++ )
{
final URI uri = new URI( "server" + (i + 1) );
members.put( serverIds[i], uri );
TestProtocolServer server = network.addServer( serverIds[i], uri );
final Cluster cluster = server.newClient( Cluster.class );
clusterStateListener( uri, cluster );
server.newClient( Heartbeat.class ).addHeartbeatListener( new HeartbeatListener()
{
@Override
public void failed( InstanceId server )
{
logger.getLogger().warn( uri + ": Failed:" + server );
}
@Override
public void alive( InstanceId server )
{
logger.getLogger().debug( uri + ": Alive:" + server );
}
} );
server.newClient( AtomicBroadcast.class ).addAtomicBroadcastListener( new AtomicBroadcastListener()
{
AtomicBroadcastSerializer serializer = new AtomicBroadcastSerializer(new ObjectStreamFactory(), new ObjectStreamFactory());
@Override
public void receive( Payload value )
{
try
{
logger.getLogger().debug( uri + " received: " + serializer.receive( value ) );
}
catch ( IOException e )
{
e.printStackTrace();
}
catch ( ClassNotFoundException e )
{
e.printStackTrace();
}
}
} );
servers.add( server );
out.add( cluster );
}
// Run test
for ( int i = 0; i < script.rounds(); i++ )
{
logger.getLogger().debug( "Round " + i + ", time:" + network.getTime() );
script.tick( network.getTime() );
network.tick();
}
// Let messages settle
network.tick( 100 );
if ( finalConfig == null )
{
verifyConfigurations();
}
else
{
verifyConfigurations( finalConfig );
}
logger.getLogger().debug( "All nodes leave" );
// All leave
for ( Cluster cluster : new ArrayList<Cluster>( in ) )
{
logger.getLogger().debug( "Leaving:" + cluster );
cluster.leave();
in.remove( cluster );
network.tick( 400 );
}
if ( finalConfig != null )
{
verifyConfigurations( finalConfig );
}
else
{
verifyConfigurations();
}
}
private void clusterStateListener( final URI uri, final Cluster cluster )
{
cluster.addClusterListener( new ClusterListener()
{
@Override
public void enteredCluster( ClusterConfiguration clusterConfiguration )
{
logger.getLogger().debug( uri + " entered cluster:" + clusterConfiguration.getMemberURIs() );
in.add( cluster );
}
@Override
public void joinedCluster( InstanceId id, URI member )
{
logger.getLogger().debug( uri + " sees a join from " + id + " at URI " + member );
}
@Override
public void leftCluster( InstanceId id )
{
logger.getLogger().debug( uri + " sees a leave from " + id );
}
@Override
public void leftCluster()
{
logger.getLogger().debug( uri + " left cluster" );
out.add( cluster );
}
@Override
public void elected( String role, InstanceId id, URI electedMember )
{
logger.getLogger().debug(
uri + " sees an election: " + id + " elected as " + role + " at URI " + electedMember );
}
@Override
public void unelected( String role, InstanceId instanceId, URI electedMember )
{
logger.getLogger().debug(
uri + " sees an unelection: " + instanceId + " removed from " + role + " at URI " + electedMember );
}
} );
}
public void verifyConfigurations( VerifyInstanceConfiguration[] toCheckAgainst )
{
logger.getLogger().debug( "Verify configurations against given" );
List<URI> members;
Map<String, InstanceId> roles;
Set<InstanceId> failed;
List<AssertionError> errors = new LinkedList<AssertionError>();
List<TestProtocolServer> protocolServers = network.getServers();
assertEquals( "You must provide a configuration for all instances",
protocolServers.size(), toCheckAgainst.length );
for ( int j = 0; j < protocolServers.size(); j++ )
{
members = toCheckAgainst[j].members;
roles = toCheckAgainst[j].roles;
failed = toCheckAgainst[j].failed;
StateMachines stateMachines = protocolServers.get( j ).getServer().getStateMachines();
State<?, ?> clusterState = stateMachines.getStateMachine( ClusterMessage.class ).getState();
if ( !clusterState.equals( ClusterState.entered ) )
{
logger.getLogger().warn( "Instance " + (j + 1) + " is not in the cluster (" + clusterState + ")" );
continue;
}
ClusterContext context = (ClusterContext) stateMachines.getStateMachine( ClusterMessage.class )
.getContext();
HeartbeatContext heartbeatContext = (HeartbeatContext) stateMachines.getStateMachine(
HeartbeatMessage.class ).getContext();
ClusterConfiguration clusterConfiguration = context.getConfiguration();
if ( !clusterConfiguration.getMemberURIs().isEmpty() )
{
logger.getLogger().debug( " Server " + (j + 1) + ": Cluster:" + clusterConfiguration.getMemberURIs() +
", Roles:" + clusterConfiguration.getRoles() + ", Failed:" + heartbeatContext.getFailed() );
verifyConfigurations( stateMachines, members, roles, failed, errors );
}
}
// assertEquals( "In:" + in + ", Out:" + out, protocolServers.size(), Iterables.count( Iterables.<Cluster,
// List<Cluster>>flatten( in, out ) ) );
if ( !errors.isEmpty() )
{
for ( AssertionError error : errors )
{
logger.getLogger().error( error.toString() );
}
throw errors.get( 0 );
}
}
public void verifyConfigurations()
{
logger.getLogger().debug( "Verify configurations" );
List<URI> members = null;
Map<String, InstanceId> roles = null;
Set<InstanceId> failed = null;
List<AssertionError> errors = new LinkedList<AssertionError>();
List<TestProtocolServer> protocolServers = network.getServers();
for ( int j = 0; j < protocolServers.size(); j++ )
{
StateMachines stateMachines = protocolServers.get( j ).getServer().getStateMachines();
State<?, ?> clusterState = stateMachines.getStateMachine( ClusterMessage.class ).getState();
if ( !clusterState.equals( ClusterState.entered ) )
{
logger.getLogger().warn( "Instance " + (j + 1) + " is not in the cluster (" + clusterState + ")" );
continue;
}
ClusterContext context = (ClusterContext) stateMachines.getStateMachine( ClusterMessage.class )
.getContext();
HeartbeatContext heartbeatContext = (HeartbeatContext) stateMachines.getStateMachine(
HeartbeatMessage.class ).getContext();
ClusterConfiguration clusterConfiguration = context.getConfiguration();
if ( !clusterConfiguration.getMemberURIs().isEmpty() )
{
logger.getLogger().debug( " Server " + (j + 1) + ": Cluster:" + clusterConfiguration.getMemberURIs() +
", Roles:" + clusterConfiguration.getRoles() + ", Failed:" + heartbeatContext.getFailed() );
if ( members == null )
{
members = clusterConfiguration.getMemberURIs();
roles = clusterConfiguration.getRoles();
failed = heartbeatContext.getFailed();
}
else
{
verifyConfigurations( stateMachines, members, roles, failed, errors );
}
}
}
assertEquals( "In:" + in + ", Out:" + out, protocolServers.size(), Iterables.count( Iterables.<Cluster,
List<Cluster>>flatten( in, out ) ) );
if ( !errors.isEmpty() )
{
for ( AssertionError error : errors )
{
logger.getLogger().error( error.toString() );
}
throw errors.get( 0 );
}
}
private void verifyConfigurations( StateMachines stateMachines, List<URI> members, Map<String, InstanceId> roles,
Set<InstanceId> failed, List<AssertionError> errors )
{
ClusterContext context = (ClusterContext) stateMachines.getStateMachine( ClusterMessage.class )
.getContext();
int myId = context.getMyId().toIntegerIndex();
State<?, ?> clusterState = stateMachines.getStateMachine( ClusterMessage.class ).getState();
if ( !clusterState.equals( ClusterState.entered ) )
{
logger.getLogger().warn( "Instance " + myId + " is not in the cluster (" + clusterState + ")" );
return;
}
HeartbeatContext heartbeatContext = (HeartbeatContext) stateMachines.getStateMachine(
HeartbeatMessage.class ).getContext();
ClusterConfiguration clusterConfiguration = context.getConfiguration();
try
{
assertEquals( "Config for server" + myId + " is wrong", new HashSet<URI>( members ),
new HashSet<URI>( clusterConfiguration
.getMemberURIs() ) );
}
catch ( AssertionError e )
{
errors.add( e );
}
try
{
assertEquals( "Roles for server" + myId + " is wrong", roles, clusterConfiguration
.getRoles() );
}
catch ( AssertionError e )
{
errors.add( e );
}
try
{
assertEquals( "Failed for server" + myId + " is wrong", failed, heartbeatContext.getFailed
() );
}
catch ( AssertionError e )
{
errors.add( e );
}
}
public interface ClusterTestScript
{
int rounds();
void tick( long time );
}
public class ClusterTestScriptDSL
implements ClusterTestScript
{
public abstract class ClusterAction
implements Runnable
{
public long time;
}
private final Queue<ClusterAction> actions = new LinkedList<ClusterAction>();
private final AtomicBroadcastSerializer serializer = new AtomicBroadcastSerializer(new ObjectStreamFactory(), new ObjectStreamFactory());
private int rounds = 100;
private long now = 0;
public ClusterTestScriptDSL rounds( int n )
{
rounds = n;
return this;
}
public ClusterTestScriptDSL join( int time, final int joinServer, final int... joinServers )
{
return addAction( new ClusterAction()
{
@Override
public void run()
{
Cluster joinCluster = servers.get( joinServer - 1 ).newClient( Cluster.class );
for ( final Cluster cluster : out )
{
if ( cluster.equals( joinCluster ) )
{
out.remove( cluster );
logger.getLogger().debug( "Join:" + cluster.toString() );
if ( joinServers.length == 0 )
{
if ( in.isEmpty() )
{
cluster.create( "default" );
}
else
{
// Use test info to figure out who to join
URI[] toJoin = new URI[servers.size()];
for ( int i = 0; i < servers.size(); i++ )
{
toJoin[i] = servers.get( i ).getServer().boundAt();
}
final Future<ClusterConfiguration> result = cluster.join( "default", toJoin );
Runnable joiner = new Runnable()
{
@Override
public void run()
{
try
{
ClusterConfiguration clusterConfiguration = result.get();
logger.getLogger().debug( "**** Cluster configuration:" +
clusterConfiguration );
}
catch ( Exception e )
{
logger.getLogger().debug( "**** Node could not join cluster:" + e
.getMessage() );
out.add( cluster );
}
}
};
network.addFutureWaiter( result, joiner );
}
}
else
{
// List of servers to join was explicitly specified, so use that
URI[] instanceUris = new URI[joinServers.length];
for ( int i = 0; i < joinServers.length; i++ )
{
int server = joinServers[i];
instanceUris[i] = URI.create( "server" + server );
}
final Future<ClusterConfiguration> result = cluster.join( "default", instanceUris );
Runnable joiner = new Runnable()
{
@Override
public void run()
{
try
{
ClusterConfiguration clusterConfiguration = result.get();
logger.getLogger().debug( "**** Cluster configuration:" +
clusterConfiguration );
}
catch ( Exception e )
{
logger.getLogger().debug(
"**** Node " + joinServer + " could not join cluster:" + e
.getMessage() );
if ( !(e.getCause() instanceof IllegalStateException) )
{
cluster.create( "default" );
}
else
{
logger.getLogger().debug( "*** Incorrectly configured cluster? "
+ e.getCause().getMessage() );
}
}
}
};
network.addFutureWaiter( result, joiner );
}
break;
}
}
}
}, time );
}
public ClusterTestScriptDSL leave( long time, final int leaveServer )
{
return addAction( new ClusterAction()
{
@Override
public void run()
{
Cluster leaveCluster = servers.get( leaveServer - 1 ).newClient( Cluster.class );
for ( Cluster cluster : in )
{
if ( cluster.equals( leaveCluster ) )
{
in.remove( cluster );
cluster.leave();
logger.getLogger().debug( "Leave:" + cluster.toString() );
break;
}
}
}
}, time );
}
public ClusterTestScriptDSL down( int time, final int serverDown )
{
return addAction( new ClusterAction()
{
@Override
public void run()
{
Cluster server = servers.get( serverDown - 1 ).newClient( Cluster.class );
network.getNetworkLatencyStrategy().getStrategy( ScriptableNetworkFailureLatencyStrategy.class )
.nodeIsDown( "server"+server.toString() );
logger.getLogger().debug( server + " is down" );
}
}, time );
}
public ClusterTestScriptDSL up( int time, final int serverUp )
{
return addAction( new ClusterAction()
{
@Override
public void run()
{
Cluster server = servers.get( serverUp - 1 ).newClient( Cluster.class );
network.getNetworkLatencyStrategy()
.getStrategy( ScriptableNetworkFailureLatencyStrategy.class )
.nodeIsUp( "server"+server.toString() );
logger.getLogger().debug( server + " is up" );
}
}, time );
}
public ClusterTestScriptDSL broadcast( int time, final int server, final Object value )
{
return addAction( new ClusterAction()
{
@Override
public void run()
{
AtomicBroadcast broadcast = servers.get( server - 1 ).newClient( AtomicBroadcast.class );
try
{
broadcast.broadcast( serializer.broadcast( value ) );
}
catch ( IOException e )
{
e.printStackTrace();
}
}
}, time );
}
public ClusterTestScriptDSL sleep( final int sleepTime )
{
return addAction( new ClusterAction()
{
@Override
public void run()
{
logger.getLogger().debug( "Slept for " + sleepTime );
}
}, sleepTime );
}
public ClusterTestScriptDSL message( int time, final String msg )
{
return addAction( new ClusterAction()
{
@Override
public void run()
{
logger.getLogger().debug( msg );
}
}, time );
}
public ClusterTestScriptDSL verifyConfigurations( long time )
{
return addAction( new ClusterAction()
{
@Override
public void run()
{
ClusterMockTest.this.verifyConfigurations();
}
}, time );
}
private ClusterTestScriptDSL addAction( ClusterAction action, long time )
{
action.time = now + time;
actions.offer( action );
now += time;
return this;
}
@Override
public int rounds()
{
return rounds;
}
@Override
public void tick( long time )
{
while ( !actions.isEmpty() && actions.peek().time == time )
{
actions.poll().run();
}
}
public ClusterTestScriptDSL getRoles( final Map<String, InstanceId> roles )
{
return addAction( new ClusterAction()
{
@Override
public void run()
{
ClusterMockTest.this.getRoles( roles );
}
}, 0 );
}
public ClusterTestScriptDSL verifyCoordinatorRoleSwitched( final Map<String, InstanceId> comparedTo )
{
return addAction( new ClusterAction()
{
@Override
public void run()
{
HashMap<String, InstanceId> roles = new HashMap<String, InstanceId>();
ClusterMockTest.this.getRoles( roles );
InstanceId oldCoordinator = comparedTo.get( ClusterConfiguration.COORDINATOR );
InstanceId newCoordinator = roles.get( ClusterConfiguration.COORDINATOR );
assertNotNull( "Should have had a coordinator before bringing it down", oldCoordinator );
assertNotNull( "Should have a new coordinator after the previous failed", newCoordinator );
assertTrue( "Should have elected a new coordinator", !oldCoordinator.equals( newCoordinator ) );
}
}, 0 );
}
}
public class ClusterTestScriptRandom
implements ClusterTestScript
{
private final long seed;
private final Random random;
public ClusterTestScriptRandom( long seed )
{
if ( seed == -1 )
{
seed = System.nanoTime();
}
this.seed = seed;
random = new Random( seed );
}
@Override
public int rounds()
{
return 300;
}
@Override
public void tick( long time )
{
if ( time >= (rounds() - 100) * 10 )
{
return;
}
if ( time == 0 )
{
logger.getLogger().debug( "Random seed:" + seed + "L" );
}
if ( random.nextDouble() >= 0.8 )
{
double inOrOut = (in.size() - out.size()) / ((double) servers.size());
double whatToDo = random.nextDouble() + inOrOut;
logger.getLogger().debug( "What to do:" + whatToDo );
if ( whatToDo < 0.5 && !out.isEmpty() )
{
int idx = random.nextInt( out.size() );
final Cluster cluster = out.remove( idx );
if ( in.isEmpty() )
{
cluster.create( "default" );
}
else
{
final Future<ClusterConfiguration> result = cluster.join( "default",
URI.create( in.get( 0 ).toString() ) );
Runnable joiner = new Runnable()
{
@Override
public void run()
{
try
{
ClusterConfiguration clusterConfiguration = result.get();
logger.getLogger().debug( "**** Cluster configuration:" +
clusterConfiguration );
}
catch ( Exception e )
{
logger.getLogger().debug( "**** Node could not join cluster:" + e
.getMessage() );
out.add( cluster );
}
}
};
network.addFutureWaiter( result, joiner );
}
logger.getLogger().debug( "Enter cluster:" + cluster.toString() );
}
else if ( !in.isEmpty() )
{
int idx = random.nextInt( in.size() );
Cluster cluster = in.remove( idx );
cluster.leave();
logger.getLogger().debug( "Leave cluster:" + cluster.toString() );
}
}
}
}
// Collects the role assignments (e.g. coordinator) from every server whose
// cluster state machine is in the 'entered' state, accumulating them into the
// given map; servers not in the cluster are logged and skipped.
private void getRoles( Map<String, InstanceId> roles )
{
List<TestProtocolServer> protocolServers = network.getServers();
for ( int j = 0; j < protocolServers.size(); j++ )
{
StateMachines stateMachines = protocolServers.get( j )
.getServer()
.getStateMachines();
State<?, ?> clusterState = stateMachines.getStateMachine( ClusterMessage.class ).getState();
if ( !clusterState.equals( ClusterState.entered ) )
{
logger.getLogger().warn( "Instance " + (j + 1) + " is not in the cluster (" + clusterState + ")" );
continue;
}
ClusterContext context = (ClusterContext) stateMachines.getStateMachine( ClusterMessage.class )
.getContext();
ClusterConfiguration clusterConfiguration = context.getConfiguration();
roles.putAll( clusterConfiguration.getRoles() );
}
}
}
| false
|
enterprise_cluster_src_test_java_org_neo4j_cluster_protocol_cluster_ClusterMockTest.java
|
3,872
|
/**
 * Immutable value tracking how many configuration pings remain before the
 * configuration request times out.
 */
public static class ConfigurationTimeoutState
{
    private final int remainingPings;

    public ConfigurationTimeoutState( int remainingPings)
    {
        this.remainingPings = remainingPings;
    }

    public int getRemainingPings()
    {
        return remainingPings;
    }

    @Override
    public boolean equals( Object o )
    {
        if ( this == o )
        {
            return true;
        }
        if ( o == null || getClass() != o.getClass() )
        {
            return false;
        }
        // Same class guaranteed above, so the single field decides equality.
        return remainingPings == ((ConfigurationTimeoutState) o).remainingPings;
    }

    @Override
    public int hashCode()
    {
        // The ping count itself is a perfectly distributed hash for one int field.
        return remainingPings;
    }
}
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_protocol_cluster_ClusterMessage.java
|
3,873
|
/**
 * Snapshot of a cluster's configuration sent in response to a configuration
 * request: member URIs keyed by instance id, role assignments, the latest
 * Paxos instance id the responder has received, and the cluster name.
 */
public static class ConfigurationResponseState
implements Serializable
{
// Member URIs keyed by instance id.
private final Map<InstanceId, URI> nodes;
// Latest Paxos instance id received by the responding instance.
private final org.neo4j.cluster.protocol.atomicbroadcast.multipaxos.InstanceId latestReceivedInstanceId;
// Role name (e.g. coordinator) -> instance currently holding it.
private final Map<String, InstanceId> roles;
private final String clusterName;
public ConfigurationResponseState( Map<String, InstanceId> roles, Map<InstanceId, URI> nodes,
org.neo4j.cluster.protocol.atomicbroadcast.multipaxos.InstanceId latestReceivedInstanceId,
String clusterName )
{
this.roles = roles;
this.nodes = nodes;
this.latestReceivedInstanceId = latestReceivedInstanceId;
this.clusterName = clusterName;
}
public Map<InstanceId, URI> getMembers()
{
return nodes;
}
public Map<String, InstanceId> getRoles()
{
return roles;
}
public org.neo4j.cluster.protocol.atomicbroadcast.multipaxos.InstanceId getLatestReceivedInstanceId()
{
return latestReceivedInstanceId;
}
public String getClusterName()
{
return clusterName;
}
// Returns a copy with fresh (shallow) copies of both maps, so later mutation
// of this instance's maps does not affect the snapshot.
public ConfigurationResponseState snapshot()
{
return new ConfigurationResponseState( new HashMap<>(roles), new HashMap<>(nodes),
latestReceivedInstanceId, clusterName );
}
@Override
public String toString()
{
return "ConfigurationResponseState{" +
"nodes=" + nodes +
", latestReceivedInstanceId=" + latestReceivedInstanceId +
", roles=" + roles +
", clusterName='" + clusterName + '\'' +
'}';
}
// Null-safe field-by-field equality over all four fields.
@Override
public boolean equals( Object o )
{
if ( this == o )
{
return true;
}
if ( o == null || getClass() != o.getClass() )
{
return false;
}
ConfigurationResponseState that = (ConfigurationResponseState) o;
if ( clusterName != null ? !clusterName.equals( that.clusterName ) : that.clusterName != null )
{
return false;
}
if ( latestReceivedInstanceId != null ? !latestReceivedInstanceId.equals( that.latestReceivedInstanceId )
: that.latestReceivedInstanceId != null )
{
return false;
}
if ( nodes != null ? !nodes.equals( that.nodes ) : that.nodes != null )
{
return false;
}
if ( roles != null ? !roles.equals( that.roles ) : that.roles != null )
{
return false;
}
return true;
}
// 31-multiplier hash over the same fields compared in equals(); null hashes to 0.
@Override
public int hashCode()
{
int result = nodes != null ? nodes.hashCode() : 0;
result = 31 * result + (latestReceivedInstanceId != null ? latestReceivedInstanceId.hashCode() : 0);
result = 31 * result + (roles != null ? roles.hashCode() : 0);
result = 31 * result + (clusterName != null ? clusterName.hashCode() : 0);
return result;
}
}
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_protocol_cluster_ClusterMessage.java
|
3,874
|
/**
 * Request by an instance to join an existing cluster, carrying the joiner's
 * server id and the URI it is listening on.
 *
 * NOTE: compareTo() orders by joiningId only, so its ordering is not
 * consistent with equals(), which also compares joiningUri — beware when
 * storing instances in sorted sets or maps.
 */
public static class ConfigurationRequestState implements Serializable, Comparable<ConfigurationRequestState>
{
    private static final long serialVersionUID = -221752558518247157L;

    // Both fields are assigned exactly once in the constructor; made final to
    // make this value object explicitly immutable. Default serialization is
    // unaffected by the final modifier.
    private final InstanceId joiningId;
    private final URI joiningUri;

    public ConfigurationRequestState( InstanceId joiningId, URI joiningUri )
    {
        this.joiningId = joiningId;
        this.joiningUri = joiningUri;
    }

    public InstanceId getJoiningId()
    {
        return joiningId;
    }

    public URI getJoiningUri()
    {
        return joiningUri;
    }

    @Override
    public boolean equals( Object o )
    {
        if ( this == o )
        {
            return true;
        }
        if ( o == null || getClass() != o.getClass() )
        {
            return false;
        }
        ConfigurationRequestState that = (ConfigurationRequestState) o;
        if ( !joiningId.equals( that.joiningId ) )
        {
            return false;
        }
        if ( !joiningUri.equals( that.joiningUri ) )
        {
            return false;
        }
        return true;
    }

    @Override
    public int hashCode()
    {
        int result = joiningId.hashCode();
        result = 31 * result + joiningUri.hashCode();
        return result;
    }

    @Override
    public int compareTo( ConfigurationRequestState o )
    {
        // Orders by server id only; see class note about equals() consistency.
        return this.joiningId.compareTo( o.joiningId );
    }

    @Override
    public String toString()
    {
        return joiningId + ":" + joiningUri;
    }
}
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_protocol_cluster_ClusterMessage.java
|
3,875
|
public class ClusterMembershipTest
extends ClusterMockTest
{
// Three instances join one after another (100 ticks apart), then leave in
// reverse join order (3, 2, 1); final configurations are verified by the
// harness after the script completes.
@Test
public void threeNodesJoinAndThenLeave()
throws URISyntaxException, ExecutionException, TimeoutException, InterruptedException
{
testCluster( 3, DEFAULT_NETWORK(), new ClusterTestScriptDSL().
rounds( 70 ).
join( 100, 1 ).
join( 100, 2 ).
join( 100, 3 ).
message( 100, "*** Cluster formed, now leave" ).
leave( 0, 3 ).
leave( 100, 2 ).
leave( 100, 1 ) );
}
@Test
public void threeNodesJoinAndThenLeaveInOriginalOrder()
throws URISyntaxException, ExecutionException, TimeoutException, InterruptedException
{
testCluster( 3, DEFAULT_NETWORK(), new ClusterTestScriptDSL().
rounds( 100 ).
join( 100, 1 ).
join( 100, 2 ).
join( 100, 3 ).
message( 100, "*** Cluster formed, now leave" ).
verifyConfigurations( 0 ).sleep( 100 ).
leave( 0, 1 ).
verifyConfigurations( 200 ).
leave( 0, 2 ).
leave( 200, 3 ) );
}
@Test
public void noobTest()
throws URISyntaxException, ExecutionException, TimeoutException, InterruptedException
{
testCluster( 1, DEFAULT_NETWORK(), new ClusterTestScriptDSL().
rounds( 3 ).
sleep( 10 ).
join( 0, 1 ).
message( 100, "*** Cluster formed, now leave" ).
leave( 0, 1 ).verifyConfigurations( 0 ) );
}
@Test
public void sevenNodesJoinAndThenLeave()
throws URISyntaxException, ExecutionException, TimeoutException, InterruptedException
{
testCluster( 7, DEFAULT_NETWORK(), new ClusterTestScriptDSL().
rounds( 500 ).
join( 100, 1 ).
join( 100, 2 ).
join( 100, 3 ).
join( 100, 4 ).
join( 100, 5 ).
join( 100, 6 ).
join( 100, 7 ).
leave( 100, 7 ).
leave( 500, 6 ).
leave( 500, 5 ).
leave( 500, 4 ).
leave( 500, 3 ).
leave( 500, 2 ).
leave( 500, 1 )
);
}
@Test
public void oneNodeJoinThenTwoJoinRoughlyAtSameTime()
throws URISyntaxException, ExecutionException, TimeoutException, InterruptedException
{
testCluster( 3, DEFAULT_NETWORK(), new ClusterTestScriptDSL().
rounds( 500 ).
join( 100, 1 ).
join( 100, 2 ).
join( 10, 3 ).
message( 2000, "*** All are in " ).
leave( 0, 3 )
);
}
@Test
public void oneNodeJoinThenThreeJoinRoughlyAtSameTime2()
throws URISyntaxException, ExecutionException, TimeoutException, InterruptedException
{
testCluster( 4, DEFAULT_NETWORK(), new ClusterTestScriptDSL().
rounds( 800 ).
join( 100, 1 ).
join( 100, 2 ).
join( 10, 3 ).
join( 10, 4 ).
message( 2000, "*** All are in " ).
broadcast( 10, 2, "Hello world" )
);
}
@Test
public void twoNodesJoinThenOneLeavesAsThirdJoins()
throws URISyntaxException, ExecutionException, TimeoutException, InterruptedException
{
testCluster( 3, DEFAULT_NETWORK(), new ClusterTestScriptDSL().
rounds( 820 ).
join( 0, 1 ).
join( 10, 2 ).
message( 80, "*** 1 and 2 are in cluster" ).
leave( 10, 2 ).
join( 20, 3 )
);
}
@Test
@Ignore("instance 1 is in start, 2 in discovery. Correct but we don't have a way to verify it yet")
public void oneNodeCreatesClusterAndThenAnotherJoinsAsFirstLeaves()
throws URISyntaxException, ExecutionException, TimeoutException, InterruptedException
{
testCluster( 2, DEFAULT_NETWORK(), new ClusterTestScriptDSL().
rounds( 1000 ).
join( 0, 1 ).
join( 10, 2, 1, 2 ).
leave( 20, 1 )
);
}
@Test
public void threeNodesJoinAndThenFirstLeavesAsFourthJoins()
throws URISyntaxException, ExecutionException, TimeoutException, InterruptedException
{
testCluster( 4, DEFAULT_NETWORK(), new ClusterTestScriptDSL().
rounds( 200 ).
join( 100, 1 ).
join( 100, 2 ).
join( 100, 3 ).
message( 100, "*** Cluster formed, now leave" ).
leave( 0, 1 ).
join( 10, 4 )
);
}
@Test
public void threeNodesJoinAndThenFirstLeavesAsFourthJoins2()
throws URISyntaxException, ExecutionException, TimeoutException, InterruptedException
{
testCluster( 5, DEFAULT_NETWORK(), new ClusterTestScriptDSL().
rounds( 200 ).
join( 100, 1 ).
join( 100, 2 ).
join( 100, 3 ).
join( 100, 4 ).
message( 100, "*** Cluster formed, now leave" ).
leave( 0, 1 ).
join( 30, 5 ).
leave( 0, 2 )
);
}
@Ignore( "Ignore until fix available" )
@Test
public void threeNodesJoinAtSameTime()
throws URISyntaxException, ExecutionException, TimeoutException, InterruptedException
{
testCluster( 3, DEFAULT_NETWORK(), new ClusterTestScriptDSL().
rounds( 400 ).
join( 0, 1, 1, 2, 3 ).
join( 0, 2, 1, 2, 3 ).
join( 0, 3, 1, 2, 3 ).
message( 390, "*** Cluster formed" ));
}
}
| false
|
enterprise_cluster_src_test_java_org_neo4j_cluster_protocol_cluster_ClusterMembershipTest.java
|
3,876
|
/**
 * Verifies that {@link LearnerContextImpl} de-duplicates "learn miss" log
 * messages: repeated misses for the same Paxos instance are logged once,
 * but a miss for a different instance (or a re-miss after another instance
 * was missed in between) is logged again.
 */
public class LearnerContextImplTest
{
    @Test
    public void shouldOnlyLogLearnMissOnce() throws Exception
    {
        // Given: a learner context whose collaborators are all mocked;
        // only the logging interactions matter here.
        TestLogging logging = new TestLogging();
        LearnerContextImpl ctx = new LearnerContextImpl( new InstanceId( 1 ), mock(CommonContextState.class), logging,
                mock(Timeouts.class), mock( PaxosInstanceStore.class ), mock(AcceptorInstanceStore.class), mock(
                ObjectInputStreamFactory.class), mock( ObjectOutputStreamFactory.class), mock(HeartbeatContextImpl.class));

        // When: instance 1 misses twice, then instance 2 twice, then instance 1 again.
        // (Uppercase 'L' long literals — lowercase 'l' is easily misread as '1'.)
        ctx.notifyLearnMiss( new org.neo4j.cluster.protocol.atomicbroadcast.multipaxos.InstanceId( 1L ) );
        ctx.notifyLearnMiss( new org.neo4j.cluster.protocol.atomicbroadcast.multipaxos.InstanceId( 1L ) );
        ctx.notifyLearnMiss( new org.neo4j.cluster.protocol.atomicbroadcast.multipaxos.InstanceId( 2L ) );
        ctx.notifyLearnMiss( new org.neo4j.cluster.protocol.atomicbroadcast.multipaxos.InstanceId( 2L ) );
        ctx.notifyLearnMiss( new org.neo4j.cluster.protocol.atomicbroadcast.multipaxos.InstanceId( 1L ) );

        // Then: consecutive duplicates are collapsed to a single debug line each.
        logging.getMessagesLog( LearnerState.class ).assertExactly(
                debug( "Did not have learned value for instance 1" ),
                debug( "Did not have learned value for instance 2" ),
                debug( "Did not have learned value for instance 1" )
        );
    }
}
| false
|
enterprise_cluster_src_test_java_org_neo4j_cluster_protocol_atomicbroadcast_multipaxos_context_LearnerContextImplTest.java
|
3,877
|
/**
 * Scripted heartbeat/failure-detection scenarios: members are taken down and
 * brought back up on the mock network, and the tests verify that failure is
 * detected, the cluster recovers, and (where applicable) roles are re-elected.
 */
public class ClusterHeartbeatTest
    extends ClusterMockTest
{
    // Baseline: a healthy three-member cluster with no induced failures.
    @Test
    public void threeNodesJoinAndNoFailures()
        throws URISyntaxException, ExecutionException, TimeoutException, InterruptedException
    {
        testCluster( 3, DEFAULT_NETWORK(), new ClusterTestScriptDSL().
            rounds( 200 ).
            join( 100, 1 ).
            join( 100, 2 ).
            join( 100, 3 ).
            verifyConfigurations( 3000 ).
            leave( 0, 1 ).
            leave( 200, 2 ).
            leave( 200, 3 ) );
    }

    // A non-coordinator member goes down and comes back; the cluster should
    // notice the failure and then recover to a consistent configuration.
    @Test
    public void threeNodesJoinAndThenSlaveDies()
        throws URISyntaxException, ExecutionException, TimeoutException, InterruptedException
    {
        testCluster( 3, DEFAULT_NETWORK(), new ClusterTestScriptDSL().
            rounds( 1000 ).
            join( 100, 1 ).
            join( 100, 2 ).
            join( 100, 3 ).
            verifyConfigurations( 3000 ).
            message( 100, "*** All nodes up and ok" ).
            down( 100, 3 ).
            message( 1000, "*** Should have seen failure by now" ).
            up( 0, 3 ).
            message( 200, "*** Should have recovered by now" ).
            verifyConfigurations( 0 ).
            leave( 200, 1 ).
            leave( 200, 2 ).
            leave( 200, 3 ) );
    }

    // The coordinator (instance 1) flaps, then another member flaps;
    // configurations must reconverge after each recovery.
    @Test
    public void threeNodesJoinAndThenCoordinatorDies()
        throws URISyntaxException, ExecutionException, TimeoutException, InterruptedException
    {
        testCluster( 3, DEFAULT_NETWORK(), new ClusterTestScriptDSL().
            rounds( 1000 ).
            join( 100, 1, 1 ).
            join( 100, 2, 1 ).
            join( 100, 3, 1 ).
            message( 3000, "*** All nodes up and ok" ).
            down( 500, 1 ).
            message( 1000, "*** Should have seen failure by now" ).
            up( 0, 1 ).
            message( 2000, "*** Should have recovered by now" ).
            verifyConfigurations( 0 ).
            down( 0, 2 ).
            message( 1400, "*** Should have seen failure by now" ).
            up( 0, 2 ).
            message( 800, "*** All nodes leave" ).
            verifyConfigurations( 0 ).
            leave( 0, 1 ).
            leave( 300, 2 ).
            leave( 300, 3 ) );
    }

    // The coordinator stays down; the remaining members must elect a new one
    // (verified by comparing roles captured before the failure).
    @Test
    public void threeNodesJoinAndThenCoordinatorDiesForReal()
        throws URISyntaxException, ExecutionException, TimeoutException, InterruptedException
    {
        final Map<String, InstanceId> roles = new HashMap<String, InstanceId>();
        testCluster( 3, DEFAULT_NETWORK(), new ClusterTestScriptDSL().
            rounds( 1000 ).
            join( 100, 1, 1 ).
            join( 100, 2, 1 ).
            join( 100, 3, 1 ).
            message( 3000, "*** All nodes up and ok" ).
            getRoles( roles ).
            down( 800, 1 ).
            message( 2000, "*** Should have seen failure by now" ).
            verifyCoordinatorRoleSwitched( roles ).
            leave( 0, 1 ).
            leave( 300, 2 ).
            leave( 300, 3 ) );
    }
}
| false
|
enterprise_cluster_src_test_java_org_neo4j_cluster_protocol_cluster_ClusterHeartbeatTest.java
|
3,878
|
/**
 * Thrown when this instance attempted to join a cluster and was denied entry
 * by a responding member. Carries the {@link ConfigurationResponseState} that
 * accompanied the denial so callers can inspect the cluster state that
 * rejected the join.
 */
public class ClusterEntryDeniedException extends IllegalStateException
{
    // IllegalStateException is Serializable; declare an explicit version id
    // so serialized form is stable across recompiles.
    private static final long serialVersionUID = 1L;

    // NOTE(review): this field's type may not itself be Serializable —
    // confirm, or mark transient, if instances of this exception are
    // ever actually serialized.
    private final ConfigurationResponseState configurationResponseState;

    public ClusterEntryDeniedException( InstanceId me, ConfigurationResponseState configurationResponseState )
    {
        super( "I was denied entry. I am " + me + ", configuration response:" + configurationResponseState );
        this.configurationResponseState = configurationResponseState;
    }

    /** @return the configuration response received alongside the denial. */
    public ConfigurationResponseState getConfigurationResponseState()
    {
        return configurationResponseState;
    }
}
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_protocol_cluster_ClusterEntryDeniedException.java
|
3,879
|
/**
 * Unit tests for {@link ClusterConfiguration} membership bookkeeping:
 * joins and leaves must be idempotent and keep ids, URIs and member lists
 * in sync.
 */
public class ClusterConfigurationTest
{
    public static URI NEO4J_SERVER1_URI;
    public static InstanceId NEO4J_SERVER_ID;

    // Static fixture data; URISyntaxException cannot propagate from a static
    // initializer, hence the catch-and-print.
    static
    {
        try
        {
            NEO4J_SERVER1_URI = new URI( "neo4j://server1" );
            NEO4J_SERVER_ID = new InstanceId( 1 );
        }
        catch ( URISyntaxException e )
        {
            e.printStackTrace();
        }
    }

    // Fresh, empty configuration per test instance.
    ClusterConfiguration configuration = new ClusterConfiguration( "default", StringLogger.SYSTEM, new ArrayList<URI>() );

    @Test
    public void givenEmptyClusterWhenNodeAddedThenNodeWasAdded()
    {
        configuration.joined( NEO4J_SERVER_ID, NEO4J_SERVER1_URI );

        assertThat( configuration.getMemberIds(), matchesIterable( Iterables.<InstanceId, InstanceId>iterable( NEO4J_SERVER_ID ) ) );
        assertThat( configuration.getUriForId( NEO4J_SERVER_ID ), equalTo( NEO4J_SERVER1_URI ) );
        assertThat( configuration.getMemberURIs(), equalTo( Arrays.asList( NEO4J_SERVER1_URI ) ) );
    }

    // joined() must be idempotent for an identical (id, uri) pair.
    @Test
    public void givenEmptyClusterWhenNodeIsAddedTwiceThenNodeWasAddedOnce()
    {
        configuration.joined( NEO4J_SERVER_ID, NEO4J_SERVER1_URI );
        configuration.joined( NEO4J_SERVER_ID, NEO4J_SERVER1_URI );

        assertThat( configuration.getMemberIds(), matchesIterable( Iterables.<InstanceId, InstanceId>iterable( NEO4J_SERVER_ID ) ) );
        assertThat( configuration.getUriForId( NEO4J_SERVER_ID ), equalTo( NEO4J_SERVER1_URI ) );
        assertThat( configuration.getMemberURIs(), equalTo( Arrays.asList( NEO4J_SERVER1_URI ) ) );
    }

    @Test
    public void givenClusterWithOneNodeWhenNodeIsRemovedThenClusterIsEmpty()
    {
        configuration.joined( NEO4J_SERVER_ID, NEO4J_SERVER1_URI );
        configuration.left( NEO4J_SERVER_ID );

        assertThat( configuration.getMemberIds(), matchesIterable( Iterables.<InstanceId>empty() ) );
        assertThat( configuration.getUriForId( NEO4J_SERVER_ID ), equalTo( null ) );
        assertThat( configuration.getMemberURIs(), equalTo( Collections.<URI>emptyList() ) );
    }

    // left() must also be idempotent — removing an absent member is a no-op.
    @Test
    public void givenClusterWithOneNodeWhenNodeIsRemovedTwiceThenClusterIsEmpty()
    {
        configuration.joined( NEO4J_SERVER_ID, NEO4J_SERVER1_URI );
        configuration.left( NEO4J_SERVER_ID );
        configuration.left( NEO4J_SERVER_ID );

        assertThat( configuration.getMemberIds(), matchesIterable( Iterables.<InstanceId>empty() ) );
        assertThat( configuration.getUriForId( NEO4J_SERVER_ID ), equalTo( null ) );
        assertThat( configuration.getMemberURIs(), equalTo( Collections.<URI>emptyList() ) );
    }
}
| false
|
enterprise_cluster_src_test_java_org_neo4j_cluster_protocol_cluster_ClusterConfigurationTest.java
|
3,880
|
{
@Override
public boolean accept( Map.Entry<String, InstanceId> item )
{
return item.getValue().equals( node );
}
}, roles.entrySet() ) );
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_protocol_cluster_ClusterConfiguration.java
|
3,881
|
{
@Override
public String apply( Map.Entry<String, InstanceId> stringURIEntry )
{
return stringURIEntry.getKey();
}
}, Iterables.filter( new Predicate<Map.Entry<String, InstanceId>>()
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_protocol_cluster_ClusterConfiguration.java
|
3,882
|
/**
 * Mutable view of a cluster's membership: which instance ids are members, the
 * URI each listens on, and which instance holds each named role (e.g.
 * {@link #COORDINATOR}).
 *
 * Mutation methods use copy-on-write for the {@code members} and {@code roles}
 * maps, so previously handed-out map references are not mutated in place.
 */
public class ClusterConfiguration
{
    public static final String COORDINATOR = "coordinator";

    private final String name;
    private final StringLogger logger;
    // URIs this instance may contact when trying to join the cluster.
    private final List<URI> candidateMembers;
    // Current members: instance id -> listening URI. Replaced, not mutated.
    private Map<InstanceId, URI> members;
    // Current role assignments: role name -> instance id. Replaced, not mutated.
    private Map<String, InstanceId> roles = new HashMap<String, InstanceId>();
    private int allowedFailures = 1;

    public ClusterConfiguration( String name, StringLogger logger, String... members )
    {
        this.name = name;
        this.logger = logger;
        this.candidateMembers = new ArrayList<URI>();
        for ( String node : members )
        {
            try
            {
                this.candidateMembers.add( new URI( node ) );
            }
            catch ( URISyntaxException e )
            {
                // Malformed candidate URIs are skipped; callers pass literals.
                e.printStackTrace();
            }
        }
        this.members = new HashMap<InstanceId, URI>();
    }

    public ClusterConfiguration( String name, StringLogger logger, Collection<URI> members )
    {
        this.name = name;
        this.logger = logger;
        this.candidateMembers = new ArrayList<URI>( members );
        this.members = new HashMap<InstanceId, URI>();
    }

    public ClusterConfiguration( ClusterConfiguration copy )
    {
        this(copy, copy.logger);
    }

    private ClusterConfiguration( ClusterConfiguration copy, StringLogger logger )
    {
        this.name = copy.name;
        this.logger = logger;
        this.candidateMembers = new ArrayList<URI>( copy.candidateMembers );
        this.roles = new HashMap<String, InstanceId>( copy.roles );
        this.members = new HashMap<InstanceId, URI>( copy.members );
    }

    /**
     * Records that an instance joined at the given URI. Idempotent for an
     * identical (id, uri) pair; a changed URI for a known id replaces it.
     */
    public void joined( InstanceId joinedInstanceId, URI instanceUri )
    {
        if ( instanceUri.equals( members.get( joinedInstanceId ) ) )
        {
            return; // Already know that this node is in - ignore
        }

        this.members = new HashMap<InstanceId, URI>( members );
        members.put( joinedInstanceId, instanceUri );
    }

    /**
     * Records that an instance left the cluster, dropping its membership and
     * every role it held. Idempotent for unknown ids.
     */
    public void left( InstanceId leftInstanceId )
    {
        logger.info( "Instance " + leftInstanceId + " is leaving the cluster" );
        this.members = new HashMap<InstanceId, URI>( members );
        members.remove( leftInstanceId );

        // Copy-on-write before removing roles, consistent with elected()/
        // unelected(), so role maps handed out earlier stay unchanged.
        roles = new HashMap<String, InstanceId>( roles );
        Iterator<Map.Entry<String, InstanceId>> entries = roles.entrySet().iterator();
        while ( entries.hasNext() )
        {
            Map.Entry<String, InstanceId> roleEntry = entries.next();
            if ( roleEntry.getValue().equals( leftInstanceId ) )
            {
                // Fixed: key is the role name, value is the instance id —
                // they were logged swapped.
                logger.info("Removed role " + roleEntry.getKey() + " from leaving instance " + roleEntry.getValue() );
                entries.remove();
            }
        }
    }

    /** Assigns the given role to a current member. */
    public void elected( String name, InstanceId electedInstanceId )
    {
        assert members.containsKey( electedInstanceId );
        roles = new HashMap<String, InstanceId>( roles );
        roles.put( name, electedInstanceId );
    }

    /** Clears an existing role assignment. */
    public void unelected( String roleName )
    {
        assert roles.containsKey( roleName );
        roles = new HashMap<String, InstanceId>( roles );
        roles.remove( roleName );
    }

    public void setMembers( Map<InstanceId, URI> members )
    {
        this.members = new HashMap<InstanceId, URI>( members );
    }

    public void setRoles( Map<String, InstanceId> roles )
    {
        for ( InstanceId electedInstanceId : roles.values() )
        {
            assert members.containsKey( electedInstanceId );
        }

        this.roles = new HashMap<String, InstanceId>( roles );
    }

    public Iterable<InstanceId> getMemberIds()
    {
        return members.keySet();
    }

    public Map<InstanceId, URI> getMembers()
    {
        return members;
    }

    public List<URI> getMemberURIs()
    {
        return Iterables.toList( members.values() );
    }

    public String getName()
    {
        return name;
    }

    public Map<String, InstanceId> getRoles()
    {
        return roles;
    }

    public int getAllowedFailures()
    {
        return allowedFailures;
    }

    /** Called when this instance itself leaves: forget all members and roles. */
    public void left()
    {
        this.members = new HashMap<InstanceId, URI>();
        roles = new HashMap<String, InstanceId>();
    }

    public void removeElected( String roleName )
    {
        roles = new HashMap<String, InstanceId>( roles );
        InstanceId removed = roles.remove( roleName );
        logger.info( "Removed role " + roleName + " from instance " + removed );
    }

    /** @return the instance currently holding the role, or null if unassigned. */
    public InstanceId getElected( String roleName )
    {
        return roles.get( roleName );
    }

    /** @return the names of all roles currently held by the given instance. */
    public Iterable<String> getRolesOf( final InstanceId node )
    {
        return Iterables.map( new Function<Map.Entry<String, InstanceId>, String>()
        {
            @Override
            public String apply( Map.Entry<String, InstanceId> stringURIEntry )
            {
                return stringURIEntry.getKey();
            }
        }, Iterables.filter( new Predicate<Map.Entry<String, InstanceId>>()
        {
            @Override
            public boolean accept( Map.Entry<String, InstanceId> item )
            {
                return item.getValue().equals( node );
            }
        }, roles.entrySet() ) );
    }

    /** @return the URI the instance listens on, or null if not a member. */
    public URI getUriForId( InstanceId node )
    {
        return members.get( node );
    }

    /** Reverse lookup of {@link #getUriForId}: null when no member has this URI. */
    public InstanceId getIdForUri( URI fromUri )
    {
        for ( Map.Entry<InstanceId, URI> serverIdURIEntry : members.entrySet() )
        {
            if ( serverIdURIEntry.getValue().equals( fromUri ) )
            {
                return serverIdURIEntry.getKey();
            }
        }
        return null;
    }

    /** @return an independent copy of this configuration using the given logger. */
    public ClusterConfiguration snapshot(StringLogger logger)
    {
        return new ClusterConfiguration(this, logger);
    }

    @Override
    public String toString()
    {
        return "Name:" + name + " Nodes:" + members + " Roles:" + roles;
    }

    @Override
    public boolean equals( Object o )
    {
        if ( this == o )
        {
            return true;
        }
        if ( o == null || getClass() != o.getClass() )
        {
            return false;
        }

        ClusterConfiguration that = (ClusterConfiguration) o;

        if ( allowedFailures != that.allowedFailures )
        {
            return false;
        }
        if ( !candidateMembers.equals( that.candidateMembers ) )
        {
            return false;
        }
        if ( !members.equals( that.members ) )
        {
            return false;
        }
        if ( !name.equals( that.name ) )
        {
            return false;
        }
        if ( !roles.equals( that.roles ) )
        {
            return false;
        }

        return true;
    }

    @Override
    public int hashCode()
    {
        int result = name.hashCode();
        result = 31 * result + candidateMembers.hashCode();
        result = 31 * result + members.hashCode();
        result = 31 * result + roles.hashCode();
        result = 31 * result + allowedFailures;
        return result;
    }
}
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_protocol_cluster_ClusterConfiguration.java
|
3,883
|
/**
 * Proposer-side state for multi-Paxos: the queue of values waiting to be
 * proposed, the map of Paxos instances currently "booked" (in flight) with the
 * message that booked them, and access to the shared instance store.
 *
 * The mutation order in these methods is load-bearing; code is unchanged,
 * only documented.
 */
class ProposerContextImpl
    extends AbstractContextImpl
    implements ProposerContext
{
    // Cap on simultaneously in-flight Paxos instances from this proposer.
    public static final int MAX_CONCURRENT_INSTANCES = 10;

    // ProposerContext
    // Values not yet assigned to a Paxos instance; pushed at the head, popped FIFO-at-head.
    private final Deque<Message> pendingValues;
    // Instance id -> message that booked it, while awaiting consensus.
    private final Map<InstanceId, Message> bookedInstances;
    private final PaxosInstanceStore paxosInstances;

    ProposerContextImpl( org.neo4j.cluster.InstanceId me, CommonContextState commonState,
                         Logging logging,
                         Timeouts timeouts, PaxosInstanceStore paxosInstances )
    {
        super( me, commonState, logging, timeouts );
        this.paxosInstances = paxosInstances;
        pendingValues = new LinkedList<>( );
        bookedInstances = new HashMap<>();
    }

    // Private snapshot constructor: takes pre-copied collections.
    private ProposerContextImpl( org.neo4j.cluster.InstanceId me, CommonContextState commonState, Logging logging,
                                 Timeouts timeouts, Deque<Message> pendingValues,
                                 Map<InstanceId, Message> bookedInstances, PaxosInstanceStore paxosInstances )
    {
        super( me, commonState, logging, timeouts );
        this.pendingValues = pendingValues;
        this.bookedInstances = bookedInstances;
        this.paxosInstances = paxosInstances;
    }

    @Override
    public InstanceId newInstanceId()
    {
        // Never propose something lower than last received instance id
        if ( commonState.lastKnownLearnedInstanceInCluster() >= commonState.nextInstanceId() )
        {
            commonState.setNextInstanceId( commonState.lastKnownLearnedInstanceInCluster() + 1 );
        }

        return new InstanceId( commonState.getAndIncrementInstanceId() );
    }

    // Full reset when this instance leaves the cluster.
    @Override
    public void leave()
    {
        pendingValues.clear();
        bookedInstances.clear();
        commonState.setNextInstanceId( 0 );

        paxosInstances.leave();
    }

    @Override
    public void bookInstance( InstanceId instanceId, Message message )
    {
        if ( message.getPayload() == null )
        {
            throw new IllegalArgumentException( "null payload for booking instance: " + message );
        }
        bookedInstances.put( instanceId, message );
    }

    @Override
    public PaxosInstance getPaxosInstance( InstanceId instanceId )
    {
        return paxosInstances.getPaxosInstance( instanceId );
    }

    @Override
    public void pendingValue( Message message )
    {
        // Head insertion: re-queued values get priority over older pending ones.
        pendingValues.offerFirst( message );
    }

    @Override
    public boolean hasPendingValues()
    {
        return !pendingValues.isEmpty();
    }

    @Override
    public Message popPendingValue()
    {
        // Removes from the head (most recently offered).
        return pendingValues.remove();
    }

    @Override
    public boolean canBookInstance()
    {
        return bookedInstances.size() < MAX_CONCURRENT_INSTANCES;
    }

    @Override
    public Message getBookedInstance( InstanceId id )
    {
        return bookedInstances.get( id );
    }

    @Override
    public Message<ProposerMessage> unbookInstance( InstanceId id )
    {
        return bookedInstances.remove( id );
    }

    @Override
    public int nrOfBookedInstances()
    {
        return bookedInstances.size();
    }

    /**
     * Quorum size for a proposal round: when there are enough acceptors to
     * tolerate the configured failures (n >= 2f+1), a majority-style quorum
     * of n - f suffices; otherwise every acceptor must answer.
     */
    @Override
    public int getMinimumQuorumSize( List<URI> acceptors )
    {
        // n >= 2f+1
        if ( acceptors.size() >= 2 * commonState.configuration().getAllowedFailures() + 1 )
        {
            return acceptors.size() - commonState.configuration().getAllowedFailures();
        }
        else
        {
            return acceptors.size();
        }
    }

    /**
     * This patches the booked instances that are pending in case the configuration of the cluster changes. This
     * should be called only when we learn a ConfigurationChangeState i.e. when we receive an accepted for
     * such a message. This won't "learn" the message, as in applying it on the cluster configuration, but will
     * just update properly the set of acceptors for pending instances.
     */
    @Override
    public void patchBookedInstances( ClusterMessage.ConfigurationChangeState value )
    {
        if ( value.getJoin() != null )
        {
            // An instance joined: swap its old URI (if any) for the new one
            // in every in-flight instance's acceptor set.
            for ( InstanceId instanceId : bookedInstances.keySet() )
            {
                PaxosInstance instance = paxosInstances.getPaxosInstance( instanceId );
                if ( instance.getAcceptors() != null )
                {
                    instance.getAcceptors().remove( commonState.configuration().getMembers().get( value.getJoin()));

                    getLogger( ProposerContext.class ).debug( "For booked instance " + instance +
                            " removed gone member "
                            + commonState.configuration().getMembers().get( value.getJoin() )
                            + " added joining member " +
                            value.getJoinUri() );

                    if ( !instance.getAcceptors().contains( value.getJoinUri() ) )
                    {
                        instance.getAcceptors().add( value.getJoinUri() );
                    }
                }
            }
        }
        else if ( value.getLeave() != null )
        {
            // An instance left: drop its URI from every in-flight acceptor set.
            for ( InstanceId instanceId : bookedInstances.keySet() )
            {
                PaxosInstance instance = paxosInstances.getPaxosInstance( instanceId );
                if ( instance.getAcceptors() != null )
                {
                    getLogger( ProposerContext.class ).debug( "For booked instance " + instance +
                            " removed leaving member "
                            + value.getLeave() + " (at URI " +
                            commonState.configuration().getMembers().get( value.getLeave() )
                            + ")" );
                    instance.getAcceptors().remove( commonState.configuration().getMembers().get(value.getLeave()));
                }
            }
        }
    }

    // Deep-copies the mutable collections; shared services are passed in.
    public ProposerContextImpl snapshot( CommonContextState commonStateSnapshot, Logging logging, Timeouts timeouts,
                                         PaxosInstanceStore paxosInstancesSnapshot )
    {
        return new ProposerContextImpl( me, commonStateSnapshot, logging, timeouts, new LinkedList<>( pendingValues ),
                new HashMap<>(bookedInstances), paxosInstancesSnapshot );
    }

    @Override
    public boolean equals( Object o )
    {
        if ( this == o )
        {
            return true;
        }
        if ( o == null || getClass() != o.getClass() )
        {
            return false;
        }

        ProposerContextImpl that = (ProposerContextImpl) o;

        if ( bookedInstances != null ? !bookedInstances.equals( that.bookedInstances ) : that.bookedInstances != null )
        {
            return false;
        }
        if ( paxosInstances != null ? !paxosInstances.equals( that.paxosInstances ) : that.paxosInstances != null )
        {
            return false;
        }
        if ( pendingValues != null ? !pendingValues.equals( that.pendingValues ) : that.pendingValues != null )
        {
            return false;
        }

        return true;
    }

    @Override
    public int hashCode()
    {
        int result = pendingValues != null ? pendingValues.hashCode() : 0;
        result = 31 * result + (bookedInstances != null ? bookedInstances.hashCode() : 0);
        result = 31 * result + (paxosInstances != null ? paxosInstances.hashCode() : 0);
        return result;
    }
}
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_protocol_atomicbroadcast_multipaxos_context_ProposerContextImpl.java
|
3,884
|
/**
 * Aggregate of all protocol contexts that make up a multi-Paxos node:
 * cluster, proposer, acceptor, learner, heartbeat, election and atomic
 * broadcast, sharing one {@link CommonContextState} and one
 * {@link PaxosInstanceStore}.
 *
 * The construction order below matters — later contexts receive earlier
 * ones as collaborators, and heartbeat's circular dependencies on cluster
 * and learner are wired last. Do not reorder.
 */
public class MultiPaxosContext
{
    private final ClusterContextImpl clusterContext;
    private final ProposerContextImpl proposerContext;
    private final AcceptorContextImpl acceptorContext;
    private final LearnerContextImpl learnerContext;
    private final HeartbeatContextImpl heartbeatContext;
    private final ElectionContextImpl electionContext;
    private final AtomicBroadcastContextImpl atomicBroadcastContext;
    private final CommonContextState commonState;
    private final PaxosInstanceStore paxosInstances;

    public MultiPaxosContext( InstanceId me,
                              Iterable<ElectionRole> roles,
                              ClusterConfiguration configuration,
                              Executor executor,
                              Logging logging,
                              ObjectInputStreamFactory objectInputStreamFactory,
                              ObjectOutputStreamFactory objectOutputStreamFactory,
                              AcceptorInstanceStore instanceStore,
                              Timeouts timeouts,
                              ElectionCredentialsProvider electionCredentialsProvider )
    {
        commonState = new CommonContextState(configuration);
        paxosInstances = new PaxosInstanceStore();

        heartbeatContext = new HeartbeatContextImpl(me, commonState, logging, timeouts, executor );
        learnerContext = new LearnerContextImpl(me, commonState, logging, timeouts, paxosInstances, instanceStore, objectInputStreamFactory, objectOutputStreamFactory, heartbeatContext );
        clusterContext = new ClusterContextImpl(me, commonState, logging, timeouts, executor, objectOutputStreamFactory, objectInputStreamFactory, learnerContext, heartbeatContext);
        electionContext = new ElectionContextImpl( me, commonState, logging, timeouts, roles, clusterContext, heartbeatContext, electionCredentialsProvider );
        proposerContext = new ProposerContextImpl(me, commonState, logging, timeouts, paxosInstances );
        acceptorContext = new AcceptorContextImpl(me, commonState, logging, timeouts, instanceStore);
        atomicBroadcastContext = new AtomicBroadcastContextImpl(me, commonState, logging, timeouts, executor);

        // Heartbeat needs cluster and learner, which in turn needed heartbeat;
        // break the cycle by injecting after construction.
        heartbeatContext.setCircularDependencies( clusterContext, learnerContext );
    }

    // Private snapshot constructor: all sub-contexts already copied and wired.
    private MultiPaxosContext( ProposerContextImpl proposerContext, AcceptorContextImpl acceptorContext,
                               LearnerContextImpl learnerContext, HeartbeatContextImpl heartbeatContext,
                               ElectionContextImpl electionContext, AtomicBroadcastContextImpl atomicBroadcastContext,
                               CommonContextState commonState, PaxosInstanceStore paxosInstances,
                               ClusterContextImpl clusterContext )
    {
        this.clusterContext = clusterContext;
        this.proposerContext = proposerContext;
        this.acceptorContext = acceptorContext;
        this.learnerContext = learnerContext;
        this.heartbeatContext = heartbeatContext;
        this.electionContext = electionContext;
        this.atomicBroadcastContext = atomicBroadcastContext;
        this.commonState = commonState;
        this.paxosInstances = paxosInstances;
    }

    public ClusterContext getClusterContext()
    {
        return clusterContext;
    }

    public ProposerContext getProposerContext()
    {
        return proposerContext;
    }

    public AcceptorContext getAcceptorContext()
    {
        return acceptorContext;
    }

    public LearnerContext getLearnerContext()
    {
        return learnerContext;
    }

    public HeartbeatContext getHeartbeatContext()
    {
        return heartbeatContext;
    }

    public ElectionContext getElectionContext()
    {
        return electionContext;
    }

    public AtomicBroadcastContextImpl getAtomicBroadcastContext()
    {
        return atomicBroadcastContext;
    }

    /** Create a state snapshot. The snapshot will not duplicate services, and expects the caller to duplicate
     * {@link AcceptorInstanceStore}, since that is externally provided. */
    public MultiPaxosContext snapshot(Logging logging, Timeouts timeouts, Executor executor,
                                      AcceptorInstanceStore instanceStore,
                                      ObjectInputStreamFactory objectInputStreamFactory,
                                      ObjectOutputStreamFactory objectOutputStreamFactory,
                                      ElectionCredentialsProvider electionCredentialsProvider)
    {
        // Snapshot in dependency order, mirroring the constructor's wiring.
        CommonContextState commonStateSnapshot = commonState.snapshot(logging.getMessagesLog( ClusterConfiguration.class ) );
        PaxosInstanceStore paxosInstancesSnapshot = paxosInstances.snapshot();
        HeartbeatContextImpl snapshotHeartbeatContext =
                heartbeatContext.snapshot( commonStateSnapshot, logging, timeouts, executor );
        LearnerContextImpl snapshotLearnerContext =
                learnerContext.snapshot( commonStateSnapshot, logging, timeouts, paxosInstancesSnapshot, instanceStore,
                        objectInputStreamFactory, objectOutputStreamFactory, snapshotHeartbeatContext );
        ClusterContextImpl snapshotClusterContext =
                clusterContext.snapshot( commonStateSnapshot, logging, timeouts, executor, objectOutputStreamFactory,
                        objectInputStreamFactory, snapshotLearnerContext, snapshotHeartbeatContext );
        ElectionContextImpl snapshotElectionContext =
                electionContext.snapshot( commonStateSnapshot, logging, timeouts, snapshotClusterContext,
                        snapshotHeartbeatContext, electionCredentialsProvider );
        ProposerContextImpl snapshotProposerContext =
                proposerContext.snapshot( commonStateSnapshot, logging, timeouts, paxosInstancesSnapshot );
        AcceptorContextImpl snapshotAcceptorContext =
                acceptorContext.snapshot( commonStateSnapshot, logging, timeouts, instanceStore );
        AtomicBroadcastContextImpl snapshotAtomicBroadcastContext =
                atomicBroadcastContext.snapshot( commonStateSnapshot, logging, timeouts, executor );

        // Re-wire the snapshot's circular dependencies, same as the constructor.
        snapshotHeartbeatContext.setCircularDependencies( snapshotClusterContext, snapshotLearnerContext );

        return new MultiPaxosContext( snapshotProposerContext, snapshotAcceptorContext, snapshotLearnerContext,
                snapshotHeartbeatContext, snapshotElectionContext, snapshotAtomicBroadcastContext, commonStateSnapshot,
                paxosInstancesSnapshot, snapshotClusterContext
        );
    }

    @Override
    public boolean equals( Object o )
    {
        if ( this == o )
        {
            return true;
        }
        if ( o == null || getClass() != o.getClass() )
        {
            return false;
        }

        MultiPaxosContext that = (MultiPaxosContext) o;

        if ( !acceptorContext.equals( that.acceptorContext ) )
        {
            return false;
        }
        if ( !atomicBroadcastContext.equals( that.atomicBroadcastContext ) )
        {
            return false;
        }
        if ( !clusterContext.equals( that.clusterContext ) )
        {
            return false;
        }
        if ( !commonState.equals( that.commonState ) )
        {
            return false;
        }
        if ( !electionContext.equals( that.electionContext ) )
        {
            return false;
        }
        if ( !heartbeatContext.equals( that.heartbeatContext ) )
        {
            return false;
        }
        if ( !learnerContext.equals( that.learnerContext ) )
        {
            return false;
        }
        if ( !paxosInstances.equals( that.paxosInstances ) )
        {
            return false;
        }
        if ( !proposerContext.equals( that.proposerContext ) )
        {
            return false;
        }

        return true;
    }

    @Override
    public int hashCode()
    {
        int result = clusterContext.hashCode();
        result = 31 * result + proposerContext.hashCode();
        result = 31 * result + acceptorContext.hashCode();
        result = 31 * result + learnerContext.hashCode();
        result = 31 * result + heartbeatContext.hashCode();
        result = 31 * result + electionContext.hashCode();
        result = 31 * result + atomicBroadcastContext.hashCode();
        result = 31 * result + commonState.hashCode();
        result = 31 * result + paxosInstances.hashCode();
        return result;
    }
}
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_protocol_atomicbroadcast_multipaxos_context_MultiPaxosContext.java
|
3,885
|
{
@Override
public int defaultPort()
{
return 5001;
}
@Override
public int port()
{
return conf.get( ClusterSettings.cluster_server ).getPort();
}
}, networkReceiver, new DevNullLoggingService()));
| false
|
enterprise_cluster_src_test_java_org_neo4j_cluster_com_message_NetworkSenderReceiverTest.java
|
3,886
|
/**
 * Test harness wrapping a {@link NetworkReceiver}/{@link NetworkSender} pair
 * behind a {@link LifeSupport} lifecycle. Records (via {@code processedMessage})
 * whether any message was sent or received, and counts down the given latch
 * on each received message so tests can await delivery.
 */
private static class Server
    implements Lifecycle, MessageProcessor
{
    private final NetworkReceiver networkReceiver;
    private final NetworkSender networkSender;

    private final LifeSupport life = new LifeSupport();
    // Set to true when this server either sends or receives a message.
    private AtomicBoolean processedMessage = new AtomicBoolean();

    private Server( final CountDownLatch latch, final Map<String, String> config )
    {
        final Config conf = new Config( config, ClusterSettings.class );
        networkReceiver = life.add(new NetworkReceiver(new NetworkReceiver.Configuration()
        {
            @Override
            public HostnamePort clusterServer()
            {
                return conf.get( ClusterSettings.cluster_server );
            }

            @Override
            public int defaultPort()
            {
                return 5001;
            }

            @Override
            public String name()
            {
                return null;
            }
        }, new DevNullLoggingService()));

        networkSender = life.add(new NetworkSender(new NetworkSender.Configuration()
        {
            @Override
            public int defaultPort()
            {
                return 5001;
            }

            @Override
            public int port()
            {
                return conf.get( ClusterSettings.cluster_server ).getPort();
            }
        }, networkReceiver, new DevNullLoggingService()));

        // Register the receive hook at start time, once the receiver is live.
        life.add( new LifecycleAdapter()
        {
            @Override
            public void start() throws Throwable
            {
                networkReceiver.addMessageProcessor( new MessageProcessor()
                {
                    @Override
                    public boolean process( Message<? extends MessageType> message )
                    {
                        // server receives a message
                        processedMessage.set(true);
                        latch.countDown();
                        return true;
                    }
                } );
            }
        } );
    }

    @Override
    public void init() throws Throwable
    {
    }

    @Override
    public void start() throws Throwable
    {
        life.start();
    }

    @Override
    public void stop() throws Throwable
    {
        life.stop();
    }

    @Override
    public void shutdown() throws Throwable
    {
    }

    @Override
    public boolean process( Message<? extends MessageType> message )
    {
        // server sends a message
        this.processedMessage.set(true);
        return networkSender.process( message );
    }

    /** @return true once any message has been sent or received by this server. */
    public boolean processedMessage()
    {
        return this.processedMessage.get();
    }
}
| false
|
enterprise_cluster_src_test_java_org_neo4j_cluster_com_message_NetworkSenderReceiverTest.java
|
3,887
|
public class HeartbeatContextTest
{
// Ids of the three cluster members used throughout these tests.
private static InstanceId[] instanceIds = new InstanceId[]{
    new InstanceId( 1 ),
    new InstanceId( 2 ),
    new InstanceId( 3 )
};

// Matching member URIs, index-aligned with instanceIds.
private static String[] initialHosts = new String[]{
    "cluster://localhost:5001",
    "cluster://localhost:5002",
    "cluster://localhost:5003"
};

// Heartbeat context under test, obtained from a MultiPaxosContext in setup().
private HeartbeatContext toTest;
// Mocked cluster context representing "us" (instance 1).
private ClusterContext context;
@Before
public void setup()
{
    // Build a three-member configuration backing the contexts.
    Map<InstanceId, URI> members = new HashMap<InstanceId, URI>( );
    for ( int i = 0; i < instanceIds.length; i++ )
    {
        members.put( instanceIds[i], URI.create( initialHosts[i] ) );
    }
    ClusterConfiguration config = new ClusterConfiguration( "clusterName", StringLogger.DEV_NULL, initialHosts );
    config.setMembers( members );

    // Mocked cluster context field: "we" are instance 1.
    context = mock( ClusterContext.class );
    when( context.getConfiguration() ).thenReturn( config );
    when( context.getMyId() ).thenReturn( instanceIds[0] );

    // Logging mock must hand out a StringLogger for any requested class.
    Logging logging = Mockito.mock( Logging.class );
    when( logging.getMessagesLog( Matchers.<Class>any() ) ).thenReturn( mock( StringLogger.class) );

    // NOTE(review): this local deliberately shadows the ClusterContext field
    // of the same name; only the field is used by testSuspicions().
    MultiPaxosContext context = new MultiPaxosContext( instanceIds[0], Iterables.<ElectionRole, ElectionRole>iterable(
            new ElectionRole( "coordinator" ) ), config,
            Mockito.mock( Executor.class ), logging,
            Mockito.mock( ObjectInputStreamFactory.class), Mockito.mock( ObjectOutputStreamFactory.class),
            Mockito.mock( AcceptorInstanceStore.class), Mockito.mock( Timeouts.class),
            mock( ElectionCredentialsProvider.class) );
    toTest = context.getHeartbeatContext();
}
@Test
public void testSaneInitialState()
{
// In config, not suspected yet
assertFalse( toTest.alive( instanceIds[0] ) );
// Not in config
assertFalse( toTest.alive( new InstanceId( 4 ) ) );
// By default, instances start off as alive
assertEquals( instanceIds.length, Iterables.count( toTest.getAlive() ) );
assertEquals( 0, toTest.getFailed().size() );
for ( InstanceId initialHost : instanceIds )
{
assertFalse( toTest.isFailed( initialHost ) );
}
}
@Test
public void testSuspicions()
{
InstanceId suspect = instanceIds[1];
toTest.suspect( suspect );
assertEquals( Collections.singleton( suspect ), toTest.getSuspicionsFor( context.getMyId() ) );
assertEquals( Collections.singletonList( context.getMyId() ), toTest.getSuspicionsOf( suspect ) );
// Being suspected by just one (us) is not enough
assertFalse( toTest.isFailed( suspect ) );
assertTrue( toTest.alive( suspect ) ); // This resets the suspicion above
// If we suspect an instance twice in a row, it shouldn't change its status in any way.
toTest.suspect( suspect );
toTest.suspect( suspect );
assertEquals( Collections.singleton( suspect ), toTest.getSuspicionsFor( context.getMyId() ) );
assertEquals( Collections.singletonList( context.getMyId() ), toTest.getSuspicionsOf( suspect ) );
assertFalse( toTest.isFailed( suspect ) );
assertTrue( toTest.alive( suspect ) );
// The other one sends suspicions too
InstanceId newSuspiciousBastard = instanceIds[2];
toTest.suspicions( newSuspiciousBastard, Collections.singleton( suspect ) );
toTest.suspect( suspect );
// Now two instances suspect it, it should be reported failed
assertEquals( Collections.singleton( suspect ), toTest.getSuspicionsFor( context.getMyId() ) );
assertEquals( Collections.singleton( suspect ), toTest.getSuspicionsFor( newSuspiciousBastard ) );
List<InstanceId> suspiciousBastards = new ArrayList<InstanceId>( 2 );
suspiciousBastards.add( context.getMyId() );
suspiciousBastards.add( newSuspiciousBastard );
assertEquals( suspiciousBastards, toTest.getSuspicionsOf( suspect ) );
assertTrue( toTest.isFailed( suspect ) );
assertTrue( toTest.alive( suspect ) );
}
@Test
public void testFailedInstanceBecomingAlive()
{
InstanceId suspect = instanceIds[1];
InstanceId newSuspiciousBastard = instanceIds[2];
toTest.suspicions( newSuspiciousBastard, Collections.singleton( suspect ) );
toTest.suspect( suspect );
// Just make sure
assertTrue( toTest.isFailed( suspect ) );
// Ok, here it is. We received a heartbeat, so it is alive.
toTest.alive( suspect );
// It must no longer be failed
assertFalse( toTest.isFailed( suspect ) );
// Simulate us stopping receiving heartbeats again
toTest.suspect( suspect );
assertTrue( toTest.isFailed( suspect ) );
// Assume the other guy started receiving heartbeats first
toTest.suspicions( newSuspiciousBastard, Collections.<InstanceId>emptySet() );
assertFalse( toTest.isFailed( suspect ) );
}
/**
* Tests the following scenario:
* Instance A (the one this test simulates) sees instance C down. B agrees.
* Instance A sees instance B down.
* Instance C starts responding again.
* Instance A should now consider C alive.
*/
@Test
public void testOneInstanceComesAliveAfterAllOtherFail()
{
InstanceId instanceB = instanceIds[1];
InstanceId instanceC = instanceIds[2];
// Both A and B consider C down
toTest.suspect( instanceC );
toTest.suspicions( instanceB, Collections.singleton( instanceC ) );
assertTrue( toTest.isFailed( instanceC ) );
// A sees B as down
toTest.suspect( instanceB );
assertTrue( toTest.isFailed( instanceB ) );
// C starts responding again
assertTrue( toTest.alive( instanceC ) );
assertFalse( toTest.isFailed( instanceC ) );
}
@Test
public void shouldConsultSuspicionsOnlyFromCurrentClusterMembers() throws Exception
{
// Given
InstanceId notInCluster = new InstanceId( -1 ); // backup, for example
toTest.suspicions( notInCluster, Iterables.toSet( Iterables.<InstanceId, InstanceId>iterable( instanceIds[1] ) ) );
// When
List<InstanceId> suspicions = toTest.getSuspicionsOf ( instanceIds[1] );
// Then
assertThat( suspicions.size(), CoreMatchers.equalTo( 0 ) );
}
}
| false
|
enterprise_cluster_src_test_java_org_neo4j_cluster_protocol_heartbeat_HeartbeatContextTest.java
|
3,888
|
/**
 * Server side of the online-backup protocol. Exposes
 * {@code TheBackupInterface} over the wire with a fixed configuration
 * (3 concurrent transactions, 4 MB frames) on the given host/port.
 */
class BackupServer extends Server<TheBackupInterface, Object>
{
static final byte PROTOCOL_VERSION = 1;
// Request type lookup by wire id; relies on enum ordinal == wire id.
private final BackupRequestType[] contexts = BackupRequestType.values();
static int DEFAULT_PORT = 6362;
// Frame/chunk size: 4 megabytes.
static final int FRAME_LENGTH = Protocol.MEGA * 4;
public BackupServer( TheBackupInterface requestTarget, final HostnamePort server,
Logging logging, Monitors monitors ) throws IOException
{
super( requestTarget, new Configuration()
{
@Override
public long getOldChannelThreshold()
{
// Channels idle longer than the client read timeout are considered stale.
return Client.DEFAULT_READ_RESPONSE_TIMEOUT_SECONDS * 1000;
}
@Override
public int getMaxConcurrentTransactions()
{
return 3;
}
@Override
public int getChunkSize()
{
return FRAME_LENGTH;
}
@Override
public HostnamePort getServerAddress()
{
return server;
}
}, logging, FRAME_LENGTH, PROTOCOL_VERSION,
TxChecksumVerifier.ALWAYS_MATCH, SYSTEM_CLOCK, monitors );
}
@Override
protected void responseWritten( RequestType<TheBackupInterface> type, Channel channel,
RequestContext context )
{
// No bookkeeping needed after a response is written.
}
@Override
protected RequestType<TheBackupInterface> getRequestContext( byte id )
{
return contexts[id];
}
@Override
protected void finishOffChannel( Channel channel, RequestContext context )
{
// No per-channel state to clean up.
}
}
| false
|
enterprise_backup_src_main_java_org_neo4j_backup_BackupServer.java
|
3,889
|
/**
 * Extension point for resolving backup source URIs. Implementations are
 * discovered via the {@code Service} loader by name and translate a
 * user-supplied URI into a concrete host/port to back up from.
 */
public abstract class BackupExtensionService extends Service
{
public BackupExtensionService( String name )
{
super( name );
}
/**
 * The source specific target to valid backup host translation method.
 *
 * @param from The URI as passed in the command line
 * @param arguments all arguments to the backup command
 * @return A URI where the scheme is the service's name and there exist host
 * and port parts that point to a backup source.
 */
public abstract URI resolve( URI from, Args arguments, Logging logging );
}
| false
|
enterprise_backup_src_main_java_org_neo4j_backup_BackupExtensionService.java
|
3,890
|
/**
 * Integration test for running the backup tool (in a separate JVM) against an
 * embedded database, on both the default and a custom backup port.
 * Skipped entirely on Windows.
 */
public class BackupEmbeddedIT
{
public static final File PATH = TargetDirectory.forTest( BackupEmbeddedIT.class ).cleanDirectory( "db" );
public static final File BACKUP_PATH = TargetDirectory.forTest( BackupEmbeddedIT.class ).cleanDirectory( "backup" +
"-db" );
private GraphDatabaseService db;
private String ip;
@Before
public void before() throws Exception
{
if ( osIsWindows() ) return;
FileUtils.deleteDirectory( PATH );
FileUtils.deleteDirectory( BACKUP_PATH );
ip = InetAddress.getLocalHost().getHostAddress();
}
/**
 * Creates a tiny graph (two nodes, one KNOWS relationship) and returns a
 * representation of the store for later comparison with the backup.
 */
@SuppressWarnings("deprecation")
public static DbRepresentation createSomeData( GraphDatabaseService db )
{
Transaction tx = db.beginTx();
Node node = db.createNode();
node.setProperty( "name", "Neo" );
db.createNode().createRelationshipTo( node, DynamicRelationshipType.withName( "KNOWS" ) );
tx.success();
tx.finish();
return DbRepresentation.of( db );
}
@After
public void after()
{
if ( osIsWindows() ) return;
// Guard against db never having been created (e.g. a failure before
// startDb() ran); shutting down null would throw an NPE here and mask
// the original test failure.
if ( db != null )
{
db.shutdown();
}
}
@Test
public void makeSureBackupCanBePerformedWithDefaultPort() throws Exception
{
if ( osIsWindows() ) return;
startDb( null );
// Full backup succeeds on the default port...
assertEquals(
0,
runBackupToolFromOtherJvmToGetExitCode( "-from",
BackupTool.DEFAULT_SCHEME + "://"+ ip, "-to",
BACKUP_PATH.getPath() ) );
assertEquals( DbRepresentation.of( db ), DbRepresentation.of( BACKUP_PATH ) );
createSomeData( db );
// ...and so does a subsequent incremental backup.
assertEquals(
0,
runBackupToolFromOtherJvmToGetExitCode( "-from", BackupTool.DEFAULT_SCHEME + "://"+ ip,
"-to", BACKUP_PATH.getPath() ) );
assertEquals( DbRepresentation.of( db ), DbRepresentation.of( BACKUP_PATH ) );
}
@Test
public void makeSureBackupCanBePerformedWithCustomPort() throws Exception
{
if ( osIsWindows() ) return;
int port = 4445;
startDb( "" + port );
// Backup against the default port must fail (exit code 1) when the
// server listens on a custom port.
assertEquals(
1,
runBackupToolFromOtherJvmToGetExitCode( "-from",
BackupTool.DEFAULT_SCHEME + "://" + ip, "-to",
BACKUP_PATH.getPath() ) );
assertEquals(
0,
runBackupToolFromOtherJvmToGetExitCode( "-from",
BackupTool.DEFAULT_SCHEME + "://"+ ip +":" + port,
"-to", BACKUP_PATH.getPath() ) );
assertEquals( DbRepresentation.of( db ), DbRepresentation.of( BACKUP_PATH ) );
createSomeData( db );
assertEquals(
0,
runBackupToolFromOtherJvmToGetExitCode( "-from", BackupTool.DEFAULT_SCHEME + "://"+ ip +":"
+ port, "-to",
BACKUP_PATH.getPath() ) );
assertEquals( DbRepresentation.of( db ), DbRepresentation.of( BACKUP_PATH ) );
}
// Starts an embedded database with online backup enabled; backupPort == null
// means "use the default backup server address".
private void startDb( String backupPort )
{
GraphDatabaseBuilder dbBuild = new GraphDatabaseFactory().newEmbeddedDatabaseBuilder( PATH
.getPath() ).
setConfig( OnlineBackupSettings.online_backup_enabled, Settings.TRUE );
if(backupPort != null)
{
dbBuild = dbBuild.setConfig( OnlineBackupSettings.online_backup_server, ip +":" + backupPort );
}
db = dbBuild.newGraphDatabase();
createSomeData( db );
}
/**
 * Launches the backup tool in a fresh JVM (same classpath) and returns its
 * exit code, forwarding its output streams.
 */
public static int runBackupToolFromOtherJvmToGetExitCode( String... args )
throws Exception
{
List<String> allArgs = new ArrayList<String>( Arrays.asList( "java", "-cp", System.getProperty( "java.class.path" ), BackupTool.class.getName() ) );
allArgs.addAll( Arrays.asList( args ) );
Process process = Runtime.getRuntime().exec( allArgs.toArray( new String[allArgs.size()] ));
return new ProcessStreamHandler( process, false ).waitForResult();
}
}
| false
|
enterprise_backup_src_test_java_org_neo4j_backup_BackupEmbeddedIT.java
|
3,891
|
{
public Response<Void> call( TheBackupInterface master, RequestContext context,
ChannelBuffer input, ChannelBuffer target )
{
return master.incrementalBackup( context );
}
}, Protocol.VOID_SERIALIZER )
| false
|
enterprise_backup_src_main_java_org_neo4j_backup_BackupClient.java
|
3,892
|
{
public Response<Void> call( TheBackupInterface master, RequestContext context,
ChannelBuffer input, ChannelBuffer target )
{
return master.fullBackup( new ToNetworkStoreWriter( target, new Monitors() ) );
}
}, Protocol.VOID_SERIALIZER ),
| false
|
enterprise_backup_src_main_java_org_neo4j_backup_BackupClient.java
|
3,893
|
/**
 * Client side of the online-backup protocol. Each request type (full or
 * incremental backup) is an enum constant whose wire id is its ordinal and
 * whose {@code TargetCaller} dispatches the request on the server side.
 */
class BackupClient extends Client<TheBackupInterface> implements TheBackupInterface
{
public BackupClient( String hostNameOrIp, int port, Logging logging, Monitors monitors, StoreId storeId )
{
// 40 s read timeout; frame length doubles as both chunk and frame size.
super( hostNameOrIp, port, logging, monitors, storeId, FRAME_LENGTH, PROTOCOL_VERSION, 40 * 1000,
Client.DEFAULT_MAX_NUMBER_OF_CONCURRENT_CHANNELS_PER_CLIENT,
FRAME_LENGTH );
}
// Streams the full store from the server into the given writer.
public Response<Void> fullBackup( StoreWriter storeWriter )
{
return sendRequest( BackupRequestType.FULL_BACKUP, RequestContext.EMPTY,
Protocol.EMPTY_SERIALIZER, new Protocol.FileStreamsDeserializer( storeWriter ) );
}
// Requests only the transactions missing since the context's last tx id.
public Response<Void> incrementalBackup( RequestContext context )
{
return sendRequest( BackupRequestType.INCREMENTAL_BACKUP, context, Protocol.EMPTY_SERIALIZER,
Protocol.VOID_DESERIALIZER );
}
@Override
protected boolean shouldCheckStoreId( RequestType<TheBackupInterface> type )
{
// A full backup creates the store, so there is no store id to match yet.
return type != BackupRequestType.FULL_BACKUP;
}
public static enum BackupRequestType implements RequestType<TheBackupInterface>
{
FULL_BACKUP( new TargetCaller<TheBackupInterface, Void>()
{
public Response<Void> call( TheBackupInterface master, RequestContext context,
ChannelBuffer input, ChannelBuffer target )
{
return master.fullBackup( new ToNetworkStoreWriter( target, new Monitors() ) );
}
}, Protocol.VOID_SERIALIZER ),
INCREMENTAL_BACKUP( new TargetCaller<TheBackupInterface, Void>()
{
public Response<Void> call( TheBackupInterface master, RequestContext context,
ChannelBuffer input, ChannelBuffer target )
{
return master.incrementalBackup( context );
}
}, Protocol.VOID_SERIALIZER )
;
@SuppressWarnings( "rawtypes" )
private final TargetCaller masterCaller;
@SuppressWarnings( "rawtypes" )
private final ObjectSerializer serializer;
@SuppressWarnings( "rawtypes" )
private BackupRequestType( TargetCaller masterCaller, ObjectSerializer serializer )
{
this.masterCaller = masterCaller;
this.serializer = serializer;
}
@SuppressWarnings( "rawtypes" )
public TargetCaller getTargetCaller()
{
return masterCaller;
}
@SuppressWarnings( "rawtypes" )
public ObjectSerializer getObjectSerializer()
{
return serializer;
}
// Wire id: must stay in sync with the ordinal-indexed lookup on the server.
public byte id()
{
return (byte) ordinal();
}
}
}
| false
|
enterprise_backup_src_main_java_org_neo4j_backup_BackupClient.java
|
3,894
|
/**
 * A {@link Writer} decorator that lets an {@code Adversary} inject failures
 * before delegating each operation to the wrapped writer. Used in tests to
 * simulate I/O faults at arbitrary points.
 */
@SuppressWarnings( "unchecked" )
public class AdversarialWriter extends Writer
{
private final Writer writer;
private final Adversary adversary;
public AdversarialWriter( Writer writer, Adversary adversary )
{
this.writer = writer;
this.adversary = adversary;
}
@Override
public void write( int c ) throws IOException
{
adversary.injectFailure( IOException.class );
writer.write( c );
}
@Override
public void write( char[] cbuf ) throws IOException
{
adversary.injectFailure( IOException.class );
writer.write( cbuf );
}
@Override
public void write( char[] cbuf, int off, int len ) throws IOException
{
// Fix: this overload previously bypassed the adversary entirely, unlike
// every other write method, leaving a hole in fault injection coverage.
adversary.injectFailure( IndexOutOfBoundsException.class, IOException.class );
writer.write( cbuf, off, len );
}
@Override
public void write( String str ) throws IOException
{
adversary.injectFailure(
StringIndexOutOfBoundsException.class, IOException.class,
IndexOutOfBoundsException.class, ArrayStoreException.class,
NullPointerException.class );
writer.write( str );
}
@Override
public void write( String str, int off, int len ) throws IOException
{
adversary.injectFailure(
StringIndexOutOfBoundsException.class, IOException.class,
IndexOutOfBoundsException.class, ArrayStoreException.class,
NullPointerException.class );
writer.write( str, off, len );
}
@Override
public Writer append( CharSequence csq ) throws IOException
{
adversary.injectFailure(
StringIndexOutOfBoundsException.class, IOException.class,
IndexOutOfBoundsException.class, ArrayStoreException.class,
NullPointerException.class );
return writer.append( csq );
}
@Override
public Writer append( CharSequence csq, int start, int end ) throws IOException
{
adversary.injectFailure(
StringIndexOutOfBoundsException.class, IOException.class,
IndexOutOfBoundsException.class, ArrayStoreException.class,
NullPointerException.class );
return writer.append( csq, start, end );
}
@Override
public Writer append( char c ) throws IOException
{
adversary.injectFailure( IOException.class );
return writer.append( c );
}
@Override
public void flush() throws IOException
{
adversary.injectFailure( IOException.class );
writer.flush();
}
@Override
public void close() throws IOException
{
adversary.injectFailure( IOException.class );
writer.close();
}
}
| false
|
community_kernel_src_test_java_org_neo4j_adversaries_fs_AdversarialWriter.java
|
3,895
|
/**
 * A {@link Reader} decorator that lets an {@code Adversary} inject failures or
 * "mischief" (short reads) before delegating to the wrapped reader.
 */
@SuppressWarnings( "unchecked" )
public class AdversarialReader extends Reader
{
private final Reader reader;
private final Adversary adversary;
public AdversarialReader( Reader reader, Adversary adversary )
{
this.reader = reader;
this.adversary = adversary;
}
@Override
public int read( CharBuffer target ) throws IOException
{
if ( adversary.injectFailureOrMischief(
IOException.class, BufferOverflowException.class, IndexOutOfBoundsException.class ) )
{
// Mischief: read into a duplicate with half the limit to force a short read.
CharBuffer dup = target.duplicate();
dup.limit( Math.max( target.limit() / 2, 1 ) );
return reader.read( dup );
}
return reader.read( target );
}
@Override
public int read() throws IOException
{
adversary.injectFailure( IOException.class );
return reader.read();
}
@Override
public int read( char[] cbuf ) throws IOException
{
if ( adversary.injectFailureOrMischief( IOException.class ) )
{
char[] dup = new char[ Math.max( cbuf.length / 2, 1 ) ];
int read = reader.read( dup );
// Fix: at end of stream read() returns -1, and arraycopy with a
// negative length throws IndexOutOfBoundsException instead of
// reporting EOF. Only copy when something was actually read.
if ( read > 0 )
{
System.arraycopy( dup, 0, cbuf, 0, read );
}
return read;
}
return reader.read( cbuf );
}
@Override
public int read( char[] cbuf, int off, int len ) throws IOException
{
if ( adversary.injectFailureOrMischief( IOException.class ) )
{
// Mischief: request at most half the wanted length (short read).
return reader.read( cbuf, off, Math.max( len / 2, 1 ) );
}
return reader.read( cbuf, off, len );
}
@Override
public long skip( long n ) throws IOException
{
adversary.injectFailure( IllegalArgumentException.class, IOException.class );
return reader.skip( n );
}
@Override
public boolean ready() throws IOException
{
adversary.injectFailure( IOException.class );
return reader.ready();
}
@Override
public boolean markSupported()
{
adversary.injectFailure();
return reader.markSupported();
}
@Override
public void mark( int readAheadLimit ) throws IOException
{
adversary.injectFailure( IOException.class );
reader.mark( readAheadLimit );
}
@Override
public void reset() throws IOException
{
adversary.injectFailure( IOException.class );
reader.reset();
}
@Override
public void close() throws IOException
{
adversary.injectFailure( IOException.class );
reader.close();
}
}
| false
|
community_kernel_src_test_java_org_neo4j_adversaries_fs_AdversarialReader.java
|
3,896
|
/**
 * An {@link OutputStream} decorator that lets an {@code Adversary} inject
 * failures before delegating each operation to the wrapped stream.
 */
@SuppressWarnings( "unchecked" )
public class AdversarialOutputStream extends OutputStream
{
private final OutputStream outputStream;
private final Adversary adversary;
public AdversarialOutputStream( OutputStream outputStream, Adversary adversary )
{
this.outputStream = outputStream;
this.adversary = adversary;
}
@Override
public void write( int b ) throws IOException
{
adversary.injectFailure( IOException.class );
outputStream.write( b );
}
@Override
public void write( byte[] b ) throws IOException
{
adversary.injectFailure( NullPointerException.class, IndexOutOfBoundsException.class, IOException.class );
outputStream.write( b );
}
@Override
public void write( byte[] b, int off, int len ) throws IOException
{
adversary.injectFailure( NullPointerException.class, IndexOutOfBoundsException.class, IOException.class );
outputStream.write( b, off, len );
}
@Override
public void flush() throws IOException
{
adversary.injectFailure( IOException.class );
outputStream.flush();
}
@Override
public void close() throws IOException
{
adversary.injectFailure( IOException.class );
outputStream.close();
}
}
| false
|
community_kernel_src_test_java_org_neo4j_adversaries_fs_AdversarialOutputStream.java
|
3,897
|
/**
 * An {@link InputStream} decorator that lets an {@code Adversary} inject
 * failures or "mischief" (short reads) before delegating to the wrapped stream.
 */
@SuppressWarnings( "unchecked" )
public class AdversarialInputStream extends InputStream
{
private final InputStream inputStream;
private final Adversary adversary;
public AdversarialInputStream( InputStream inputStream, Adversary adversary )
{
this.inputStream = inputStream;
this.adversary = adversary;
}
@Override
public int read() throws IOException
{
adversary.injectFailure( IOException.class );
return inputStream.read();
}
@Override
public int read( byte[] b ) throws IOException
{
if ( adversary.injectFailureOrMischief( IOException.class, NullPointerException.class ) )
{
byte[] dup = new byte[Math.max( b.length / 2, 1 )];
int read = inputStream.read( dup );
// Fix: at end of stream read() returns -1, and arraycopy with a
// negative length throws IndexOutOfBoundsException instead of
// reporting EOF. Only copy when something was actually read.
if ( read > 0 )
{
System.arraycopy( dup, 0, b, 0, read );
}
return read;
}
return inputStream.read( b );
}
@Override
public int read( byte[] b, int off, int len ) throws IOException
{
if ( adversary.injectFailureOrMischief(
IOException.class, NullPointerException.class, IndexOutOfBoundsException.class ) )
{
// Mischief: request at most half the wanted length (short read).
int halflen = Math.max( len / 2, 1 );
return inputStream.read( b, off, halflen );
}
return inputStream.read( b, off, len );
}
@Override
public long skip( long n ) throws IOException
{
adversary.injectFailure( IOException.class, NullPointerException.class, IndexOutOfBoundsException.class );
return inputStream.skip( n );
}
@Override
public int available() throws IOException
{
adversary.injectFailure( IOException.class );
return inputStream.available();
}
@Override
public void close() throws IOException
{
adversary.injectFailure( IOException.class );
inputStream.close();
}
@Override
public void mark( int readlimit )
{
adversary.injectFailure();
inputStream.mark( readlimit );
}
@Override
public void reset() throws IOException
{
adversary.injectFailure( IOException.class );
inputStream.reset();
}
@Override
public boolean markSupported()
{
adversary.injectFailure();
return inputStream.markSupported();
}
}
| false
|
community_kernel_src_test_java_org_neo4j_adversaries_fs_AdversarialInputStream.java
|
3,898
|
/**
 * A {@code FileSystemAbstraction} that lets an {@code Adversary} inject
 * failures before delegating to a real {@code DefaultFileSystemAbstraction}.
 * Streams, readers, writers and channels it hands out are themselves wrapped
 * in adversarial decorators.
 */
@SuppressWarnings("unchecked")
public class AdversarialFileSystemAbstraction implements FileSystemAbstraction
{
private final DefaultFileSystemAbstraction delegate;
private final Adversary adversary;
public AdversarialFileSystemAbstraction( Adversary adversary )
{
this.adversary = adversary;
delegate = new DefaultFileSystemAbstraction();
}
// Fix: @Override added consistently to all interface implementations below;
// previously only getOrCreateThirdPartyFileSystem carried it, so signature
// drift against the interface would go unnoticed by the compiler.
@Override
public StoreChannel open( File fileName, String mode ) throws IOException
{
adversary.injectFailure( FileNotFoundException.class, IOException.class, SecurityException.class );
return new AdversarialFileChannel( delegate.open( fileName, mode ), adversary );
}
@Override
public boolean renameFile( File from, File to ) throws IOException
{
adversary.injectFailure( FileNotFoundException.class, SecurityException.class );
return delegate.renameFile( from, to );
}
@Override
public OutputStream openAsOutputStream( File fileName, boolean append ) throws IOException
{
adversary.injectFailure( FileNotFoundException.class, SecurityException.class );
return new AdversarialOutputStream( delegate.openAsOutputStream( fileName, append ), adversary );
}
@Override
public StoreChannel create( File fileName ) throws IOException
{
adversary.injectFailure( FileNotFoundException.class, IOException.class, SecurityException.class );
return new AdversarialFileChannel( delegate.create( fileName ), adversary );
}
@Override
public boolean mkdir( File fileName )
{
adversary.injectFailure( SecurityException.class );
return delegate.mkdir( fileName );
}
@Override
public File[] listFiles( File directory )
{
adversary.injectFailure( SecurityException.class );
return delegate.listFiles( directory );
}
@Override
public Writer openAsWriter( File fileName, String encoding, boolean append ) throws IOException
{
adversary.injectFailure(
UnsupportedEncodingException.class, FileNotFoundException.class, SecurityException.class );
return new AdversarialWriter( delegate.openAsWriter( fileName, encoding, append ), adversary );
}
@Override
public Reader openAsReader( File fileName, String encoding ) throws IOException
{
adversary.injectFailure(
UnsupportedEncodingException.class, FileNotFoundException.class, SecurityException.class );
return new AdversarialReader( delegate.openAsReader( fileName, encoding ), adversary );
}
@Override
public long getFileSize( File fileName )
{
adversary.injectFailure( SecurityException.class );
return delegate.getFileSize( fileName );
}
@Override
public void copyFile( File from, File to ) throws IOException
{
adversary.injectFailure( SecurityException.class, FileNotFoundException.class, IOException.class );
delegate.copyFile( from, to );
}
@Override
public void copyRecursively( File fromDirectory, File toDirectory ) throws IOException
{
adversary.injectFailure( SecurityException.class, IOException.class, NullPointerException.class );
delegate.copyRecursively( fromDirectory, toDirectory );
}
@Override
public boolean deleteFile( File fileName )
{
adversary.injectFailure( SecurityException.class );
return delegate.deleteFile( fileName );
}
@Override
public InputStream openAsInputStream( File fileName ) throws IOException
{
adversary.injectFailure( FileNotFoundException.class, SecurityException.class );
return new AdversarialInputStream( delegate.openAsInputStream( fileName ), adversary );
}
@Override
public void moveToDirectory( File file, File toDirectory ) throws IOException
{
adversary.injectFailure(
SecurityException.class, IllegalArgumentException.class, NotFoundException.class,
NullPointerException.class, IOException.class );
delegate.moveToDirectory( file, toDirectory );
}
@Override
public boolean isDirectory( File file )
{
adversary.injectFailure( SecurityException.class );
return delegate.isDirectory( file );
}
@Override
public boolean fileExists( File fileName )
{
adversary.injectFailure( SecurityException.class );
return delegate.fileExists( fileName );
}
@Override
public void mkdirs( File fileName ) throws IOException
{
adversary.injectFailure( SecurityException.class, IOException.class );
delegate.mkdirs( fileName );
}
@Override
public void deleteRecursively( File directory ) throws IOException
{
adversary.injectFailure( SecurityException.class, NullPointerException.class, IOException.class );
delegate.deleteRecursively( directory );
}
@Override
public FileLock tryLock( File fileName, StoreChannel channel ) throws IOException
{
adversary.injectFailure( SecurityException.class, IOException.class, FileNotFoundException.class );
return delegate.tryLock( fileName, channel );
}
@Override
public <K extends ThirdPartyFileSystem> K getOrCreateThirdPartyFileSystem( Class<K> clazz, Function<Class<K>, K>
creator )
{
// TODO implement 3rd party file systems in AdversarialFSA
return null;
}
}
| false
|
community_kernel_src_test_java_org_neo4j_adversaries_fs_AdversarialFileSystemAbstraction.java
|
3,899
|
/**
 * A {@code StoreFileChannel} decorator that lets an {@code Adversary} inject
 * failures or "mischief" before delegating to the wrapped channel. Mischief on
 * read paths temporarily halves the destination buffer's limit to force a
 * short read, then restores the original limit.
 */
@SuppressWarnings( "unchecked" )
public class AdversarialFileChannel extends StoreFileChannel
{
private final Adversary adversary;
public AdversarialFileChannel( StoreFileChannel channel, Adversary adversary )
{
super( channel );
this.adversary = adversary;
}
@Override
public long write( ByteBuffer[] srcs ) throws IOException
{
adversary.injectFailure( IOException.class );
return super.write( srcs );
}
@Override
public int write( ByteBuffer src, long position ) throws IOException
{
adversary.injectFailure( IOException.class );
return super.write( src, position );
}
@Override
public MappedByteBuffer map( FileChannel.MapMode mode, long position, long size ) throws IOException
{
adversary.injectFailure( IOException.class );
return super.map( mode, position, size );
}
@Override
public long write( ByteBuffer[] srcs, int offset, int length ) throws IOException
{
adversary.injectFailure( IOException.class );
return super.write( srcs, offset, length );
}
@Override
public StoreFileChannel truncate( long size ) throws IOException
{
adversary.injectFailure( IOException.class );
return super.truncate( size );
}
@Override
public StoreFileChannel position( long newPosition ) throws IOException
{
adversary.injectFailure( IOException.class );
return super.position( newPosition );
}
@Override
public int read( ByteBuffer dst, long position ) throws IOException
{
if ( adversary.injectFailureOrMischief( IOException.class ) )
{
// Shrink the limit, do a short read, then restore the limit.
int oldLimit = mischiefLimit( dst );
int read = super.read( dst, position );
dst.limit( oldLimit );
return read;
}
return super.read( dst, position );
}
// Halves the buffer's remaining capacity (at least by one) and returns the
// original limit so the caller can restore it afterwards.
private int mischiefLimit( ByteBuffer buf )
{
int oldLimit = buf.limit();
int newLimit = oldLimit - Math.max( buf.remaining() / 2, 1 );
buf.limit( newLimit );
return oldLimit;
}
@Override
public void force( boolean metaData ) throws IOException
{
adversary.injectFailure( IOException.class );
super.force( metaData );
}
@Override
public int read( ByteBuffer dst ) throws IOException
{
if ( adversary.injectFailureOrMischief( IOException.class ) )
{
int oldLimit = mischiefLimit( dst );
int read = super.read( dst );
dst.limit( oldLimit );
return read;
}
return super.read( dst );
}
@Override
public long read( ByteBuffer[] dsts, int offset, int length ) throws IOException
{
if ( adversary.injectFailureOrMischief( IOException.class ) )
{
// Only the last buffer is shortened for scattering reads.
ByteBuffer lastBuf = dsts[dsts.length - 1];
int oldLimit = mischiefLimit( lastBuf );
long read = super.read( dsts, offset, length );
lastBuf.limit( oldLimit );
return read;
}
return super.read( dsts, offset, length );
}
@Override
public long position() throws IOException
{
adversary.injectFailure( IOException.class );
return super.position();
}
@Override
public FileLock tryLock() throws IOException
{
adversary.injectFailure( IOException.class );
return super.tryLock();
}
@Override
public boolean isOpen()
{
adversary.injectFailure();
return super.isOpen();
}
@Override
public long read( ByteBuffer[] dsts ) throws IOException
{
if ( adversary.injectFailureOrMischief( IOException.class ) )
{
ByteBuffer lastBuf = dsts[dsts.length - 1];
int oldLimit = mischiefLimit( lastBuf );
long read = super.read( dsts );
lastBuf.limit( oldLimit );
return read;
}
return super.read( dsts );
}
@Override
public int write( ByteBuffer src ) throws IOException
{
adversary.injectFailure( IOException.class );
return super.write( src );
}
@Override
public void close() throws IOException
{
adversary.injectFailure( IOException.class );
super.close();
}
@Override
public long size() throws IOException
{
adversary.injectFailure( IOException.class );
return super.size();
}
}
| false
|
community_kernel_src_test_java_org_neo4j_adversaries_fs_AdversarialFileChannel.java
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.