Unnamed: 0
int64 0
6.7k
| func
stringlengths 12
89.6k
| target
bool 2
classes | project
stringlengths 45
151
|
|---|---|---|---|
4,300
|
// Operational state of the Omega failure-detector protocol: periodically
// refreshes this node's state to all peers, acknowledges peer refreshes, and
// runs collection ("read") rounds that gather every peer's registry.
omega
{
    @Override
    public State<?, ?> handle( OmegaContext context, Message<OmegaMessage> message,
            MessageHolder outgoing ) throws Throwable
    {
        switch ( message.getMessageType() )
        {
            case refresh_timeout:
            {
                // Periodic trigger: open a new refresh round and broadcast our
                // current state, tagged with the round id, to every known server.
                int refreshRoundId = context.startRefreshRound();
                for ( URI server : context.getServers() )
                {
                    outgoing.offer( Message.to( OmegaMessage.refresh, server,
                            RefreshPayload.fromState( context.getMyState(),
                                    refreshRoundId ) ) );
                }
            }
            break;
            case refresh:
            {
                // A peer sent us its state. Keep it only if it is strictly newer
                // than what the registry already holds for that host, then ack.
                // NOTE(review): toState(...) appears to return a pair whose
                // other() half is the State — confirm against RefreshPayload.
                org.neo4j.cluster.protocol.omega.state.State state = RefreshPayload.toState(
                        (RefreshPayload) message.getPayload() ).other();
                URI from = new URI( message.getHeader( Message.FROM ) );
                org.neo4j.cluster.protocol.omega.state.State currentForThisHost = context.getRegistry().get( from );
                if ( currentForThisHost == null || currentForThisHost.compareTo( state ) < 0 )
                {
                    context.getRegistry().put( from, state );
                }
                // Ack carries the original payload so the sender can match the round.
                outgoing.offer( Message.to(
                        OmegaMessage.refresh_ack,
                        from,
                        RefreshAckPayload.forRefresh( (RefreshPayload) message
                                .getPayload() ) ) );
            }
            break;
            case refresh_ack:
            {
                RefreshAckPayload ack = message.getPayload();
                // TODO deal with duplicate/corrupted messages here perhaps?
                int refreshRound = ack.round;
                context.ackReceived( refreshRound );
                // Majority of acks => the round succeeded: bump freshness and close it.
                if ( context.getAckCount( refreshRound ) > context.getClusterNodeCount() / 2 )
                {
                    context.getMyState().increaseFreshness();
                    context.roundDone( refreshRound );
                }
            }
            break;
            case round_trip_timeout:
            {
                // A refresh round failed to reach a majority in time: our own view
                // is now expired and we advance to a new epoch serial number.
                context.getMyView().setExpired( true );
                context.getMyState().getEpochNum().increaseSerialNum();
            }
            break;
            case read_timeout:
            {
                // Periodic trigger for the COLLECT phase: ask every server for
                // its registry, tagged with a fresh collection-round id.
                int collectRound = context.startCollectionRound();
                for ( URI server : context.getServers() )
                {
                    outgoing.offer( Message.to(
                            OmegaMessage.collect,
                            server,
                            new CollectPayload( collectRound ) ) );
                }
            }
            break;
            case collect:
            {
                // A peer asks for our registry: reply with a status message that
                // echoes the requester's read number.
                CollectPayload payload = message.getPayload();
                URI from = new URI( message.getHeader( Message.FROM ) );
                int readNum = payload.getReadNum();
                outgoing.offer( Message.to( OmegaMessage.status,
                        from,
                        CollectResponsePayload.fromRegistry( context.getRegistry(), readNum ) ) );
            }
            break;
            case status:
            {
                // A peer answered our collect request: merge its view and, once a
                // majority has answered, finish the collection round.
                CollectResponsePayload payload = message.getPayload();
                URI from = new URI( message.getHeader( Message.FROM ) );
                int readNum = payload.getReadNum();
                context.responseReceivedForRound( readNum, from, CollectResponsePayload.fromPayload( payload ));
                if ( context.getStatusResponsesForRound( readNum ) > context.getClusterNodeCount() / 2 )
                {
                    context.collectionRoundDone( readNum );
                }
            }
            break;
            default:
                throw new IllegalArgumentException( message.getMessageType() +" is unknown" );
        }
        // All handled message types keep us in the omega state.
        return this;
    }
};
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_protocol_omega_OmegaState.java
|
4,301
|
// Placeholder test class for OmegaMessage; no tests have been written yet.
public class OmegaMessageTest
{
}
| false
|
enterprise_cluster_src_test_java_org_neo4j_cluster_protocol_omega_OmegaMessageTest.java
|
4,302
|
/**
 * Tests for {@code OmegaContext}: ordering of epoch numbers and states,
 * refresh-round ack bookkeeping, and the COLLECT-phase view consolidation
 * invariants.
 */
public class OmegaContextTest
{
    // EpochNumbers with distinct serial numbers must sort by serial number.
    @Test
    public void testOrderingOfEpochNumberOnSerialNum()
    {
        List<EpochNumber> toSort = new LinkedList<EpochNumber>();
        // Creates a list in order 5,4,6,3,7,2,8,1,9
        for ( int i = 1; i < 10; i++ )
        {
            // The sign code is lame, but i couldn't figure a branch free way
            EpochNumber epoch = new EpochNumber( 5 + ((i + 1) / 2) * (i % 2 == 0 ? -1 : 1), i );
            toSort.add( epoch );
        }
        Collections.sort( toSort );
        for ( int i = 1; i < toSort.size(); i++ )
        {
            EpochNumber prev = toSort.get( i - 1 );
            EpochNumber current = toSort.get( i );
            assertTrue( prev.getSerialNum() < current.getSerialNum() );
        }
    }
    // With equal serial numbers, EpochNumbers must sort by process id (tie-break).
    @Test
    public void testOrderingOfEpochNumberOnProcessId()
    {
        List<EpochNumber> toSort = new LinkedList<EpochNumber>();
        // Creates a list in order 5,4,6,3,7,2,8,1,9
        for ( int i = 1; i < 10; i++ )
        {
            EpochNumber epoch = new EpochNumber( 1, 5 + ((i + 1) / 2) * (i % 2 == 0 ? -1 : 1) );
            toSort.add( epoch );
        }
        Collections.sort( toSort );
        for ( int i = 1; i < toSort.size(); i++ )
        {
            EpochNumber prev = toSort.get( i - 1 );
            EpochNumber current = toSort.get( i );
            assertTrue( prev.getProcessId() < current.getProcessId() );
        }
    }
    // Two default-constructed epochs compare as equal.
    @Test
    public void testOrderingEqualEpochs()
    {
        assertEquals( 0, new EpochNumber().compareTo( new EpochNumber() ) );
    }
    // States with equal freshness must sort by their epoch's serial number.
    @Test
    public void testOrderingOfStateOnEpochNum()
    {
        List<State> toSort = new LinkedList<State>();
        // Creates a list in order 5,4,6,3,7,2,8,1,9
        for ( int i = 1; i < 10; i++ )
        {
            EpochNumber epoch = new EpochNumber( 5 + ((i + 1) / 2) * (i % 2 == 0 ? -1 : 1), 1 );
            State state = new State( epoch, 1 );
            toSort.add( state );
        }
        Collections.sort( toSort );
        for ( int i = 1; i < toSort.size(); i++ )
        {
            State prev = toSort.get( i - 1 );
            State current = toSort.get( i );
            assertTrue( prev.getEpochNum().getSerialNum() < current.getEpochNum().getSerialNum() );
        }
    }
    // A refresh round: unknown rounds report -1 acks, acks accumulate,
    // and finishing the round discards its bookkeeping.
    @Test
    public void testBasicRefreshRound()
    {
        OmegaContext context = new OmegaContext( Mockito.mock( ClusterContext.class ) );
        assertEquals( -1, context.getAckCount( 0 ) );
        int refreshRoundOne = context.startRefreshRound();
        assertEquals( 0, context.getAckCount( refreshRoundOne ) );
        context.ackReceived( refreshRoundOne );
        assertEquals( 1, context.getAckCount( refreshRoundOne ) );
        context.roundDone( refreshRoundOne );
        assertEquals( -1, context.getAckCount( refreshRoundOne ) );
    }
    // Two overlapping refresh rounds track their ack counts independently.
    @Test
    public void testTwoParallelRefreshRounds()
    {
        OmegaContext context = new OmegaContext( Mockito.mock( ClusterContext.class ) );
        int refreshRoundOne = context.startRefreshRound();
        context.ackReceived( refreshRoundOne );
        int refreshRoundTwo = context.startRefreshRound();
        context.ackReceived( refreshRoundOne );
        context.ackReceived( refreshRoundTwo );
        assertEquals( 2, context.getAckCount( refreshRoundOne ) );
        assertEquals( 1, context.getAckCount( refreshRoundTwo ) );
        context.roundDone( refreshRoundOne );
        assertEquals( -1, context.getAckCount( refreshRoundOne ) );
        assertEquals( 1, context.getAckCount( refreshRoundTwo ) );
    }
    // Collection rounds number from 1; the first round starts from an empty
    // previous view, and the next round snapshots the views built so far.
    @Test
    public void testFirstAndSecondCollectionRound() throws URISyntaxException
    {
        OmegaContext context = new OmegaContext( Mockito.mock( ClusterContext.class ) );
        int firstCollectionRound = context.startCollectionRound();
        assertEquals( 1, firstCollectionRound );
        assertEquals( Collections.emptyMap(), context.getPreviousViewForCollectionRound( firstCollectionRound ) );
        assertEquals( 0, context.getStatusResponsesForRound( firstCollectionRound ) );
        State state1 = new State( new EpochNumber(), 1 );
        State state2 = new State( new EpochNumber(), 1 );
        URI uri1 = new URI( "neo4j://server1" );
        URI uri2 = new URI( "neo4j://server2" );
        Map<URI, State> registry = new HashMap<URI, State>();
        registry.put( uri1, state1 );
        registry.put( uri2, state2 );
        Map<URI, State> emptyView = Collections.emptyMap();
        context.responseReceivedForRound( firstCollectionRound, uri1, emptyView );
        // Really checking the invariants on the COLLECT phase of the algo is a test case on its own, below
        assertEquals( 1, context.getStatusResponsesForRound( firstCollectionRound ) );
        context.responseReceivedForRound( firstCollectionRound, uri2, emptyView );
        assertEquals( 2, context.getStatusResponsesForRound( firstCollectionRound ) );
        context.collectionRoundDone( firstCollectionRound );
        int secondCollectionRound = context.startCollectionRound();
        assertEquals( secondCollectionRound, firstCollectionRound + 1 );
        assertEquals( context.getViews(), context.getPreviousViewForCollectionRound( secondCollectionRound ) );
    }
    // Full COLLECT-phase scenario over two rounds: views are merged from
    // responses, never regress, and expiry flags are set per the algorithm.
    @Test
    public void testCollectInvariantsHoldAfterTwoCollectResponses() throws URISyntaxException
    {
        URI uri1 = new URI( "neo4j://server1" );
        URI uri2 = new URI( "neo4j://server2" );
        URI uri3 = new URI( "neo4j://server3" );
        URI uri4 = new URI( "neo4j://server4" );
        // Also, assume this is the first round
        Map<URI, View> finalViews = new HashMap<URI, View>();
        finalViews.put( uri1, new View( new State( new EpochNumber( 1 ), 3 ), false ) );
        finalViews.put( uri2, new View( new State( new EpochNumber( 2 ), 3 ), false ) );
        finalViews.put( uri3, new View( new State( new EpochNumber( 0 ), 5 ), false ) );
        Map<URI, State> registryFrom1 = new HashMap<URI, State>();
        registryFrom1.put( uri1, new State( new EpochNumber( 1 ), 3 ) );
        registryFrom1.put( uri2, new State( new EpochNumber( 2 ), 3 ) );
        registryFrom1.put( uri3, new State( new EpochNumber( 0 ), 5 ) );
        // Server 2's registry is strictly older than server 1's everywhere.
        Map<URI, State> registryFrom2 = new HashMap<URI, State>();
        registryFrom2.put( uri1, new State( new EpochNumber( 1 ), 3 ) );
        registryFrom2.put( uri2, new State( new EpochNumber( 1 ), 3 ) );
        registryFrom2.put( uri3, new State( new EpochNumber( 0 ), 4 ) );
        OmegaContext context = new OmegaContext( Mockito.mock( ClusterContext.class ) );
        int collectionRound = context.startCollectionRound();
        context.responseReceivedForRound( collectionRound, uri1, registryFrom1 );
        checkConsolidatedViews( context.getViews(), registryFrom1 );
        context.responseReceivedForRound( collectionRound, uri2, registryFrom2 );
        checkConsolidatedViews( context.getViews(), registryFrom2 );
        // Now we have collected responses from a majority. Check that the views have been expired properly
        context.collectionRoundDone( collectionRound );
        // We know the rest of the servers provide a maximum of 3 instances - our views must be at least as large
        checkUpdatedViews( context.getPreviousViewForCollectionRound( collectionRound ), context.getViews() );
        assertEquals( finalViews, context.getViews() );
        // Time for the second round. This is the expected
        finalViews.put( uri1, new View( new State( new EpochNumber( 1 ), 3 ) ) );
        finalViews.put( uri2, new View( new State( new EpochNumber( 2 ), 3 ) ) );
        finalViews.put( uri3, new View( new State( new EpochNumber( 4 ), 10 ), false ) );
        finalViews.put( uri4, new View( new State( new EpochNumber( 1 ), 2 ), false ) );
        registryFrom1.put( uri1, new State( new EpochNumber( 1 ), 3 ) );
        registryFrom1.put( uri2, new State( new EpochNumber( 1 ), 3 ) );
        registryFrom1.put( uri3, new State( new EpochNumber( 4 ), 10 ) );
        registryFrom2.put( uri1, new State( new EpochNumber( 1 ), 3 ) );
        registryFrom2.put( uri2, new State( new EpochNumber( 1 ), 3 ) );
        registryFrom2.put( uri3, new State( new EpochNumber( 3 ), 9 ) );
        // and all of the sudden, we have seen a new instance
        registryFrom2.put( uri4, new State( new EpochNumber( 1 ), 2 ) );
        collectionRound = context.startCollectionRound();
        context.responseReceivedForRound( collectionRound, uri1, registryFrom1 );
        checkConsolidatedViews( context.getViews(), registryFrom1 );
        context.responseReceivedForRound( collectionRound, uri2, registryFrom2 );
        checkConsolidatedViews( context.getViews(), registryFrom2 );
        // Now we have collected responses from a majority. Check that the views have been expired properly
        context.collectionRoundDone( collectionRound );
        // We know the rest of the servers provide a maximum of 3 instances - our views must be at least as large
        checkUpdatedViews( context.getPreviousViewForCollectionRound( collectionRound ), context.getViews() );
        assertEquals( finalViews, context.getViews() );
    }
    // Invariant: after merging a response, every consolidated view is at least
    // as fresh as the state the responder reported for that host.
    private void checkConsolidatedViews( Map<URI, View> collectorViews, Map<URI, State> collectedRegistry )
    {
        for ( Map.Entry<URI, View> view : collectorViews.entrySet() )
        {
            URI uri = view.getKey();
            State viewedState = view.getValue().getState();
            assertTrue( viewedState.compareTo( collectedRegistry.get( uri ) ) >= 0 );
        }
    }
    // Invariant: hosts first seen this round are not expired; hosts whose state
    // did not advance are expired, unless their epoch number moved forward.
    private void checkUpdatedViews( Map<URI, View> oldViews, Map<URI, View> newViews )
    {
        for ( Map.Entry<URI, View> view : newViews.entrySet() )
        {
            URI uri = view.getKey();
            View newView = view.getValue();
            View oldView = oldViews.get( uri );
            if ( oldView == null )
            {
                assertFalse( newView.isExpired() );
                continue;
            }
            if ( newView.getState().compareTo( oldView.getState() ) <= 0 )
            {
                assertTrue( newView.isExpired() );
            }
            if ( newView.getState().getEpochNum().compareTo( oldView.getState().getEpochNum() ) > 0 )
            {
                assertFalse( newView.isExpired() );
            }
        }
    }
}
| false
|
enterprise_cluster_src_test_java_org_neo4j_cluster_protocol_omega_OmegaContextTest.java
|
4,303
|
// Per-round bookkeeping for a refresh round.
private static final class RefreshRoundContext
{
    int acksReceived; // number of refresh_ack messages seen for this round
}
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_protocol_omega_OmegaContext.java
|
4,304
|
/**
 * Context (shared mutable state) for the Omega failure-detector protocol.
 * Tracks the per-host state registry and consolidated views, plus bookkeeping
 * for in-flight refresh rounds and collection ("read") rounds.
 * <p>
 * NOTE(review): no synchronization is present — presumably confined to the
 * state-machine thread; confirm before sharing across threads.
 */
public class OmegaContext
{
    /** Per-round bookkeeping for a refresh round. */
    private static final class RefreshRoundContext
    {
        int acksReceived; // number of refresh_ack messages seen for this round
    }

    // In-flight refresh rounds keyed by round id; sorted so lastKey() yields
    // the most recently started round when allocating the next id.
    private final SortedMap<Integer, RefreshRoundContext> refreshContexts = new TreeMap<Integer, RefreshRoundContext>();
    // In-flight collection rounds keyed by round id, same numbering scheme.
    private final SortedMap<Integer, CollectionRound> collectionContexts = new TreeMap<Integer, CollectionRound>();
    // Latest known state per cluster member.
    private final Map<URI, State> registry = new HashMap<URI, State>();
    // Consolidated views built up from collection responses.
    private final Map<URI, View> views = new HashMap<URI, View>();
    private final List<OmegaListener> listeners = new ArrayList<OmegaListener>();
    // Protocol timing parameters; not referenced anywhere in this class yet.
    private int bigDelta;
    private int smallDelta;
    // Assigned once in the constructor, so declared final.
    private final ClusterContext clusterContext;

    public OmegaContext( ClusterContext clusterContext )
    {
        this.clusterContext = clusterContext;
    }

    public void startTimers()
    {
        // TODO(review): empty — timer scheduling presumably belongs here.
    }

    /** Discards the bookkeeping for a finished refresh round. */
    public void roundDone( int refreshRound )
    {
        refreshContexts.remove( refreshRound );
    }

    public Iterable<? extends URI> getServers()
    {
        return clusterContext.getMemberURIs();
    }

    /**
     * Returns this instance's own protocol state.
     * NOTE(review): {@code registry} is keyed by URI but looked up with
     * {@code getMyId()} — if that id is not a URI this always yields null;
     * confirm against ClusterContext.
     */
    public State getMyState()
    {
        return registry.get( clusterContext.getMyId() );
    }

    /** Returns this instance's own view; same keying caveat as getMyState(). */
    public View getMyView()
    {
        return views.get( clusterContext.getMyId() );
    }

    public int getMyProcessId()
    {
        return getMyState().getEpochNum().getProcessId();
    }

    /**
     * Starts a new collection round, snapshotting the current views so the
     * round can later compare old against new.
     *
     * @return the new round's id; rounds number consecutively from 1
     */
    public int startCollectionRound()
    {
        int nextRound = collectionContexts.isEmpty() ? 1 : collectionContexts.lastKey() + 1;
        Map<URI, View> oldViews = new HashMap<URI, View>( views );
        collectionContexts.put( nextRound, new CollectionRound( oldViews, nextRound ) );
        return nextRound;
    }

    public Map<URI, View> getPreviousViewForCollectionRound( int newCollectionRound )
    {
        return collectionContexts.get( newCollectionRound ).getOldViews();
    }

    public int getStatusResponsesForRound( int newCollectionRound )
    {
        return collectionContexts.get( newCollectionRound ).getResponseCount();
    }

    public Map<URI, View> getViews()
    {
        return views;
    }

    /**
     * Finishes a collection round: marks each view expired when its state did
     * not advance past the round's snapshot, except when its epoch number
     * moved forward, and leaves hosts first seen this round un-expired.
     */
    public void collectionRoundDone( int collectionRound )
    {
        CollectionRound theRound = collectionContexts.get( collectionRound );
        Map<URI, View> previousView = theRound.getOldViews();
        // Now, consolidate them
        for ( Map.Entry<URI, View> newView : views.entrySet() )
        {
            URI uri = newView.getKey();
            View view = newView.getValue();
            View oldView = previousView.get( uri );
            if ( oldView == null )
            {
                // This means we didn't know about it, so it is definitely not expired
                view.setExpired( false );
                continue;
            }
            if ( view.getState().compareTo( oldView.getState() ) <= 0 )
            {
                view.setExpired( true );
            }
            if ( view.getState().getEpochNum().compareTo( oldView.getState().getEpochNum() ) > 0 )
            {
                view.setExpired( false );
            }
        }
    }

    /**
     * Records a status response for the given round and merges the reported
     * registry into our views, keeping only strictly newer states per host.
     */
    public void responseReceivedForRound( int collectionRound, URI from, Map<URI, State> view )
    {
        CollectionRound thisRound = collectionContexts.get( collectionRound );
        thisRound.responseReceived( from );
        for ( Map.Entry<URI, State> incomingView : view.entrySet() )
        {
            URI uri = incomingView.getKey();
            State incomingState = incomingView.getValue();
            View oldView = views.get( uri );
            if ( oldView == null /*we don't know about it yet*/
                    || incomingState.compareTo( oldView.getState() ) > 0 )
            {
                views.put( uri, new View( incomingState ) );
            }
        }
    }

    /**
     * Returns the ack count for a refresh round, or -1 when the round is
     * unknown (never started, or already completed via roundDone()).
     */
    public int getAckCount( int forRound )
    {
        RefreshRoundContext context = refreshContexts.get( forRound );
        if ( context == null )
        {
            return -1;
        }
        return context.acksReceived;
    }

    /**
     * Starts a new refresh round.
     *
     * @return the new round's id; rounds number consecutively from 1
     */
    public int startRefreshRound()
    {
        int nextRound = refreshContexts.isEmpty() ? 1 : refreshContexts.lastKey() + 1;
        refreshContexts.put( nextRound, new RefreshRoundContext() );
        return nextRound;
    }

    public void ackReceived( int forRound )
    {
        refreshContexts.get( forRound ).acksReceived++;
    }

    public ClusterContext getClusterContext()
    {
        return clusterContext;
    }

    public void addListener( OmegaListener listener )
    {
        listeners.add( listener );
    }

    public void removeListener( OmegaListener listener )
    {
        listeners.remove( listener );
    }

    /**
     * Number of cluster members.
     * NOTE(review): this reads getConfiguration().getMemberURIs() while
     * getServers() reads clusterContext.getMemberURIs() directly — confirm
     * both are the same collection.
     */
    public int getClusterNodeCount()
    {
        return getClusterContext().getConfiguration().getMemberURIs().size();
    }

    public Map<URI, State> getRegistry()
    {
        return registry;
    }
}
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_protocol_omega_OmegaContext.java
|
4,305
|
/**
 * Mockito argument matcher for cluster {@link Message}s. Each criterion
 * (from, to, message type, payload) is optional; unset criteria match
 * anything, and all set criteria must match. Builder-style setters return
 * {@code this} for chaining.
 */
public class MessageArgumentMatcher<T extends MessageType> extends ArgumentMatcher<Message<T>>
{
    private URI from;
    private URI to;
    private T theMessageType;
    private Serializable payload;

    /** Requires the message's FROM header to equal this URI's string form. */
    public MessageArgumentMatcher<T> from( URI from )
    {
        this.from = from;
        return this;
    }

    /** Requires the message's TO header to equal this URI's string form. */
    public MessageArgumentMatcher<T> to( URI to )
    {
        this.to = to;
        return this;
    }

    /** Requires the message to be of exactly this type. */
    public MessageArgumentMatcher<T> onMessageType( T messageType )
    {
        this.theMessageType = messageType;
        return this;
    }

    /** Requires the message's payload to equal this payload. */
    public MessageArgumentMatcher<T> withPayload( Serializable payload )
    {
        this.payload = payload;
        return this;
    }

    @Override
    public boolean matches( Object message )
    {
        // instanceof is false for null, so no separate null check is needed.
        // (The previous "message == this" shortcut was unreachable: this
        // matcher is not a Message, so it could never pass this guard.)
        if ( !( message instanceof Message ) )
        {
            return false;
        }
        Message toMatchAgainst = (Message) message;
        // Unset (null) criteria match anything.
        boolean toMatches = to == null || to.toString().equals( toMatchAgainst.getHeader( Message.TO ) );
        boolean fromMatches = from == null || from.toString().equals( toMatchAgainst.getHeader( Message.FROM ) );
        boolean typeMatches = theMessageType == null || theMessageType == toMatchAgainst.getMessageType();
        boolean payloadMatches = payload == null || payload.equals( toMatchAgainst.getPayload() );
        return fromMatches && toMatches && typeMatches && payloadMatches;
    }

    @Override
    public void describeTo( Description description )
    {
        description.appendText(
                (theMessageType != null ? theMessageType.name() : "<no particular message type>") +
                        "{from=" + from + ", to=" + to + ", payload=" + payload + "}" );
    }
}
| false
|
enterprise_cluster_src_test_java_org_neo4j_cluster_protocol_omega_MessageArgumentMatcher.java
|
4,306
|
/**
 * Bookkeeping for one collection ("read") round of the Omega protocol: holds
 * an immutable snapshot of the views as they were when the round started, and
 * records which servers have responded so far.
 */
public class CollectionRound
{
    // Snapshot of the views taken at round start, for later old-vs-new comparison.
    private final Map<URI, View> oldViews;
    // Servers that have answered this round; a Set, so duplicates count once.
    private final Set<URI> responders = new HashSet<URI>();
    // Identifier of this round.
    private final int collectionRound;

    public CollectionRound( Map<URI, View> oldViews, int collectionRound )
    {
        this.oldViews = oldViews;
        this.collectionRound = collectionRound;
    }

    /** The views as they were when this round began. */
    public Map<URI, View> getOldViews()
    {
        return oldViews;
    }

    /** Records that the given server has responded to this round. */
    public void responseReceived( URI from )
    {
        responders.add( from );
    }

    /** The distinct servers that have responded so far. */
    public Iterable<URI> getResponders()
    {
        return responders;
    }

    /** How many distinct servers have responded so far. */
    public int getResponseCount()
    {
        return responders.size();
    }
}
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_protocol_omega_CollectionRound.java
|
4,307
|
/**
 * Tests that the heartbeat state machine discards suspicions about the local
 * instance itself while keeping suspicions about other instances.
 */
public class HeartbeatStateTest
{
    @Test
    public void shouldIgnoreSuspicionsForOurselves() throws Throwable
    {
        // Given: a two-member cluster where we are instance 1
        InstanceId instanceId = new InstanceId( 1 );
        HeartbeatState heartbeat = HeartbeatState.heartbeat;
        ClusterConfiguration configuration = new ClusterConfiguration("whatever", StringLogger.DEV_NULL,
                "cluster://1", "cluster://2" );
        configuration.joined( instanceId, URI.create("cluster://1" ) );
        configuration.joined( new InstanceId( 2 ), URI.create("cluster://2" ));
        Logging logging = mock( Logging.class );
        when( logging.getMessagesLog( Matchers.<Class>any() ) ).thenReturn( mock( StringLogger.class ) );
        MultiPaxosContext context = new MultiPaxosContext( instanceId, Iterables.<ElectionRole, ElectionRole>iterable(
                new ElectionRole( "coordinator" ) ), configuration,
                Mockito.mock( Executor.class ), logging,
                Mockito.mock( ObjectInputStreamFactory.class), Mockito.mock( ObjectOutputStreamFactory.class),
                Mockito.mock( AcceptorInstanceStore.class), Mockito.mock( Timeouts.class),
                mock( ElectionCredentialsProvider.class) );
        HeartbeatContext heartbeatContext = context.getHeartbeatContext();
        // A suspicions message from instance 2 that suspects us (instance 1)
        Message received = Message.internal( HeartbeatMessage.suspicions,
                new HeartbeatMessage.SuspicionsState( Iterables.toSet( Iterables.<InstanceId, InstanceId>iterable( instanceId ) ) ) );
        received.setHeader( Message.FROM, "cluster://2" );
        // When
        heartbeat.handle( heartbeatContext, received, mock( MessageHolder.class) );
        // Then: the suspicion about ourselves must have been dropped
        assertThat( heartbeatContext.getSuspicionsOf( instanceId ).size(), equalTo( 0 ) );
    }
    @Test
    public void shouldIgnoreSuspicionsForOurselvesButKeepTheRest() throws Throwable
    {
        // Given: same two-member cluster, plus a third instance only referenced in suspicions
        InstanceId myId = new InstanceId( 1 );
        InstanceId foreignId = new InstanceId( 3 );
        HeartbeatState heartbeat = HeartbeatState.heartbeat;
        ClusterConfiguration configuration = new ClusterConfiguration("whatever", StringLogger.DEV_NULL,
                "cluster://1", "cluster://2" );
        configuration.joined( myId, URI.create("cluster://1" ) );
        configuration.joined( new InstanceId( 2 ), URI.create("cluster://2" ));
        Logging logging = mock( Logging.class );
        when( logging.getMessagesLog( Matchers.<Class>any() ) ).thenReturn( mock( StringLogger.class ) );
        MultiPaxosContext context = new MultiPaxosContext( myId, Iterables.<ElectionRole, ElectionRole>iterable(
                new ElectionRole( "coordinator" ) ), configuration,
                Mockito.mock( Executor.class ), logging,
                Mockito.mock( ObjectInputStreamFactory.class), Mockito.mock( ObjectOutputStreamFactory.class),
                Mockito.mock( AcceptorInstanceStore.class), Mockito.mock( Timeouts.class),
                mock( ElectionCredentialsProvider.class) );
        HeartbeatContext heartbeatContext = context.getHeartbeatContext();
        // Instance 2 suspects both us and the foreign instance
        Message received = Message.internal( HeartbeatMessage.suspicions,
                new HeartbeatMessage.SuspicionsState( Iterables.toSet( Iterables.<InstanceId, InstanceId>iterable( myId, foreignId ) ) ) );
        received.setHeader( Message.FROM, "cluster://2" );
        // When
        heartbeat.handle( heartbeatContext, received, mock( MessageHolder.class) );
        // Then: our own suspicion is dropped, the foreign one is retained
        assertThat( heartbeatContext.getSuspicionsOf( myId ).size(), equalTo( 0 ) );
        assertThat( heartbeatContext.getSuspicionsOf( foreignId ).size(), equalTo( 1 ) );
    }
}
| false
|
enterprise_cluster_src_test_java_org_neo4j_cluster_protocol_heartbeat_HeartbeatStateTest.java
|
4,308
|
// Operational state of the heartbeat protocol: sends i_am_alive messages,
// tracks timeouts per instance, and gossips suspicions about failed members.
heartbeat
{
    @Override
    public HeartbeatState handle( HeartbeatContext context,
                                  Message<HeartbeatMessage> message,
                                  MessageHolder outgoing
    )
            throws Throwable
    {
        switch ( message.getMessageType() )
        {
            case i_am_alive:
            {
                HeartbeatMessage.IAmAliveState state = message.getPayload();
                // Our own heartbeats carry no information for us.
                if (context.isMe( state.getServer() ) )
                {
                    break;
                }
                context.getLogger( HeartbeatState.class ).debug( "Received " + state );
                if ( state.getServer() == null )
                {
                    break;
                }
                // alive() returns true when this server transitioned from
                // suspected/failed back to alive — then gossip our (now
                // changed) suspicions to every live peer.
                if ( context.alive( state.getServer() ) )
                {
                    // Send suspicions messages to all non-failed servers
                    for ( InstanceId aliveServer : context.getAlive() )
                    {
                        if ( !aliveServer.equals( context.getMyId() ) )
                        {
                            URI aliveServerUri =
                                    context.getUriForId( aliveServer );
                            outgoing.offer( Message.to( HeartbeatMessage.suspicions, aliveServerUri,
                                    new HeartbeatMessage.SuspicionsState( context.getSuspicionsFor( context.getMyId() ) ) ) );
                        }
                    }
                }
                // Restart the liveness timeout for this server: cancel the
                // pending one, then arm a fresh timed_out trigger.
                context.cancelTimeout( HeartbeatMessage.i_am_alive + "-" +
                        state.getServer() );
                context.setTimeout( HeartbeatMessage.i_am_alive + "-" +
                        state.getServer(), timeout( HeartbeatMessage.timed_out, message, state
                        .getServer() ) );
                // Check if this server knows something that we don't
                if ( message.hasHeader( "last-learned" ) )
                {
                    long lastLearned = Long.parseLong( message.getHeader( "last-learned" ) );
                    if ( lastLearned > context.getLastKnownLearnedInstanceInCluster() )
                    {
                        // Ask the learner to catch up to the peer's latest Paxos instance.
                        outgoing.offer( internal( LearnerMessage.catchUp, lastLearned ) );
                    }
                }
                break;
            }
            case timed_out:
            {
                // No i_am_alive arrived within the timeout for this server.
                InstanceId server = message.getPayload();
                context.getLogger( HeartbeatState.class )
                        .debug( "Received timed out for server " + server );
                // Check if this node is no longer a part of the cluster
                if ( context.getMembers().containsKey( server ) )
                {
                    context.suspect( server );
                    // Re-arm the timeout so the suspicion repeats until the
                    // server comes back or leaves the cluster.
                    context.setTimeout( HeartbeatMessage.i_am_alive + "-" +
                            server, timeout( HeartbeatMessage.timed_out, message, server ) );
                    // Send suspicions messages to all non-failed servers
                    for ( InstanceId aliveServer : context.getAlive() )
                    {
                        if ( !aliveServer.equals( context.getMyId() ) )
                        {
                            URI sendTo = context.getUriForId(
                                    aliveServer );
                            outgoing.offer( Message.to( HeartbeatMessage.suspicions, sendTo,
                                    new HeartbeatMessage.SuspicionsState( context.getSuspicionsFor(
                                            context.getMyId() ) ) ) );
                        }
                    }
                }
                else
                {
                    // If no longer part of cluster, then don't bother
                    context.serverLeftCluster( server );
                }
                break;
            }
            case sendHeartbeat:
            {
                // Periodic trigger: send our own i_am_alive to one given server.
                InstanceId to = message.getPayload();
                if (!context.isMe( to ) )
                {
                    // Check if this node is no longer a part of the cluster
                    if ( context.getMembers().containsKey( to ) )
                    {
                        URI toSendTo = context.getUriForId( to );
                        // Send heartbeat message to given server, piggybacking
                        // our last learned Paxos instance id in a header.
                        outgoing.offer( to( HeartbeatMessage.i_am_alive, toSendTo,
                                new HeartbeatMessage.IAmAliveState(
                                        context.getMyId() ) )
                                .setHeader( "last-learned",
                                        context.getLastLearnedInstanceId() + "" ) );
                        // Set new timeout to send heartbeat to this host
                        context.setTimeout(
                                HeartbeatMessage.sendHeartbeat + "-" + to,
                                timeout( HeartbeatMessage.sendHeartbeat, message, to ) );
                    }
                }
                break;
            }
            case reset_send_heartbeat:
            {
                // Some other traffic already went to this server recently
                // (see HeartbeatRefreshProcessor), so restart its send timer.
                InstanceId to = message.getPayload();
                if ( !context.isMe( to ) )
                {
                    String timeoutName = HeartbeatMessage.sendHeartbeat + "-" + to;
                    context.cancelTimeout( timeoutName );
                    context.setTimeout( timeoutName, Message.timeout(
                            HeartbeatMessage.sendHeartbeat, message, to ) );
                }
                break;
            }
            case suspicions:
            {
                HeartbeatMessage.SuspicionsState suspicions = message.getPayload();
                context.getLogger( HeartbeatState.class )
                        .debug( "Received suspicions as " + suspicions );
                URI from = new URI( message.getHeader( Message.FROM ) );
                InstanceId fromId = context.getIdForUri( from );
                /*
                 * Remove ourselves from the suspicions received - we just received a message,
                 * it's not normal to be considered failed. Whatever it was, it was transient and now it has
                 * passed.
                 */
                suspicions.getSuspicions().remove( context.getMyId() );
                context.suspicions( fromId, suspicions.getSuspicions() );
                break;
            }
            case leave:
            {
                // Leaving the cluster: fall back to the idle start state.
                context.getLogger( HeartbeatState.class ).debug( "Received leave" );
                return start;
            }
            case addHeartbeatListener:
            {
                context.addHeartbeatListener( (HeartbeatListener) message.getPayload() );
                break;
            }
            case removeHeartbeatListener:
            {
                context.removeHeartbeatListener( (HeartbeatListener) message.getPayload() );
                break;
            }
        }
        // All messages except leave keep us in the heartbeat state.
        return this;
    }
}
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_protocol_heartbeat_HeartbeatState.java
|
4,309
|
// Idle state of the heartbeat protocol: only manages listeners until a join
// message arrives, at which point heartbeating begins for all other instances.
start
{
    @Override
    public HeartbeatState handle( HeartbeatContext context,
                                  Message<HeartbeatMessage> message,
                                  MessageHolder outgoing
    )
            throws Throwable
    {
        switch ( message.getMessageType() )
        {
            case addHeartbeatListener:
            {
                context.addHeartbeatListener( (HeartbeatListener) message.getPayload() );
                break;
            }
            case removeHeartbeatListener:
            {
                context.removeHeartbeatListener( (HeartbeatListener) message.getPayload() );
                break;
            }
            case join:
            {
                for ( InstanceId instanceId : context.getOtherInstances() )
                {
                    // Setup heartbeat timeouts for the other instance
                    context.setTimeout(
                            HeartbeatMessage.i_am_alive + "-" + instanceId,
                            timeout( HeartbeatMessage.timed_out, message, instanceId ) );
                    // Send first heartbeat immediately
                    outgoing.offer( timeout( HeartbeatMessage.sendHeartbeat, message, instanceId) );
                }
                // Joined the cluster: move to the operational heartbeat state.
                return heartbeat;
            }
        }
        // Unhandled message types leave us in the start state.
        return this;
    }
},
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_protocol_heartbeat_HeartbeatState.java
|
4,310
|
/**
 * Message processor that piggybacks on outgoing cluster traffic: any outbound
 * message to another instance proves we are alive to it, so the dedicated
 * heartbeat timer for that instance can be reset instead of sending a
 * redundant i_am_alive.
 */
public class HeartbeatRefreshProcessor implements MessageProcessor
{
    private final MessageHolder outgoing;
    private final ClusterContext clusterContext;

    public HeartbeatRefreshProcessor( MessageHolder outgoing, ClusterContext clusterContext )
    {
        this.outgoing = outgoing;
        this.clusterContext = clusterContext;
    }

    @Override
    public boolean process( Message<? extends MessageType> message )
    {
        // Internal messages never leave this instance, and i_am_alive IS the
        // heartbeat itself — neither should reset the heartbeat timer.
        if ( !message.isInternal() &&
                !message.getMessageType().equals( HeartbeatMessage.i_am_alive ) )
        {
            try
            {
                String to = message.getHeader( Message.TO );
                InstanceId serverId = clusterContext.getConfiguration().getIdForUri( new URI( to ) );
                if ( !clusterContext.isMe( serverId ) )
                {
                    // Tell the heartbeat state machine to restart the send
                    // timer for this destination (see reset_send_heartbeat).
                    outgoing.offer( Message.internal( HeartbeatMessage.reset_send_heartbeat,
                            serverId ) );
                }
            }
            catch( URISyntaxException e )
            {
                // NOTE(review): printStackTrace swallows the malformed-TO-header
                // case to stderr; should go through the cluster's logging instead.
                e.printStackTrace();
            }
        }
        // Always let the message continue through the processing chain.
        return true;
    }
}
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_protocol_heartbeat_HeartbeatRefreshProcessor.java
|
4,311
|
/**
 * Serializable payload of a suspicions message: the set of instances the
 * sender currently suspects of having failed.
 */
public static class SuspicionsState
        implements Serializable
{
    private static final long serialVersionUID = 3152836192116904427L;

    // The suspected instances; may be null.
    private Set<InstanceId> suspicions;

    public SuspicionsState( Set<InstanceId> suspicions )
    {
        this.suspicions = suspicions;
    }

    public Set<InstanceId> getSuspicions()
    {
        return suspicions;
    }

    @Override
    public String toString()
    {
        return "Suspicions:" + suspicions;
    }

    @Override
    public boolean equals( Object o )
    {
        if ( this == o )
        {
            return true;
        }
        if ( o == null || getClass() != o.getClass() )
        {
            return false;
        }
        SuspicionsState other = (SuspicionsState) o;
        // Equal when both suspicion sets are null or equal to each other.
        return suspicions == null ? other.suspicions == null : suspicions.equals( other.suspicions );
    }

    @Override
    public int hashCode()
    {
        // Consistent with equals: null set hashes to 0.
        return suspicions == null ? 0 : suspicions.hashCode();
    }
}
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_protocol_heartbeat_HeartbeatMessage.java
|
4,312
|
/**
 * Serializable payload of an i_am_alive message: identifies which instance is
 * announcing that it is alive.
 */
public static class IAmAliveState
        implements Serializable
{
    // The announcing instance; may be null.
    private final InstanceId server;

    public IAmAliveState( InstanceId server )
    {
        this.server = server;
    }

    public InstanceId getServer()
    {
        return server;
    }

    @Override
    public String toString()
    {
        return "i_am_alive[" + server + "]";
    }

    @Override
    public boolean equals( Object o )
    {
        if ( this == o )
        {
            return true;
        }
        if ( o == null || getClass() != o.getClass() )
        {
            return false;
        }
        IAmAliveState other = (IAmAliveState) o;
        // Equal when both servers are null or equal to each other.
        return server == null ? other.server == null : server.equals( other.server );
    }

    @Override
    public int hashCode()
    {
        // Consistent with equals: null server hashes to 0.
        return server == null ? 0 : server.hashCode();
    }
}
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_protocol_heartbeat_HeartbeatMessage.java
|
4,313
|
/**
 * No-op implementation of {@code HeartbeatListener}; subclass it and override
 * only the callbacks you are interested in.
 */
public static class Adapter implements HeartbeatListener
{
    @Override
    public void failed( InstanceId server )
    {
        // deliberately empty
    }

    @Override
    public void alive( InstanceId server )
    {
        // deliberately empty
    }
}
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_protocol_heartbeat_HeartbeatListener.java
|
4,314
|
/**
 * Cluster listener that kicks the heartbeat machinery whenever a member joins:
 * it emits an internal reset_send_heartbeat for the new member so a heartbeat
 * send timer gets (re)armed for it.
 */
public class HeartbeatJoinListener extends ClusterListener.Adapter
{
    private final MessageHolder outgoing;

    public HeartbeatJoinListener( MessageHolder outgoing )
    {
        this.outgoing = outgoing;
    }

    @Override
    public void joinedCluster( InstanceId member, URI atUri )
    {
        outgoing.offer( Message.internal( HeartbeatMessage.reset_send_heartbeat, member ) );
    }
}
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_protocol_heartbeat_HeartbeatJoinListener.java
|
4,315
|
{{
put( new InstanceId( 1 ), URI.create( "ha://1" ) );
put( new InstanceId( 2 ), URI.create( "ha://2" ) );
}}
| false
|
enterprise_cluster_src_test_java_org_neo4j_cluster_protocol_heartbeat_HeartbeatIAmAliveProcessorTest.java
|
4,316
|
{
@Override
public Object answer( InvocationOnMock invocation ) throws Throwable
{
sentOut.add( (Message) invocation.getArguments()[0] );
return null;
}
} ).when( holder ).offer( Matchers.<Message<MessageType>>any() );
| false
|
enterprise_cluster_src_test_java_org_neo4j_cluster_protocol_heartbeat_HeartbeatIAmAliveProcessorTest.java
|
4,317
|
{{
put( new InstanceId( 1 ), URI.create( "ha://1" ) );
put( new InstanceId( 2 ), URI.create( "ha://2" ) );
}}
| false
|
enterprise_cluster_src_test_java_org_neo4j_cluster_protocol_heartbeat_HeartbeatIAmAliveProcessorTest.java
|
4,318
|
{
@Override
public Object answer( InvocationOnMock invocation ) throws Throwable
{
sentOut.add( (Message) invocation.getArguments()[0] );
return null;
}
} ).when( holder ).offer( Matchers.<Message<MessageType>>any() );
| false
|
enterprise_cluster_src_test_java_org_neo4j_cluster_protocol_heartbeat_HeartbeatIAmAliveProcessorTest.java
|
4,319
|
{{
put( new InstanceId( 1 ), URI.create( "ha://1" ) );
put( new InstanceId( 2 ), URI.create( "ha://2" ) );
}}
| false
|
enterprise_cluster_src_test_java_org_neo4j_cluster_protocol_heartbeat_HeartbeatIAmAliveProcessorTest.java
|
4,320
|
// Idle state of the Omega protocol: manages listeners until a start message
// arrives, then starts the protocol timers and moves to the omega state.
start
{
    @Override
    public OmegaState handle( OmegaContext context, Message<OmegaMessage> message,
            MessageHolder outgoing ) throws Throwable
    {
        OmegaMessage messageType = message.getMessageType();
        if ( messageType == OmegaMessage.add_listener )
        {
            context.addListener( (OmegaListener) message.getPayload() );
        }
        else if ( messageType == OmegaMessage.remove_listener )
        {
            context.removeListener( (OmegaListener) message.getPayload() );
        }
        else if ( messageType == OmegaMessage.start )
        {
            // Begin operation: arm the protocol timers and become operational.
            context.startTimers();
            return omega;
        }
        // Anything else leaves us idle.
        return this;
    }
},
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_protocol_omega_OmegaState.java
|
4,321
|
public class OmegaStateTest
{
@Test
public void testStartTransition() throws Throwable
{
OmegaContext context = Mockito.mock( OmegaContext.class );
Message<OmegaMessage> message = Message.internal( OmegaMessage.start );
MessageHolder outgoing = Mockito.mock( MessageHolder.class );
OmegaState result = (OmegaState) OmegaState.start.handle( context, message, outgoing );
// Assert we move to operational state
assertEquals( OmegaState.omega, result );
// And that timers started
Mockito.verify( context ).startTimers();
}
@Test
public void testRefreshTimeoutResponse() throws Throwable
{
OmegaContext context = Mockito.mock( OmegaContext.class );
Message<OmegaMessage> message = Message.internal( OmegaMessage.refresh_timeout );
MessageHolder outgoing = Mockito.mock( MessageHolder.class );
State state = new State( new EpochNumber() );
Mockito.when( context.getMyState() ).thenReturn( state );
Set<URI> servers = new HashSet<URI>();
servers.add( new URI( "localhost:80" ) );
servers.add( new URI( "localhost:81" ) );
Mockito.when( context.getServers() ).thenReturn( (Collection) servers );
OmegaState result = (OmegaState) OmegaState.omega.handle( context, message, outgoing );
assertEquals( OmegaState.omega, result );
Mockito.verify( context ).getServers();
Mockito.verify( outgoing, Mockito.times( servers.size() ) ).offer( Matchers.isA( Message.class ) );
Mockito.verify( context ).startRefreshRound();
}
@Test
public void testRefreshSuccess() throws Throwable
{
OmegaContext context = Mockito.mock( OmegaContext.class );
Message<OmegaMessage> message = Message.internal( OmegaMessage.refresh_ack, RefreshAckPayload.forRefresh( new
RefreshPayload( 1, 2, 3, 1 ) ) );
MessageHolder outgoing = Mockito.mock( MessageHolder.class );
Mockito.when( context.getClusterNodeCount() ).thenReturn( 3 );
Mockito.when( context.getAckCount( 1 ) ).thenReturn( 2 );
State state = new State( new EpochNumber() );
Mockito.when( context.getMyState() ).thenReturn( state );
OmegaState.omega.handle( context, message, outgoing );
Mockito.verify( context ).roundDone( 1 );
assertEquals( 1, state.getFreshness() );
}
@Test
public void testRoundTripTimeoutAkaAdvanceEpoch() throws Throwable
{
OmegaContext context = Mockito.mock( OmegaContext.class );
Message<OmegaMessage> message = Message.internal( OmegaMessage.round_trip_timeout );
MessageHolder outgoing = Mockito.mock( MessageHolder.class );
State state = new State( new EpochNumber() );
Mockito.when( context.getMyState() ).thenReturn( state );
View myView = new View( state );
Mockito.when( context.getMyView() ).thenReturn( myView );
OmegaState result = (OmegaState) OmegaState.omega.handle( context, message, outgoing );
assertEquals( OmegaState.omega, result );
Mockito.verify( context ).getMyState();
Mockito.verify( context ).getMyView();
Mockito.verify( context, Mockito.never() ).roundDone( Matchers.anyInt() );
assertTrue( myView.isExpired() );
// Most important things to test - no update on freshness and serial num incremented
assertEquals( 1, state.getEpochNum().getSerialNum() );
assertEquals( 0, state.getFreshness() );
}
private static final String fromString = "neo4j://from";
private void testRefreshResponseOnState( boolean newer ) throws Throwable
{
OmegaContext context = Mockito.mock( OmegaContext.class );
Message<OmegaMessage> message = Mockito.mock( Message.class );
MessageHolder outgoing = Mockito.mock( MessageHolder.class );
// Value here is not important, we override the compareTo() method anyway
RefreshPayload payload = new RefreshPayload( 1, 1, 1, 1 );
Mockito.when( message.getHeader( Message.FROM ) ).thenReturn( fromString );
Mockito.when( message.getPayload() ).thenReturn( payload );
Mockito.when( message.getMessageType() ).thenReturn( OmegaMessage.refresh );
URI fromURI = new URI( fromString );
Map<URI, State> registry = Mockito.mock( Map.class );
State fromState = Mockito.mock( State.class );
Mockito.when( registry.get( fromURI ) ).thenReturn( fromState );
Mockito.when( context.getRegistry() ).thenReturn( registry );
if ( newer )
{
Mockito.when( fromState.compareTo( Matchers.any( State.class ) ) ).thenReturn( -1 );
}
else
{
Mockito.when( fromState.compareTo( Matchers.any( State.class ) ) ).thenReturn( 1 );
}
OmegaState.omega.handle( context, message, outgoing );
Mockito.verify( context, Mockito.atLeastOnce() ).getRegistry();
Mockito.verify( registry ).get( fromURI );
Mockito.verify( fromState ).compareTo( Matchers.isA( State.class ) ); // existing compared to the one from the message
if ( newer )
{
Mockito.verify( registry ).put( Matchers.eq( fromURI ), Matchers.isA( State.class ) );
}
else
{
Mockito.verify( registry, Mockito.never() ).put( Matchers.eq( fromURI ), Matchers.isA( State.class ) );
}
Mockito.verify( outgoing ).offer( Matchers.argThat( new MessageArgumentMatcher<OmegaMessage>().to( fromURI
).onMessageType(
OmegaMessage.refresh_ack ) ) );
}
@Test
public void testRefreshResponseOnOlderState() throws Throwable
{
testRefreshResponseOnState( false );
}
@Test
public void testRefreshResponseOnNewerState() throws Throwable
{
testRefreshResponseOnState( true );
}
@Test
public void testCollectRoundStartsOnReadTimeout() throws Throwable
{
OmegaContext context = Mockito.mock( OmegaContext.class );
Message<OmegaMessage> message = Mockito.mock( Message.class );
MessageHolder outgoing = Mockito.mock( MessageHolder.class );
Set<URI> servers = new HashSet<URI>();
servers.add( new URI( "localhost:80" ) );
servers.add( new URI( "localhost:81" ) );
servers.add( new URI( "localhost:82" ) );
Mockito.when( context.getServers() ).thenReturn( (Collection) servers );
Mockito.when( message.getMessageType() ).thenReturn( OmegaMessage.read_timeout );
Mockito.when( context.getMyProcessId() ).thenReturn( 1 );
OmegaState.omega.handle( context, message, outgoing );
Mockito.verify( context, Mockito.atLeastOnce() ).getServers();
Mockito.verify( context ).startCollectionRound();
for ( URI server : servers )
{
Mockito.verify( outgoing ).offer( Matchers.argThat( new MessageArgumentMatcher<OmegaMessage>().to(
server )
.onMessageType( OmegaMessage.collect ).withPayload( new CollectPayload( 0 ) ) ) );
}
}
@Test
public void testResponseOnCollectRequest() throws Throwable
{
OmegaContext context = Mockito.mock( OmegaContext.class );
Message<OmegaMessage> message = Mockito.mock( Message.class );
MessageHolder outgoing = Mockito.mock( MessageHolder.class );
Map<URI, State> dummyState = new HashMap<URI, State>();
Mockito.when( context.getRegistry() ).thenReturn( dummyState );
Mockito.when( message.getHeader( Message.FROM ) ).thenReturn( fromString );
Mockito.when( message.getPayload() ).thenReturn( new CollectPayload( 1 ) );
Mockito.when( message.getMessageType() ).thenReturn( OmegaMessage.collect );
OmegaState.omega.handle( context, message, outgoing );
Mockito.verify( context ).getRegistry();
Mockito.verify( outgoing ).offer( Matchers.argThat( new MessageArgumentMatcher<OmegaMessage>().to( new URI(
fromString ) )
.onMessageType( OmegaMessage.status ).withPayload( new CollectResponsePayload( new URI[]{},
new RefreshPayload[]{}, 1 ) ) ) );
}
private void testStatusResponseHandling( boolean done ) throws Throwable
{
OmegaContext context = Mockito.mock( OmegaContext.class );
Message<OmegaMessage> message = Mockito.mock( Message.class );
MessageHolder outgoing = Mockito.mock( MessageHolder.class );
URI fromUri = new URI( fromString );
Map<URI, State> thePayloadContents = new HashMap<URI, State>();
thePayloadContents.put( fromUri, new State( new EpochNumber( 1, 1 ), 1 ) );
CollectResponsePayload thePayload = CollectResponsePayload.fromRegistry( thePayloadContents, 3 /*== readNum*/ );
Mockito.when( message.getHeader( Message.FROM ) ).thenReturn( fromString );
Mockito.when( message.getPayload() ).thenReturn( thePayload );
Mockito.when( message.getMessageType() ).thenReturn( OmegaMessage.status );
Mockito.when( context.getViews() ).thenReturn( new HashMap<URI, View>() );
Mockito.when( context.getStatusResponsesForRound( 3 ) ).thenReturn( done ? 3 : 1 ); // less than half, not done
Mockito.when( context.getClusterNodeCount() ).thenReturn( 5 );
OmegaState.omega.handle( context, message, outgoing );
Mockito.verify( context ).responseReceivedForRound( 3, fromUri, thePayloadContents );
Mockito.verify( context ).getStatusResponsesForRound( 3 /*== readNum*/ );
Mockito.verify( context ).getClusterNodeCount();
if ( done )
{
Mockito.verify( context ).collectionRoundDone( 3 );
}
Mockito.verifyNoMoreInteractions( context );
// Receiving status response sends no messages anywhere, just alters context state
Mockito.verifyZeroInteractions( outgoing );
}
@Test
public void testStatusResponseHandlingRoundNotDone() throws Throwable
{
testStatusResponseHandling( false );
}
@Test
public void testStatusResponseHandlingRoundDone() throws Throwable
{
testStatusResponseHandling( true );
}
}
| false
|
enterprise_cluster_src_test_java_org_neo4j_cluster_protocol_omega_OmegaStateTest.java
|
4,322
|
private class StateMachineRule
implements StateTransitionListener
{
State<?,?> oldState;
MessageType messageType;
State<?,?> newState;
Message<?>[] messages;
private StateMachineRule( State<?, ?> oldState, MessageType messageType, State<?, ?> newState, Message<?>[] messages )
{
this.oldState = oldState;
this.messageType = messageType;
this.newState = newState;
this.messages = messages;
}
@Override
public void stateTransition( StateTransition transition )
{
if (oldState.equals( transition.getOldState() ) &&
transition.getMessage().getMessageType().equals( messageType ) &&
newState.equals( transition.getNewState() ))
{
for( Message<?> message : messages )
{
outgoing.offer( message );
}
}
}
}
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_statemachine_StateMachineRules.java
|
4,323
|
public class RefreshRound
{
}
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_protocol_omega_RefreshRound.java
|
4,324
|
public class StateMachineRules
implements StateTransitionListener
{
private final MessageHolder outgoing;
private Map<State<?,?>,List<StateMachineRule>> rules = new HashMap<State<?, ?>, List<StateMachineRule>>( );
public StateMachineRules( MessageHolder outgoing )
{
this.outgoing = outgoing;
}
public StateMachineRules rule( State<?, ?> oldState,
MessageType messageType,
State<?, ?> newState,
Message<?>... messages
)
{
List<StateMachineRule> fromRules = rules.get( oldState );
if (fromRules == null)
{
fromRules = new ArrayList<StateMachineRule>( );
rules.put( oldState, fromRules );
}
fromRules.add( new StateMachineRule( oldState, messageType, newState, messages ) );
return this;
}
@Override
public void stateTransition( StateTransition transition )
{
List<StateMachineRule> oldStateRules = rules.get( transition.getOldState() );
if (oldStateRules != null)
{
for( StateMachineRule oldStateRule : oldStateRules )
{
oldStateRule.stateTransition( transition );
}
}
}
private class StateMachineRule
implements StateTransitionListener
{
State<?,?> oldState;
MessageType messageType;
State<?,?> newState;
Message<?>[] messages;
private StateMachineRule( State<?, ?> oldState, MessageType messageType, State<?, ?> newState, Message<?>[] messages )
{
this.oldState = oldState;
this.messageType = messageType;
this.newState = newState;
this.messages = messages;
}
@Override
public void stateTransition( StateTransition transition )
{
if (oldState.equals( transition.getOldState() ) &&
transition.getMessage().getMessageType().equals( messageType ) &&
newState.equals( transition.getNewState() ))
{
for( Message<?> message : messages )
{
outgoing.offer( message );
}
}
}
}
}
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_statemachine_StateMachineRules.java
|
4,325
|
public class StateMachineProxyHandler
implements InvocationHandler
{
private StateMachineProxyFactory stateMachineProxyFactory;
private StateMachine stateMachine;
public StateMachineProxyHandler( StateMachineProxyFactory stateMachineProxyFactory, StateMachine stateMachine )
{
this.stateMachineProxyFactory = stateMachineProxyFactory;
this.stateMachine = stateMachine;
}
@Override
public Object invoke( Object proxy, Method method, Object[] args )
throws Throwable
{
// Delegate call to factory, which will translate method call into state machine invocation
return stateMachineProxyFactory.invoke( stateMachine, method, args == null ? null : (args.length > 1 ? args :
args[0]) );
}
public StateMachineProxyFactory getStateMachineProxyFactory()
{
return stateMachineProxyFactory;
}
}
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_statemachine_StateMachineProxyHandler.java
|
4,326
|
class ResponseFuture
implements Future<Object>
{
private final String conversationId;
private final MessageType initiatedByMessageType;
private Message response;
ResponseFuture( String conversationId, MessageType initiatedByMessageType )
{
this.conversationId = conversationId;
this.initiatedByMessageType = initiatedByMessageType;
}
public synchronized boolean setPotentialResponse( Message response )
{
if ( isResponse( response ) )
{
this.response = response;
this.notifyAll();
return true;
}
else
{
return false;
}
}
private boolean isResponse( Message response )
{
return (response.getMessageType().name().equals( initiatedByMessageType.name() + "Response" ) ||
response.getMessageType().name().equals( initiatedByMessageType.name() + "Failure" ));
}
@Override
public boolean cancel( boolean mayInterruptIfRunning )
{
return false;
}
@Override
public boolean isCancelled()
{
return false;
}
@Override
public boolean isDone()
{
return response != null;
}
@Override
public synchronized Object get()
throws InterruptedException, ExecutionException
{
if ( response != null )
{
return getResult();
}
while (response == null)
{
this.wait( 50 );
}
return getResult();
}
private synchronized Object getResult()
throws InterruptedException, ExecutionException
{
if ( response.getMessageType().name().equals( initiatedByMessageType.name() + "Failure" ) )
{
// Call failed
if ( response.getPayload() != null )
{
if ( response.getPayload() instanceof Throwable )
{
throw new ExecutionException( (Throwable) response.getPayload() );
}
else
{
throw new InterruptedException( response.getPayload().toString() );
}
}
else
{
// No message specified
throw new InterruptedException();
}
}
else
{
// Return result
return response.getPayload();
}
}
@Override
public synchronized Object get( long timeout, TimeUnit unit )
throws InterruptedException, ExecutionException, TimeoutException
{
if ( response != null )
{
getResult();
}
this.wait( unit.toMillis( timeout ) );
if ( response == null )
{
throw new TimeoutException();
}
return getResult();
}
}
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_statemachine_StateMachineProxyFactory.java
|
4,327
|
public class StateMachineProxyFactory
implements MessageProcessor
{
private final StateMachines stateMachines;
private final StateMachineConversations conversations;
private volatile InstanceId me;
private final Map<String, ResponseFuture> responseFutureMap = new ConcurrentHashMap<String, ResponseFuture>();
public StateMachineProxyFactory( StateMachines stateMachines, StateMachineConversations conversations, InstanceId me )
{
this.stateMachines = stateMachines;
this.conversations = conversations;
this.me = me;
}
public <CLIENT> CLIENT newProxy( Class<CLIENT> proxyInterface )
throws IllegalArgumentException
{
// Get the state machine whose messages correspond to the methods of the proxy interface
StateMachine stateMachine = getStateMachine( proxyInterface );
// Create a new dynamic proxy and handler that converts calls to state machine invocations
return proxyInterface.cast( Proxy.newProxyInstance( proxyInterface.getClassLoader(),
new Class<?>[]{proxyInterface}, new StateMachineProxyHandler( this, stateMachine ) ) );
}
Object invoke( StateMachine stateMachine, Method method, Object arg )
throws Throwable
{
if ( method.getName().equals( "toString" ) )
{
return me.toString();
}
if ( method.getName().equals( "equals" ) )
{
return ((StateMachineProxyHandler) Proxy.getInvocationHandler( arg )).getStateMachineProxyFactory().me.equals( me );
}
String conversationId = conversations.getNextConversationId();
try
{
Class<? extends MessageType> messageType = stateMachine.getMessageType();
MessageType typeAsEnum = (MessageType) Enum.valueOf( (Class<? extends Enum>) messageType, method.getName() );
Message<?> message = Message.internal( typeAsEnum, arg );
if ( me != null )
{
message.
setHeader( Message.CONVERSATION_ID, conversationId ).
setHeader( Message.CREATED_BY,me.toString() );
}
if ( method.getReturnType().equals( Void.TYPE ) )
{
stateMachines.process( message );
return null;
}
else
{
ResponseFuture future = new ResponseFuture( conversationId, typeAsEnum );
responseFutureMap.put( conversationId, future );
stateMachines.process( message );
return future;
}
}
catch ( IllegalArgumentException e )
{
throw new IllegalStateException( "No state machine can handle the method " + method.getName() );
}
}
@Override
public boolean process( Message message )
{
if ( !responseFutureMap.isEmpty() )
{
if ( !message.hasHeader( Message.TO ) )
{
String conversationId = message.getHeader( Message.CONVERSATION_ID );
ResponseFuture future = responseFutureMap.get( conversationId );
if ( future != null )
{
if ( future.setPotentialResponse( message ) )
{
responseFutureMap.remove( conversationId );
}
}
}
}
return true;
}
private StateMachine getStateMachine( Class<?> proxyInterface )
throws IllegalArgumentException
{
IllegalArgumentException exception = new IllegalArgumentException( "No state machine can handle the " +
"interface:" + proxyInterface.getName() );
statemachine:
for ( StateMachine stateMachine : stateMachines.getStateMachines() )
{
boolean foundMatch = false;
for ( Method method : proxyInterface.getMethods() )
{
if ( !(method.getReturnType().equals( Void.TYPE ) || method.getReturnType().equals( Future.class )) )
{
throw new IllegalArgumentException( "Methods must return either void or Future" );
}
try
{
Enum.valueOf( (Class<? extends Enum>) stateMachine.getMessageType(), method.getName() );
// Ok!
foundMatch = true;
}
catch ( Exception e )
{
if ( foundMatch )
// State machine could only partially handle this interface
{
exception = new IllegalArgumentException( "State machine for " + stateMachine.getMessageType
().getName() + " cannot handle method:" + method.getName() );
}
// Continue searching
continue statemachine;
}
}
// All methods are implemented by this state machine - return it!
return stateMachine;
}
// Could not find any state machine that can handle this interface
throw exception;
}
class ResponseFuture
implements Future<Object>
{
private final String conversationId;
private final MessageType initiatedByMessageType;
private Message response;
ResponseFuture( String conversationId, MessageType initiatedByMessageType )
{
this.conversationId = conversationId;
this.initiatedByMessageType = initiatedByMessageType;
}
public synchronized boolean setPotentialResponse( Message response )
{
if ( isResponse( response ) )
{
this.response = response;
this.notifyAll();
return true;
}
else
{
return false;
}
}
private boolean isResponse( Message response )
{
return (response.getMessageType().name().equals( initiatedByMessageType.name() + "Response" ) ||
response.getMessageType().name().equals( initiatedByMessageType.name() + "Failure" ));
}
@Override
public boolean cancel( boolean mayInterruptIfRunning )
{
return false;
}
@Override
public boolean isCancelled()
{
return false;
}
@Override
public boolean isDone()
{
return response != null;
}
@Override
public synchronized Object get()
throws InterruptedException, ExecutionException
{
if ( response != null )
{
return getResult();
}
while (response == null)
{
this.wait( 50 );
}
return getResult();
}
private synchronized Object getResult()
throws InterruptedException, ExecutionException
{
if ( response.getMessageType().name().equals( initiatedByMessageType.name() + "Failure" ) )
{
// Call failed
if ( response.getPayload() != null )
{
if ( response.getPayload() instanceof Throwable )
{
throw new ExecutionException( (Throwable) response.getPayload() );
}
else
{
throw new InterruptedException( response.getPayload().toString() );
}
}
else
{
// No message specified
throw new InterruptedException();
}
}
else
{
// Return result
return response.getPayload();
}
}
@Override
public synchronized Object get( long timeout, TimeUnit unit )
throws InterruptedException, ExecutionException, TimeoutException
{
if ( response != null )
{
getResult();
}
this.wait( unit.toMillis( timeout ) );
if ( response == null )
{
throw new TimeoutException();
}
return getResult();
}
}
}
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_statemachine_StateMachineProxyFactory.java
|
4,328
|
public class StateMachineConversations
{
private final AtomicLong nextConversationId = new AtomicLong();
private final String serverId;
public StateMachineConversations( InstanceId me )
{
serverId = me.toString();
}
public String getNextConversationId()
{
return serverId + "/" + nextConversationId.incrementAndGet() + "#";
}
}
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_statemachine_StateMachineConversations.java
|
4,329
|
public class StateMachine
{
private Object context;
private Class<? extends MessageType> messageEnumType;
private State<?, ?> state;
private Logging logging;
private List<StateTransitionListener> listeners = new ArrayList<StateTransitionListener>();
public StateMachine( Object context, Class<? extends MessageType> messageEnumType, State<?, ?> state,
Logging logging )
{
this.context = context;
this.messageEnumType = messageEnumType;
this.state = state;
this.logging = logging;
}
public Class<? extends MessageType> getMessageType()
{
return messageEnumType;
}
public State<?, ?> getState()
{
return state;
}
public Object getContext()
{
return context;
}
public void addStateTransitionListener( StateTransitionListener listener )
{
List<StateTransitionListener> newlisteners = new ArrayList<StateTransitionListener>( listeners );
newlisteners.add( listener );
listeners = newlisteners;
}
public void removeStateTransitionListener( StateTransitionListener listener )
{
List<StateTransitionListener> newlisteners = new ArrayList<StateTransitionListener>( listeners );
newlisteners.remove( listener );
listeners = newlisteners;
}
public synchronized void handle( Message<? extends MessageType> message, MessageHolder outgoing )
{
try
{
// Let the old state handle the incoming message and tell us what the new state should be
State<Object, MessageType> oldState = (State<Object, MessageType>) state;
State<?, ?> newState = oldState.handle( (Object) context, (Message<MessageType>) message, outgoing );
state = newState;
// Notify any listeners of the new state
StateTransition transition = new StateTransition( oldState, message, newState );
for ( StateTransitionListener listener : listeners )
{
try
{
listener.stateTransition( transition );
}
catch ( Throwable e )
{
// Ignore
logging.getMessagesLog( listener.getClass() ).warn( "Listener threw exception", e );
}
}
}
catch ( Throwable throwable )
{
logging.getMessagesLog( getClass() ).warn( "Exception in message handling", throwable );
}
}
@Override
public String toString()
{
return state.toString() + ": " + context.toString();
}
}
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_statemachine_StateMachine.java
|
4,330
|
public class SnapshotStateTest
{
@Test
public void testNoSnapshotRequestIfCoordinatorInExistingCluster() throws Throwable
{
Map<InstanceId, URI> extraMember = new HashMap<InstanceId, URI>();
URI other = URI.create( "cluster://other");
extraMember.put( new InstanceId( 2 ), other );
baseNoSendTest( extraMember );
}
@Test
public void testNoSnapshotRequestIfOnlyMember() throws Throwable
{
Map<InstanceId, URI> extraMember = new HashMap<InstanceId, URI>();
baseNoSendTest( extraMember );
}
public void baseNoSendTest( Map<InstanceId, URI> extraMembers) throws Throwable
{
URI me = URI.create( "cluster://me" );
Map<InstanceId, URI> members = new HashMap<InstanceId, URI>();
final InstanceId myId = new InstanceId( 1 );
members.put( myId, me );
members.putAll( extraMembers );
ClusterConfiguration clusterConfiguration = mock( ClusterConfiguration.class );
when( clusterConfiguration.getMembers() ).thenReturn( members );
when( clusterConfiguration.getElected( ClusterConfiguration.COORDINATOR ) ).thenReturn( myId );
when( clusterConfiguration.getUriForId( myId ) ).thenReturn( me );
ClusterContext clusterContext = mock( ClusterContext.class );
when( clusterContext.getConfiguration() ).thenReturn( clusterConfiguration );
when( clusterContext.getMyId() ).thenReturn( myId );
SnapshotContext context = mock( SnapshotContext.class );
when( context.getClusterContext() ).thenReturn( clusterContext );
when( context.getSnapshotProvider() ).thenReturn( mock( SnapshotProvider.class ) );
Message<SnapshotMessage> message = Message.to( SnapshotMessage.refreshSnapshot, me );
MessageHolder outgoing = mock( MessageHolder.class );
SnapshotState newState = (SnapshotState) SnapshotState.ready.handle( context, message, outgoing );
assertThat( newState, equalTo( SnapshotState.ready ) );
Mockito.verifyZeroInteractions( outgoing );
}
}
| false
|
enterprise_cluster_src_test_java_org_neo4j_cluster_protocol_snapshot_SnapshotStateTest.java
|
4,331
|
ready
{
@Override
public State<?, ?> handle( SnapshotContext context,
Message<SnapshotMessage> message,
MessageHolder outgoing
)
throws Throwable
{
switch ( message.getMessageType() )
{
case refreshSnapshot:
{
if ( context.getClusterContext().getConfiguration().getMembers().size() <= 1 ||
context.getSnapshotProvider() == null )
{
// we are the only instance in the cluster or snapshots are not meaningful
return ready;
}
else
{
InstanceId coordinator = context.getClusterContext().getConfiguration().getElected(
ClusterConfiguration.COORDINATOR );
if ( coordinator != null && !coordinator.equals( context.getClusterContext().getMyId() ) )
{
// coordinator exists, ask for the snapshot
outgoing.offer( Message.to( SnapshotMessage.sendSnapshot,
context.getClusterContext().getConfiguration().getUriForId(
coordinator ) ) );
return refreshing;
}
else
{
// coordinator is unknown, can't do much
return ready;
}
}
}
case sendSnapshot:
{
outgoing.offer( Message.respond( SnapshotMessage.snapshot, message,
new SnapshotMessage.SnapshotState( context.getLearnerContext()
.getLastDeliveredInstanceId(), context.getSnapshotProvider(),
context.getClusterContext().getObjectInputStreamFactory(),
context.getClusterContext().getObjectOutputStreamFactory()) ) );
break;
}
case leave:
{
return start;
}
}
return this;
}
}
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_protocol_snapshot_SnapshotState.java
|
4,332
|
refreshing
{
@Override
public State<?, ?> handle( SnapshotContext context,
Message<SnapshotMessage> message,
MessageHolder outgoing
)
throws Throwable
{
switch ( message.getMessageType() )
{
case snapshot:
{
SnapshotMessage.SnapshotState state = message.getPayload();
// If we have already delivered everything that is rolled into this snapshot, ignore it
state.setState( context.getSnapshotProvider(), context.getClusterContext().getObjectInputStreamFactory() );
return ready;
}
}
return this;
}
},
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_protocol_snapshot_SnapshotState.java
|
4,333
|
start
{
@Override
public State<?, ?> handle( SnapshotContext context,
Message<SnapshotMessage> message,
MessageHolder outgoing
)
throws Throwable
{
switch ( message.getMessageType() )
{
case setSnapshotProvider:
{
SnapshotProvider snapshotProvider = message.getPayload();
context.setSnapshotProvider( snapshotProvider );
break;
}
case refreshSnapshot:
{
if ( context.getClusterContext().getConfiguration().getMembers().size() <= 1 ||
context.getSnapshotProvider() == null )
{
// we are the only instance or there are no snapshots
return start;
}
else
{
InstanceId coordinator = context.getClusterContext().getConfiguration().getElected(
ClusterConfiguration.COORDINATOR );
if ( coordinator != null )
{
// there is a coordinator - ask from that
outgoing.offer( Message.to( SnapshotMessage.sendSnapshot,
context.getClusterContext().getConfiguration().getUriForId(
coordinator ) ) );
return refreshing;
}
else
{
return start;
}
}
}
case join:
{
// go to ready state, if someone needs snapshots they should ask for it explicitly.
return ready;
}
}
return this;
}
},
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_protocol_snapshot_SnapshotState.java
|
4,334
|
public static class SnapshotState
implements Serializable
{
private long lastDeliveredInstanceId = -1;
transient SnapshotProvider provider;
private transient ObjectInputStreamFactory objectInputStreamFactory;
private transient ObjectOutputStreamFactory objectOutputStreamFactory;
transient byte[] buf;
public SnapshotState( long lastDeliveredInstanceId, SnapshotProvider provider,
ObjectInputStreamFactory objectInputStreamFactory,
ObjectOutputStreamFactory objectOutputStreamFactory )
{
this.lastDeliveredInstanceId = lastDeliveredInstanceId;
this.provider = provider;
if ( objectInputStreamFactory == null )
{
throw new RuntimeException( "objectInputStreamFactory was null" );
}
if ( objectOutputStreamFactory == null )
{
throw new RuntimeException( "objectOutputStreamFactory was null" );
}
this.objectInputStreamFactory = objectInputStreamFactory;
this.objectOutputStreamFactory = objectOutputStreamFactory;
}
public long getLastDeliveredInstanceId()
{
return lastDeliveredInstanceId;
}
public void setState( SnapshotProvider provider, ObjectInputStreamFactory objectInputStreamFactory )
throws IOException
{
ByteArrayInputStream bin = new ByteArrayInputStream( buf );
ObjectInputStream oin = objectInputStreamFactory.create( bin );
try
{
provider.setState( oin );
}
catch ( Throwable e )
{
e.printStackTrace();
}
finally
{
oin.close();
}
}
private void writeObject( java.io.ObjectOutputStream out )
throws IOException
{
out.defaultWriteObject();
ByteArrayOutputStream bout = new ByteArrayOutputStream();
ObjectOutputStream oout = objectOutputStreamFactory.create( bout );
provider.getState( oout );
oout.close();
byte[] buf = bout.toByteArray();
out.writeInt( buf.length );
out.write( buf );
}
private void readObject( java.io.ObjectInputStream in )
throws IOException, ClassNotFoundException
{
in.defaultReadObject();
buf = new byte[in.readInt()];
try
{
in.readFully( buf );
}
catch ( EOFException endOfFile )
{
// do nothing - the stream's ended but the message content got through ok.
}
}
}
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_protocol_snapshot_SnapshotMessage.java
|
4,335
|
public class SnapshotContext
{
private SnapshotProvider snapshotProvider;
private ClusterContext clusterContext;
private LearnerContext learnerContext;
public SnapshotContext( ClusterContext clusterContext, LearnerContext learnerContext )
{
this.clusterContext = clusterContext;
this.learnerContext = learnerContext;
}
public void setSnapshotProvider( SnapshotProvider snapshotProvider)
{
this.snapshotProvider = snapshotProvider;
}
public ClusterContext getClusterContext()
{
return clusterContext;
}
public LearnerContext getLearnerContext()
{
return learnerContext;
}
public SnapshotProvider getSnapshotProvider()
{
return snapshotProvider;
}
}
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_protocol_snapshot_SnapshotContext.java
|
4,336
|
public class View
{
private final State state;
private boolean expired;
public View( State state )
{
this(state, true);
}
public View( State state, boolean expired )
{
this.state = state;
this.expired = expired;
}
public State getState()
{
return state;
}
public boolean isExpired()
{
return expired;
}
public void setExpired( boolean expired )
{
this.expired = expired;
}
@Override
public boolean equals( Object obj )
{
if ( obj == null )
{
return false;
}
if ( !(obj instanceof View ) )
{
return false;
}
View other = (View) obj;
return state.equals( other.state ) && expired == other.expired;
}
@Override
public String toString()
{
return "View [state:"+state+", expired= "+expired+"]";
}
}
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_protocol_omega_state_View.java
|
4,337
|
/**
 * Omega failure-detector state: an {@link EpochNumber} plus a freshness
 * counter. Ordering is by epoch number first, freshness second.
 */
public class State implements Comparable<State>
{
    private final EpochNumber epochNum;
    private int freshness;

    public State( EpochNumber epochNum, int freshness )
    {
        this.epochNum = epochNum;
        this.freshness = freshness;
    }

    public State( EpochNumber epochNum )
    {
        this( epochNum, 0 );
    }

    @Override
    public int compareTo( State o )
    {
        // Epoch dominates; freshness only breaks ties. Use Integer.compare
        // instead of the original subtraction (which could overflow), and
        // compare the epoch only once instead of twice.
        int byEpoch = epochNum.compareTo( o.epochNum );
        return byEpoch != 0 ? byEpoch : Integer.compare( freshness, o.freshness );
    }

    public EpochNumber getEpochNum()
    {
        return epochNum;
    }

    public int getFreshness()
    {
        return freshness;
    }

    /** Post-increments freshness, returning the value before the increase. */
    public int increaseFreshness()
    {
        return freshness++;
    }

    @Override
    public boolean equals( Object obj )
    {
        if ( obj == null )
        {
            return false;
        }
        if ( !(obj instanceof State) )
        {
            return false;
        }
        State other = (State) obj;
        return epochNum.equals( other.epochNum ) && (freshness == other.freshness);
    }

    // Added: equals() was overridden without hashCode(), breaking the Object
    // contract. NOTE: freshness is mutable, so instances are unsafe as hash
    // keys across increaseFreshness() calls.
    @Override
    public int hashCode()
    {
        return 31 * epochNum.hashCode() + freshness;
    }

    @Override
    public String toString()
    {
        return "State [Epoch: "+epochNum+", freshness= "+freshness+"]";
    }
}
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_protocol_omega_state_State.java
|
4,338
|
/**
 * Epoch number for the omega protocol: a mutable serial number paired with a
 * fixed process id. Ordered by serial number, then process id.
 */
public class EpochNumber implements Comparable<EpochNumber>
{
    private int serialNum;
    private final int processId;

    public EpochNumber( int serialNum, int processId )
    {
        this.serialNum = serialNum;
        this.processId = processId;
    }

    public EpochNumber( int processId )
    {
        this( 0, processId );
    }

    /** Placeholder epoch, using -1 as the "no process" id. */
    public EpochNumber()
    {
        this( -1 );
    }

    @Override
    public int compareTo( EpochNumber o )
    {
        // Integer.compare avoids the overflow that plain subtraction has for
        // operands far apart (e.g. Integer.MIN_VALUE vs a positive value).
        int bySerial = Integer.compare( serialNum, o.serialNum );
        return bySerial != 0 ? bySerial : Integer.compare( processId, o.processId );
    }

    public int getSerialNum()
    {
        return serialNum;
    }

    public int getProcessId()
    {
        return processId;
    }

    /** Post-increments the serial number, returning the value before the increase. */
    public int increaseSerialNum()
    {
        return serialNum++;
    }

    @Override
    public boolean equals( Object obj )
    {
        if ( obj == null )
        {
            return false;
        }
        if ( !(obj instanceof EpochNumber) )
        {
            return false;
        }
        EpochNumber other = (EpochNumber) obj;
        return serialNum == other.serialNum && processId == other.processId;
    }

    // Added: equals() without hashCode() broke the Object contract.
    // NOTE: serialNum is mutable, so instances are unsafe as hash keys across
    // increaseSerialNum() calls.
    @Override
    public int hashCode()
    {
        return 31 * serialNum + processId;
    }

    @Override
    public String toString()
    {
        return "EpochNumber [serial= "+serialNum+", process= "+processId+"]";
    }
}
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_protocol_omega_state_EpochNumber.java
|
4,339
|
/**
 * Serializable wire form of a {@link State} plus the refresh round it is
 * being sent for: the epoch's serial number and process id, the freshness
 * counter, and the refresh round number.
 */
public final class RefreshPayload implements Serializable
{
    public final int serialNum;
    public final int processId;
    public final int freshness;
    public final int refreshRound;

    public RefreshPayload( int serialNum, int processId, int freshness, int refreshRound )
    {
        this.serialNum = serialNum;
        this.processId = processId;
        this.freshness = freshness;
        this.refreshRound = refreshRound;
    }

    /** Flattens a state into a payload tagged with the given refresh round. */
    public static RefreshPayload fromState( State state, int refreshRound )
    {
        EpochNumber epoch = state.getEpochNum();
        return new RefreshPayload( epoch.getSerialNum(), epoch.getProcessId(), epoch == null ? 0 : state.getFreshness(), refreshRound );
    }

    /** Rebuilds the state, returning it paired with the payload's refresh round. */
    public static Pair<Integer, State> toState( RefreshPayload payload )
    {
        State state = new State( new EpochNumber( payload.serialNum, payload.processId ), payload.freshness );
        return Pair.of( payload.refreshRound, state );
    }

    @Override
    public String toString()
    {
        StringBuilder result = new StringBuilder( "RefreshPayload[serialNum= " );
        result.append( serialNum ).append( ", processId=" ).append( processId );
        result.append( ", freshness=" ).append( freshness );
        result.append( ", refreshRound=" ).append( refreshRound ).append( "]" );
        return result.toString();
    }
}
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_protocol_omega_payload_RefreshPayload.java
|
4,340
|
/**
 * Serializable acknowledgement of a refresh message, carrying only the
 * number of the refresh round being acknowledged.
 */
public final class RefreshAckPayload implements Serializable
{
    public final int round;

    public RefreshAckPayload( int round )
    {
        this.round = round;
    }

    /** Builds the ack matching the round of the given refresh payload. */
    public static RefreshAckPayload forRefresh( RefreshPayload payload )
    {
        return new RefreshAckPayload( payload.refreshRound );
    }
}
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_protocol_omega_payload_RefreshAckPayload.java
|
4,341
|
/**
 * Serializable snapshot of a node's registry (server -&gt; state), sent in
 * response to a collect request for round {@code readNum}.
 */
public final class CollectResponsePayload implements Serializable
{
    private final URI[] servers;
    // registry[i] is the state for servers[i]; the refresh round inside each
    // RefreshPayload is a dummy -1 (see fromRegistry).
    private final RefreshPayload[] registry;
    private final int readNum;

    public CollectResponsePayload( URI[] servers, RefreshPayload[] registry, int readNum )
    {
        this.servers = servers;
        this.registry = registry;
        this.readNum = readNum;
    }

    public int getReadNum()
    {
        return readNum;
    }

    /** Flattens a registry map into parallel server/state arrays. */
    public static CollectResponsePayload fromRegistry( Map<URI, State> registry, int readNum )
    {
        URI[] servers = new URI[registry.size()];
        RefreshPayload[] refreshPayloads = new RefreshPayload[registry.size()];
        int currentIndex = 0;
        for (Map.Entry<URI, State> entry : registry.entrySet())
        {
            servers[currentIndex] = entry.getKey();
            refreshPayloads[currentIndex] = RefreshPayload.fromState( entry.getValue(), -1 );
            currentIndex++;
        }
        return new CollectResponsePayload( servers, refreshPayloads, readNum );
    }

    /** Reconstructs the registry map from the parallel arrays. */
    public static Map<URI, State> fromPayload( CollectResponsePayload payload )
    {
        Map<URI, State> result = new HashMap<URI, State>();
        for ( int i = 0; i < payload.servers.length; i++ )
        {
            URI server = payload.servers[i];
            State state = RefreshPayload.toState( payload.registry[i] ).other();
            result.put( server, state );
        }
        return result;
    }

    @Override
    public boolean equals( Object obj )
    {
        if (obj == null)
        {
            return false;
        }
        if (obj == this)
        {
            return true;
        }
        if (!(obj instanceof CollectResponsePayload))
        {
            return false;
        }
        CollectResponsePayload other = (CollectResponsePayload) obj;
        // NOTE(review): RefreshPayload does not override equals(), so
        // deepEquals on registry falls back to reference identity — confirm
        // this is intended.
        return Arrays.deepEquals(servers, other.servers) && Arrays.deepEquals( registry, other.registry ) && readNum == other.readNum;
    }

    // Added: equals() was overridden without hashCode(), violating the
    // Object contract for hash-based collections.
    @Override
    public int hashCode()
    {
        int result = Arrays.deepHashCode( servers );
        result = 31 * result + Arrays.deepHashCode( registry );
        return 31 * result + readNum;
    }

    @Override
    public String toString()
    {
        // StringBuilder instead of the legacy synchronized StringBuffer;
        // output format is unchanged.
        StringBuilder buffer = new StringBuilder( "CollectResponsePayload[{" );
        for ( int i = 0; i < servers.length; i++ )
        {
            URI server = servers[i];
            RefreshPayload payload = registry[i];
            buffer.append( server ).append( ":" ).append( payload );
            if (i < servers.length - 1)
            {
                buffer.append( "," );
            }
        }
        buffer.append( "}, readNum=" ).append( readNum );
        return buffer.toString();
    }
}
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_protocol_omega_payload_CollectResponsePayload.java
|
4,342
|
/**
 * Serializable request asking a node to report its registry for collection
 * round {@code readNum}.
 */
public class CollectPayload implements Serializable
{
    private final int readNum;

    public CollectPayload( int readNum )
    {
        this.readNum = readNum;
    }

    public int getReadNum()
    {
        return readNum;
    }

    @Override
    public boolean equals( Object obj )
    {
        if (obj == null)
        {
            return false;
        }
        if (obj == this)
        {
            return true;
        }
        if (!(obj instanceof CollectPayload))
        {
            return false;
        }
        CollectPayload other = (CollectPayload) obj;
        return readNum == other.readNum;
    }

    // Added: equals() without hashCode() broke the Object contract for
    // hash-based collections.
    @Override
    public int hashCode()
    {
        return readNum;
    }

    @Override
    public String toString()
    {
        return "CollectPayload[readNum= "+readNum+"]";
    }
}
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_protocol_omega_payload_CollectPayload.java
|
4,343
|
{
    // Sleeps briefly before shutting the process down.
    @Override
    public void run()
    {
        try
        {
            Thread.sleep( 100 );
        }
        catch ( InterruptedException e )
        {
            // Restore the interrupt status instead of clearing it (the
            // original Thread.interrupted() call cleared the flag), so code
            // after us can still observe that we were interrupted.
            Thread.currentThread().interrupt();
        }
        shutdownProcess();
    }
}.start();
| false
|
enterprise_com_src_test_java_org_neo4j_com_MadeUpServerProcess.java
|
4,344
|
/**
 * Wire protocol for the com layer: chunks requests/responses over Netty
 * channels, writes/reads the request context header, store id and
 * transaction streams, and provides shared (de)serializers plus
 * string/boolean encoding helpers.
 */
public class Protocol
{
    public static final int MEGA = 1024 * 1024;
    public static final int DEFAULT_FRAME_LENGTH = 16*MEGA;
    private final int chunkSize;
    private final byte applicationProtocolVersion;
    private final byte internalProtocolVersion;
    public Protocol( int chunkSize, byte applicationProtocolVersion, byte internalProtocolVersion )
    {
        this.chunkSize = chunkSize;
        this.applicationProtocolVersion = applicationProtocolVersion;
        this.internalProtocolVersion = internalProtocolVersion;
    }
    // Request layout: [type id][request context][payload], flushed to the
    // channel in chunkSize pieces by ChunkingChannelBuffer.
    public void serializeRequest( Channel channel, ChannelBuffer buffer, RequestType<?> type, RequestContext ctx,
            Serializer payload ) throws IOException
    {
        buffer.clear();
        ChunkingChannelBuffer chunkingBuffer = new ChunkingChannelBuffer( buffer,
                channel, chunkSize, internalProtocolVersion, applicationProtocolVersion );
        chunkingBuffer.writeByte( type.id() );
        writeContext( ctx, chunkingBuffer );
        payload.write( chunkingBuffer );
        chunkingBuffer.done();
    }
    // Response layout: [payload][store id][transaction streams]; the
    // dechunking buffer reassembles chunks as they arrive.
    public <PAYLOAD> Response<PAYLOAD> deserializeResponse(BlockingReadHandler<ChannelBuffer> reader, ByteBuffer input, long timeout,
            Deserializer<PAYLOAD> payloadDeserializer,
            ResourceReleaser channelReleaser) throws IOException
    {
        DechunkingChannelBuffer dechunkingBuffer = new DechunkingChannelBuffer( reader, timeout,
                internalProtocolVersion, applicationProtocolVersion );
        PAYLOAD response = payloadDeserializer.read( dechunkingBuffer, input );
        StoreId storeId = readStoreId( dechunkingBuffer, input );
        TransactionStream txStreams = readTransactionStreams( dechunkingBuffer );
        return new Response<PAYLOAD>( response, storeId, txStreams, channelReleaser );
    }
    // Writes the request context header: epoch, machine/event ids, the last
    // applied tx per data source, master id and checksum.
    private void writeContext( RequestContext context, ChannelBuffer targetBuffer )
    {
        targetBuffer.writeLong( context.getEpoch() );
        targetBuffer.writeInt( context.machineId() );
        targetBuffer.writeInt( context.getEventIdentifier() );
        RequestContext.Tx[] txs = context.lastAppliedTransactions();
        targetBuffer.writeByte( txs.length ); // single byte: assumes < 256 data sources
        for ( RequestContext.Tx tx : txs )
        {
            writeString( targetBuffer, tx.getDataSourceName() );
            targetBuffer.writeLong( tx.getTxId() );
        }
        targetBuffer.writeInt( context.getMasterId() );
        targetBuffer.writeLong( context.getChecksum() );
    }
    private TransactionStream readTransactionStreams( final ChannelBuffer buffer )
    {
        final String[] datasources = readTransactionStreamHeader( buffer );
        // Only the null sentinel at index 0 -> there are no transactions to stream.
        if ( datasources.length == 1 )
        {
            return TransactionStream.EMPTY;
        }
        return new TransactionStream()
        {
            @Override
            protected Triplet<String, Long, TxExtractor> fetchNextOrNull()
            {
                // Peek ahead so the extractor below never blocks mid-transaction.
                makeSureNextTransactionIsFullyFetched( buffer );
                String datasource = datasources[buffer.readUnsignedByte()];
                if ( datasource == null )
                {
                    return null;
                }
                long txId = buffer.readLong();
                TxExtractor extractor = TxExtractor.create( new BlockLogReader( buffer ) );
                return Triplet.of( datasource, txId, extractor );
            }
            @Override
            public String[] dataSourceNames()
            {
                // Skip the sentinel at index 0.
                return Arrays.copyOfRange( datasources, 1, datasources.length );
            }
        };
    }
    // Header: [#data sources][name]*; index 0 is reserved as the
    // "no more transactions" sentinel.
    private String[] readTransactionStreamHeader( ChannelBuffer buffer )
    {
        short numberOfDataSources = buffer.readUnsignedByte();
        final String[] datasources = new String[numberOfDataSources + 1];
        datasources[0] = null; // identifier for "no more transactions"
        for ( int i = 1; i < datasources.length; i++ )
        {
            datasources[i] = readString( buffer );
        }
        return datasources;
    }
    // Reads ahead (mark/reset) over the next transaction so subsequent reads
    // find it fully buffered. A zero block-size byte appears to mark a full
    // block of BlockLogBuffer.DATA_SIZE bytes, with the final block carrying
    // its actual size — TODO confirm against the BlockLogBuffer format.
    private static void makeSureNextTransactionIsFullyFetched( ChannelBuffer buffer )
    {
        buffer.markReaderIndex();
        try
        {
            if ( buffer.readUnsignedByte() > 0 /* datasource id */ )
            {
                buffer.skipBytes( 8 ); // tx id
                int blockSize = 0;
                while ( (blockSize = buffer.readUnsignedByte()) == 0 )
                {
                    buffer.skipBytes( BlockLogBuffer.DATA_SIZE );
                }
                buffer.skipBytes( blockSize );
            }
        }
        finally
        {
            buffer.resetReaderIndex();
        }
    }
    // A store id occupies three longs (24 bytes) on the wire.
    private StoreId readStoreId( ChannelBuffer source, ByteBuffer byteBuffer )
    {
        byteBuffer.clear();
        byteBuffer.limit( 8 + 8 + 8 );
        source.readBytes( byteBuffer );
        byteBuffer.flip();
        return StoreId.deserialize( byteBuffer );
    }
    /* ========================
       Static utility functions
       ======================== */
    public static final ObjectSerializer<Integer> INTEGER_SERIALIZER = new ObjectSerializer<Integer>()
    {
        @SuppressWarnings( "boxing" )
        public void write( Integer responseObject, ChannelBuffer result ) throws IOException
        {
            result.writeInt( responseObject );
        }
    };
    public static final ObjectSerializer<Long> LONG_SERIALIZER = new ObjectSerializer<Long>()
    {
        @SuppressWarnings( "boxing" )
        public void write( Long responseObject, ChannelBuffer result ) throws IOException
        {
            result.writeLong( responseObject );
        }
    };
    public static final ObjectSerializer<Void> VOID_SERIALIZER = new ObjectSerializer<Void>()
    {
        public void write( Void responseObject, ChannelBuffer result ) throws IOException
        {
        }
    };
    public static final Deserializer<Integer> INTEGER_DESERIALIZER = new Deserializer<Integer>()
    {
        public Integer read( ChannelBuffer buffer, ByteBuffer temporaryBuffer ) throws IOException
        {
            return buffer.readInt();
        }
    };
    public static final Deserializer<Void> VOID_DESERIALIZER = new Deserializer<Void>()
    {
        public Void read( ChannelBuffer buffer, ByteBuffer temporaryBuffer ) throws IOException
        {
            return null;
        }
    };
    public static final Serializer EMPTY_SERIALIZER = new Serializer()
    {
        public void write( ChannelBuffer buffer ) throws IOException
        {
        }
    };
    // Streams files encoded as: [path length][path][has-data flag][blocks]*,
    // terminated by a zero path length.
    public static class FileStreamsDeserializer implements Deserializer<Void>
    {
        private final StoreWriter writer;
        public FileStreamsDeserializer( StoreWriter writer )
        {
            this.writer = writer;
        }
        // NOTICE: this assumes a "smart" ChannelBuffer that continues to next chunk
        public Void read( ChannelBuffer buffer, ByteBuffer temporaryBuffer ) throws IOException
        {
            int pathLength;
            while ( 0 != ( pathLength = buffer.readUnsignedShort() ) )
            {
                String path = readString( buffer, pathLength );
                boolean hasData = buffer.readByte() == 1;
                writer.write( path, hasData ? new BlockLogReader( buffer ) : null, temporaryBuffer, hasData );
            }
            writer.done();
            return null;
        }
    };
    // Adds 4-byte length-field framing to the given Netty pipeline.
    public static void addLengthFieldPipes( ChannelPipeline pipeline, int frameLength )
    {
        pipeline.addLast( "frameDecoder",
                new LengthFieldBasedFrameDecoder( frameLength + 4, 0, 4, 0, 4 ) );
        pipeline.addLast( "frameEncoder", new LengthFieldPrepender( 4 ) );
    }
    // Strings go on the wire as [char count:int][chars as 2-byte units].
    public static void writeString( ChannelBuffer buffer, String name )
    {
        char[] chars = name.toCharArray();
        buffer.writeInt( chars.length );
        writeChars( buffer, chars );
    }
    public static void writeChars( ChannelBuffer buffer, char[] chars )
    {
        // TODO optimize?
        for ( char ch : chars )
        {
            buffer.writeChar( ch );
        }
    }
    public static String readString( ChannelBuffer buffer )
    {
        return readString( buffer, buffer.readInt() );
    }
    // Booleans are a strict 0/1 byte; anything else is a protocol error.
    public static boolean readBoolean( ChannelBuffer buffer )
    {
        byte value = buffer.readByte();
        switch ( value )
        {
        case 0: return false;
        case 1: return true;
        default: throw new ComException( "Invalid boolean value " + value );
        }
    }
    public static String readString( ChannelBuffer buffer, int length )
    {
        char[] chars = new char[length];
        for ( int i = 0; i < length; i++ )
        {
            chars[i] = buffer.readChar();
        }
        return new String( chars );
    }
    // A chunk larger than the frame would be rejected by the frame decoder.
    public static void assertChunkSizeIsWithinFrameSize( int chunkSize, int frameLength )
    {
        if ( chunkSize > frameLength )
            throw new IllegalArgumentException( "Chunk size " + chunkSize +
                    " needs to be equal or less than frame length " + frameLength );
    }
}
| false
|
enterprise_com_src_main_java_org_neo4j_com_Protocol.java
|
4,345
|
// Lazily pulls transactions out of the chunked channel buffer; index 0 in
// datasources is the null "no more transactions" sentinel.
{
    @Override
    protected Triplet<String, Long, TxExtractor> fetchNextOrNull()
    {
        // Buffer the whole next transaction first, so the extractor below
        // never blocks halfway through one.
        makeSureNextTransactionIsFullyFetched( buffer );
        String datasource = datasources[buffer.readUnsignedByte()];
        if ( datasource == null )
        {
            return null; // sentinel: end of stream
        }
        long txId = buffer.readLong();
        TxExtractor extractor = TxExtractor.create( new BlockLogReader( buffer ) );
        return Triplet.of( datasource, txId, extractor );
    }
    @Override
    public String[] dataSourceNames()
    {
        // Skip the sentinel at index 0.
        return Arrays.copyOfRange( datasources, 1, datasources.length );
    }
};
| false
|
enterprise_com_src_main_java_org_neo4j_com_Protocol.java
|
4,346
|
/**
 * Kernel extension that registers {@code MyDataSource} with the
 * XaDataSourceManager on start; all other lifecycle callbacks are no-ops.
 */
public static class MyDSExtension implements Lifecycle
{
    private final XaDataSourceManager xaDataSourceManager;
    private final File logWriterTarget;

    /** Factory wiring the extension up with its dependencies. */
    public static class Factory extends KernelExtensionFactory<Factory.Dependencies>
    {
        public interface Dependencies
        {
            XaDataSourceManager getXaDataSourceManager();
        }

        private final File logWriterTarget;

        public Factory( File logWriterTarget )
        {
            super( "my-ds" );
            this.logWriterTarget = logWriterTarget;
        }

        @Override
        public Lifecycle newKernelExtension( Dependencies dependencies ) throws Throwable
        {
            return new MyDSExtension( dependencies.getXaDataSourceManager(), logWriterTarget );
        }
    }

    public MyDSExtension( XaDataSourceManager xaDataSourceManager, File logWriterTarget )
    {
        this.xaDataSourceManager = xaDataSourceManager;
        this.logWriterTarget = logWriterTarget;
    }

    @Override
    public void init() throws Throwable
    {
        // nothing to initialize
    }

    @Override
    public void start() throws Throwable
    {
        // Register the data source so it takes part in store operations.
        xaDataSourceManager.registerDataSource( new MyDataSource( logWriterTarget ) );
    }

    @Override
    public void stop() throws Throwable
    {
        // nothing to stop
    }

    @Override
    public void shutdown() throws Throwable
    {
        // nothing to release
    }
}
| false
|
enterprise_com_src_test_java_org_neo4j_com_storecopy_ThirdPartyDSStoreCopyIT.java
|
4,347
|
/**
 * Integration test: verifies that transactions from a third-party (non-neo)
 * XA data source are streamed during store copy and end up written into that
 * data source's active log file on the copy side.
 */
public class ThirdPartyDSStoreCopyIT
{
    @Rule
    public TargetDirectory.TestDirectory testDir = TargetDirectory.testDirForTest( getClass() );
    private final DefaultFileSystemAbstraction fs = new DefaultFileSystemAbstraction();
    @Test
    public void shouldStoreLogFilesAndRunRecovery() throws Exception
    {
        // Given
        final String copyDir = new File(testDir.directory(), "copy").getAbsolutePath();
        final String originalDir = new File(testDir.directory(), "original").getAbsolutePath();
        Config config = new Config( MapUtil.stringMap( store_dir.name(), copyDir ) );
        RemoteStoreCopier copier = new RemoteStoreCopier( config, loadKernelExtensions(), new ConsoleLogger( StringLogger.DEV_NULL ), fs );
        // When
        copier.copyStore( new RemoteStoreCopier.StoreCopyRequester()
        {
            @Override
            public Response<?> copyStore( StoreWriter writer )
            {
                // Source db registers the fake data source; its log target
                // file is irrelevant for the assertions below.
                GraphDatabaseAPI original = (GraphDatabaseAPI)new GraphDatabaseFactory()
                        .addKernelExtension( new MyDSExtension.Factory( new File( testDir.directory(), "irrelephant.log" ) ) )
                        .newEmbeddedDatabase( originalDir );
                try
                {
                    XaDataSourceManager dsManager = original.getDependencyResolver().resolveDependency(
                            XaDataSourceManager.class );
                    RequestContext ctx = ServerUtil.rotateLogsAndStreamStoreFiles( originalDir,
                            dsManager,
                            original.getDependencyResolver().resolveDependency( KernelPanicEventGenerator.class ),
                            StringLogger.SYSTEM, false, writer, fs,
                            original.getDependencyResolver().resolveDependency( Monitors.class ).newMonitor( BackupMonitor.class ) );
                    return ServerUtil.packResponse( original.storeId(), dsManager, ctx, null, ServerUtil.ALL );
                } finally
                {
                    original.shutdown();
                }
            }
            @Override
            public void done()
            {
            }
        });
        // Then the resulting file should contain the data we expect.
        FileChannel activeLog = FileChannel.open( generatedLogFile().toPath(), READ );
        ByteBuffer buffer = ByteBuffer.allocate( 512 );
        activeLog.read( buffer );
        activeLog.close();
        buffer.flip();
        // Txs 1338 and 1339 are produced by MyDataSource's LogExtractor below.
        assertThat( buffer.getLong(), equalTo(1338l));
        assertThat( buffer.getLong(), equalTo(1339l));
    }
    // Service-loaded extensions plus our fake one writing to generated.log.
    private List<KernelExtensionFactory<?>> loadKernelExtensions()
    {
        List<KernelExtensionFactory<?>> kernelExtensions = new ArrayList<>();
        for ( KernelExtensionFactory factory : Service.load( KernelExtensionFactory.class ) )
        {
            kernelExtensions.add( factory );
        }
        kernelExtensions.add( new MyDSExtension.Factory( generatedLogFile() ) );
        return kernelExtensions;
    }
    private File generatedLogFile()
    {
        return new File(testDir.directory(), "generated.log");
    }
    // Fake XA data source that pretends to have committed txs 1338 and 1339,
    // serving them from a mocked LogExtractor.
    public static class MyDataSource extends XaDataSource
    {
        private final File logWriterTarget;
        public MyDataSource( File logWriterTarget )
        {
            super( "my-ds".getBytes(), "my-ds" );
            this.logWriterTarget = logWriterTarget;
        }
        @Override
        public long rotateLogicalLog() throws IOException
        {
            return 1337l;
        }
        @Override
        public long getLastCommittedTxId()
        {
            return 1339l;
        }
        @Override
        public LogExtractor getLogExtractor( long startTxId, long endTxIdHint ) throws IOException
        {
            assert startTxId == 1338l : startTxId;
            assert endTxIdHint == 1339l : endTxIdHint;
            // We need to mock these implementation components, since LogExtractor is not an interface.
            LogExtractor.LogLoader logLoader = mock( LogExtractor.LogLoader.class );
            when(logLoader.getHighestLogVersion()).thenReturn( 1l );
            when(logLoader.getLogicalLogOrMyselfCommitted( anyLong(), anyLong())).thenReturn( mock( ReadableByteChannel.class) );
            LogExtractor.LogPositionCache logPositionCache = mock( LogExtractor.LogPositionCache.class );
            when(logPositionCache.getHeader( anyLong() )).thenReturn( 1337l );
            when(logPositionCache.positionOf( 1338l )).thenReturn( new LogExtractor.TxPosition( 1, -1, 1, 0, 0 ) );
            when(logPositionCache.positionOf( 1339l )).thenReturn( new LogExtractor.TxPosition( 1, -1, 1, 10, 0 ) );
            return new LogExtractor( logPositionCache, logLoader, null, startTxId, endTxIdHint ){
                long txCounter = 1338l;
                @Override
                public long extractNext( LogBuffer target ) throws IOException
                {
                    if(txCounter <= 1339)
                    {
                        // This doesn't really matter, what we're interested in is putting something in the log
                        // that we can then verify in the test. This ensures the correct line of data flow is implemented,
                        // what the actual log format for this made up data source is makes no difference.
                        target.putLong( txCounter );
                    }
                    return txCounter <= 1339 ? txCounter++ : -1;
                }
            };
        }
        @Override
        public XaConnection getXaConnection()
        {
            return null;
        }
        @Override
        public ResourceIterator<File> listStoreFiles() throws IOException
        {
            return ResourceIterators.emptyResourceIterator( File.class );
        }
        // Active log writes go straight to logWriterTarget on disk.
        @Override
        public LogBufferFactory createLogBufferFactory()
        {
            return new LogBufferFactory()
            {
                @Override
                public LogBuffer createActiveLogFile( Config config, long prevCommittedId ) throws IllegalStateException, IOException
                {
                    FileChannel channel = FileChannel.open( logWriterTarget.toPath(), CREATE, READ, WRITE );
                    return new DirectLogBuffer( new StoreFileChannel( channel ), ByteBuffer.allocate(512) );
                }
            };
        }
    }
    // Kernel extension registering MyDataSource on start; other lifecycle
    // callbacks are no-ops.
    public static class MyDSExtension implements Lifecycle
    {
        public static class Factory extends KernelExtensionFactory<Factory.Dependencies>
        {
            private final File logWriterTarget;
            public interface Dependencies
            {
                XaDataSourceManager getXaDataSourceManager();
            }
            public Factory( File logWriterTarget )
            {
                super( "my-ds" );
                this.logWriterTarget = logWriterTarget;
            }
            @Override
            public Lifecycle newKernelExtension( Dependencies dependencies ) throws Throwable
            {
                return new MyDSExtension( dependencies.getXaDataSourceManager(), logWriterTarget );
            }
        }
        private final XaDataSourceManager xaDataSourceManager;
        private final File logWriterTarget;
        public MyDSExtension( XaDataSourceManager xaDataSourceManager, File logWriterTarget )
        {
            this.xaDataSourceManager = xaDataSourceManager;
            this.logWriterTarget = logWriterTarget;
        }
        @Override
        public void init() throws Throwable
        {
        }
        @Override
        public void start() throws Throwable
        {
            xaDataSourceManager.registerDataSource( new MyDataSource(logWriterTarget) );
        }
        @Override
        public void stop() throws Throwable
        {
        }
        @Override
        public void shutdown() throws Throwable
        {
        }
    }
}
| false
|
enterprise_com_src_test_java_org_neo4j_com_storecopy_ThirdPartyDSStoreCopyIT.java
|
4,348
|
// Requester that commits one node before the copy (must land in the store
// files) and one during it (must be replayed by recovery), while spying on
// the response so done() can verify it was closed exactly once.
{
    public Response<Object> response;
    @Override
    public Response<?> copyStore( StoreWriter writer )
    {
        // Data that should be available in the store files
        try ( Transaction tx = original.beginTx() )
        {
            original.createNode( label( "BeforeCopyBegins" ) );
            tx.success();
        }
        XaDataSourceManager dsManager = original.getDependencyResolver().resolveDependency(
                XaDataSourceManager.class );
        RequestContext ctx = ServerUtil.rotateLogsAndStreamStoreFiles( originalDir,
                dsManager,
                original.getDependencyResolver().resolveDependency( KernelPanicEventGenerator.class ),
                StringLogger.SYSTEM, false, writer, fs,
                original.getDependencyResolver().resolveDependency( Monitors.class ).newMonitor(
                        BackupMonitor.class )
        );
        // Data that should be made available as part of recovery
        try ( Transaction tx = original.beginTx() )
        {
            original.createNode( label( "AfterCopy" ) );
            tx.success();
        }
        // Spied on so done() below can verify close() was invoked.
        response = spy( ServerUtil.packResponse( original.storeId(), dsManager, ctx, null,
                ServerUtil.ALL ) );
        return response;
    }
    @Override
    public void done()
    {
        // Ensure response is closed before this method is called
        assertNotNull( response );
        verify( response, times( 1 ) ).close();
    }
} );
| false
|
enterprise_com_src_test_java_org_neo4j_com_storecopy_RemoteStoreCopierTest.java
|
4,349
|
/**
 * Verifies that RemoteStoreCopier copies the store files, replays the
 * transactions committed during the copy via recovery, and closes the
 * requester's response exactly once.
 */
public class RemoteStoreCopierTest
{
    @Rule
    public TargetDirectory.TestDirectory testDir = TargetDirectory.testDirForTest( getClass() );
    private final DefaultFileSystemAbstraction fs = new DefaultFileSystemAbstraction();
    @Test
    public void shouldStoreLogFilesAndRunRecovery() throws Exception
    {
        // Given
        final String copyDir = new File( testDir.directory(), "copy" ).getAbsolutePath();
        final String originalDir = new File( testDir.directory(), "original" ).getAbsolutePath();
        Config config = new Config( MapUtil.stringMap( store_dir.name(), copyDir ) );
        RemoteStoreCopier copier = new RemoteStoreCopier( config, loadKernelExtensions(), new ConsoleLogger( StringLogger.SYSTEM ), fs );
        final GraphDatabaseAPI original = (GraphDatabaseAPI)new GraphDatabaseFactory().newEmbeddedDatabase( originalDir );
        // When
        RemoteStoreCopier.StoreCopyRequester requester = spy( new RemoteStoreCopier.StoreCopyRequester()
        {
            public Response<Object> response;
            @Override
            public Response<?> copyStore( StoreWriter writer )
            {
                // Data that should be available in the store files
                try ( Transaction tx = original.beginTx() )
                {
                    original.createNode( label( "BeforeCopyBegins" ) );
                    tx.success();
                }
                XaDataSourceManager dsManager = original.getDependencyResolver().resolveDependency(
                        XaDataSourceManager.class );
                RequestContext ctx = ServerUtil.rotateLogsAndStreamStoreFiles( originalDir,
                        dsManager,
                        original.getDependencyResolver().resolveDependency( KernelPanicEventGenerator.class ),
                        StringLogger.SYSTEM, false, writer, fs,
                        original.getDependencyResolver().resolveDependency( Monitors.class ).newMonitor(
                                BackupMonitor.class )
                );
                // Data that should be made available as part of recovery
                try ( Transaction tx = original.beginTx() )
                {
                    original.createNode( label( "AfterCopy" ) );
                    tx.success();
                }
                // Spied on so done() below can verify it got closed.
                response = spy( ServerUtil.packResponse( original.storeId(), dsManager, ctx, null,
                        ServerUtil.ALL ) );
                return response;
            }
            @Override
            public void done()
            {
                // Ensure response is closed before this method is called
                assertNotNull( response );
                verify( response, times( 1 ) ).close();
            }
        } );
        copier.copyStore( requester );
        // Then
        GraphDatabaseService copy = new GraphDatabaseFactory().newEmbeddedDatabase( copyDir );
        try( Transaction tx = copy.beginTx() )
        {
            GlobalGraphOperations globalOps = GlobalGraphOperations.at( copy );
            // Node created before the copy must be in the copied store files...
            assertThat( Iterables.single( globalOps.getAllNodesWithLabel( label( "BeforeCopyBegins" ) ) ).getId(),
                    equalTo( 0l ) );
            // ...and the one committed during the copy must arrive via recovery.
            assertThat( Iterables.single(globalOps.getAllNodesWithLabel( label( "AfterCopy" ) )).getId(), equalTo(1l) );
            tx.success();
        }
        finally
        {
            copy.shutdown();
            original.shutdown();
        }
        verify( requester, times( 1 ) ).done();
    }
    // All service-loaded kernel extensions, unchanged.
    private List<KernelExtensionFactory<?>> loadKernelExtensions()
    {
        List<KernelExtensionFactory<?>> kernelExtensions = new ArrayList<>();
        for ( KernelExtensionFactory factory : Service.load( KernelExtensionFactory.class ) )
        {
            kernelExtensions.add( factory );
        }
        return kernelExtensions;
    }
}
| false
|
enterprise_com_src_test_java_org_neo4j_com_storecopy_RemoteStoreCopierTest.java
|
4,350
|
// Decorator around the actual StoreWriter that logs per-file progress and a
// final summary to the console.
{
    // Number of files copied so far, reported in done().
    private int totalFiles;
    @Override
    public int write( String path, ReadableByteChannel data, ByteBuffer temporaryBuffer,
            boolean hasData ) throws IOException
    {
        console.log( "Copying " + path );
        int written = actual.write( path, data, temporaryBuffer, hasData );
        console.log( "Copied " + path + " " + bytes( written ) );
        totalFiles++;
        return written;
    }
    @Override
    public void done()
    {
        actual.done();
        console.log( "Done, copied " + totalFiles + " files");
    }
};
| false
|
enterprise_com_src_main_java_org_neo4j_com_storecopy_RemoteStoreCopier.java
|
4,351
|
// Selects which temp-store files get moved into the real store directory.
{
    @Override
    public boolean accept( File file )
    {
        // Skip log files and tx files from temporary database
        // NOTE(review): contains() is a substring match against
        // "active_tx_log tm_tx_log.1 tm_tx_log.2", so shorter names like
        // "tx_log" would also be skipped -- confirm this is intended.
        return !file.getName().startsWith( "metrics" )
                && !file.getName().equals( StringLogger.DEFAULT_NAME )
                && !("active_tx_log tm_tx_log.1 tm_tx_log.2").contains( file.getName() );
    }
} ) )
| false
|
enterprise_com_src_main_java_org_neo4j_com_storecopy_RemoteStoreCopier.java
|
4,352
|
public class RemoteStoreCopier
{
public static final String COPY_FROM_MASTER_TEMP = "temp-copy";
private final Config config;
private final Iterable<KernelExtensionFactory<?>> kernelExtensions;
private final ConsoleLogger console;
private final FileSystemAbstraction fs;
/**
* This is built as a pluggable interface to allow backup and HA to use this code independently of each other,
* each implements it's own version of how to copy a store from a remote location.
*/
public interface StoreCopyRequester
{
Response<?> copyStore(StoreWriter writer);
void done();
}
public RemoteStoreCopier( Config config, Iterable<KernelExtensionFactory<?>> kernelExtensions,
ConsoleLogger console, FileSystemAbstraction fs )
{
this.config = config;
this.kernelExtensions = kernelExtensions;
this.console = console;
this.fs = fs;
}
public void copyStore( StoreCopyRequester requester ) throws IOException
{
// Clear up the current temp directory if there
File storeDir = config.get( InternalAbstractGraphDatabase.Configuration.store_dir );
File tempStore = new File( storeDir, COPY_FROM_MASTER_TEMP );
Config tempConfig = configForTempStore( tempStore );
if ( !tempStore.mkdir() )
{
FileUtils.deleteRecursively( tempStore );
tempStore.mkdir();
}
// Request store files and transactions that will need recovery
try ( Response response = requester.copyStore( decorateWithProgressIndicator( new ToFileStoreWriter( tempStore ) ) ) )
{
// Update highest archived log id
long highestLogVersion = XaLogicalLog.getHighestHistoryLogVersion( fs, tempStore, LOGICAL_LOG_DEFAULT_NAME );
if ( highestLogVersion > -1 )
{
NeoStore.setVersion( fs, tempStore, highestLogVersion + 1 );
}
// Write pending transactions down to the currently active logical log
writeTransactionsToActiveLogFile( tempConfig, response.transactions() );
}
finally
{
requester.done();
}
// Run recovery
GraphDatabaseAPI copiedDb = newTempDatabase( tempStore );
copiedDb.shutdown();
// All is well, move to the real store directory
for ( File candidate : tempStore.listFiles( new FileFilter()
{
@Override
public boolean accept( File file )
{
// Skip log files and tx files from temporary database
return !file.getName().startsWith( "metrics" )
&& !file.getName().equals( StringLogger.DEFAULT_NAME )
&& !("active_tx_log tm_tx_log.1 tm_tx_log.2").contains( file.getName() );
}
} ) )
{
FileUtils.moveFileToDirectory( candidate, storeDir );
}
}
private Config configForTempStore( File tempStore )
{
Map<String, String> params = config.getParams();
params.put( InternalAbstractGraphDatabase.Configuration.store_dir .name(), tempStore.getAbsolutePath() );
return new Config( params );
}
private void writeTransactionsToActiveLogFile( Config tempConfig, TransactionStream transactions ) throws IOException
{
Map</*dsName*/String, LogBufferFactory> logWriters = createLogWriters( tempConfig );
Map</*dsName*/String, LogBuffer> logFiles = new HashMap<>();
try
{
while(transactions.hasNext())
{
Triplet<String,Long,TxExtractor> next = transactions.next();
LogBuffer log = getOrCreateLogBuffer( logFiles, logWriters, /*dsName*/next.first(), /*txId*/next.second(), tempConfig );
next.third().extract( log );
}
}
finally
{
for ( LogBuffer buf : logFiles.values() )
{
buf.force();
buf.getFileChannel().close();
}
}
}
private LogBuffer getOrCreateLogBuffer( Map<String, LogBuffer> buffers, Map<String, LogBufferFactory> logWriters, String dsName, Long txId, Config config )
throws IOException
{
LogBuffer buffer = buffers.get(dsName);
if(buffer == null)
{
if(logWriters.containsKey( dsName ))
{
buffer = logWriters.get( dsName ).createActiveLogFile( config, txId - 1 );
buffers.put( dsName, buffer );
}
else
{
throw new IllegalStateException( "Got transaction for unknown data source, unable to safely copy " +
"files. Offending data source was '" + dsName + "', please make sure this data source is " +
"available on the classpath." );
}
}
return buffer;
}
private Map<String, LogBufferFactory> createLogWriters( Config config ) throws IOException
{
Map<String, LogBufferFactory> writers = new HashMap<>();
File tempStore = new File( config.get( GraphDatabaseSettings.store_dir ).getAbsolutePath() + ".tmp" );
GraphDatabaseAPI db = newTempDatabase( tempStore );
try
{
XaDataSourceManager dsManager = db.getDependencyResolver().resolveDependency( XaDataSourceManager.class );
for ( XaDataSource xaDataSource : dsManager.getAllRegisteredDataSources() )
{
writers.put( xaDataSource.getName(), xaDataSource.createLogBufferFactory() );
}
return writers;
}
finally
{
db.shutdown();
FileUtils.deleteRecursively( tempStore );
tempStore.mkdirs();
}
}
private GraphDatabaseAPI newTempDatabase( File tempStore )
{
return (GraphDatabaseAPI) new GraphDatabaseFactory()
.setKernelExtensions( kernelExtensions )
.newEmbeddedDatabaseBuilder( tempStore.getAbsolutePath() )
.setConfig(
GraphDatabaseSettings.keep_logical_logs, Settings.TRUE ).setConfig(
GraphDatabaseSettings.allow_store_upgrade,
config.get( GraphDatabaseSettings.allow_store_upgrade ).toString() )
.newGraphDatabase();
}
/**
 * Wraps a {@link StoreWriter} so that every file copied through it is reported on the
 * console, together with a final summary of how many files were written in total.
 *
 * @param actual the writer doing the real work; all calls are delegated to it
 * @return a logging decorator around {@code actual}
 */
private StoreWriter decorateWithProgressIndicator( final StoreWriter actual )
{
    return new StoreWriter()
    {
        // Number of files written through this decorator so far.
        private int filesCopied;

        @Override
        public int write( String path, ReadableByteChannel data, ByteBuffer temporaryBuffer,
                boolean hasData ) throws IOException
        {
            console.log( "Copying " + path );
            int written = actual.write( path, data, temporaryBuffer, hasData );
            filesCopied++;
            console.log( "Copied " + path + " " + bytes( written ) );
            return written;
        }

        @Override
        public void done()
        {
            actual.done();
            console.log( "Done, copied " + filesCopied + " files");
        }
    };
}
}
| false
|
enterprise_com_src_main_java_org_neo4j_com_storecopy_RemoteStoreCopier.java
|
4,353
|
/** Unit test for {@link TxExtractor}'s log-buffer extraction path. */
public class TxExtractorTest
{
    @Test
    public void shouldExtractToLogBuffer() throws Exception
    {
        // Given: a source buffer holding a single long value
        InMemoryLogBuffer source = new InMemoryLogBuffer();
        InMemoryLogBuffer target = new InMemoryLogBuffer();
        source.putLong( 1337l );
        TxExtractor extractor = TxExtractor.create( source );

        // When: the extractor streams the source into the target
        extractor.extract( target );

        // Then: reading the target back yields exactly the same long
        ByteBuffer readBack = ByteBuffer.allocate( 128 );
        target.read( readBack );
        readBack.flip();
        assertThat( readBack.getLong(), equalTo( 1337l ) );
    }
}
| false
|
enterprise_com_src_test_java_org_neo4j_com_TxExtractorTest.java
|
4,354
|
{
@Override
public ReadableByteChannel extract()
{
return data;
}
@Override
public void extract( LogBuffer buffer )
{
int transferBufSize = 128;
ByteBuffer transferBuffer = ByteBuffer.allocateDirect( transferBufSize );
try
{
for( int read; (read = data.read( transferBuffer )) > -1; )
{
transferBuffer.flip();
// byte-by-byte should be reasonably fast still, since logbuffer generally is, as the
// name implies, buffered.
while(read --> 0)
{
byte b = transferBuffer.get();
buffer.put( b );
}
transferBuffer.clear();
}
}
catch ( IOException e )
{
throw new RuntimeException( e );
}
}
};
| false
|
enterprise_com_src_main_java_org_neo4j_com_TxExtractor.java
|
4,355
|
/**
 * Adapter exposing transaction data either as a raw {@link ReadableByteChannel} or by
 * pumping the bytes into a {@link LogBuffer}.
 */
public abstract class TxExtractor
{
    /** Streams the transaction data into the given log buffer. */
    public abstract void extract( LogBuffer buffer );

    /** Returns the transaction data as a readable channel. */
    public abstract ReadableByteChannel extract();

    /**
     * Creates an extractor backed by the given channel. {@link #extract()} hands the
     * channel out as-is; {@link #extract(LogBuffer)} drains it into the buffer.
     */
    public static TxExtractor create( final ReadableByteChannel data )
    {
        return new TxExtractor()
        {
            @Override
            public ReadableByteChannel extract()
            {
                return data;
            }

            @Override
            public void extract( LogBuffer buffer )
            {
                ByteBuffer chunk = ByteBuffer.allocateDirect( 128 );
                try
                {
                    while ( data.read( chunk ) > -1 )
                    {
                        chunk.flip();
                        // Copy byte-by-byte; that stays reasonably fast since a
                        // LogBuffer generally is, as the name implies, buffered.
                        while ( chunk.hasRemaining() )
                        {
                            buffer.put( chunk.get() );
                        }
                        chunk.clear();
                    }
                }
                catch ( IOException e )
                {
                    throw new RuntimeException( e );
                }
            }
        };
    }
}
| false
|
enterprise_com_src_main_java_org_neo4j_com_TxExtractor.java
|
4,356
|
{
@Override
public void assertMatch( long txId, int masterId, long checksum )
{
}
};
| false
|
enterprise_com_src_main_java_org_neo4j_com_TxChecksumVerifier.java
|
4,357
|
{
@Override
protected Triplet<String, Long, TxExtractor> fetchNextOrNull()
{
if ( stream.hasNext() ) return stream.next();
return null;
}
};
| false
|
enterprise_com_src_main_java_org_neo4j_com_TransactionStream.java
|
4,358
|
{
@Override
protected Triplet<String, Long, TxExtractor> fetchNextOrNull()
{
return null;
}
};
| false
|
enterprise_com_src_main_java_org_neo4j_com_TransactionStream.java
|
4,359
|
/**
 * A lazily evaluated stream of (data source name, transaction id, extractor) triplets,
 * built on {@link PrefetchingIterator}.
 */
public abstract class TransactionStream extends
        PrefetchingIterator<Triplet<String/*datasource*/, Long/*txid*/, TxExtractor>>
{
    /** A stream containing no transactions at all. */
    public static final TransactionStream EMPTY = new TransactionStream()
    {
        @Override
        protected Triplet<String, Long, TxExtractor> fetchNextOrNull()
        {
            return null;
        }
    };

    private final String[] dataSources;

    public TransactionStream( String... datasources )
    {
        this.dataSources = datasources;
    }

    /** Returns a defensive copy of the data source names this stream covers. */
    public String[] dataSourceNames()
    {
        return dataSources.clone();
    }

    /** Wraps an iterable of triplets as a {@link TransactionStream}. */
    public static TransactionStream create( Collection<String> datasources,
            Iterable<Triplet<String, Long, TxExtractor>> streamSource )
    {
        final Iterator<Triplet<String, Long, TxExtractor>> source = streamSource.iterator();
        return new TransactionStream( datasources.toArray( new String[datasources.size()] ) )
        {
            @Override
            protected Triplet<String, Long, TxExtractor> fetchNextOrNull()
            {
                return source.hasNext() ? source.next() : null;
            }
        };
    }

    /** Hook for subclasses holding resources; no-op by default. */
    public void close()
    {
    }
}
| false
|
enterprise_com_src_main_java_org_neo4j_com_TransactionStream.java
|
4,360
|
/**
 * Thrown when a requested transaction is not available on the master, e.g. because the
 * logs containing it have been rotated away.
 */
public class TransactionNotPresentOnMasterException extends IllegalStateException
{
    /**
     * @param context the request context identifying the missing transaction.
     *                (Renamed from {@code txId}: the argument is a full
     *                {@link RequestContext}, not a bare transaction id.)
     */
    public TransactionNotPresentOnMasterException( RequestContext context )
    {
        super( "Transaction " + context + " not present on master" );
    }
}
| false
|
enterprise_com_src_main_java_org_neo4j_com_TransactionNotPresentOnMasterException.java
|
4,361
|
/**
 * {@link MadeUpWriter} that block-encodes everything it receives into a target
 * {@link ChannelBuffer}, using {@link BlockLogBuffer} for the framing.
 */
public class ToChannelBufferWriter implements MadeUpWriter
{
    private final ChannelBuffer target;

    public ToChannelBufferWriter( ChannelBuffer target )
    {
        this.target = target;
    }

    @Override
    public void write( ReadableByteChannel data )
    {
        // A fresh encoder per write; done() flushes the final (possibly partial) block.
        BlockLogBuffer encoder = new BlockLogBuffer( target, new Monitors().newMonitor( ByteCounterMonitor.class ) );
        try
        {
            encoder.write( data );
            encoder.done();
        }
        catch ( IOException e )
        {
            throw new RuntimeException( e );
        }
    }
}
| false
|
enterprise_com_src_test_java_org_neo4j_com_ToChannelBufferWriter.java
|
4,362
|
/**
 * {@link MadeUpWriter} that consumes a channel and asserts its content follows the
 * expected test pattern of byte values cycling 0..9.
 */
public class ToAssertionWriter implements MadeUpWriter
{
    // Count of bytes verified so far across calls; expected byte value is index % 10.
    private int index;

    @Override
    public void write( ReadableByteChannel data )
    {
        ByteBuffer chunk = ByteBuffer.allocate( 1000 );
        try
        {
            for ( ;; )
            {
                chunk.clear();
                if ( data.read( chunk ) == -1 )
                {
                    return; // end of stream
                }
                chunk.flip();
                while ( chunk.hasRemaining() )
                {
                    assertEquals( (index++) % 10, chunk.get() );
                }
            }
        }
        catch ( IOException e )
        {
            throw new RuntimeException( e );
        }
    }
}
| false
|
enterprise_com_src_test_java_org_neo4j_com_ToAssertionWriter.java
|
4,363
|
/** Tests the equals contract of {@link RequestContext}. */
public class TestSlaveContext
{
    @SuppressWarnings( "unchecked" )
    @Test
    public void assertSimilarity()
    {
        RequestContext.Tx[] noTxs = new RequestContext.Tx[0];

        // Differing machine ids => not equal
        assertFalse( new RequestContext( 1234, 1, 2, noTxs, 0, 0 ).equals( new RequestContext( 1234, 2, 2, noTxs, 0, 0 ) ) );
        // Differing event identifiers => not equal
        assertFalse( new RequestContext( 1234, 1, 10, noTxs, 0, 0 ).equals( new RequestContext( 1234, 1, 20, noTxs, 0, 0 ) ) );
        // Differing session ids => not equal
        assertFalse( new RequestContext( 1001, 1, 5, noTxs, 0, 0 ).equals( new RequestContext( 1101, 1, 5, noTxs, 0, 0 ) ) );
        // Identical fields all around => equal
        assertEquals( new RequestContext( 12345, 4, 9, noTxs, 0, 0 ), new RequestContext( 12345, 4, 9, noTxs, 0, 0 ) );
    }
}
| false
|
enterprise_com_src_test_java_org_neo4j_com_TestSlaveContext.java
|
4,364
|
/**
 * Immutable test-fixture builder for the made-up client/server pair. Each "wither"
 * method returns a new Builder with one field replaced, leaving this one untouched.
 */
class Builder
{
private final int port;
private final int chunkSize;
private final byte internalProtocolVersion;
private final byte applicationProtocolVersion;
private final TxChecksumVerifier verifier;
private final StoreId storeId;
// Default configuration taken from the enclosing test's constants.
public Builder()
{
this( PORT, FRAME_LENGTH, INTERNAL_PROTOCOL_VERSION, APPLICATION_PROTOCOL_VERSION,
ALWAYS_MATCH, storeIdToUse );
}
public Builder( int port, int chunkSize, byte internalProtocolVersion, byte applicationProtocolVersion,
TxChecksumVerifier verifier, StoreId storeId )
{
this.port = port;
this.chunkSize = chunkSize;
this.internalProtocolVersion = internalProtocolVersion;
this.applicationProtocolVersion = applicationProtocolVersion;
this.verifier = verifier;
this.storeId = storeId;
}
// Each wither below shadows the corresponding field with its parameter, so the new
// Builder picks up the new value while copying everything else unchanged.
public Builder port( int port )
{
return new Builder( port, chunkSize, internalProtocolVersion, applicationProtocolVersion, verifier, storeId );
}
public Builder chunkSize( int chunkSize )
{
return new Builder( port, chunkSize, internalProtocolVersion, applicationProtocolVersion, verifier, storeId );
}
public Builder internalProtocolVersion( byte internalProtocolVersion )
{
return new Builder( port, chunkSize, internalProtocolVersion, applicationProtocolVersion, verifier, storeId );
}
public Builder applicationProtocolVersion( byte applicationProtocolVersion )
{
return new Builder( port, chunkSize, internalProtocolVersion, applicationProtocolVersion, verifier, storeId );
}
public Builder verifier( TxChecksumVerifier verifier )
{
return new Builder( port, chunkSize, internalProtocolVersion, applicationProtocolVersion, verifier, storeId );
}
public Builder storeId( StoreId storeId )
{
return new Builder( port, chunkSize, internalProtocolVersion, applicationProtocolVersion, verifier, storeId );
}
/** Builds an in-process server with a default implementation. */
public MadeUpServer server()
{
return new MadeUpServer( new MadeUpServerImplementation( storeId ), port,
internalProtocolVersion, applicationProtocolVersion, verifier, chunkSize );
}
/** Builds an in-process server delegating to the given target. */
public MadeUpServer server( MadeUpCommunicationInterface target )
{
return new MadeUpServer( target, port, internalProtocolVersion, applicationProtocolVersion, verifier, chunkSize );
}
/** Builds a client configured to talk to a server built from the same Builder. */
public MadeUpClient client()
{
return new MadeUpClient( port, storeId, internalProtocolVersion, applicationProtocolVersion, chunkSize );
}
/** Spawns a server in a separate JVM and blocks until it has started. */
public ServerInterface serverInOtherJvm()
{
ServerInterface server = new MadeUpServerProcess().start( new StartupData(
storeId.getCreationTime(), storeId.getRandomId(),
storeId.getStoreVersion(), internalProtocolVersion,
applicationProtocolVersion, chunkSize ) );
server.awaitStarted();
return server;
}
}
| false
|
enterprise_com_src_test_java_org_neo4j_com_TestCommunication.java
|
4,365
|
{
@Override
public void assertMatch( long txId, int masterId, long checksum )
{
throw new FailingException( failureMessage );
}
};
| false
|
enterprise_com_src_test_java_org_neo4j_com_TestCommunication.java
|
4,366
|
{
@Override
public Response<?> copyStore( StoreWriter writer )
{
GraphDatabaseAPI original = (GraphDatabaseAPI)new GraphDatabaseFactory()
.addKernelExtension( new MyDSExtension.Factory( new File( testDir.directory(), "irrelephant.log" ) ) )
.newEmbeddedDatabase( originalDir );
try
{
XaDataSourceManager dsManager = original.getDependencyResolver().resolveDependency(
XaDataSourceManager.class );
RequestContext ctx = ServerUtil.rotateLogsAndStreamStoreFiles( originalDir,
dsManager,
original.getDependencyResolver().resolveDependency( KernelPanicEventGenerator.class ),
StringLogger.SYSTEM, false, writer, fs,
original.getDependencyResolver().resolveDependency( Monitors.class ).newMonitor( BackupMonitor.class ) );
return ServerUtil.packResponse( original.storeId(), dsManager, ctx, null, ServerUtil.ALL );
} finally
{
original.shutdown();
}
}
@Override
public void done()
{
}
});
| false
|
enterprise_com_src_test_java_org_neo4j_com_storecopy_ThirdPartyDSStoreCopyIT.java
|
4,367
|
/**
 * Kernel extension factory registering {@link MyDSExtension} under the key "my-ds".
 */
public static class Factory extends KernelExtensionFactory<Factory.Dependencies>
{
// File the extension's LogBufferFactory writes its active log to.
private final File logWriterTarget;
/** Services the kernel injects into this factory. */
public interface Dependencies
{
XaDataSourceManager getXaDataSourceManager();
}
public Factory( File logWriterTarget )
{
super( "my-ds" );
this.logWriterTarget = logWriterTarget;
}
@Override
public Lifecycle newKernelExtension( Dependencies dependencies ) throws Throwable
{
return new MyDSExtension( dependencies.getXaDataSourceManager(), logWriterTarget );
}
}
| false
|
enterprise_com_src_test_java_org_neo4j_com_storecopy_ThirdPartyDSStoreCopyIT.java
|
4,368
|
{
@SuppressWarnings( "boxing" )
public void write( Integer responseObject, ChannelBuffer result ) throws IOException
{
result.writeInt( responseObject );
}
};
| false
|
enterprise_com_src_main_java_org_neo4j_com_Protocol.java
|
4,369
|
/**
 * Fake XA data source used to test that third-party data sources participate in store
 * copy. Hard-codes tx ids 1338..1339 and emits each tx id as a long into the target log
 * buffer, which the test then verifies on the receiving side.
 */
public static class MyDataSource extends XaDataSource
{
// File that createLogBufferFactory() writes the copied log to.
private final File logWriterTarget;
public MyDataSource( File logWriterTarget )
{
super( "my-ds".getBytes(), "my-ds" );
this.logWriterTarget = logWriterTarget;
}
@Override
public long rotateLogicalLog() throws IOException
{
return 1337l;
}
@Override
public long getLastCommittedTxId()
{
return 1339l;
}
@Override
public LogExtractor getLogExtractor( long startTxId, long endTxIdHint ) throws IOException
{
// The test always asks for exactly txs 1338..1339.
assert startTxId == 1338l : startTxId;
assert endTxIdHint == 1339l : endTxIdHint;
// We need to mock these implementation components, since LogExtractor is not an interface.
LogExtractor.LogLoader logLoader = mock( LogExtractor.LogLoader.class );
when(logLoader.getHighestLogVersion()).thenReturn( 1l );
when(logLoader.getLogicalLogOrMyselfCommitted( anyLong(), anyLong())).thenReturn( mock( ReadableByteChannel.class) );
LogExtractor.LogPositionCache logPositionCache = mock( LogExtractor.LogPositionCache.class );
when(logPositionCache.getHeader( anyLong() )).thenReturn( 1337l );
when(logPositionCache.positionOf( 1338l )).thenReturn( new LogExtractor.TxPosition( 1, -1, 1, 0, 0 ) );
when(logPositionCache.positionOf( 1339l )).thenReturn( new LogExtractor.TxPosition( 1, -1, 1, 10, 0 ) );
return new LogExtractor( logPositionCache, logLoader, null, startTxId, endTxIdHint ){
// Next tx id to hand out; advances 1338 -> 1339 -> exhausted (-1).
long txCounter = 1338l;
@Override
public long extractNext( LogBuffer target ) throws IOException
{
if(txCounter <= 1339)
{
// This doesn't really matter, what we're interested in is putting something in the log
// that we can then verify in the test. This ensures the correct line of data flow is implemented,
// what the actual log format for this made up data source is makes no difference.
target.putLong( txCounter );
}
return txCounter <= 1339 ? txCounter++ : -1;
}
};
}
@Override
public XaConnection getXaConnection()
{
return null;
}
@Override
public ResourceIterator<File> listStoreFiles() throws IOException
{
// This fake data source has no store files of its own.
return ResourceIterators.emptyResourceIterator( File.class );
}
@Override
public LogBufferFactory createLogBufferFactory()
{
return new LogBufferFactory()
{
@Override
public LogBuffer createActiveLogFile( Config config, long prevCommittedId ) throws IllegalStateException, IOException
{
// Write the "active log" straight into logWriterTarget so the test can inspect it.
FileChannel channel = FileChannel.open( logWriterTarget.toPath(), CREATE, READ, WRITE );
return new DirectLogBuffer( new StoreFileChannel( channel ), ByteBuffer.allocate(512) );
}
};
}
}
| false
|
enterprise_com_src_test_java_org_neo4j_com_storecopy_ThirdPartyDSStoreCopyIT.java
|
4,370
|
/**
 * Unit tests for {@link ConsistencyCheckTool}: argument handling, default tuning
 * configuration, explicit config files, and failure modes.
 */
public class ConsistencyCheckToolTest
{
    @Test
    public void runsConsistencyCheck() throws Exception
    {
        // given
        String storeDirectoryPath = storeDirectory.directory().getPath();
        String[] args = {storeDirectoryPath};
        ConsistencyCheckService service = mock( ConsistencyCheckService.class );
        PrintStream systemError = mock( PrintStream.class );

        // when
        new ConsistencyCheckTool( service, systemError ).run( args );

        // then: the service is invoked on exactly the supplied store directory
        verify( service ).runFullConsistencyCheck( eq( storeDirectoryPath ), any( Config.class ),
                any( ProgressMonitorFactory.class ), any( StringLogger.class ) );
    }

    @Test
    public void appliesDefaultTuningConfigurationForConsistencyChecker() throws Exception
    {
        // given
        String[] args = {storeDirectory.directory().getPath()};
        ConsistencyCheckService service = mock( ConsistencyCheckService.class );
        PrintStream systemOut = mock( PrintStream.class );

        // when
        new ConsistencyCheckTool( service, systemOut ).run( args );

        // then: capture the Config the tool built and check its defaults
        ArgumentCaptor<Config> config = ArgumentCaptor.forClass( Config.class );
        verify( service ).runFullConsistencyCheck( anyString(), config.capture(),
                any( ProgressMonitorFactory.class ), any( StringLogger.class ));
        assertFalse( config.getValue().get( ConsistencyCheckSettings.consistency_check_property_owners ) );
        assertEquals( TaskExecutionOrder.MULTI_PASS,
                config.getValue().get( ConsistencyCheckSettings.consistency_check_execution_order ) );
        // Windows defaults to the non-memory-mapped pool; everywhere else, scan-resistant.
        WindowPoolImplementation expectedPoolImplementation = !osIsWindows() ?
                WindowPoolImplementation.SCAN_RESISTANT :
                WindowPoolImplementation.MOST_FREQUENTLY_USED;
        assertEquals( expectedPoolImplementation,
                config.getValue().get( ConsistencyCheckSettings.consistency_check_window_pool_implementation ) );
    }

    @Test
    public void passesOnConfigurationIfProvided() throws Exception
    {
        // given: a properties file enabling property-owner checking
        File propertyFile = TargetDirectory.forTest( getClass() ).file( "neo4j.properties" );
        Properties properties = new Properties();
        properties.setProperty( ConsistencyCheckSettings.consistency_check_property_owners.name(), "true" );
        properties.store( new FileWriter( propertyFile ), null );
        String[] args = {storeDirectory.directory().getPath(), "-config", propertyFile.getPath()};
        ConsistencyCheckService service = mock( ConsistencyCheckService.class );
        PrintStream systemOut = mock( PrintStream.class );

        // when
        new ConsistencyCheckTool( service, systemOut ).run( args );

        // then: the file's setting overrides the default
        ArgumentCaptor<Config> config = ArgumentCaptor.forClass( Config.class );
        verify( service ).runFullConsistencyCheck( anyString(), config.capture(),
                any( ProgressMonitorFactory.class ), any( StringLogger.class ));
        assertTrue( config.getValue().get( ConsistencyCheckSettings.consistency_check_property_owners ) );
    }

    @Test
    public void exitWithFailureIndicatingCorrectUsageIfNoArgumentsSupplied() throws Exception
    {
        // given
        ConsistencyCheckService service = mock( ConsistencyCheckService.class );
        String[] args = {};
        PrintStream systemError = mock( PrintStream.class );
        try
        {
            // when
            new ConsistencyCheckTool( service, systemError ).run( args );
            fail( "should have thrown exception" );
        }
        catch ( ConsistencyCheckTool.ToolFailureException e )
        {
            // then
            assertThat( e.getMessage(), containsString( "USAGE:" ));
        }
    }

    @Test
    public void exitWithFailureIfConfigSpecifiedButPropertiesFileDoesNotExist() throws Exception
    {
        // given: a -config pointing at a file that does not exist
        File propertyFile = TargetDirectory.forTest( getClass() ).file( "nonexistent_file" );
        String[] args = {storeDirectory.directory().getPath(), "-config", propertyFile.getPath()};
        ConsistencyCheckService service = mock( ConsistencyCheckService.class );
        PrintStream systemOut = mock( PrintStream.class );
        // Fixed: local variable used to shadow the class name ("ConsistencyCheckTool ConsistencyCheckTool").
        ConsistencyCheckTool consistencyCheckTool = new ConsistencyCheckTool( service, systemOut );
        try
        {
            // when
            consistencyCheckTool.run( args );
            fail( "should have thrown exception" );
        }
        catch ( ConsistencyCheckTool.ToolFailureException e )
        {
            // then
            assertThat( e.getMessage(), containsString( "Could not read configuration properties file" ) );
            assertThat( e.getCause(), instanceOf( IOException.class ) );
        }
        verifyZeroInteractions( service );
    }

    @Rule
    public TargetDirectory.TestDirectory storeDirectory = TargetDirectory.testDirForTest( getClass() );
}
| false
|
enterprise_consistency-check_src_test_java_org_neo4j_consistency_ConsistencyCheckToolTest.java
|
4,371
|
/**
 * Signals that the tool could not complete, carrying a user-facing message and
 * optionally the underlying cause.
 */
static class ToolFailureException extends Exception
{
    ToolFailureException( String message )
    {
        super( message );
    }

    ToolFailureException( String message, Throwable cause )
    {
        super( message, cause );
    }

    /** Prints the message (and cause stack trace, if any) to stderr, then exits with status 1. */
    void haltJVM()
    {
        System.err.println( getMessage() );
        Throwable cause = getCause();
        if ( cause != null )
        {
            cause.printStackTrace( System.err );
        }
        System.exit( 1 );
    }
}
| false
|
enterprise_consistency-check_src_main_java_org_neo4j_consistency_ConsistencyCheckTool.java
|
4,372
|
/**
 * Command-line entry point for running a full consistency check on a store directory.
 * Parses arguments, optionally performs recovery (or warns about an active logical log),
 * builds the tuning configuration and delegates to {@link ConsistencyCheckService}.
 */
public class ConsistencyCheckTool
{
// Recognized command-line option names.
private static final String RECOVERY = "recovery";
private static final String CONFIG = "config";
public static void main( String[] args )
{
ConsistencyCheckTool tool = new ConsistencyCheckTool( new ConsistencyCheckService(), System.err );
try
{
tool.run( args );
}
catch ( ToolFailureException e )
{
// Prints the failure and terminates the JVM with a non-zero exit status.
e.haltJVM();
}
}
private final ConsistencyCheckService consistencyCheckService;
private final PrintStream systemError;
ConsistencyCheckTool( ConsistencyCheckService consistencyCheckService, PrintStream systemError )
{
this.consistencyCheckService = consistencyCheckService;
this.systemError = systemError;
}
/**
 * Runs the check. Order matters: the store directory is validated first, then the
 * config is read, then recovery (or the active-log warning) happens before the
 * actual check starts.
 *
 * @throws ToolFailureException on bad arguments, unreadable config, or aborted check
 */
void run( String... args ) throws ToolFailureException
{
Args arguments = new Args( args );
String storeDir = determineStoreDirectory( arguments );
Config tuningConfiguration = readTuningConfiguration( storeDir, arguments );
attemptRecoveryOrCheckStateOfLogicalLogs( arguments, storeDir );
StringLogger logger = StringLogger.SYSTEM;
try
{
consistencyCheckService.runFullConsistencyCheck( storeDir, tuningConfiguration,
ProgressMonitorFactory.textual( System.err ), logger );
}
catch ( ConsistencyCheckIncompleteException e )
{
throw new ToolFailureException( "Check aborted due to exception", e );
}
finally
{
logger.flush();
}
}
/**
 * With --recovery: boots and shuts down the database, which performs recovery as a
 * side effect. Without it: inspects the logical log files and warns (but proceeds)
 * if an active log is found, since that can cause spurious inconsistencies.
 */
private void attemptRecoveryOrCheckStateOfLogicalLogs( Args arguments, String storeDir )
{
if ( arguments.getBoolean( RECOVERY, false, true ) )
{
new GraphDatabaseFactory().newEmbeddedDatabase( storeDir ).shutdown();
}
else
{
XaLogicalLogFiles logFiles = new XaLogicalLogFiles(
new File( storeDir, NeoStoreXaDataSource.LOGICAL_LOG_DEFAULT_NAME ),
new DefaultFileSystemAbstraction() );
try
{
switch ( logFiles.determineState() )
{
case LEGACY_WITHOUT_LOG_ROTATION:
systemError.println( "WARNING: store contains log file from too old version." );
break;
case NO_ACTIVE_FILE:
case CLEAN:
// Nothing to warn about.
break;
default:
systemError.print( lines(
"Active logical log detected, this might be a source of inconsistencies.",
"Consider allowing the database to recover before running the consistency check.",
"Consistency checking will continue, abort if you wish to perform recovery first.",
"To perform recovery before checking consistency, use the '--recovery' flag." )
);
}
}
catch ( IOException e )
{
// Best effort only: a failed log inspection should not stop the check.
systemError.printf( "Failure when checking for active logs: '%s', continuing as normal.%n", e );
}
}
}
/**
 * Extracts the single unprefixed argument as the store directory and verifies that
 * it exists and is a directory.
 */
private String determineStoreDirectory( Args arguments ) throws ToolFailureException
{
List<String> unprefixedArguments = arguments.orphans();
if ( unprefixedArguments.size() != 1 )
{
throw new ToolFailureException( usage() );
}
String storeDir = unprefixedArguments.get( 0 );
if ( !new File( storeDir ).isDirectory() )
{
throw new ToolFailureException( lines( String.format( "'%s' is not a directory", storeDir ) ) + usage() );
}
return storeDir;
}
/**
 * Builds the tuning {@link Config} from an optional -config properties file, always
 * forcing store_dir to the directory being checked.
 */
private Config readTuningConfiguration( String storeDir, Args arguments ) throws ToolFailureException
{
Map<String, String> specifiedProperties = stringMap();
String propertyFilePath = arguments.get( CONFIG, null );
if ( propertyFilePath != null )
{
File propertyFile = new File( propertyFilePath );
try
{
specifiedProperties = MapUtil.load( propertyFile );
}
catch ( IOException e )
{
throw new ToolFailureException( String.format( "Could not read configuration properties file [%s]",
propertyFilePath ), e );
}
}
specifiedProperties.put( GraphDatabaseSettings.store_dir.name(), storeDir );
return new Config( specifiedProperties, GraphDatabaseSettings.class, ConsistencyCheckSettings.class );
}
// Usage text shown on argument errors.
private String usage()
{
return lines(
Args.jarUsage( getClass(), "[-propowner] [-recovery] [-config <neo4j.properties>] <storedir>" ),
"WHERE: <storedir> is the path to the store to check",
" -recovery to perform recovery on the store before checking",
" <neo4j.properties> is the location of an optional properties file",
" containing tuning parameters for the consistency check"
);
}
// Joins the given lines with the platform line separator, trailing separator included.
private static String lines( String... content )
{
StringBuilder result = new StringBuilder();
for ( String line : content )
{
result.append( line ).append( System.getProperty( "line.separator" ) );
}
return result.toString();
}
/**
 * Signals that the tool could not complete; carries a user-facing message and
 * optionally an underlying cause.
 */
static class ToolFailureException extends Exception
{
ToolFailureException( String message )
{
super( message );
}
ToolFailureException( String message, Throwable cause )
{
super( message, cause );
}
// Prints the message (and cause, if any) to stderr, then exits with status 1.
void haltJVM()
{
System.err.println( getMessage() );
if ( getCause() != null )
{
getCause().printStackTrace( System.err );
}
System.exit( 1 );
}
}
}
| false
|
enterprise_consistency-check_src_main_java_org_neo4j_consistency_ConsistencyCheckTool.java
|
4,373
|
/**
 * Settings controlling the behavior and cost/thoroughness trade-offs of the
 * consistency checker.
 */
public class ConsistencyCheckSettings
{
    // Fixed grammar: "However, but the check" -> "However, the check".
    @Description("Perform optional additional checking on property ownership. " +
            "This can detect a theoretical inconsistency where a property could be owned by multiple entities. " +
            "However, the check is very expensive in time and memory, so it is skipped by default.")
    public static final Setting<Boolean> consistency_check_property_owners = setting( "consistency_check_property_owners", BOOLEAN, FALSE );

    @Description("Perform checks on the label scan store. Checking this store is more expensive than " +
            "checking the native stores, so it may be useful to turn off this check for very large databases.")
    public static final Setting<Boolean> consistency_check_label_scan_store = setting( "consistency_check_label_scan_store", BOOLEAN, TRUE );

    @Description("Perform checks on indexes. Checking indexes is more expensive than " +
            "checking the native stores, so it may be useful to turn off this check for very large databases.")
    public static final Setting<Boolean> consistency_check_indexes = setting( "consistency_check_indexes", BOOLEAN, TRUE );

    // Fixed copy-pasted description: this setting controls task execution order,
    // not the window pool implementation.
    @Description("Task execution order to be used when running consistency check")
    public static final Setting<TaskExecutionOrder> consistency_check_execution_order =
            setting( "consistency_check_execution_order", options( TaskExecutionOrder.class ), TaskExecutionOrder.MULTI_PASS.name() );

    // On Windows there are problems with memory (un)mapping files, involving
    // relying on GC for unmapping which is error prone. So default back to
    // a window pool that can switch off memory mapping.
    @Description("Window pool implementation to be used when running consistency check")
    public static final Setting<WindowPoolImplementation> consistency_check_window_pool_implementation =
            setting( "consistency_check_window_pool_implementation", options( WindowPoolImplementation.class ), osIsWindows() ? WindowPoolImplementation.MOST_FREQUENTLY_USED.name() : WindowPoolImplementation.SCAN_RESISTANT.name());

    @SuppressWarnings("unchecked")
    @Description("File name for inconsistencies log file. If not specified, logs to a file in the store directory.")
    public static final
    Setting<File> consistency_check_report_file = setting( "consistency_check_report_file", PATH, NO_DEFAULT, basePath( GraphDatabaseSettings.store_dir ));
}
| false
|
enterprise_consistency-check_src_main_java_org_neo4j_consistency_ConsistencyCheckSettings.java
|
4,374
|
{
@Override
protected void generateInitialData( GraphDatabaseService graphDb )
{
org.neo4j.graphdb.Transaction tx = graphDb.beginTx();
try
{
Node node1 = set( graphDb.createNode() );
Node node2 = set( graphDb.createNode(), property( "key", "value" ) );
node1.createRelationshipTo( node2, DynamicRelationshipType.withName( "C" ) );
tx.success();
}
finally
{
tx.finish();
}
}
};
| false
|
enterprise_consistency-check_src_test_java_org_neo4j_consistency_ConsistencyCheckServiceIntegrationTest.java
|
4,375
|
{
@Override
protected void transactionData( GraphStoreFixture.TransactionDataBuilder tx,
GraphStoreFixture.IdGenerator next )
{
tx.create( new NodeRecord( next.node(), next.relationship(), -1 ) );
}
} );
| false
|
enterprise_consistency-check_src_test_java_org_neo4j_consistency_ConsistencyCheckServiceIntegrationTest.java
|
4,376
|
/**
 * Integration tests for {@link ConsistencyCheckService}: a consistent store passes
 * without producing a report file, an inconsistent store fails and produces one, and
 * the report location can be overridden via configuration.
 */
public class ConsistencyCheckServiceIntegrationTest
{
@Test
public void shouldSucceedIfStoreIsConsistent() throws Exception
{
// given
ConsistencyCheckService service = new ConsistencyCheckService();
// when
ConsistencyCheckService.Result result = service.runFullConsistencyCheck( fixture.directory().getPath(),
new Config( stringMap( ), GraphDatabaseSettings.class, ConsistencyCheckSettings.class ),
ProgressMonitorFactory.NONE, StringLogger.DEV_NULL );
// then: success, and no inconsistency report is left behind
assertEquals( ConsistencyCheckService.Result.SUCCESS, result );
File reportFile = new File( fixture.directory(), service.defaultLogFileName() );
assertFalse( "Inconsistency report file " + reportFile + " not generated", reportFile.exists() );
}
@Test
public void shouldFailIfTheStoreInNotConsistent() throws Exception
{
// given: a node record referencing a nonexistent relationship
breakNodeStore();
ConsistencyCheckService service = new ConsistencyCheckService();
// when
ConsistencyCheckService.Result result = service.runFullConsistencyCheck( fixture.directory().getPath(),
new Config( stringMap(), GraphDatabaseSettings.class, ConsistencyCheckSettings.class ),
ProgressMonitorFactory.NONE, StringLogger.DEV_NULL );
// then: failure, and a report file in the default location
assertEquals( ConsistencyCheckService.Result.FAILURE, result );
File reportFile = new File(fixture.directory(), service.defaultLogFileName());
assertTrue( "Inconsistency report file " + reportFile + " not generated", reportFile.exists() );
}
@Test
public void shouldWriteInconsistenciesToLogFileAtSpecifiedLocation() throws Exception
{
// given
breakNodeStore();
ConsistencyCheckService service = new ConsistencyCheckService();
File specificLogFile = new File( testDirectory.directory(), "specific_logfile.txt" );
// when: report file location is configured explicitly
service.runFullConsistencyCheck( fixture.directory().getPath(),
new Config( stringMap( ConsistencyCheckSettings.consistency_check_report_file.name(),specificLogFile.getPath()),
GraphDatabaseSettings.class, ConsistencyCheckSettings.class ),
ProgressMonitorFactory.NONE, StringLogger.DEV_NULL );
// then
assertTrue( "Inconsistency report file " + specificLogFile + " not generated", specificLogFile.exists() );
}
// Injects a corrupt node record (points at a relationship id that does not exist).
private void breakNodeStore() throws IOException
{
fixture.apply( new GraphStoreFixture.Transaction()
{
@Override
protected void transactionData( GraphStoreFixture.TransactionDataBuilder tx,
GraphStoreFixture.IdGenerator next )
{
tx.create( new NodeRecord( next.node(), next.relationship(), -1 ) );
}
} );
}
// Provides a small but non-empty consistent store for each test.
@Rule
public final GraphStoreFixture fixture = new GraphStoreFixture()
{
@Override
protected void generateInitialData( GraphDatabaseService graphDb )
{
org.neo4j.graphdb.Transaction tx = graphDb.beginTx();
try
{
Node node1 = set( graphDb.createNode() );
Node node2 = set( graphDb.createNode(), property( "key", "value" ) );
node1.createRelationshipTo( node2, DynamicRelationshipType.withName( "C" ) );
tx.success();
}
finally
{
tx.finish();
}
}
};
@Rule
public final TargetDirectory.TestDirectory testDirectory = TargetDirectory.testDirForTest( getClass() );
}
| false
|
enterprise_consistency-check_src_test_java_org_neo4j_consistency_ConsistencyCheckServiceIntegrationTest.java
|
4,377
|
/**
 * Runs a full consistency check over a store directory, writing any inconsistencies
 * found to a timestamped report file (or a configured location).
 */
public class ConsistencyCheckService
{
    // Captured at construction so the report file name is stable for this service instance.
    private final Date timestamp = new Date();

    /**
     * Performs the full check.
     *
     * @param storeDir            store directory to check
     * @param tuningConfiguration configuration; its store_dir is overwritten with {@code storeDir}
     * @param progressFactory     progress reporting
     * @param logger              logger for summary and errors
     * @return {@link Result#SUCCESS} if consistent, otherwise {@link Result#FAILURE}
     * @throws ConsistencyCheckIncompleteException if the check could not run to completion
     */
    public Result runFullConsistencyCheck( String storeDir,
            Config tuningConfiguration,
            ProgressMonitorFactory progressFactory,
            StringLogger logger ) throws ConsistencyCheckIncompleteException
    {
        // Force the configuration to point at the directory being checked.
        Map<String, String> params = tuningConfiguration.getParams();
        params.put( GraphDatabaseSettings.store_dir.name(), storeDir );
        tuningConfiguration.applyChanges( params );

        DefaultFileSystemAbstraction fileSystem = new DefaultFileSystemAbstraction();
        StoreFactory factory = new StoreFactory(
                tuningConfiguration,
                new DefaultIdGeneratorFactory(),
                tuningConfiguration.get( ConsistencyCheckSettings.consistency_check_window_pool_implementation )
                        .windowPoolFactory( tuningConfiguration, logger ), fileSystem, logger,
                new DefaultTxHook() );
        ConsistencySummaryStatistics summary;
        File reportFile = chooseReportPath( tuningConfiguration );
        // Lazy: the report file is only created if something is actually written to it.
        StringLogger report = StringLogger.lazyLogger( reportFile );
        NeoStore neoStore = factory.newNeoStore( new File( storeDir, NeoStore.DEFAULT_NAME ) );
        try
        {
            neoStore.makeStoreOk();
            StoreAccess store = new StoreAccess( neoStore );
            LabelScanStore labelScanStore = null;
            try {
                labelScanStore =
                        new LuceneLabelScanStoreBuilder( storeDir, store.getRawNeoStore(), fileSystem, logger ).build();
                SchemaIndexProvider indexes = new LuceneSchemaIndexProvider( DirectoryFactory.PERSISTENT, tuningConfiguration );
                DirectStoreAccess stores = new DirectStoreAccess( store, labelScanStore, indexes );
                // Inconsistencies go to both the main logger and the report file.
                summary = new FullCheck( tuningConfiguration, progressFactory )
                        .execute( stores, StringLogger.tee( logger, report ) );
            }
            finally
            {
                try
                {
                    if ( null != labelScanStore )
                    {
                        labelScanStore.shutdown();
                    }
                }
                catch ( IOException e )
                {
                    // Fixed typo in log message: "Faiure" -> "Failure".
                    logger.error( "Failure during shutdown of label scan store", e );
                }
            }
        }
        finally
        {
            report.close();
            neoStore.close();
        }
        if ( !summary.isConsistent() )
        {
            logger.logMessage( String.format( "See '%s' for a detailed consistency report.", reportFile.getPath() ) );
            return Result.FAILURE;
        }
        else
        {
            return Result.SUCCESS;
        }
    }

    /**
     * Resolves where the inconsistency report goes: the configured file, a default-named
     * file inside a configured directory, or a default-named file in the store directory.
     */
    private File chooseReportPath( Config tuningConfiguration )
    {
        File reportPath = tuningConfiguration.get( ConsistencyCheckSettings.consistency_check_report_file );
        File reportFile;
        if ( reportPath == null )
        {
            reportFile = new File( tuningConfiguration.get( GraphDatabaseSettings.store_dir ), defaultLogFileName() );
        } else
        {
            if ( reportPath.isDirectory() )
            {
                reportFile = new File( reportPath, defaultLogFileName() );
            }
            else
            {
                reportFile = reportPath;
            }
        }
        return reportFile;
    }

    /** Default report file name, timestamped with this service's creation time. */
    String defaultLogFileName()
    {
        return String.format( "inconsistencies-%s.report",
                new SimpleDateFormat( "yyyy-MM-dd.HH.mm.ss" ).format( timestamp ) );
    }

    /** Outcome of a consistency check run. */
    public enum Result
    {
        FAILURE( false ), SUCCESS( true );
        private boolean successful;

        private Result( boolean successful )
        {
            this.successful = successful;
        }

        public boolean isSuccessful()
        {
            return this.successful;
        }
    }
}
| false
|
enterprise_consistency-check_src_main_java_org_neo4j_consistency_ConsistencyCheckService.java
|
4,378
|
@SuppressWarnings( "serial" )
private static class IndexTask implements Task
{
@Override
public void run( final GraphDatabaseAPI graphdb )
{
try ( Transaction tx = graphdb.beginTx() )
{
index( graphdb.index().forNodes( "name" ), graphdb.createNode() );
tx.success();
}
finally
{
done();
}
}
private void index( Index<Node> index, Node node )
{
enterIndex();
index.add( node, getClass().getSimpleName(), Thread.currentThread().getName() );
}
protected void enterIndex()
{
// override
}
protected void done()
{
// override
}
}
| false
|
community_lucene-index_src_test_java_org_neo4j_concurrencytest_ShutdownRaceIT.java
|
4,379
|
{
@Override
public void run()
{
runTask( graphdb );
}
}.start();
| false
|
community_lucene-index_src_test_java_org_neo4j_concurrencytest_ShutdownRaceIT.java
|
4,380
|
@SuppressWarnings( "serial" )
private static class BreakTask extends IndexTask
{
@Override
public void run( final GraphDatabaseAPI graphdb )
{
new Thread()
{
@Override
public void run()
{
runTask( graphdb );
}
}.start();
}
void runTask( GraphDatabaseAPI graphdb )
{
super.run( graphdb );
}
@Override
protected void enterIndex()
{
breakpoint1();
}
@Override
protected void done()
{
breakpoint2();
}
private void breakpoint1()
{
// the debugger will break here
}
private void breakpoint2()
{
// the debugger will break here
}
}
| false
|
community_lucene-index_src_test_java_org_neo4j_concurrencytest_ShutdownRaceIT.java
|
4,381
|
{
@Override
protected void callback( DebugInterface debug )
{
resume( shutdownThread.getAndSet( null ) );
last.countDown();
}
}.enable() };
| false
|
community_lucene-index_src_test_java_org_neo4j_concurrencytest_ShutdownRaceIT.java
|
4,382
|
{
@Override
protected void callback( DebugInterface debug )
{
indexThread.set( debug.thread().suspend( this ) );
restart.countDown();
}
}.enable(), new BreakPoint( BreakTask.class, "breakpoint2" )
| false
|
community_lucene-index_src_test_java_org_neo4j_concurrencytest_ShutdownRaceIT.java
|
4,383
|
{
@Override
protected void callback( DebugInterface debug )
{
if ( debug.matchCallingMethod( 1, LuceneDataSource.class, null ) )
{
shutdownThread.set( debug.thread().suspend( this ) );
resume( indexThread.getAndSet( null ) );
this.disable();
}
}
@Override
public void deadlock( DebuggedThread thread )
{
shutdownThread.set( null );
thread.resume();
}
}.enable(), new BreakPoint( BreakTask.class, "breakpoint1" )
| false
|
community_lucene-index_src_test_java_org_neo4j_concurrencytest_ShutdownRaceIT.java
|
4,384
|
@Ignore( "This test has to be rewritten such that it injects a TxManager.Monitor into the database," +
" and uses that to observe that the shut down will wait for the IndexWriter to be closed. " +
"We should also add test cases for the new schema indexes and constraint indexes." )
public class ShutdownRaceIT extends AbstractSubProcessTestBase
{
private final CountDownLatch restart = new CountDownLatch( 1 ), last = new CountDownLatch( 1 );
@Test
public void canHaveShutdownWhileAccessingIndexWriters() throws Exception
{
run( new IndexTask() );
run( new BreakTask() );
restart.await();
restart();
last.await();
run( new IndexTask() );
}
@Override
protected BreakPoint[] breakpoints( int id )
{
final AtomicReference<DebuggedThread> shutdownThread = new AtomicReference<DebuggedThread>(), indexThread = new AtomicReference<DebuggedThread>();
return new BreakPoint[] { new BreakPoint( XaContainer.class, "close" )
{
@Override
protected void callback( DebugInterface debug )
{
if ( debug.matchCallingMethod( 1, LuceneDataSource.class, null ) )
{
shutdownThread.set( debug.thread().suspend( this ) );
resume( indexThread.getAndSet( null ) );
this.disable();
}
}
@Override
public void deadlock( DebuggedThread thread )
{
shutdownThread.set( null );
thread.resume();
}
}.enable(), new BreakPoint( BreakTask.class, "breakpoint1" )
{
@Override
protected void callback( DebugInterface debug )
{
indexThread.set( debug.thread().suspend( this ) );
restart.countDown();
}
}.enable(), new BreakPoint( BreakTask.class, "breakpoint2" )
{
@Override
protected void callback( DebugInterface debug )
{
resume( shutdownThread.getAndSet( null ) );
last.countDown();
}
}.enable() };
}
static void resume( DebuggedThread thread )
{
if ( thread != null ) thread.resume();
}
@SuppressWarnings( "serial" )
private static class IndexTask implements Task
{
@Override
public void run( final GraphDatabaseAPI graphdb )
{
try ( Transaction tx = graphdb.beginTx() )
{
index( graphdb.index().forNodes( "name" ), graphdb.createNode() );
tx.success();
}
finally
{
done();
}
}
private void index( Index<Node> index, Node node )
{
enterIndex();
index.add( node, getClass().getSimpleName(), Thread.currentThread().getName() );
}
protected void enterIndex()
{
// override
}
protected void done()
{
// override
}
}
@SuppressWarnings( "serial" )
private static class BreakTask extends IndexTask
{
@Override
public void run( final GraphDatabaseAPI graphdb )
{
new Thread()
{
@Override
public void run()
{
runTask( graphdb );
}
}.start();
}
void runTask( GraphDatabaseAPI graphdb )
{
super.run( graphdb );
}
@Override
protected void enterIndex()
{
breakpoint1();
}
@Override
protected void done()
{
breakpoint2();
}
private void breakpoint1()
{
// the debugger will break here
}
private void breakpoint2()
{
// the debugger will break here
}
}
}
| false
|
community_lucene-index_src_test_java_org_neo4j_concurrencytest_ShutdownRaceIT.java
|
4,385
|
public class ToNetworkStoreWriter implements StoreWriter
{
private final ChannelBuffer targetBuffer;
private final ByteCounterMonitor bufferMonitor;
public ToNetworkStoreWriter( ChannelBuffer targetBuffer, Monitors monitors )
{
this.targetBuffer = targetBuffer;
bufferMonitor = monitors.newMonitor( ByteCounterMonitor.class, getClass(), "storeCopier" );
}
@Override
public int write( String path, ReadableByteChannel data, ByteBuffer temporaryBuffer,
boolean hasData ) throws IOException
{
char[] chars = path.toCharArray();
targetBuffer.writeShort( chars.length );
Protocol.writeChars( targetBuffer, chars );
targetBuffer.writeByte( hasData ? 1 : 0 );
// TODO Make use of temporaryBuffer?
BlockLogBuffer buffer = new BlockLogBuffer( targetBuffer, bufferMonitor );
int totalWritten = 2 + chars.length*2 + 1;
if ( hasData )
{
totalWritten += buffer.write( data );
buffer.done();
}
return totalWritten;
}
@Override
public void done()
{
targetBuffer.writeShort( 0 );
}
}
| false
|
enterprise_com_src_main_java_org_neo4j_com_storecopy_ToNetworkStoreWriter.java
|
4,386
|
public class ToFileStoreWriter implements StoreWriter
{
private final File basePath;
public ToFileStoreWriter( File graphDbStoreDir )
{
this.basePath = graphDbStoreDir;
}
@Override
public int write( String path, ReadableByteChannel data, ByteBuffer temporaryBuffer,
boolean hasData ) throws IOException
{
try
{
temporaryBuffer.clear();
File file = new File( basePath, path );
RandomAccessFile randomAccessFile = null;
try
{
file.getParentFile().mkdirs();
randomAccessFile = new RandomAccessFile( file, "rw" );
int totalWritten = 0;
if ( hasData )
{
FileChannel channel = randomAccessFile.getChannel();
while ( data.read( temporaryBuffer ) >= 0 )
{
temporaryBuffer.flip();
totalWritten += temporaryBuffer.limit();
channel.write( temporaryBuffer );
temporaryBuffer.clear();
}
}
return totalWritten;
}
finally
{
if ( randomAccessFile != null )
{
randomAccessFile.close();
}
}
}
catch ( Throwable t )
{
throw new IOException( t );
}
}
@Override
public void done()
{
// Do nothing
}
}
| false
|
enterprise_com_src_main_java_org_neo4j_com_storecopy_ToFileStoreWriter.java
|
4,387
|
{
@Override
public LogBuffer createActiveLogFile( Config config, long prevCommittedId ) throws IllegalStateException, IOException
{
FileChannel channel = FileChannel.open( logWriterTarget.toPath(), CREATE, READ, WRITE );
return new DirectLogBuffer( new StoreFileChannel( channel ), ByteBuffer.allocate(512) );
}
};
| false
|
enterprise_com_src_test_java_org_neo4j_com_storecopy_ThirdPartyDSStoreCopyIT.java
|
4,388
|
return new LogExtractor( logPositionCache, logLoader, null, startTxId, endTxIdHint ){
long txCounter = 1338l;
@Override
public long extractNext( LogBuffer target ) throws IOException
{
if(txCounter <= 1339)
{
// This doesn't really matter, what we're interested in is putting something in the log
// that we can then verify in the test. This ensures the correct line of data flow is implemented,
// what the actual log format for this made up data source is makes no difference.
target.putLong( txCounter );
}
return txCounter <= 1339 ? txCounter++ : -1;
}
};
| false
|
enterprise_com_src_test_java_org_neo4j_com_storecopy_ThirdPartyDSStoreCopyIT.java
|
4,389
|
{
@Override
public Response<Void> fetchDataStream( MadeUpWriter writer, int dataSize )
{
writer.write( new FailingByteChannel( dataSize, failureMessage ) );
return new Response<Void>( null, storeIdToUse,
TransactionStream.EMPTY, ResourceReleaser.NO_OP );
}
};
| false
|
enterprise_com_src_test_java_org_neo4j_com_TestCommunication.java
|
4,390
|
public class TestCommunication
{
private static final byte INTERNAL_PROTOCOL_VERSION = 0;
private static final byte APPLICATION_PROTOCOL_VERSION = 0;
private static final int PORT = 1234;
private StoreId storeIdToUse;
private LifeSupport life = new LifeSupport();
private Builder builder;
@Before
public void doBefore()
{
storeIdToUse = new StoreId();
builder = new Builder();
}
@After
public void shutdownLife()
{
life.shutdown();
}
@Test
public void clientGetResponseFromServerViaComLayer() throws Throwable
{
MadeUpServerImplementation serverImplementation = new MadeUpServerImplementation( storeIdToUse );
MadeUpServer server = builder.server( serverImplementation );
MadeUpClient client = builder.client();
life.add( server );
life.add( client );
life.start();
int value1 = 10;
int value2 = 5;
Response<Integer> response = client.multiply( 10, 5 );
waitUntilResponseHasBeenWritten( server, 1000 );
assertEquals( (Integer) (value1 * value2), response.response() );
assertTrue( serverImplementation.gotCalled() );
assertTrue( server.responseHasBeenWritten() );
}
private void waitUntilResponseHasBeenWritten( MadeUpServer server, int maxTime ) throws Exception
{
long time = currentTimeMillis();
while ( !server.responseHasBeenWritten() && currentTimeMillis()-time < maxTime )
{
Thread.sleep( 50 );
}
}
@Test( expected = MismatchingStoreIdException.class )
public void makeSureClientStoreIdsMustMatch() throws Throwable
{
MadeUpServer server = builder.server();
MadeUpClient client = builder.storeId( new StoreId( 10, 10, versionStringToLong( ALL_STORES_VERSION ) ) ).client();
life.add( server );
life.add( client );
life.start();
client.multiply( 1, 2 );
}
@Test( expected = MismatchingStoreIdException.class )
public void makeSureServerStoreIdsMustMatch() throws Throwable
{
MadeUpServer server = builder.storeId( new StoreId( 10, 10, versionStringToLong( ALL_STORES_VERSION ) ) ).server();
MadeUpClient client = builder.client();
life.add( server );
life.add( client );
life.start();
client.multiply( 1, 2 );
}
@Test
public void makeSureClientCanStreamBigData() throws Throwable
{
MadeUpServer server = builder.server();
MadeUpClient client = builder.client();
life.add( server );
life.add( client );
life.start();
client.fetchDataStream( new ToAssertionWriter(), FRAME_LENGTH*3 );
}
@Test
public void clientThrowsServerSideErrorMidwayThroughStreaming() throws Throwable
{
final String failureMessage = "Just failing";
MadeUpServerImplementation serverImplementation = new MadeUpServerImplementation( storeIdToUse )
{
@Override
public Response<Void> fetchDataStream( MadeUpWriter writer, int dataSize )
{
writer.write( new FailingByteChannel( dataSize, failureMessage ) );
return new Response<Void>( null, storeIdToUse,
TransactionStream.EMPTY, ResourceReleaser.NO_OP );
}
};
MadeUpServer server = builder.server( serverImplementation );
MadeUpClient client = builder.client();
life.add( server );
life.add( client );
life.start();
try
{
client.fetchDataStream( new ToAssertionWriter(), FRAME_LENGTH*2 );
fail( "Should have thrown " + MadeUpException.class.getSimpleName() );
}
catch ( MadeUpException e )
{
assertEquals( failureMessage, e.getMessage() );
}
}
@Test
public void communicateBetweenJvms() throws Throwable
{
ServerInterface server = builder.serverInOtherJvm();
server.awaitStarted();
MadeUpClient client = builder.port( MadeUpServerProcess.PORT ).client();
life.add( client );
life.start();
assertEquals( (Integer)(9*5), client.multiply( 9, 5 ).response() );
client.fetchDataStream( new ToAssertionWriter(), 1024*1024*3 );
server.shutdown();
}
@Test
public void throwingServerSideExceptionBackToClient() throws Throwable
{
MadeUpServer server = builder.server();
MadeUpClient client = builder.client();
life.add( server );
life.add( client );
life.start();
String exceptionMessage = "The message";
try
{
client.throwException( exceptionMessage );
fail( "Should have thrown " + MadeUpException.class.getSimpleName() );
}
catch ( MadeUpException e )
{ // Good
assertEquals( exceptionMessage, e.getMessage() );
}
}
@Test
public void applicationProtocolVersionsMustMatch() throws Throwable
{
MadeUpServer server = builder.applicationProtocolVersion( (byte) (APPLICATION_PROTOCOL_VERSION+1) ).server();
MadeUpClient client = builder.client();
life.add( server );
life.add( client );
life.start();
try
{
client.multiply( 10, 20 );
fail( "Shouldn't be able to communicate with different application protocol versions" );
}
catch ( IllegalProtocolVersionException e ) { /* Good */ }
}
@Test
public void applicationProtocolVersionsMustMatchMultiJvm() throws Throwable
{
ServerInterface server = builder.applicationProtocolVersion( (byte)(APPLICATION_PROTOCOL_VERSION+1) ).serverInOtherJvm();
server.awaitStarted();
MadeUpClient client = builder.port( MadeUpServerProcess.PORT ).client();
life.add( client );
life.start();
try
{
client.multiply( 10, 20 );
fail( "Shouldn't be able to communicate with different application protocol versions" );
}
catch ( IllegalProtocolVersionException e ) { /* Good */ }
server.shutdown();
}
@Test
public void internalProtocolVersionsMustMatch() throws Throwable
{
MadeUpServer server = builder.internalProtocolVersion( (byte) 1 ).server();
MadeUpClient client = builder.internalProtocolVersion( (byte) 2 ).client();
life.add( server );
life.add( client );
life.start();
try
{
client.multiply( 10, 20 );
fail( "Shouldn't be able to communicate with different application protocol versions" );
}
catch ( IllegalProtocolVersionException e ) { /* Good */ }
}
@Test
public void internalProtocolVersionsMustMatchMultiJvm() throws Throwable
{
ServerInterface server = builder.internalProtocolVersion( (byte) 1 ).serverInOtherJvm();
server.awaitStarted();
MadeUpClient client = builder.port( MadeUpServerProcess.PORT ).internalProtocolVersion( (byte) 2 ).client();
life.add( client );
life.start();
try
{
client.multiply( 10, 20 );
fail( "Shouldn't be able to communicate with different application protocol versions" );
}
catch ( IllegalProtocolVersionException e ) { /* Good */ }
server.shutdown();
}
@Test
@Ignore("getting build back to green")
public void serverStopsStreamingToDeadClient() throws Throwable
{
MadeUpServer server = builder.server();
MadeUpClient client = builder.client();
life.add( server );
life.add( client );
life.start();
int failAtSize = FRAME_LENGTH*2;
ClientCrashingWriter writer = new ClientCrashingWriter( client, failAtSize );
try
{
client.fetchDataStream( writer, FRAME_LENGTH*10 );
fail( "Should fail in the middle" );
}
catch ( ComException e )
{ // Expected
}
assertTrue( writer.getSizeRead() >= failAtSize );
long maxWaitUntil = System.currentTimeMillis()+2*1000;
while ( !server.responseFailureEncountered() && System.currentTimeMillis() < maxWaitUntil ) yield();
assertTrue( "Failure writing the response should have been encountered", server.responseFailureEncountered() );
assertFalse( "Response shouldn't have been successful", server.responseHasBeenWritten() );
}
@Test
public void serverContextVerificationCanThrowException() throws Throwable
{
final String failureMessage = "I'm failing";
TxChecksumVerifier failingVerifier = new TxChecksumVerifier()
{
@Override
public void assertMatch( long txId, int masterId, long checksum )
{
throw new FailingException( failureMessage );
}
};
MadeUpServer server = builder.verifier( failingVerifier ).server();
MadeUpClient client = builder.client();
life.add( server );
life.add( client );
life.start();
try
{
client.multiply( 10, 5 );
fail( "Should have failed" );
}
catch ( Exception e )
{ // Good
// TODO catch FailingException instead of Exception and make Server throw the proper
// one instead of getting a "channel closed".
}
}
@Test
public void clientCanReadChunkSizeBiggerThanItsOwn() throws Throwable
{ // Given that frameLength is the same for both client and server.
int serverChunkSize = 20000;
int clientChunkSize = serverChunkSize/10;
MadeUpServer server = builder.chunkSize( serverChunkSize ).server();
MadeUpClient client = builder.chunkSize( clientChunkSize ).client();
life.add( server );
life.add( client );
life.start();
// Tell server to stream data occupying roughly two chunks. The chunks
// from server are 10 times bigger than the clients chunk size.
client.fetchDataStream( new ToAssertionWriter(), serverChunkSize*2 );
}
@Test
public void serverCanReadChunkSizeBiggerThanItsOwn() throws Throwable
{ // Given that frameLength is the same for both client and server.
int serverChunkSize = 1000;
int clientChunkSize = serverChunkSize*10;
MadeUpServer server = builder.chunkSize( serverChunkSize ).server();
MadeUpClient client = builder.chunkSize( clientChunkSize ).client();
life.add( server );
life.add( client );
life.start();
// Tell server to stream data occupying roughly two chunks. The chunks
// from server are 10 times bigger than the clients chunk size.
client.sendDataStream( new DataProducer( clientChunkSize*2 ) );
}
@Test
public void impossibleToHaveBiggerChunkSizeThanFrameSize() throws Throwable
{
Builder myBuilder = builder.chunkSize( MadeUpServer.FRAME_LENGTH+10 );
try
{
myBuilder.server().start();
fail( "Shouldn't be possible" );
}
catch ( IllegalArgumentException e )
{ // Good
}
try
{
myBuilder.client();
fail( "Shouldn't be possible" );
}
catch ( IllegalArgumentException e )
{ // Good
}
}
class Builder
{
private final int port;
private final int chunkSize;
private final byte internalProtocolVersion;
private final byte applicationProtocolVersion;
private final TxChecksumVerifier verifier;
private final StoreId storeId;
public Builder()
{
this( PORT, FRAME_LENGTH, INTERNAL_PROTOCOL_VERSION, APPLICATION_PROTOCOL_VERSION,
ALWAYS_MATCH, storeIdToUse );
}
public Builder( int port, int chunkSize, byte internalProtocolVersion, byte applicationProtocolVersion,
TxChecksumVerifier verifier, StoreId storeId )
{
this.port = port;
this.chunkSize = chunkSize;
this.internalProtocolVersion = internalProtocolVersion;
this.applicationProtocolVersion = applicationProtocolVersion;
this.verifier = verifier;
this.storeId = storeId;
}
public Builder port( int port )
{
return new Builder( port, chunkSize, internalProtocolVersion, applicationProtocolVersion, verifier, storeId );
}
public Builder chunkSize( int chunkSize )
{
return new Builder( port, chunkSize, internalProtocolVersion, applicationProtocolVersion, verifier, storeId );
}
public Builder internalProtocolVersion( byte internalProtocolVersion )
{
return new Builder( port, chunkSize, internalProtocolVersion, applicationProtocolVersion, verifier, storeId );
}
public Builder applicationProtocolVersion( byte applicationProtocolVersion )
{
return new Builder( port, chunkSize, internalProtocolVersion, applicationProtocolVersion, verifier, storeId );
}
public Builder verifier( TxChecksumVerifier verifier )
{
return new Builder( port, chunkSize, internalProtocolVersion, applicationProtocolVersion, verifier, storeId );
}
public Builder storeId( StoreId storeId )
{
return new Builder( port, chunkSize, internalProtocolVersion, applicationProtocolVersion, verifier, storeId );
}
public MadeUpServer server()
{
return new MadeUpServer( new MadeUpServerImplementation( storeId ), port,
internalProtocolVersion, applicationProtocolVersion, verifier, chunkSize );
}
public MadeUpServer server( MadeUpCommunicationInterface target )
{
return new MadeUpServer( target, port, internalProtocolVersion, applicationProtocolVersion, verifier, chunkSize );
}
public MadeUpClient client()
{
return new MadeUpClient( port, storeId, internalProtocolVersion, applicationProtocolVersion, chunkSize );
}
public ServerInterface serverInOtherJvm()
{
ServerInterface server = new MadeUpServerProcess().start( new StartupData(
storeId.getCreationTime(), storeId.getRandomId(),
storeId.getStoreVersion(), internalProtocolVersion,
applicationProtocolVersion, chunkSize ) );
server.awaitStarted();
return server;
}
}
}
| false
|
enterprise_com_src_test_java_org_neo4j_com_TestCommunication.java
|
4,391
|
class StartupData implements Serializable
{
private static final long serialVersionUID = 1L;
final long creationTime;
final long storeId;
final long storeVersion;
final byte applicationProtocolVersion;
final byte internalProtocolVersion;
final int chunkSize;
public StartupData( long creationTime, long storeId, long storeVersion,
byte internalProtocolVersion, byte applicationProtocolVersion, int chunkSize )
{
this.creationTime = creationTime;
this.storeId = storeId;
this.storeVersion = storeVersion;
this.internalProtocolVersion = internalProtocolVersion;
this.applicationProtocolVersion = applicationProtocolVersion;
this.chunkSize = chunkSize;
}
}
| false
|
enterprise_com_src_test_java_org_neo4j_com_StartupData.java
|
4,392
|
return new AbstractStoreChannel() {
@Override
public void close() throws IOException
{
}
};
| false
|
enterprise_com_src_test_java_org_neo4j_com_ServerUtilTest.java
|
4,393
|
private static class Something
{
private boolean closed;
public void doStuff() throws Exception
{
if ( closed )
{
throw new Exception( "Closed" );
}
}
public void close()
{
this.closed = true;
}
}
| false
|
enterprise_com_src_test_java_org_neo4j_com_ResourcePoolTest.java
|
4,394
|
private class ResourceHolder implements Runnable
{
private final Semaphore latch = new Semaphore( 0 );
private final CountDownLatch released = new CountDownLatch( 1 );
private final CountDownLatch onAcquire;
private final ResourcePool pool;
private final AtomicBoolean release = new AtomicBoolean( );
private ResourceHolder( ResourcePool pool, CountDownLatch onAcquire )
{
this.pool = pool;
this.onAcquire = onAcquire;
}
@Override
public void run()
{
try
{
pool.acquire();
onAcquire.countDown();
try
{
latch.acquire();
}
catch ( InterruptedException e )
{
throw new RuntimeException( e );
}
if ( release.get() )
{
pool.release();
released.countDown();
}
}
catch ( Throwable e )
{
e.printStackTrace();
}
}
public void release()
{
this.release.set( true);
latch.release();
try
{
released.await();
}
catch ( InterruptedException e )
{
e.printStackTrace();
}
}
public void release( CountDownLatch releaseLatch )
{
release();
releaseLatch.countDown();
}
public void end()
{
this.release.set( false);
latch.release();
}
}
| false
|
enterprise_com_src_test_java_org_neo4j_com_ResourcePoolTest.java
|
4,395
|
{
@Override
protected Something create()
{
return new Something();
}
@Override
protected boolean isAlive( Something resource )
{
return !resource.closed;
}
};
| false
|
enterprise_com_src_test_java_org_neo4j_com_ResourcePoolTest.java
|
4,396
|
{
@Override
protected Something create()
{
return new Something();
}
@Override
protected boolean isAlive( Something resource )
{
return !resource.closed;
}
};
| false
|
enterprise_com_src_test_java_org_neo4j_com_ResourcePoolTest.java
|
4,397
|
public class ResourcePoolTest
{
@Test
public void shouldNotReuseBrokenInstances() throws Exception
{
ResourcePool<Something> pool = new ResourcePool<Something>( 5 )
{
@Override
protected Something create()
{
return new Something();
}
@Override
protected boolean isAlive( Something resource )
{
return !resource.closed;
}
};
Something somethingFirst = pool.acquire();
somethingFirst.doStuff();
pool.release();
Something something = pool.acquire();
assertEquals( somethingFirst, something );
something.doStuff();
something.close();
pool.release();
Something somethingElse = pool.acquire();
assertFalse( something == somethingElse );
somethingElse.doStuff();
}
@Test
public void shouldTimeoutGracefully() throws InterruptedException
{
FakeClock clock = new FakeClock();
ResourcePool.CheckStrategy timeStrategy = new ResourcePool.CheckStrategy.TimeoutCheckStrategy( 100, clock );
while ( clock.currentTimeMillis() <= 100 )
{
assertFalse( timeStrategy.shouldCheck() );
clock.forward( 10, TimeUnit.MILLISECONDS );
}
assertTrue( timeStrategy.shouldCheck() );
clock.forward( 1, TimeUnit.MILLISECONDS );
assertFalse( timeStrategy.shouldCheck() );
}
@Test
public void shouldBuildUpGracefullyUntilReachedMinPoolSize() throws InterruptedException
{
// GIVEN
StatefulMonitor stateMonitor = new StatefulMonitor();
FakeClock clock = new FakeClock();
final ResourcePool<Something> pool = getResourcePool( stateMonitor, clock, 5 );
// WHEN
acquireFromPool( pool, 5 );
// THEN
assertEquals( -1, stateMonitor.currentPeakSize.get() );
assertEquals( -1, stateMonitor.targetSize.get() ); // that means the target size was not updated
assertEquals( 0, stateMonitor.disposed.get() ); // no disposed happened, since the count to update is 10
}
@Test
public void shouldBuildUpGracefullyWhilePassingMinPoolSizeBeforeTimerRings() throws InterruptedException
{
// GIVEN
StatefulMonitor stateMonitor = new StatefulMonitor();
FakeClock clock = new FakeClock();
final ResourcePool<Something> pool = getResourcePool( stateMonitor, clock, 5 );
// WHEN
acquireFromPool( pool, 15 );
// THEN
assertEquals( -1, stateMonitor.currentPeakSize.get() );
assertEquals( 15, stateMonitor.created.get() );
assertEquals( -1, stateMonitor.targetSize.get() );
assertEquals( 0, stateMonitor.disposed.get() );
}
@Test
public void shouldUpdateTargetSizeWhenSpikesOccur() throws Exception
{
// given
final int MIN_SIZE = 5;
final int MAX_SIZE = 10;
StatefulMonitor stateMonitor = new StatefulMonitor();
FakeClock clock = new FakeClock();
final ResourcePool<Something> pool = getResourcePool( stateMonitor, clock, MIN_SIZE );
// when
List<ResourceHolder> holders = acquireFromPool( pool, MAX_SIZE );
clock.forward( 110, TimeUnit.MILLISECONDS );
holders.addAll( acquireFromPool( pool, 1 ) ); // Needed to trigger the alarm
// then
assertEquals( MAX_SIZE + 1, stateMonitor.currentPeakSize.get() );
// We have not released anything, so targetSize will not be reduced
assertEquals( MAX_SIZE + 1, stateMonitor.targetSize.get() ); // + 1 from the acquire
for ( ResourceHolder holder : holders )
{
holder.end();
}
}
@Test
public void shouldKeepSmallPeakAndNeverDisposeIfAcquireAndReleaseContinuously() throws Exception
{
// given
final int MIN_SIZE = 1;
StatefulMonitor stateMonitor = new StatefulMonitor();
FakeClock clock = new FakeClock();
final ResourcePool<Something> pool = getResourcePool( stateMonitor, clock, MIN_SIZE );
// when
for ( int i = 0; i < 200; i++ )
{
List<ResourceHolder> newOnes = acquireFromPool( pool, 1 );
CountDownLatch release = new CountDownLatch( newOnes.size() );
for ( ResourceHolder newOne : newOnes )
{
newOne.release( release );
}
release.await();
}
// then
assertEquals( -1, stateMonitor.currentPeakSize.get() ); // no alarm has rung, -1 is the default
assertEquals( 1, stateMonitor.created.get() );
assertEquals( 0, stateMonitor.disposed.get() ); // we should always be below min size, so 0 dispose calls
}
@Test
public void shouldSlowlyReduceTheNumberOfResourcesInThePoolWhenResourcesAreReleased() throws Exception
{
// given
final int MIN_SIZE = 50;
final int MAX_SIZE = 200;
StatefulMonitor stateMonitor = new StatefulMonitor();
FakeClock clock = new FakeClock();
final ResourcePool<Something> pool = getResourcePool( stateMonitor, clock, MIN_SIZE );
List<ResourceHolder> holders = new LinkedList<ResourceHolder>();
buildAPeakOfAcquiredResourcesAndTriggerAlarmWithSideEffects( MAX_SIZE, clock, pool, holders );
// when
// After the peak, stay below MIN_SIZE concurrent usage, using up all already present resources.
clock.forward( 110, TimeUnit.MILLISECONDS );
for ( int i = 0; i < MAX_SIZE; i++ )
{
acquireFromPool( pool, 1 ).get( 0 ).release();
}
// then
// currentPeakSize must have reset from the latest alarm to MIN_SIZE.
assertEquals( 1, stateMonitor.currentPeakSize.get() ); // Alarm
// targetSize must be set to MIN_SIZE since currentPeakSize was that 2 alarms ago and didn't increase
assertEquals( MIN_SIZE, stateMonitor.targetSize.get() );
// Only pooled resources must be used, disposing what is in excess
// +1 for the alarm from buildAPeakOfAcquiredResourcesAndTriggerAlarmWithSideEffects
assertEquals( MAX_SIZE - MIN_SIZE + 1, stateMonitor.disposed.get() );
}
@Test
public void shouldMaintainPoolAtHighWatermarkWhenConcurrentUsagePassesMinSize() throws Exception
{
// given
final int MIN_SIZE = 50;
final int MAX_SIZE = 200;
final int MID_SIZE = 90;
StatefulMonitor stateMonitor = new StatefulMonitor();
FakeClock clock = new FakeClock();
final ResourcePool<Something> pool = getResourcePool( stateMonitor, clock, MIN_SIZE );
List<ResourceHolder> holders = new LinkedList<ResourceHolder>();
buildAPeakOfAcquiredResourcesAndTriggerAlarmWithSideEffects( MAX_SIZE, clock, pool, holders );
// when
// After the peak, stay at MID_SIZE concurrent usage, using up all already present resources in the process
// but also keeping the high watermark above the MIN_SIZE
clock.forward( 110, TimeUnit.MILLISECONDS );
// Requires some rounds to happen, since there is constant racing between releasing and acquiring which does
// not always result in reaping of resources, as there is reuse
for ( int i = 0; i < 10; i++ )
{
// The latch is necessary to reduce races between batches
CountDownLatch release = new CountDownLatch( MID_SIZE );
for ( ResourceHolder holder : acquireFromPool( pool, MID_SIZE ) )
{
holder.release( release );
}
release.await();
clock.forward( 110, TimeUnit.MILLISECONDS );
}
// then
// currentPeakSize should be at MID_SIZE
assertEquals( MID_SIZE, stateMonitor.currentPeakSize.get() );
// target size too
assertEquals( MID_SIZE, stateMonitor.targetSize.get() );
// only the excess from the MAX_SIZE down to mid size must have been disposed
// +1 for the alarm from buildAPeakOfAcquiredResourcesAndTriggerAlarmWithSideEffects
assertEquals( MAX_SIZE - MID_SIZE + 1, stateMonitor.disposed.get() );
}
@Test
public void shouldReclaimAndRecreateWhenLullBetweenSpikesOccurs() throws Exception
{
    // given
    final int minSize = 50;
    final int belowMinSize = minSize / 5;
    final int maxSize = 200;
    StatefulMonitor stateMonitor = new StatefulMonitor();
    FakeClock clock = new FakeClock();
    final ResourcePool<Something> pool = getResourcePool( stateMonitor, clock, minSize );
    List<ResourceHolder> holders = new LinkedList<ResourceHolder>();
    buildAPeakOfAcquiredResourcesAndTriggerAlarmWithSideEffects( maxSize, clock, pool, holders );

    // when
    // After the peak, keep concurrent usage well below the minimum, draining the
    // already-created resources in the process
    clock.forward( 110, TimeUnit.MILLISECONDS );
    // Several rounds are required: releasing and acquiring race constantly, so a single
    // batch does not always trigger reaping of resources (they get reused instead)
    for ( int round = 0; round < 30; round++ )
    {
        // The latch reduces races between successive batches
        CountDownLatch batchDone = new CountDownLatch( belowMinSize );
        for ( ResourceHolder holder : acquireFromPool( pool, belowMinSize ) )
        {
            holder.release( batchDone );
        }
        batchDone.await();
        clock.forward( 110, TimeUnit.MILLISECONDS );
    }

    // then
    // the current peak should have tracked the lull
    assertEquals( belowMinSize, stateMonitor.currentPeakSize.get() );
    // while the target never drops below the configured minimum
    assertEquals( minSize, stateMonitor.targetSize.get() );
    // only the excess between the peak and the minimum must have been disposed;
    // +1 for the alarm from buildAPeakOfAcquiredResourcesAndTriggerAlarmWithSideEffects
    assertEquals( maxSize - minSize + 1, stateMonitor.disposed.get() );

    stateMonitor.created.set( 0 );
    stateMonitor.disposed.set( 0 );

    // when
    // After the lull, build the same peak again
    buildAPeakOfAcquiredResourcesAndTriggerAlarmWithSideEffects( maxSize, clock, pool, holders );

    // then
    assertEquals( maxSize - minSize + 1, stateMonitor.created.get() );
    assertEquals( 0, stateMonitor.disposed.get() );
}
/**
 * Acquires {@code maxSize} resources, moves the clock past the check interval, acquires one
 * more (the check only happens on acquisition) and finally releases everything it acquired.
 * All acquired holders are appended to {@code holders}.
 */
private void buildAPeakOfAcquiredResourcesAndTriggerAlarmWithSideEffects( int maxSize, FakeClock clock,
        ResourcePool<Something> pool, List<ResourceHolder> holders ) throws InterruptedException
{
    // Saturate the pool up to the requested peak
    holders.addAll( acquireFromPool( pool, maxSize ) );
    clock.forward( 110, TimeUnit.MILLISECONDS );
    // "Ring the bell" only on acquisition, of course.
    holders.addAll( acquireFromPool( pool, 1 ) );
    for ( ResourceHolder holder : holders )
    {
        holder.release();
    }
}
/**
 * Creates a pool of {@link Something} resources with the given minimum size, a 100ms
 * timeout-based check strategy driven by the supplied fake clock, and the given monitor
 * for observing pool state changes.
 */
private ResourcePool<Something> getResourcePool( StatefulMonitor stateMonitor,
FakeClock clock,
int minSize )
{
return new ResourcePool<Something>( minSize,
new ResourcePool.CheckStrategy.TimeoutCheckStrategy( 100, clock ), stateMonitor )
{
@Override
protected Something create()
{
return new Something();
}

@Override
protected boolean isAlive( Something resource )
{
// A resource stays alive until close() flips its flag
return !resource.closed;
}
};
}
/**
 * Starts {@code times} worker threads, each acquiring one resource from the pool, and
 * blocks until every one of them has actually acquired. Returns the holders so callers
 * can later release or end them.
 */
private List<ResourceHolder> acquireFromPool( final ResourcePool pool, int times ) throws InterruptedException
{
    final CountDownLatch allAcquired = new CountDownLatch( times );
    List<ResourceHolder> holders = new LinkedList<ResourceHolder>();
    for ( int i = 0; i < times; i++ )
    {
        ResourceHolder holder = new ResourceHolder( pool, allAcquired );
        holders.add( holder );
        new Thread( holder ).start();
    }
    // Wait until every worker thread has acquired its resource
    allAcquired.await();
    return holders;
}
/**
 * A worker that acquires one resource from the pool on its own thread, then parks until the
 * test tells it to either release the resource back to the pool ({@link #release()}) or
 * terminate without releasing ({@link #end()}).
 */
private class ResourceHolder implements Runnable
{
    // The worker parks on this semaphore after acquiring, until release()/end() signals it
    private final Semaphore latch = new Semaphore( 0 );
    // Counted down once the resource has actually been returned to the pool
    private final CountDownLatch released = new CountDownLatch( 1 );
    // Shared latch counted down as soon as this worker has acquired its resource
    private final CountDownLatch onAcquire;
    private final ResourcePool pool;
    // true: release the resource when woken; false: just terminate
    private final AtomicBoolean release = new AtomicBoolean();

    private ResourceHolder( ResourcePool pool, CountDownLatch onAcquire )
    {
        this.pool = pool;
        this.onAcquire = onAcquire;
    }

    @Override
    public void run()
    {
        try
        {
            pool.acquire();
            onAcquire.countDown();
            try
            {
                latch.acquire();
            }
            catch ( InterruptedException e )
            {
                // Restore the interrupt status before rethrowing, so the flag is not lost
                Thread.currentThread().interrupt();
                throw new RuntimeException( e );
            }
            if ( release.get() )
            {
                pool.release();
                released.countDown();
            }
        }
        catch ( Throwable e )
        {
            // Test helper: surface unexpected failures without killing the worker silently
            e.printStackTrace();
        }
    }

    /**
     * Tells the worker to release its resource back to the pool and blocks until it has.
     */
    public void release()
    {
        this.release.set( true );
        latch.release();
        try
        {
            released.await();
        }
        catch ( InterruptedException e )
        {
            // Restore the interrupt status instead of swallowing it
            Thread.currentThread().interrupt();
            e.printStackTrace();
        }
    }

    /**
     * As {@link #release()}, additionally counting down the given latch so callers can
     * await a whole batch of releases.
     */
    public void release( CountDownLatch releaseLatch )
    {
        release();
        releaseLatch.countDown();
    }

    /**
     * Lets the worker terminate without returning its resource to the pool.
     */
    public void end()
    {
        this.release.set( false );
        latch.release();
    }
}
/**
 * Monitor that records the latest pool state and counts lifecycle events, so tests can
 * assert on pool behavior. Fields are final holders; tests reset the counters in place
 * (e.g. {@code created.set( 0 )}), never reassign them.
 */
private class StatefulMonitor implements ResourcePool.Monitor<Something>
{
    // -1 means "never reported yet"
    public final AtomicInteger currentPeakSize = new AtomicInteger( -1 );
    public final AtomicInteger targetSize = new AtomicInteger( -1 );
    public final AtomicInteger created = new AtomicInteger( 0 );
    public final AtomicInteger acquired = new AtomicInteger( 0 );
    public final AtomicInteger disposed = new AtomicInteger( 0 );

    @Override
    public void updatedCurrentPeakSize( int currentPeakSize )
    {
        this.currentPeakSize.set( currentPeakSize );
    }

    @Override
    public void updatedTargetSize( int targetSize )
    {
        this.targetSize.set( targetSize );
    }

    @Override
    public void created( Something something )
    {
        this.created.incrementAndGet();
    }

    @Override
    public void acquired( Something something )
    {
        this.acquired.incrementAndGet();
    }

    @Override
    public void disposed( Something something )
    {
        this.disposed.incrementAndGet();
    }
}
/**
 * A trivial poolable resource: usable until {@link #close()} is called, after which any
 * attempt to use it fails.
 */
private static class Something
{
    // Flipped once by close(); never reset
    private boolean closed;

    public void doStuff() throws Exception
    {
        if ( !closed )
        {
            return;
        }
        throw new Exception( "Closed" );
    }

    public void close()
    {
        this.closed = true;
    }
}
}
| false
|
enterprise_com_src_test_java_org_neo4j_com_ResourcePoolTest.java
|
4,398
|
/**
 * No-op implementation of {@link Monitor}: every callback does nothing.
 * Extend this and override only the callbacks of interest instead of
 * implementing the whole interface.
 */
public class Adapter<R> implements Monitor<R>
{
// Intentionally empty: subclasses override what they need
@Override
public void updatedCurrentPeakSize( int currentPeakSize )
{
}

@Override
public void updatedTargetSize( int targetSize )
{
}

@Override
public void created( R resource )
{
}

@Override
public void acquired( R resource )
{
}

@Override
public void disposed( R resource )
{
}
}
| false
|
enterprise_com_src_main_java_org_neo4j_com_ResourcePool.java
|
4,399
|
/**
 * A {@link CheckStrategy} that answers {@code true} at most once per {@code interval}
 * milliseconds, as measured by the supplied clock. The first positive answer after an
 * interval elapses also resets the interval.
 * NOTE(review): lastCheckTime is updated without synchronization — presumably callers
 * invoke shouldCheck() from a single thread; confirm before relying on it concurrently.
 */
public class TimeoutCheckStrategy implements CheckStrategy
{
    private final long interval;
    private final Clock clock;
    private long lastCheckTime;

    public TimeoutCheckStrategy( long interval, Clock clock )
    {
        this.interval = interval;
        this.clock = clock;
        this.lastCheckTime = clock.currentTimeMillis();
    }

    @Override
    public boolean shouldCheck()
    {
        long now = clock.currentTimeMillis();
        boolean intervalElapsed = now > lastCheckTime + interval;
        if ( intervalElapsed )
        {
            lastCheckTime = now;
        }
        return intervalElapsed;
    }
}
| false
|
enterprise_com_src_main_java_org_neo4j_com_ResourcePool.java
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.