Unnamed: 0
int64 0
6.7k
| func
stringlengths 12
89.6k
| target
bool 2
classes | project
stringlengths 45
151
|
|---|---|---|---|
3,900
|
/**
 * An adversary that injects failures at random, governed by the configured
 * mischief, failure and error rates. The overall probability of bad stuff
 * happening can be scaled at runtime via {@link #setProbabilityFactor(double)}.
 */
@SuppressWarnings( "unchecked" )
public class RandomAdversary extends AbstractAdversary
{
    private static final double STANDARD_PROBABILITY_FACTOR = 1.0;
    private final double mischiefRate;
    private final double failureRate;
    private final double errorRate;
    // A negative value is a flag meaning "revert to the standard factor once
    // a failure has actually been injected" — see setAndResetProbabilityFactor().
    private volatile double probabilityFactor;

    /**
     * @param mischiefRate probability of reporting mischief, in [0.0; 1.0[
     * @param failureRate probability of throwing one of the caller-supplied failure types, in [0.0; 1.0[
     * @param errorRate probability of throwing an OutOfMemoryError or NullPointerException, in [0.0; 1.0[
     */
    public RandomAdversary( double mischiefRate, double failureRate, double errorRate )
    {
        assert 0 <= mischiefRate && mischiefRate < 1.0 :
                "Expected mischief rate in [0.0; 1.0[ but was " + mischiefRate;
        assert 0 <= failureRate && failureRate < 1.0 :
                "Expected failure rate in [0.0; 1.0[ but was " + failureRate;
        assert 0 <= errorRate && errorRate < 1.0 :
                "Expected error rate in [0.0; 1.0[ but was " + errorRate;
        assert mischiefRate + errorRate + failureRate < 1.0 :
                "Expected mischief rate + error rate + failure rate in [0.0; 1.0[ but was " +
                        (mischiefRate + errorRate + failureRate);
        this.mischiefRate = mischiefRate;
        this.failureRate = failureRate;
        this.errorRate = errorRate;
        probabilityFactor = STANDARD_PROBABILITY_FACTOR;
    }

    @Override
    public void injectFailure( Class<? extends Throwable>... failureTypes )
    {
        maybeDoBadStuff( failureTypes, false );
    }

    @Override
    public boolean injectFailureOrMischief( Class<? extends Throwable>... failureTypes )
    {
        return maybeDoBadStuff( failureTypes, true );
    }

    /**
     * Draws a single random number and compares it against the cumulative
     * error/failure/mischief thresholds, scaled by the current probability factor.
     *
     * @return {@code true} if mischief should be performed by the caller.
     */
    private boolean maybeDoBadStuff( Class<? extends Throwable>[] failureTypes, boolean includingMischief )
    {
        double luckyDraw = rng.nextDouble();
        double factor = probabilityFactor;
        boolean resetUponFailure = false;
        if ( factor < 0 )
        {
            // Negative factor means setAndResetProbabilityFactor() was used:
            // apply the magnitude, and restore the standard factor on failure.
            resetUponFailure = true;
            factor = -factor;
        }
        if ( luckyDraw <= errorRate * factor )
        {
            if ( resetUponFailure )
            {
                probabilityFactor = STANDARD_PROBABILITY_FACTOR;
            }
            throwOneOf( OutOfMemoryError.class, NullPointerException.class );
        }
        if ( failureTypes.length > 0 && luckyDraw <= (failureRate + errorRate) * factor )
        {
            if ( resetUponFailure )
            {
                probabilityFactor = STANDARD_PROBABILITY_FACTOR;
            }
            throwOneOf( failureTypes );
        }
        return includingMischief && luckyDraw <= (mischiefRate + failureRate + errorRate) * factor;
    }

    public void setProbabilityFactor( double factor )
    {
        probabilityFactor = factor;
    }

    public void setAndResetProbabilityFactor( double factor )
    {
        // The negative sign bit indicates that the rate should be reset upon failure
        probabilityFactor = -factor;
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_adversaries_RandomAdversary.java
|
3,901
|
/**
 * An adversary that injects a failure when a countdown reaches exactly zero.
 * Optionally the countdown is restarted after every failure, so failures
 * recur periodically.
 */
public class CountingAdversary extends AbstractAdversary
{
    private final AtomicInteger countDown;
    private final int startingCount;
    private final boolean resetCountDownOnFailure;

    /**
     * @param countDownTillFailure number of injectFailure calls before a failure is thrown
     * @param resetCountDownOnFailure whether to restart the countdown after each failure
     */
    public CountingAdversary( int countDownTillFailure, boolean resetCountDownOnFailure )
    {
        this.startingCount = countDownTillFailure;
        this.resetCountDownOnFailure = resetCountDownOnFailure;
        countDown = new AtomicInteger( countDownTillFailure );
    }

    @Override
    public void injectFailure( Class<? extends Throwable>... failureTypes )
    {
        // Atomic decrement; equivalent to the hand-rolled compare-and-set loop
        // it replaces.
        int newCount = countDown.decrementAndGet();
        if ( resetCountDownOnFailure && newCount < 1 )
        {
            reset();
        }
        if ( newCount == 0 )
        {
            try
            {
                // Briefly slow down the failing thread, giving concurrent
                // threads a chance to make progress around the injected failure.
                Thread.sleep( 10 );
            }
            catch ( InterruptedException e )
            {
                // Restore the interrupt status rather than swallowing it.
                Thread.currentThread().interrupt();
            }
            throwOneOf( failureTypes );
        }
    }

    @Override
    public boolean injectFailureOrMischief( Class<? extends Throwable>... failureTypes )
    {
        // This adversary never performs mischief; it only injects failures.
        injectFailure( failureTypes );
        return false;
    }

    private void reset()
    {
        // The current count is going to be either zero or negative when we get here.
        // Add startingCount on top of the left-over value, so decrements that raced
        // with the reset are not lost.
        int count;
        do {
            count = countDown.get();
        } while( count < 1 && !countDown.compareAndSet( count, startingCount + count ) );
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_adversaries_CountingAdversary.java
|
3,902
|
/**
 * An adversary that delegates to another adversary, but only when the call
 * stack contains one of the configured victim classes and the adversary is
 * currently enabled.
 */
public class ClassGuardedAdversary implements Adversary
{
    private final Adversary delegate;
    private final Set<String> victimClasses;
    private volatile boolean enabled;

    public ClassGuardedAdversary( Adversary delegate, String... victimClassNames )
    {
        this.delegate = delegate;
        this.victimClasses = new HashSet<String>();
        Collections.addAll( this.victimClasses, victimClassNames );
        this.enabled = true;
    }

    @Override
    public void injectFailure( Class<? extends Throwable>... failureTypes )
    {
        // Guard clause: stay silent unless enabled and called from a victim class.
        if ( !enabled || !calledFromVictimClass() )
        {
            return;
        }
        delegateFailureInjection( failureTypes );
    }

    @Override
    public boolean injectFailureOrMischief( Class<? extends Throwable>... failureTypes )
    {
        if ( !enabled || !calledFromVictimClass() )
        {
            return false;
        }
        return delegateFailureOrMischiefInjection( failureTypes );
    }

    protected void delegateFailureInjection( Class<? extends Throwable>[] failureTypes )
    {
        delegate.injectFailure( failureTypes );
    }

    protected boolean delegateFailureOrMischiefInjection( Class<? extends Throwable>[] failureTypes )
    {
        return delegate.injectFailureOrMischief( failureTypes );
    }

    /** @return {@code true} if any frame on the current stack belongs to a victim class. */
    private boolean calledFromVictimClass()
    {
        for ( StackTraceElement frame : Thread.currentThread().getStackTrace() )
        {
            if ( victimClasses.contains( frame.getClassName() ) )
            {
                return true;
            }
        }
        return false;
    }

    public void disable()
    {
        enabled = false;
    }

    public void enable()
    {
        enabled = true;
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_adversaries_ClassGuardedAdversary.java
|
3,903
|
{
@Override
public void run()
{
adversary.setAndResetProbabilityFactor( factor );
}
} );
| false
|
community_kernel_src_test_java_org_neo4j_adversaries_AdversarySignals.java
|
3,904
|
{
@Override
public void run()
{
adversary.setProbabilityFactor( factor );
}
} );
| false
|
community_kernel_src_test_java_org_neo4j_adversaries_AdversarySignals.java
|
3,905
|
{
@Override
public void handle( Signal sig )
{
handleSignal();
}
} );
| false
|
community_kernel_src_test_java_org_neo4j_adversaries_AdversarySignals.java
|
3,906
|
/**
 * Singleton that, once installed, reacts to the SIGUSR2 signal by running a
 * list of registered handlers — typically adjusting the probability factor of
 * a {@link RandomAdversary} while a test is running.
 */
public final class AdversarySignals
{
    // Both fields below are effectively write-once; marking them final makes
    // the singleton and its handler list safe to publish.
    private static final AdversarySignals instance = new AdversarySignals();
    private boolean installed;
    private final List<Runnable> installedHandlers;

    private AdversarySignals()
    {
        installed = false;
        installedHandlers = new ArrayList<Runnable>();
    }

    public static AdversarySignals getInstance()
    {
        return instance;
    }

    /**
     * Installs the SIGUSR2 handler. Safe to call repeatedly; only the first
     * call has any effect.
     */
    public synchronized void installAsSIGUSR2()
    {
        if ( !installed )
        {
            Signal.handle( new Signal( "USR2" ), new SignalHandler()
            {
                @Override
                public void handle( Signal sig )
                {
                    handleSignal();
                }
            } );
            installed = true;
        }
    }

    // Runs every registered handler; synchronized so handlers never race with
    // concurrent registrations.
    private synchronized void handleSignal()
    {
        for ( Runnable handler : installedHandlers )
        {
            handler.run();
        }
    }

    /**
     * On each signal, sets the adversary's probability factor to the given value.
     */
    public synchronized void setFactorWhenSignalled(
            final RandomAdversary adversary,
            final double factor )
    {
        installedHandlers.add( new Runnable()
        {
            @Override
            public void run()
            {
                adversary.setProbabilityFactor( factor );
            }
        } );
    }

    /**
     * On each signal, sets the adversary's probability factor to the given
     * value, arranging for it to reset to the standard factor upon failure.
     */
    public synchronized void setAndResetFactorWhenSignalled(
            final RandomAdversary adversary,
            final double factor )
    {
        installedHandlers.add( new Runnable()
        {
            @Override
            public void run()
            {
                adversary.setAndResetProbabilityFactor( factor );
            }
        } );
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_adversaries_AdversarySignals.java
|
3,907
|
/**
 * Base class for adversaries: holds the random number generator and provides
 * the machinery for instantiating and throwing one of a set of failure types,
 * including sneaky-throwing checked exceptions without declaring them.
 */
@SuppressWarnings( "unchecked" )
public abstract class AbstractAdversary implements Adversary
{
    protected final Random rng;

    public AbstractAdversary()
    {
        rng = new Random();
    }

    /**
     * Instantiates and throws one of the given throwable types, chosen at
     * random. Each type must have an accessible no-arg constructor.
     */
    protected void throwOneOf( Class<? extends Throwable>... types )
    {
        int choice = rng.nextInt( types.length );
        Class<? extends Throwable> type = types[choice];
        Throwable throwable;
        try
        {
            throwable = type.newInstance();
        }
        catch ( Exception e )
        {
            // Failing to instantiate the failure type is a bug in the test
            // setup. Attach the cause directly instead of wrapping it in an
            // intermediate Exception.
            throw new AssertionError( "Failed to instantiate failure", e );
        }
        sneakyThrow( throwable );
    }

    /**
     * Throws the given throwable, even if it is a checked exception, without
     * requiring a throws declaration from the caller.
     */
    public static void sneakyThrow(Throwable throwable)
    {
        AbstractAdversary.<RuntimeException>_sneakyThrow( throwable );
    }

    // http://youtu.be/7qXXWHfJha4
    // T is inferred as RuntimeException at the call site above, so the
    // compiler requires no throws clause; the unchecked cast is erased at
    // runtime, letting any Throwable escape.
    private static <T extends Throwable> void _sneakyThrow( Throwable throwable ) throws T
    {
        throw (T) throwable;
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_adversaries_AbstractAdversary.java
|
3,908
|
/**
 * Server-side implementation of the backup protocol: serves full store copies
 * and incremental transaction streams to backup clients.
 */
class BackupImpl implements TheBackupInterface
{
    private final BackupMonitor backupMonitor;

    /** Provider of the store directory and store id of the database being backed up. */
    public interface SPI
    {
        String getStoreDir();
        StoreId getStoreId();
    }

    private final StringLogger logger;
    private final SPI spi;
    private final XaDataSourceManager xaDataSourceManager;
    private final KernelPanicEventGenerator kpeg;
    // NOTE(review): never read or assigned in this class — verify before removing.
    private CountDownLatch countDownLatch;

    public BackupImpl( StringLogger logger, SPI spi, XaDataSourceManager xaDataSourceManager,
                       KernelPanicEventGenerator kpeg,
                       Monitors monitors )
    {
        this.logger = logger;
        this.spi = spi;
        this.xaDataSourceManager = xaDataSourceManager;
        this.kpeg = kpeg;
        this.backupMonitor = monitors.newMonitor( BackupMonitor.class, getClass() );
    }

    /**
     * Streams all store files to the client, then packs a response containing
     * the transactions that committed while the files were being copied.
     */
    @Override
    public Response<Void> fullBackup( StoreWriter writer )
    {
        backupMonitor.startCopyingFiles();
        RequestContext context = ServerUtil.rotateLogsAndStreamStoreFiles( spi.getStoreDir(),
                xaDataSourceManager,
                kpeg, logger, false, writer, new DefaultFileSystemAbstraction(), backupMonitor );
        writer.done();
        backupMonitor.finishedCopyingStoreFiles();
        return packResponse( context );
    }

    /** Packs only the transactions the client is missing, based on its context. */
    @Override
    public Response<Void> incrementalBackup( RequestContext context )
    {
        return packResponse( context );
    }

    private Response<Void> packResponse( RequestContext context )
    {
        // On Windows there's a problem extracting logs from the current log version
        // where a rotation is requested during the time of extracting transactions
        // from it, especially if the extraction process is waiting for the client
        // to catch up on reading them. On Linux/Mac this isn't a due to a more flexible
        // file handling system. Solution: rotate before doing an incremental backup
        // in Windows to avoid running into that problem.
        if ( Settings.osIsWindows() )
        {
            ServerUtil.rotateLogs( xaDataSourceManager, kpeg, logger );
        }
        return ServerUtil.packResponse( spi.getStoreId(), xaDataSourceManager, context, null, ServerUtil.ALL );
    }
}
| false
|
enterprise_backup_src_main_java_org_neo4j_backup_BackupImpl.java
|
3,909
|
{
@Override
public long getOldChannelThreshold()
{
return Client.DEFAULT_READ_RESPONSE_TIMEOUT_SECONDS * 1000;
}
@Override
public int getMaxConcurrentTransactions()
{
return 3;
}
@Override
public int getChunkSize()
{
return FRAME_LENGTH;
}
@Override
public HostnamePort getServerAddress()
{
return server;
}
}, logging, FRAME_LENGTH, PROTOCOL_VERSION,
| false
|
enterprise_backup_src_main_java_org_neo4j_backup_BackupServer.java
|
3,910
|
{
@Override
public Long getValue()
{
return 42L;
}
@Override
public Class<Long> getType()
{
return Long.class;
}
} );
| false
|
community_server_src_test_java_org_dummy_web_service_DummyPluginInitializer.java
|
3,911
|
/**
 * Client-side backup operations against a running database: full store copies
 * into an empty directory, incremental updates of an existing backup by
 * streaming and applying transactions, and incremental-with-fallback-to-full.
 */
class BackupService
{
    /**
     * Result of a backup run: the last committed transaction id per data
     * source, and whether the store passed (or skipped) the consistency check.
     */
    class BackupOutcome
    {
        private final Map<String, Long> lastCommittedTxs;
        private final boolean consistent;

        BackupOutcome( Map<String, Long> lastCommittedTxs, boolean consistent )
        {
            this.lastCommittedTxs = lastCommittedTxs;
            this.consistent = consistent;
        }

        public Map<String, Long> getLastCommittedTxs()
        {
            return Collections.unmodifiableMap( lastCommittedTxs );
        }

        public boolean isConsistent()
        {
            return consistent;
        }
    }

    private final FileSystemAbstraction fileSystem;

    BackupService() {
        this.fileSystem = new DefaultFileSystemAbstraction();
    }

    BackupService( FileSystemAbstraction fileSystem )
    {
        this.fileSystem = fileSystem;
    }

    /**
     * Copies the full store from the source host into targetDirectory, which
     * must not already contain a database. Optionally runs a full consistency
     * check afterwards.
     *
     * @throws RuntimeException if targetDirectory already contains a database
     */
    BackupOutcome doFullBackup( final String sourceHostNameOrIp, final int sourcePort, String targetDirectory,
            boolean checkConsistency, Config tuningConfiguration )
    {
        if ( directoryContainsDb( targetDirectory ) )
        {
            throw new RuntimeException( targetDirectory + " already contains a database" );
        }
        Map<String, String> params = tuningConfiguration.getParams();
        params.put( GraphDatabaseSettings.store_dir.name(), targetDirectory);
        tuningConfiguration.applyChanges( params );
        long timestamp = System.currentTimeMillis();
        // NOTE(review): this map is never populated before being handed to the
        // outcome below — confirm whether last-committed tx ids should be
        // extracted here as they are in the incremental path.
        Map<String, Long> lastCommittedTxs = new TreeMap<>();
        boolean consistent = !checkConsistency; // default to true if we're not checking consistency
        GraphDatabaseAPI targetDb = null;
        try
        {
            RemoteStoreCopier storeCopier = new RemoteStoreCopier( tuningConfiguration, loadKernelExtensions(),
                    new ConsoleLogger( StringLogger.SYSTEM ), new DefaultFileSystemAbstraction() );
            storeCopier.copyStore( new RemoteStoreCopier.StoreCopyRequester()
            {
                private BackupClient client;

                @Override
                public Response<?> copyStore( StoreWriter writer )
                {
                    client = new BackupClient( sourceHostNameOrIp, sourcePort, new DevNullLoggingService(),
                            new Monitors(), null );
                    client.start();
                    return client.fullBackup( writer );
                }

                @Override
                public void done()
                {
                    client.stop();
                }
            });
            // Start a temporary instance over the copied store so recovery can
            // run, and make sure at least one logical log is present so future
            // incremental backups are possible.
            targetDb = startTemporaryDb( targetDirectory, VerificationLevel.NONE /* run full check instead */ );
            new LogicalLogSeeder().ensureAtLeastOneLogicalLogPresent( sourceHostNameOrIp, sourcePort, targetDb );
        }
        catch ( IOException e )
        {
            throw new RuntimeException( e );
        }
        finally
        {
            if(targetDb != null)
            {
                targetDb.shutdown();
            }
        }
        bumpLogFile( targetDirectory, timestamp );
        if ( checkConsistency )
        {
            StringLogger logger = StringLogger.SYSTEM;
            try
            {
                consistent = new ConsistencyCheckService().runFullConsistencyCheck(
                        targetDirectory,
                        tuningConfiguration,
                        ProgressMonitorFactory.textual( System.err ),
                        logger ).isSuccessful();
            }
            catch ( ConsistencyCheckIncompleteException e )
            {
                // An incomplete check proves nothing; leave consistent == false.
                e.printStackTrace( System.err );
            }
            finally
            {
                logger.flush();
            }
        }
        return new BackupOutcome( lastCommittedTxs, consistent );
    }

    /**
     * Applies the transactions missing from an existing backup in targetDirectory.
     *
     * @throws IncrementalBackupNotPossibleException if the source no longer has
     *         the logical logs needed to bring this backup up to date
     * @throws RuntimeException if targetDirectory contains no database
     */
    BackupOutcome doIncrementalBackup( String sourceHostNameOrIp, int sourcePort, String targetDirectory,
            boolean verification ) throws IncrementalBackupNotPossibleException
    {
        if ( !directoryContainsDb( targetDirectory ) )
        {
            throw new RuntimeException( targetDirectory + " doesn't contain a database" );
        }
        // In case someone deleted the logical log from a full backup
        ConfigParam keepLogs = new ConfigParam()
        {
            @Override
            public void configure( Map<String, String> config )
            {
                config.put( GraphDatabaseSettings.keep_logical_logs.name(), Settings.TRUE );
            }
        };
        GraphDatabaseAPI targetDb = startTemporaryDb( targetDirectory,
                VerificationLevel.valueOf( verification ), keepLogs );
        long backupStartTime = System.currentTimeMillis();
        BackupOutcome outcome = null;
        try
        {
            outcome = doIncrementalBackup( sourceHostNameOrIp, sourcePort, targetDb );
        }
        finally
        {
            targetDb.shutdown();
        }
        bumpLogFile( targetDirectory, backupStartTime );
        return outcome;
    }

    /**
     * Tries an incremental backup; when that is impossible (no database yet,
     * or the backup is too far behind) performs a full backup instead,
     * archiving the old backup to "backup.old" first.
     */
    BackupOutcome doIncrementalBackupOrFallbackToFull( String sourceHostNameOrIp, int sourcePort, String targetDirectory,
            boolean verification, Config config )
    {
        if(!directoryContainsDb( targetDirectory ))
        {
            return doFullBackup( sourceHostNameOrIp, sourcePort, targetDirectory, verification, config );
        }
        try
        {
            return doIncrementalBackup( sourceHostNameOrIp, sourcePort, targetDirectory, verification );
        }
        catch(IncrementalBackupNotPossibleException e)
        {
            try
            {
                // Our existing backup is out of date. Archive the old backup for safekeeping and do full backup.
                File targetDirFile = new File( targetDirectory );
                File oldBackupFile = new File( targetDirectory, "backup.old" );
                prepareForFullBackup( targetDirFile, oldBackupFile );
                BackupOutcome outcome = doFullBackup( sourceHostNameOrIp, sourcePort, targetDirFile.getAbsolutePath(),
                        verification, config );
                return outcome;
            }
            catch ( IOException fullBackupFailure )
            {
                throw new RuntimeException( "Failed to perform incremental backup, fell back to full backup, " +
                        "but that failed as well: '" + fullBackupFailure.getMessage() + "'.", fullBackupFailure );
            }
        }
    }

    /**
     * Moves the stale backup out of the way into oldBackupFile, after checking
     * there is enough disk space for a fresh full copy next to it.
     */
    private void prepareForFullBackup( File targetDirFile, File oldBackupFile ) throws IOException
    {
        if(oldBackupFile.exists())
        {
            FileUtils.deleteRecursively( oldBackupFile );
        }
        if( targetDirFile.getUsableSpace() < FileUtils.directorySize( targetDirFile ) )
        {
            throw new RuntimeException( "Failed to run incremental backup because the existing backup is too " +
                    "old. Fell back to full backup, but there is not enough disk space available. " +
                    "You can mitigate this by removing the existing backup in '" +
                    targetDirFile.getAbsolutePath() + "' and running the backup again." );
        }
        FileUtils.moveDirectoryContents( targetDirFile, oldBackupFile );
    }

    // NOTE(review): currently unused within this class — verify before removing.
    private void replaceOldBackupWithNew( File oldBackup, File newBackup ) throws IOException
    {
        if(oldBackup.exists())
        {
            FileUtils.deleteRecursively( oldBackup );
        }
        FileUtils.moveFile( newBackup, oldBackup );
    }

    BackupOutcome doIncrementalBackup( String sourceHostNameOrIp, int sourcePort, GraphDatabaseAPI targetDb )
            throws IncrementalBackupNotPossibleException
    {
        return incrementalWithContext( sourceHostNameOrIp, sourcePort, targetDb, slaveContextOf( targetDb ) );
    }

    /** Builds a request context from the last committed tx id of every data source. */
    private RequestContext slaveContextOf( GraphDatabaseAPI graphDb )
    {
        XaDataSourceManager dsManager = dsManager( graphDb );
        List<Tx> txs = new ArrayList<Tx>();
        for ( XaDataSource ds : dsManager.getAllRegisteredDataSources() )
        {
            txs.add( RequestContext.lastAppliedTx( ds.getName(), ds.getLastCommittedTxId() ) );
        }
        return RequestContext.anonymous( txs.toArray( new Tx[txs.size()] ) );
    }

    boolean directoryContainsDb( String targetDirectory )
    {
        return fileSystem.fileExists( new File( targetDirectory, NeoStore.DEFAULT_NAME ) );
    }

    /**
     * Starts an embedded database over the given directory with online backup
     * disabled, applying the given extra configuration parameters.
     */
    static GraphDatabaseAPI startTemporaryDb( String targetDirectory, ConfigParam... params )
    {
        Map<String, String> config = new HashMap<String, String>();
        config.put( OnlineBackupSettings.online_backup_enabled.name(), Settings.FALSE );
        for ( ConfigParam param : params )
        {
            if ( param != null )
            {
                param.configure( config );
            }
        }
        return (GraphDatabaseAPI) new GraphDatabaseFactory().newEmbeddedDatabaseBuilder( targetDirectory )
                .setConfig( config ).newGraphDatabase();
    }

    /**
     * Performs an incremental backup based off the given context. This means
     * receiving and applying selectively (i.e. irrespective of the actual state
     * of the target db) a set of transactions starting at the desired txId and
     * spanning up to the latest of the master, for every data source
     * registered.
     *
     * @param targetDb The database that contains a previous full copy
     * @param context The context, i.e. a mapping of data source name to txid
     * which will be the first in the returned stream
     * @return A backup context, ready to perform
     */
    private BackupOutcome incrementalWithContext( String sourceHostNameOrIp, int sourcePort, GraphDatabaseAPI targetDb,
            RequestContext context ) throws IncrementalBackupNotPossibleException
    {
        BackupClient client = new BackupClient( sourceHostNameOrIp, sourcePort,
                targetDb.getDependencyResolver().resolveDependency( Logging.class ),
                targetDb.getDependencyResolver().resolveDependency( Monitors.class ),
                targetDb.storeId() );
        client.start();
        Map<String, Long> lastCommittedTxs;
        boolean consistent = false;
        try
        {
            lastCommittedTxs = unpackResponse( client.incrementalBackup( context ),
                    targetDb.getDependencyResolver().resolveDependency( XaDataSourceManager.class ),
                    new ProgressTxHandler() );
            trimLogicalLogCount( targetDb );
            consistent = true;
        }
        catch(RuntimeException e)
        {
            // A missing log version on the source means this backup is too old
            // to be updated incrementally; translate to a checked exception so
            // callers can fall back to a full backup.
            if(e.getCause() instanceof NoSuchLogVersionException )
            {
                throw new IncrementalBackupNotPossibleException("It's been too long since this backup was last updated, and it has " +
                        "fallen too far behind the database transaction stream for incremental backup to be possible. " +
                        "You need to perform a full backup at this point. You can modify this time interval by setting " +
                        "the '" + GraphDatabaseSettings.keep_logical_logs.name() + "' configuration on the database to a " +
                        "higher value.", e);
            }
            else
            {
                throw e;
            }
        }
        finally
        {
            try
            {
                client.stop();
            }
            catch ( Throwable throwable )
            {
                throw new RuntimeException( throwable );
            }
        }
        return new BackupOutcome( lastCommittedTxs, consistent );
    }

    /**
     * Rotates each data source's logical log and deletes all older log files
     * except the most recent one that actually contains transactions.
     */
    private void trimLogicalLogCount( GraphDatabaseAPI targetDb )
    {
        for ( XaDataSource ds : dsManager( targetDb ).getAllRegisteredDataSources() )
        {
            try
            {
                ds.rotateLogicalLog();
            }
            catch ( IOException e )
            {
                throw new RuntimeException( e );
            }
            long currentVersion = ds.getCurrentLogVersion() - 1;
            // TODO
            /*
             * Checking the file size to determine if transactions exist in
             * a log feels hack-ish. Maybe fix this to read the header
             * and check latest txid?
             */
            while ( ds.getLogicalLogLength( currentVersion ) <= 16
                    && currentVersion > 0 )
            {
                currentVersion--;
            }
            /*
             * Ok, we skipped all logs that have no transactions in them. Current is the
             * one with the tx in it. Skip it.
             */
            currentVersion--;
            /*
             * Now delete the rest.
             */
            while ( ds.getLogicalLogLength( currentVersion ) > 0 )
            {
                ds.deleteLogicalLog( currentVersion );
                currentVersion--;
            }
        }
    }

    private XaDataSourceManager dsManager( GraphDatabaseAPI targetDb )
    {
        return targetDb.getDependencyResolver().resolveDependency( XaDataSourceManager.class );
    }

    /** Applies the received transactions and returns the resulting last-committed tx ids. */
    private Map<String, Long> unpackResponse( Response<Void> response, XaDataSourceManager xaDsm, TxHandler txHandler )
    {
        try
        {
            ServerUtil.applyReceivedTransactions( response, xaDsm, txHandler );
            return extractLastCommittedTxs( xaDsm );
        }
        catch ( IOException e )
        {
            throw new RuntimeException( "Unable to apply received transactions", e );
        }
    }

    private Map<String, Long> extractLastCommittedTxs( XaDataSourceManager xaDsm )
    {
        TreeMap<String, Long> lastCommittedTxs = new TreeMap<String, Long>();
        for ( XaDataSource ds : xaDsm.getAllRegisteredDataSources() )
        {
            lastCommittedTxs.put( ds.getName(), ds.getLastCommittedTxId() );
        }
        return lastCommittedTxs;
    }

    /**
     * Renames the message log in the backup directory to a timestamped name,
     * so each backup run leaves its own log behind.
     *
     * @return {@code true} if exactly one log file was found and renamed
     */
    private static boolean bumpLogFile( String targetDirectory, long toTimestamp )
    {
        File dbDirectory = new File( targetDirectory );
        File[] candidates = dbDirectory.listFiles( new FilenameFilter()
        {
            @Override
            public boolean accept( File dir, String name )
            {
                // Match only the current (un-timestamped) log file; previously
                // bumped files carry a ".<timestamp>" suffix and won't match.
                return name.equals( StringLogger.DEFAULT_NAME );
            }
        } );
        // listFiles() returns null when the directory doesn't exist or can't
        // be read; guard against that as well as against multiple matches.
        if ( candidates == null || candidates.length != 1 )
        {
            return false;
        }
        // candidates has a unique member, the right one
        File previous = candidates[0];
        // Build to, from existing parent + new filename
        File to = new File( previous.getParentFile(), StringLogger.DEFAULT_NAME
                + "." + toTimestamp );
        return previous.renameTo( to );
    }

    private List<KernelExtensionFactory<?>> loadKernelExtensions()
    {
        List<KernelExtensionFactory<?>> kernelExtensions = new ArrayList<>();
        for ( KernelExtensionFactory factory : Service.load( KernelExtensionFactory.class ) )
        {
            kernelExtensions.add( factory );
        }
        return kernelExtensions;
    }

    /** Reports open-ended progress to stdout as transactions are applied. */
    private static class ProgressTxHandler implements TxHandler
    {
        private final ProgressListener progress = ProgressMonitorFactory.textual( System.out ).openEnded( "Transactions applied", 1000 );

        @Override
        public void accept( Triplet<String, Long, TxExtractor> tx, XaDataSource dataSource )
        {
            progress.add( 1 );
        }

        @Override
        public void done()
        {
            progress.done();
        }
    }
}
| false
|
enterprise_backup_src_main_java_org_neo4j_backup_BackupService.java
|
3,912
|
/**
 * Kernel extension factory that wires up the online backup server as a
 * database lifecycle component.
 */
@Service.Implementation(KernelExtensionFactory.class)
public class OnlineBackupExtensionFactory extends KernelExtensionFactory<OnlineBackupExtensionFactory.Dependencies>
{
    static final String KEY = "online backup";

    /** Everything the backup kernel extension needs from the database. */
    public interface Dependencies
    {
        Config getConfig();
        XaDataSourceManager xaDataSourceManager();
        GraphDatabaseAPI getGraphDatabaseAPI();
        Logging logging();
        KernelPanicEventGenerator kpeg();
        Monitors monitors();
    }

    public OnlineBackupExtensionFactory()
    {
        super( KEY );
    }

    @Override
    public Class getSettingsClass()
    {
        return OnlineBackupSettings.class;
    }

    @Override
    public Lifecycle newKernelExtension( Dependencies dependencies ) throws Throwable
    {
        // Resolve the pieces up front for readability, then hand them to the extension.
        Config config = dependencies.getConfig();
        GraphDatabaseAPI db = dependencies.getGraphDatabaseAPI();
        return new OnlineBackupKernelExtension( config, db,
                dependencies.xaDataSourceManager(), dependencies.kpeg(), dependencies.logging(),
                dependencies.monitors() );
    }
}
| false
|
enterprise_backup_src_main_java_org_neo4j_backup_OnlineBackupExtensionFactory.java
|
3,913
|
public class OnlineBackup
{
private final String hostNameOrIp;
private final int port;
private BackupService.BackupOutcome outcome;
/**
* Factory method for this class. The OnlineBackup instance returned will perform backup operations against the
* hostname and port passed in as parameters.
*
* @param hostNameOrIp The hostname or the IP address of the backup server
* @param port The port at which the remote backup server is listening
* @return An OnlineBackup instance ready to perform backup operations from the given remote server
*/
public static OnlineBackup from( String hostNameOrIp, int port )
{
    // Pure factory: all state is captured by the private constructor.
    OnlineBackup onlineBackup = new OnlineBackup( hostNameOrIp, port );
    return onlineBackup;
}
/**
* Factory method for this class. The OnlineBackup instance returned will perform backup operations against the
* hostname passed in as parameter, using the default backup port.
*
* @param hostNameOrIp The hostname or IP address of the backup server
* @return An OnlineBackup instance ready to perform backup operations from the given remote server
*/
public static OnlineBackup from( String hostNameOrIp )
{
    // Same as the two-argument factory, with the default backup port.
    return from( hostNameOrIp, BackupServer.DEFAULT_PORT );
}
private OnlineBackup( String hostNameOrIp, int port )
{
    // Instances are created only through the static from(...) factories.
    this.port = port;
    this.hostNameOrIp = hostNameOrIp;
}
/**
* Performs a backup into targetDirectory. The server contacted is the one configured in the factory method used to
* obtain this instance. After the backup is complete, a verification phase will take place, checking
* the database for consistency. If any errors are found, they will be printed in stderr.
*
* If the target directory does not contain a database, a full backup will be performed, otherwise an incremental
* backup mechanism is used.
*
* If the backup has become too far out of date for an incremental backup to succeed, a full backup is performed.
*
*
* @param targetDirectory A directory holding a complete database previously obtained from the backup server.
* @return The same OnlineBackup instance, possible to use for a new backup operation
*/
public OnlineBackup backup( String targetDirectory )
{
    // Verification is on by default; delegate to the two-argument overload.
    return backup( targetDirectory, true );
}
/**
* Performs a backup into targetDirectory. The server contacted is the one configured in the factory method used to
* obtain this instance. After the backup is complete, and if the verification parameter is set to true,
* a verification phase will take place, checking the database for consistency. If any errors are found, they will
* be printed in stderr.
*
* If the target directory does not contain a database, a full backup will be performed, otherwise an incremental
* backup mechanism is used.
*
* If the backup has become too far out of date for an incremental backup to succeed, a full backup is performed.
*
*
* @param targetDirectory A directory holding a complete database previously obtained from the backup server.
* @param verification If true, the verification phase will be run.
* @return The same OnlineBackup instance, possible to use for a new backup operation
*/
public OnlineBackup backup( String targetDirectory, boolean verification )
{
    // Incremental when possible, full otherwise; record the outcome for inspection.
    BackupService service = new BackupService();
    outcome = service.doIncrementalBackupOrFallbackToFull( hostNameOrIp, port, targetDirectory,
            verification, defaultConfig() );
    return this;
}
/**
* Performs a backup into targetDirectory. The server contacted is the one configured in the factory method used to
* obtain this instance. After the backup is complete, a verification phase will take place, checking
* the database for consistency. If any errors are found, they will be printed in stderr.
*
* If the target directory does not contain a database, a full backup will be performed, otherwise an incremental
* backup mechanism is used.
*
* If the backup has become too far out of date for an incremental backup to succeed, a full backup is performed.
*
* @param targetDirectory A directory holding a complete database previously obtained from the backup server.
* @param tuningConfiguration The {@link Config} to use when running the consistency check
* @return The same OnlineBackup instance, possible to use for a new backup operation
*/
public OnlineBackup backup( String targetDirectory, Config tuningConfiguration )
{
    // Verification is on by default; delegate to the three-argument overload.
    return backup( targetDirectory, tuningConfiguration, true );
}
/**
* Performs a backup into targetDirectory. The server contacted is the one configured in the factory method used to
* obtain this instance. After the backup is complete, and if the verification parameter is set to true,
* a verification phase will take place, checking the database for consistency. If any errors are found, they will
* be printed in stderr.
*
* If the target directory does not contain a database, a full backup will be performed, otherwise an incremental
* backup mechanism is used.
*
* If the backup has become too far out of date for an incremental backup to succeed, a full backup is performed.
*
* @param targetDirectory A directory holding a complete database previously obtained from the backup server.
* @param tuningConfiguration The {@link Config} to use when running the consistency check
* @param verification If true, the verification phase will be run.
* @return The same OnlineBackup instance, possible to use for a new backup operation
*/
public OnlineBackup backup( String targetDirectory, Config tuningConfiguration,
        boolean verification )
{
    // Incremental when possible, full otherwise, using the caller's tuning config.
    BackupService service = new BackupService();
    outcome = service.doIncrementalBackupOrFallbackToFull( hostNameOrIp, port, targetDirectory,
            verification, tuningConfiguration );
    return this;
}
/**
* Performs a full backup storing the resulting database at the given directory. The server contacted is the one
* configured in the factory method used to obtain this instance. At the end of the backup, a verification phase
* will take place, running over the resulting database ensuring it is consistent. If the check fails, the fact
* will be printed in stderr.
*
* If the target directory already contains a database, a RuntimeException denoting the fact will be thrown.
*
* @param targetDirectory The directory in which to store the database
* @return The same OnlineBackup instance, possible to use for a new backup operation.
* @deprecated Use {@link #backup(String)} instead.
*/
@Deprecated
public OnlineBackup full( String targetDirectory )
{
    // Verification is on by default; delegate to the two-argument overload.
    return full( targetDirectory, true );
}
/**
* Performs a full backup storing the resulting database at the given directory. The server contacted is the one
* configured in the factory method used to obtain this instance. If the verification flag is set, at the end of
* the backup, a verification phase will take place, running over the resulting database ensuring it is consistent.
* If the check fails, the fact will be printed in stderr.
*
* If the target directory already contains a database, a RuntimeException denoting the fact will be thrown.
*
* @param targetDirectory The directory in which to store the database
* @param verification a boolean indicating whether to perform verification on the created backup
* @return The same OnlineBackup instance, possible to use for a new backup operation.
* @deprecated Use {@link #backup(String, boolean)} instead
*/
@Deprecated
public OnlineBackup full( String targetDirectory, boolean verification )
{
    // Always a full copy, regardless of what targetDirectory contains.
    BackupService service = new BackupService();
    outcome = service.doFullBackup( hostNameOrIp, port, targetDirectory, verification,
            defaultConfig() );
    return this;
}
/**
 * Performs a full backup storing the resulting database at the given directory. The server contacted is the one
 * configured in the factory method used to obtain this instance. If the verification flag is set, at the end of
 * the backup, a verification phase will take place, running over the resulting database ensuring it is consistent.
 * If the check fails, the fact will be printed in stderr. The consistency check will run with the provided
 * tuning configuration.
 *
 * If the target directory already contains a database, a RuntimeException denoting the fact will be thrown.
 *
 * @param targetDirectory The directory in which to store the database
 * @param verification a boolean indicating whether to perform verification on the created backup
 * @param tuningConfiguration The {@link Config} to use when running the consistency check
 * @return The same OnlineBackup instance, possible to use for a new backup operation.
 * @deprecated Use {@link #backup(String, Config, boolean)} instead.
 */
@Deprecated
public OnlineBackup full( String targetDirectory, boolean verification, Config tuningConfiguration )
{
    // Same as full(String, boolean) but with a caller-supplied consistency-check config.
    outcome = new BackupService().doFullBackup( hostNameOrIp, port, targetDirectory, verification,
            tuningConfiguration );
    return this;
}
/**
 * Performs an incremental backup on the database stored in targetDirectory. The server contacted is the one
 * configured in the factory method used to obtain this instance. After the incremental backup is complete, a
 * verification phase will take place, checking the database for consistency. If any errors are found, they will
 * be printed in stderr.
 *
 * If the target directory does not contain a database or it is not compatible with the one present in the
 * configured backup server a RuntimeException will be thrown denoting the fact.
 *
 * @param targetDirectory A directory holding a complete database previously obtained from the backup server.
 * @return The same OnlineBackup instance, possible to use for a new backup operation
 * @deprecated Use {@link #backup(String)} instead.
 */
@Deprecated
public OnlineBackup incremental( String targetDirectory )
{
    // Verification is hard-coded to true for this overload.
    outcome = new BackupService().doIncrementalBackup( hostNameOrIp, port, targetDirectory, true );
    return this;
}
/**
 * Performs an incremental backup on the database stored in targetDirectory. The server contacted is the one
 * configured in the factory method used to obtain this instance. After the incremental backup is complete, and if
 * the verification parameter is set to true, a verification phase will take place, checking the database for
 * consistency. If any errors are found, they will be printed in stderr.
 *
 * If the target directory does not contain a database or it is not compatible with the one present in the
 * configured backup server a RuntimeException will be thrown denoting the fact.
 *
 * @param targetDirectory A directory holding a complete database previously obtained from the backup server.
 * @param verification If true, the verification phase will be run.
 * @return The same OnlineBackup instance, possible to use for a new backup operation
 * @deprecated Use {@link #backup(String, boolean)} instead.
 */
@Deprecated
public OnlineBackup incremental( String targetDirectory, boolean verification )
{
    outcome = new BackupService().doIncrementalBackup( hostNameOrIp, port, targetDirectory, verification );
    return this;
}
/**
 * Performs an incremental backup on the supplied target database. The server contacted is the one
 * configured in the factory method used to obtain this instance. After the incremental backup is complete
 * a verification phase will take place, checking the database for consistency. If any errors are found, they will
 * be printed in stderr.
 *
 * If the target database is not compatible with the one present in the target backup server, a RuntimeException
 * will be thrown denoting the fact.
 *
 * @param targetDb The database on which the incremental backup is to be applied
 * @return The same OnlineBackup instance, possible to use for a new backup operation.
 * @deprecated Use {@link #backup(String)} instead.
 */
@Deprecated
public OnlineBackup incremental( GraphDatabaseAPI targetDb )
{
    // Applies the incremental backup directly onto a running database instance rather
    // than a directory on disk.
    outcome = new BackupService().doIncrementalBackup( hostNameOrIp, port, targetDb );
    return this;
}
/**
 * Provides information about the last committed transaction for each data source present in the last backup
 * operation performed by this OnlineBackup.
 * In particular, it returns a map where the keys are the names of the data sources and the values the longs that
 * are the last committed transaction id for that data source.
 *
 * @return A map from data source name to last committed transaction id.
 * @throws IllegalStateException if no backup has been performed yet (see {@link #outcome()}).
 */
public Map<String, Long> getLastCommittedTxs()
{
    return outcome().getLastCommittedTxs();
}
/**
 * @return the consistency outcome of the last made backup.
 * @throws IllegalStateException if no backup has been performed yet (see {@link #outcome()}).
 */
public boolean isConsistent()
{
    return outcome().isConsistent();
}
/**
 * Returns the outcome of the most recent backup run, failing fast when no
 * backup has been executed on this instance yet.
 */
private BackupOutcome outcome()
{
    if ( outcome != null )
    {
        return outcome;
    }
    throw new IllegalStateException( "No outcome yet. Please call full or incremental backup first" );
}
// Builds the default tuning configuration (empty property map, standard graph database
// and consistency-check setting classes) used when the caller supplies no Config.
private Config defaultConfig()
{
    return new Config( stringMap(), GraphDatabaseSettings.class, ConsistencyCheckSettings.class );
}
}
| false
|
enterprise_backup_src_main_java_org_neo4j_backup_OnlineBackup.java
|
3,914
|
/**
 * After a store copy the target database may have data sources for which no logical log entry
 * exists locally, so "which master committed transaction X" cannot be answered. This class pulls
 * the latest transaction for each such data source from the backup source and writes it into a
 * freshly truncated archived logical log file, ensuring at least one log entry is present.
 */
public class LogicalLogSeeder
{
    /**
     * Ensures every registered data source in {@code targetDb} can resolve the master for its
     * last committed transaction. For the ones that cannot, performs a targeted incremental
     * backup from {@code sourceHostNameOrIp:sourcePort} and writes the received transaction
     * streams into archived log files. Always shuts down {@code targetDb} when recovery ran.
     */
    public void ensureAtLeastOneLogicalLogPresent( String sourceHostNameOrIp, int sourcePort, GraphDatabaseAPI targetDb )
    {
        // Then go over all datasources, try to extract the latest tx
        Set<String> noTxPresent = new HashSet<>();
        XaDataSourceManager dsManager = targetDb.getDependencyResolver().resolveDependency( XaDataSourceManager.class );
        for ( XaDataSource ds : dsManager.getAllRegisteredDataSources() )
        {
            long lastTx = ds.getLastCommittedTxId();
            try
            {
                // This fails if the tx is not present with NSLVE
                ds.getMasterForCommittedTx( lastTx );
            }
            catch ( NoSuchLogVersionException e )
            {
                // Note the name of the datasource
                noTxPresent.add( ds.getName() );
            }
            catch ( IOException e )
            {
                throw new RuntimeException( e );
            }
        }
        if ( !noTxPresent.isEmpty() )
        {
            /*
             * Create a fake slave context, asking for the transactions that
             * span the next-to-last up to the latest for each datasource
             */
            BackupClient recoveryClient = new BackupClient(
                    sourceHostNameOrIp, sourcePort,
                    targetDb.getDependencyResolver().resolveDependency( Logging.class ),
                    targetDb.getDependencyResolver().resolveDependency( Monitors.class ),
                    targetDb.storeId() );
            recoveryClient.start();
            Response<Void> recoveryResponse = null;
            // Diff of -1 per missing data source shifts the request context one tx back,
            // so the server streams the latest committed transaction for it.
            Map<String, Long> recoveryDiff = new HashMap<>();
            for ( String ds : noTxPresent )
            {
                recoveryDiff.put( ds, -1L );
            }
            RequestContext recoveryCtx = addDiffToSlaveContext( slaveContextOf( dsManager ), recoveryDiff );
            try
            {
                recoveryResponse = recoveryClient.incrementalBackup( recoveryCtx );
                // Ok, the response is here, apply it.
                TransactionStream txs = recoveryResponse.transactions();
                ByteBuffer scratch = ByteBuffer.allocate( 64 );
                while ( txs.hasNext() )
                {
                    /*
                     * For each tx stream in the response, create the latest archived
                     * logical log file and write out in there the transaction.
                     *
                     */
                    Triplet<String, Long, TxExtractor> tx = txs.next();
                    scratch.clear();
                    XaDataSource ds = dsManager.getXaDataSource( tx.first() );
                    long logVersion = ds.getCurrentLogVersion() - 1;
                    // NOTE(review): this channel (and 'received' below) is not closed if an
                    // exception occurs before the explicit close() calls — potential leak; confirm.
                    FileChannel newLog = new RandomAccessFile( ds.getFileName( logVersion ), "rw" ).getChannel();
                    newLog.truncate( 0 );
                    LogIoUtils.writeLogHeader( scratch, logVersion, -1 );
                    // scratch buffer is flipped by writeLogHeader
                    newLog.write( scratch );
                    ReadableByteChannel received = tx.third().extract();
                    // NOTE(review): flip() here (and the flip/flip pair in the loop) limits reads
                    // to the header's length rather than the buffer's capacity; looks like clear()
                    // may have been intended — confirm before changing.
                    scratch.flip();
                    while ( received.read( scratch ) > 0 )
                    {
                        scratch.flip();
                        newLog.write( scratch );
                        scratch.flip();
                    }
                    newLog.force( false );
                    newLog.close();
                    received.close();
                }
            }
            catch ( IOException e )
            {
                throw new RuntimeException( e );
            }
            finally
            {
                try
                {
                    recoveryClient.stop();
                }
                catch ( Throwable throwable )
                {
                    throw new RuntimeException( throwable );
                }
                if ( recoveryResponse != null )
                {
                    recoveryResponse.close();
                }
                // The target database is shut down unconditionally once recovery was attempted.
                targetDb.shutdown();
            }
        }
    }

    // Builds an anonymous request context reporting the last committed tx id of every
    // registered data source.
    private RequestContext slaveContextOf( XaDataSourceManager dsManager )
    {
        List<RequestContext.Tx> txs = new ArrayList<>();
        for ( XaDataSource ds : dsManager.getAllRegisteredDataSources() )
        {
            txs.add( RequestContext.lastAppliedTx( ds.getName(), ds.getLastCommittedTxId() ) );
        }
        return RequestContext.anonymous( txs.toArray( new RequestContext.Tx[txs.size()] ) );
    }

    // Returns a copy of 'original' with each data source's tx id shifted by the diff in
    // 'diffPerDataSource' (missing entries mean a diff of 0).
    private RequestContext addDiffToSlaveContext( RequestContext original,
                                                  Map<String, Long> diffPerDataSource )
    {
        RequestContext.Tx[] oldTxs = original.lastAppliedTransactions();
        RequestContext.Tx[] newTxs = new RequestContext.Tx[oldTxs.length];
        for ( int i = 0; i < oldTxs.length; i++ )
        {
            RequestContext.Tx oldTx = oldTxs[i];
            String dsName = oldTx.getDataSourceName();
            long originalTxId = oldTx.getTxId();
            Long diff = diffPerDataSource.get( dsName );
            if ( diff == null )
            {
                diff = 0L;
            }
            long newTxId = originalTxId + diff;
            newTxs[i] = RequestContext.lastAppliedTx( dsName, newTxId );
        }
        return RequestContext.anonymous( newTxs );
    }
}
| false
|
enterprise_backup_src_main_java_org_neo4j_backup_LogicalLogSeeder.java
|
3,915
|
/**
 * End-to-end test: takes a full online backup, adds data to the source database, then verifies
 * that an incremental backup brings the backup copy up to date.
 */
public class IncrementalBackupTests
{
    private File serverPath;  // source database directory
    private File backupPath;  // backup target directory
    @Rule
    public TestName testName = new TestName();
    @Before
    public void before() throws Exception
    {
        File base = TargetDirectory.forTest( getClass() ).cleanDirectory( testName.getMethodName() );
        serverPath = new File( base, "server" );
        backupPath = new File( base, "backup" );
    }
    @Test
    public void shouldDoIncrementalBackup() throws Exception
    {
        DbRepresentation initialDataSetRepresentation = createInitialDataSet2( serverPath );
        ServerInterface server = startServer( serverPath, "127.0.0.1:6362" );
        // START SNIPPET: onlineBackup
        OnlineBackup backup = OnlineBackup.from( "127.0.0.1" );
        backup.full( backupPath.getPath() );
        // END SNIPPET: onlineBackup
        assertEquals( initialDataSetRepresentation, DbRepresentation.of( backupPath ) );
        shutdownServer( server );
        DbRepresentation furtherRepresentation = addMoreData2( serverPath );
        server = startServer( serverPath, null );
        // START SNIPPET: onlineBackup
        backup.incremental( backupPath.getPath() );
        // END SNIPPET: onlineBackup
        assertEquals( furtherRepresentation, DbRepresentation.of( backupPath ) );
        shutdownServer( server );
    }
    // Seeds the store with three named nodes and one LOVES relationship, returning a
    // representation of the resulting database for later comparison.
    private DbRepresentation createInitialDataSet2( File path )
    {
        GraphDatabaseService db = startGraphDatabase( path );
        Transaction tx = db.beginTx();
        db.createNode().setProperty( "name", "Goofy" );
        Node donald = db.createNode();
        donald.setProperty( "name", "Donald" );
        Node daisy = db.createNode();
        daisy.setProperty( "name", "Daisy" );
        Relationship knows = donald.createRelationshipTo( daisy,
                DynamicRelationshipType.withName( "LOVES" ) );
        knows.setProperty( "since", 1940 );
        tx.success();
        tx.finish();
        DbRepresentation result = DbRepresentation.of( db );
        db.shutdown();
        return result;
    }
    // Adds one more node and a HATES relationship on top of the initial data set.
    private DbRepresentation addMoreData2( File path )
    {
        GraphDatabaseService db = startGraphDatabase( path );
        Transaction tx = db.beginTx();
        // NOTE(review): relies on "Donald" having been assigned node id 2 by
        // createInitialDataSet2 — confirm id allocation order if this breaks.
        Node donald = db.getNodeById( 2 );
        Node gladstone = db.createNode();
        gladstone.setProperty( "name", "Gladstone" );
        Relationship hates = donald.createRelationshipTo( gladstone,
                DynamicRelationshipType.withName( "HATES" ) );
        hates.setProperty( "since", 1948 );
        tx.success();
        tx.finish();
        DbRepresentation result = DbRepresentation.of( db );
        db.shutdown();
        return result;
    }
    // Embedded database with the online backup server disabled; logical logs are kept so
    // the incremental backup has transactions to pull.
    private GraphDatabaseService startGraphDatabase( File path )
    {
        return new GraphDatabaseFactory().
                newEmbeddedDatabaseBuilder( path.getPath() ).
                setConfig( OnlineBackupSettings.online_backup_enabled, Settings.FALSE ).
                setConfig( GraphDatabaseSettings.keep_logical_logs, Settings.TRUE ).
                newGraphDatabase();
    }
    private ServerInterface startServer( File path, String serverAddress ) throws Exception
    {
        ServerInterface server = new EmbeddedServer( path.getPath(), serverAddress );
        server.awaitStarted();
        return server;
    }
    private void shutdownServer( ServerInterface server ) throws Exception
    {
        server.shutdown();
        // Grace period for the server socket to be released before the next test step.
        Thread.sleep( 1000 );
    }
}
| false
|
enterprise_backup_src_test_java_org_neo4j_backup_IncrementalBackupTests.java
|
3,916
|
/**
 * Signals that an incremental backup cannot be performed against the target — typically
 * because the backup has fallen too far behind the source's transaction stream — and a
 * full backup is required instead.
 */
public class IncrementalBackupNotPossibleException extends RuntimeException
{
    /**
     * @param msg   human-readable explanation of why incremental backup is impossible
     * @param cause the underlying failure that triggered this condition
     */
    public IncrementalBackupNotPossibleException( String msg, Exception cause )
    {
        super( msg, cause );
    }
}
| false
|
enterprise_backup_src_main_java_org_neo4j_backup_IncrementalBackupNotPossibleException.java
|
3,917
|
/**
 * Verifies that BackupTool failures terminate the JVM with exit code 1. System.exit is
 * mocked statically via PowerMock so the test JVM itself is never killed.
 */
@RunWith(PowerMockRunner.class)
@PrepareForTest(BackupTool.ToolFailureException.class)
public class ExitCodeTest {
    @Test
    public void shouldToolFailureExceptionCauseExitCode() {
        // setup: intercept System.exit calls instead of actually exiting
        PowerMockito.mockStatic(System.class);
        // when
        new BackupTool.ToolFailureException("tool failed").haltJVM();
        // then
        PowerMockito.verifyStatic();
        System.exit(1);
    }
    @Test
    public void shouldBackupToolMainCauseExitCode() {
        // setup: intercept System.exit calls instead of actually exiting
        PowerMockito.mockStatic(System.class);
        // when: no arguments at all is invalid usage and must fail
        BackupTool.main(new String[] {});
        // then
        PowerMockito.verifyStatic();
        System.exit(1);
    }
}
| false
|
enterprise_backup_src_test_java_org_neo4j_backup_ExitCodeTest.java
|
3,918
|
/**
 * Test helper: an embedded database with the online backup server enabled, listening on
 * the given address, exposed through the {@link ServerInterface} used by backup tests.
 */
public class EmbeddedServer implements ServerInterface
{
    private GraphDatabaseService db;

    public EmbeddedServer( String storeDir, String serverAddress )
    {
        GraphDatabaseBuilder builder = new GraphDatabaseFactory().newEmbeddedDatabaseBuilder( storeDir );
        builder.setConfig( OnlineBackupSettings.online_backup_enabled, Settings.TRUE );
        builder.setConfig( OnlineBackupSettings.online_backup_server, serverAddress );
        db = builder.newGraphDatabase();
    }

    public void shutdown()
    {
        db.shutdown();
    }

    public void awaitStarted()
    {
        // Embedded databases are fully started once the constructor returns; nothing to wait for.
    }
}
| false
|
enterprise_backup_src_test_java_org_neo4j_backup_EmbeddedServer.java
|
3,919
|
/**
 * Unit tests for {@link BackupTool} argument parsing and dispatch: the tool always routes
 * through doIncrementalBackupOrFallbackToFull (the -full/-incremental flags are ignored),
 * applies or loads consistency-check configuration, and fails with ToolFailureException on
 * missing/invalid -from, missing -to, or an unreadable -config file.
 */
public class BackupToolTest
{
    @Test
    public void shouldUseIncrementalOrFallbackToFull() throws Exception
    {
        String[] args = new String[]{"-from", "single://localhost", "-to", "my_backup"};
        BackupService service = mock( BackupService.class );
        PrintStream systemOut = mock( PrintStream.class );
        // when
        new BackupTool( service, systemOut ).run( args );
        // then
        verify( service ).doIncrementalBackupOrFallbackToFull( eq( "localhost" ),
                eq( BackupServer.DEFAULT_PORT ), eq( "my_backup" ), eq( true ), any( Config.class ) );
        verify( systemOut ).println( "Performing backup from 'single://localhost'" );
        verify( systemOut ).println( "Done" );
    }
    @Test
    public void shouldIgnoreIncrementalFlag() throws Exception
    {
        String[] args = new String[]{"-incremental", "-from", "single://localhost", "-to", "my_backup"};
        BackupService service = mock( BackupService.class );
        PrintStream systemOut = mock( PrintStream.class );
        // when
        new BackupTool( service, systemOut ).run( args );
        // then: same dispatch as without the flag
        verify( service ).doIncrementalBackupOrFallbackToFull( eq( "localhost" ), eq( BackupServer.DEFAULT_PORT ),
                eq( "my_backup" ), eq( true ), any( Config.class ) );
        verify( systemOut ).println( "Performing backup from 'single://localhost'" );
        verify( systemOut ).println( "Done" );
    }
    @Test
    public void shouldIgnoreFullFlag() throws Exception
    {
        String[] args = new String[]{"-full", "-from", "single://localhost", "-to", "my_backup"};
        BackupService service = mock( BackupService.class );
        // Even with an existing db in the target, -full is ignored.
        when(service.directoryContainsDb( anyString() )).thenReturn( true );
        PrintStream systemOut = mock( PrintStream.class );
        // when
        new BackupTool( service, systemOut ).run( args );
        // then
        verify( service ).doIncrementalBackupOrFallbackToFull( eq( "localhost" ), eq( BackupServer.DEFAULT_PORT ),
                eq( "my_backup" ), eq( true ), any(Config.class) );
        verify( systemOut ).println( "Performing backup from 'single://localhost'" );
        verify( systemOut ).println( "Done" );
    }
    @Test
    public void appliesDefaultTuningConfigurationForConsistencyChecker() throws Exception
    {
        // given
        String[] args = new String[]{"-from", "single://localhost",
                "-to", "my_backup"};
        BackupService service = mock( BackupService.class );
        PrintStream systemOut = mock( PrintStream.class );
        // when
        new BackupTool( service, systemOut ).run( args );
        // then: capture the Config passed to the service and check the defaults
        ArgumentCaptor<Config> config = ArgumentCaptor.forClass( Config.class );
        verify( service ).doIncrementalBackupOrFallbackToFull( anyString(), anyInt(), anyString(), anyBoolean(),
                config.capture() );
        assertFalse( config.getValue().get( ConsistencyCheckSettings.consistency_check_property_owners ) );
        assertEquals( TaskExecutionOrder.MULTI_PASS,
                config.getValue().get( ConsistencyCheckSettings.consistency_check_execution_order ) );
        // Window pool default differs by OS.
        WindowPoolImplementation expectedPoolImplementation = !Settings.osIsWindows() ?
                WindowPoolImplementation.SCAN_RESISTANT :
                WindowPoolImplementation.MOST_FREQUENTLY_USED;
        assertEquals( expectedPoolImplementation,
                config.getValue().get( ConsistencyCheckSettings.consistency_check_window_pool_implementation ) );
    }
    @Test
    public void passesOnConfigurationIfProvided() throws Exception
    {
        // given: a properties file overriding a consistency-check setting
        File propertyFile = TargetDirectory.forTest( getClass() ).file( "neo4j.properties" );
        Properties properties = new Properties();
        properties.setProperty( ConsistencyCheckSettings.consistency_check_property_owners.name(), "true" );
        properties.store( new FileWriter( propertyFile ), null );
        String[] args = new String[]{"-from", "single://localhost",
                "-to", "my_backup", "-config", propertyFile.getPath()};
        BackupService service = mock( BackupService.class );
        PrintStream systemOut = mock( PrintStream.class );
        // when
        new BackupTool( service, systemOut ).run( args );
        // then
        ArgumentCaptor<Config> config = ArgumentCaptor.forClass( Config.class );
        verify( service ).doIncrementalBackupOrFallbackToFull( anyString(), anyInt(), anyString(), anyBoolean(),
                config.capture() );
        assertTrue( config.getValue().get( ConsistencyCheckSettings.consistency_check_property_owners ) );
    }
    @Test
    public void exitWithFailureIfConfigSpecifiedButPropertiesFileDoesNotExist() throws Exception
    {
        // given
        File propertyFile = TargetDirectory.forTest( getClass() ).file( "nonexistent_file" );
        String[] args = new String[]{"-from", "single://localhost",
                "-to", "my_backup", "-config", propertyFile.getPath()};
        BackupService service = mock( BackupService.class );
        PrintStream systemOut = mock( PrintStream.class );
        BackupTool backupTool = new BackupTool( service, systemOut );
        try
        {
            // when
            backupTool.run( args );
            fail( "should exit abnormally" );
        }
        catch ( BackupTool.ToolFailureException e )
        {
            // then
            assertThat( e.getMessage(), containsString( "Could not read configuration properties file" ) );
            assertThat( e.getCause(), instanceOf( IOException.class ) );
        }
        verifyZeroInteractions( service );
    }
    @Test
    public void exitWithFailureIfNoSourceSpecified() throws Exception
    {
        // given
        String[] args = new String[]{"-to", "my_backup"};
        BackupService service = mock( BackupService.class );
        PrintStream systemOut = mock( PrintStream.class );
        BackupTool backupTool = new BackupTool( service, systemOut );
        try
        {
            // when
            backupTool.run( args );
            fail( "should exit abnormally" );
        }
        catch ( BackupTool.ToolFailureException e )
        {
            // then: usage message lists example -from URIs
            assertEquals( "Please specify -from, examples:\n" +
                    "  -from single://192.168.1.34\n" +
                    "  -from single://192.168.1.34:1234\n" +
                    "  -from ha://192.168.1.15:2181\n" +
                    "  -from ha://192.168.1.15:2181,192.168.1.16:2181",
                    e.getMessage() );
        }
        verifyZeroInteractions( service );
    }
    @Test
    public void exitWithFailureIfInvalidSourceSpecified() throws Exception
    {
        // given: "foo" is not a known backup module scheme
        String[] args = new String[]{"-from", "foo:localhost:123", "-to", "my_backup"};
        BackupService service = mock( BackupService.class );
        PrintStream systemOut = mock( PrintStream.class );
        BackupTool backupTool = new BackupTool( service, systemOut );
        try
        {
            // when
            backupTool.run( args );
            fail( "should exit abnormally" );
        }
        catch ( BackupTool.ToolFailureException e )
        {
            // then
            assertEquals( "foo was specified as a backup module but it was not found. " +
                    "Please make sure that the implementing service is on the classpath.",
                    e.getMessage() );
        }
        verifyZeroInteractions( service );
    }
    @Test
    public void exitWithFailureIfNoDestinationSpecified() throws Exception
    {
        // given
        String[] args = new String[]{"-from", "single://localhost"};
        BackupService service = mock( BackupService.class );
        PrintStream systemOut = mock( PrintStream.class );
        BackupTool backupTool = new BackupTool( service, systemOut );
        try
        {
            // when
            backupTool.run( args );
            fail( "should exit abnormally" );
        }
        catch ( BackupTool.ToolFailureException e )
        {
            // then
            assertEquals( "Specify target location with -to <target-directory>",
                    e.getMessage() );
        }
        verifyZeroInteractions( service );
    }
}
| false
|
enterprise_backup_src_test_java_org_neo4j_backup_BackupToolTest.java
|
3,920
|
/**
 * Failure raised by the backup tool when it cannot proceed; {@link #haltJVM()} reports the
 * problem on stdout and terminates the JVM with exit code 1.
 */
static class ToolFailureException extends Exception
{
    ToolFailureException( String message )
    {
        super( message );
    }

    ToolFailureException( String message, Throwable cause )
    {
        super( message, cause );
    }

    /** Prints this failure (and its cause, if any) to stdout, then exits with status 1. */
    void haltJVM()
    {
        System.out.println( getMessage() );
        Throwable cause = getCause();
        if ( cause != null )
        {
            cause.printStackTrace( System.out );
        }
        System.exit( 1 );
    }
}
| false
|
enterprise_backup_src_main_java_org_neo4j_backup_BackupTool.java
|
3,921
|
/**
 * Command-line entry point for online backups. Parses {@code -from}/{@code -to}/{@code -verify}/
 * {@code -config} arguments, optionally resolves the source URI through a scheme-specific
 * {@link BackupExtensionService} (e.g. {@code ha://}), and delegates to
 * {@link BackupService#doIncrementalBackupOrFallbackToFull}.
 */
public class BackupTool
{
    private static final String TO = "to";
    private static final String FROM = "from";
    private static final String VERIFY = "verify";
    private static final String CONFIG = "config";
    public static final String DEFAULT_SCHEME = "single";
    static final String MISMATCHED_STORE_ID = "You tried to perform a backup from database %s, " +
            "but the target directory contained a backup from database %s. ";

    public static void main( String[] args )
    {
        BackupTool tool = new BackupTool( new BackupService( new DefaultFileSystemAbstraction() ), System.out );
        try
        {
            tool.run( args );
        }
        catch ( ToolFailureException e )
        {
            // Report and exit with status 1.
            e.haltJVM();
        }
    }

    private final BackupService backupService;
    private final PrintStream systemOut;
    private final FileSystemAbstraction fs;

    BackupTool( BackupService backupService, PrintStream systemOut )
    {
        this.backupService = backupService;
        this.systemOut = systemOut;
        this.fs = new DefaultFileSystemAbstraction();
    }

    /**
     * Parses the arguments and performs the backup.
     *
     * @throws ToolFailureException on missing/invalid arguments, unresolvable backup module,
     *         unreadable config file, connection failure or mismatched store id.
     */
    void run( String[] args ) throws ToolFailureException
    {
        Args arguments = new Args( args );
        checkArguments( arguments );
        String from = arguments.get( FROM, null );
        String to = arguments.get( TO, null );
        boolean verify = arguments.getBoolean( VERIFY, true, true );
        // NOTE(review): the constant TO ("to") is passed as the store dir here rather than the
        // parsed 'to' target directory — looks suspicious; confirm intended behavior before changing.
        Config tuningConfiguration = readTuningConfiguration( TO, arguments );
        URI backupURI = null;
        try
        {
            backupURI = new URI( from );
        }
        catch ( URISyntaxException e )
        {
            throw new ToolFailureException( "Please properly specify a location to backup as a valid URI in the form " +
                    "<scheme>://<host>[:port], where scheme is the target database's running mode, eg ha" );
        }
        String module = backupURI.getScheme();
        /*
         * So, the scheme is considered to be the module name and an attempt at
         * loading the service is made.
         */
        BackupExtensionService service = null;
        if ( module != null && !DEFAULT_SCHEME.equals( module ) )
        {
            try
            {
                service = Service.load( BackupExtensionService.class, module );
            }
            catch ( NoSuchElementException e )
            {
                throw new ToolFailureException( String.format(
                        "%s was specified as a backup module but it was not found. " +
                                "Please make sure that the implementing service is on the classpath.",
                        module ) );
            }
        }
        if ( service != null )
        { // If in here, it means a module was loaded. Use it and substitute the
            // passed URI
            Logging logging;
            try
            {
                // Prefer logback when it is on the classpath; otherwise fall back to stdout logging.
                getClass().getClassLoader().loadClass( "ch.qos.logback.classic.LoggerContext" );
                LifeSupport life = new LifeSupport();
                LogbackService logbackService = life.add( new LogbackService( tuningConfiguration, (LoggerContext) getSingleton().getLoggerFactory(), "neo4j-backup-logback.xml" ) );
                life.start();
                logging = logbackService;
            }
            catch ( Throwable e )
            {
                logging = new SystemOutLogging();
            }
            try
            {
                backupURI = service.resolve( backupURI, arguments, logging );
            }
            catch ( Throwable e )
            {
                throw new ToolFailureException( e.getMessage() );
            }
        }
        try
        {
            systemOut.println("Performing backup from '" + backupURI.toASCIIString() + "'");
            doBackup( backupURI, to, verify, tuningConfiguration );
        }
        catch ( TransactionFailureException e )
        {
            if ( e.getCause() instanceof UpgradeNotAllowedByConfigurationException )
            {
                // Existing backup is from an older store version: move it aside and take a
                // fresh full backup in its place.
                try
                {
                    systemOut.println( "The database present in the target directory is of an older version. " +
                            "Backing that up in target and performing a full backup from source" );
                    moveExistingDatabase( fs, to );
                }
                catch ( IOException e1 )
                {
                    // Wrap the actual I/O failure (e1), not the outer transaction failure,
                    // so the root cause of the move problem is preserved.
                    throw new ToolFailureException( "There was a problem moving the old database out of the way" +
                            " - cannot continue, aborting.", e1 );
                }
                doBackup( backupURI, to, verify, tuningConfiguration );
            }
            else
            {
                throw new ToolFailureException( "TransactionFailureException from existing backup at '" + from + "'" +
                        ".", e );
            }
        }
    }

    // Validates that both -from and -to were supplied, with helpful usage messages.
    private void checkArguments( Args arguments ) throws ToolFailureException
    {
        if ( arguments.get( FROM, null ) == null )
        {
            throw new ToolFailureException( "Please specify " + dash( FROM ) + ", examples:\n" +
                    "  " + dash( FROM ) + " single://192.168.1.34\n" +
                    "  " + dash( FROM ) + " single://192.168.1.34:1234\n" +
                    "  " + dash( FROM ) + " ha://192.168.1.15:2181\n" +
                    "  " + dash( FROM ) + " ha://192.168.1.15:2181,192.168.1.16:2181" );
        }
        if ( arguments.get( TO, null ) == null )
        {
            throw new ToolFailureException( "Specify target location with " + dash( TO )
                    + " <target-directory>" );
        }
    }

    /**
     * Builds the tuning {@link Config}, loading extra properties from the file given via
     * {@code -config} if present.
     *
     * @throws ToolFailureException if the properties file cannot be read.
     */
    public Config readTuningConfiguration( String storeDir, Args arguments ) throws ToolFailureException
    {
        Map<String, String> specifiedProperties = stringMap();
        String propertyFilePath = arguments.get( CONFIG, null );
        if ( propertyFilePath != null )
        {
            File propertyFile = new File( propertyFilePath );
            try
            {
                specifiedProperties = MapUtil.load( propertyFile );
            }
            catch ( IOException e )
            {
                throw new ToolFailureException( String.format( "Could not read configuration properties file [%s]",
                        propertyFilePath ), e );
            }
        }
        specifiedProperties.put( GraphDatabaseSettings.store_dir.name(), storeDir );
        return new Config( specifiedProperties, GraphDatabaseSettings.class, ConsistencyCheckSettings.class );
    }

    // Runs the incremental-or-full backup, translating low-level failures into ToolFailureException.
    private void doBackup( URI from, String to, boolean checkConsistency, Config tuningConfiguration ) throws ToolFailureException
    {
        try
        {
            backupService.doIncrementalBackupOrFallbackToFull( from.getHost(), extractPort( from ), to, checkConsistency,
                    tuningConfiguration );
            systemOut.println( "Done" );
        }
        catch ( MismatchingStoreIdException e )
        {
            systemOut.println("Backup failed.");
            throw new ToolFailureException( String.format( MISMATCHED_STORE_ID, e.getExpected(), e.getEncountered() ) );
        }
        catch ( ComException e )
        {
            throw new ToolFailureException( "Couldn't connect to '" + from + "'", e );
        }
    }

    // Falls back to the default backup port when the URI carries none.
    private int extractPort( URI from )
    {
        int port = from.getPort();
        if ( port == -1 )
        {
            port = BackupServer.DEFAULT_PORT;
        }
        return port;
    }

    // Moves the existing (old-version) database files into an "old-version" subdirectory
    // of the target so a fresh full backup can be taken in its place.
    private static void moveExistingDatabase( FileSystemAbstraction fs, String to ) throws IOException
    {
        File toDir = new File( to );
        File backupDir = new File( toDir, "old-version" );
        if ( !fs.mkdir( backupDir ) )
        {
            throw new IOException( "Trouble making target backup directory "
                    + backupDir.getAbsolutePath() );
        }
        StoreFile.move( fs, toDir, backupDir, StoreFile.legacyStoreFiles() );
        LogFiles.move( fs, toDir, backupDir );
    }

    /**
     * Failure raised by the backup tool when it cannot proceed; {@link #haltJVM()} reports
     * the problem on stdout and terminates the JVM with exit code 1.
     */
    static class ToolFailureException extends Exception
    {
        ToolFailureException( String message )
        {
            super( message );
        }
        ToolFailureException( String message, Throwable cause )
        {
            super( message, cause );
        }
        void haltJVM()
        {
            System.out.println( getMessage() );
            if ( getCause() != null )
            {
                getCause().printStackTrace( System.out );
            }
            System.exit( 1 );
        }
    }

    private static String dash( String name )
    {
        return "-" + name;
    }
}
| false
|
enterprise_backup_src_main_java_org_neo4j_backup_BackupTool.java
|
3,922
|
{
    // Matches a File[] that contains at least one file whose absolute path includes fileName.
    @Override
    public boolean matches( Object o )
    {
        File[] files = (File[]) o;
        if ( files == null )
        {
            // A null listing never matches.
            return false;
        }
        for ( File file : files )
        {
            if ( file.getAbsolutePath().contains( fileName ) )
            {
                return true;
            }
        }
        return false;
    }
    @Override
    public void describeTo( Description description )
    {
        description.appendText( String.format( "[%s] in list of copied files", fileName ) );
    }
};
| false
|
enterprise_backup_src_test_java_org_neo4j_backup_BackupServiceIT.java
|
3,923
|
{
    // Commits a small transaction after the first store file has been streamed, to
    // provoke the backup-during-concurrent-write scenario under test.
    @Override
    public void run()
    {
        try
        {
            // Wait until the backup has finished streaming one of the two stores.
            firstStoreFinishedStreaming.await();
            Transaction tx = db.beginTx();
            try
            {
                Node node1 = db.createNode();
                Node node2 = db.createNode();
                node1.createRelationshipTo( node2, DynamicRelationshipType.withName( "foobydoo" ) );
                tx.success();
            }
            finally
            {
                tx.finish();
                // Force the store changes to disk before signalling the waiting backup thread.
                db.getDependencyResolver().resolveDependency( XaDataSourceManager.class ).getNeoStoreDataSource().getNeoStore().flushAll();
                transactionCommitted.countDown();
            }
        }
        catch ( Exception e )
        {
            // Test scaffolding: surface the failure in the test output without rethrowing.
            e.printStackTrace();
        }
    }
} );
|
enterprise_backup_src_test_java_org_neo4j_backup_BackupServiceIT.java
|
3,924
|
{
    // Store-copy monitor that choreographs a race: once the first of the two stores
    // (node/relationship) has streamed, it blocks streaming of the second until a
    // concurrent transaction has committed (see the companion Runnable).
    @Override
    public void startCopyingFiles()
    {
    }
    @Override
    public void finishedCopyingStoreFiles()
    {
    }
    @Override
    public void finishedRotatingLogicalLogs()
    {
    }
    @Override
    public void streamedFile( File storefile )
    {
        // Record which of the two interesting stores completed first and release the
        // transaction-committing thread.
        if ( neitherStoreHasBeenStreamed() )
        {
            if ( storefile.getAbsolutePath().contains( NODE_STORE ) )
            {
                storesThatHaveBeenStreamed.add( NODE_STORE );
                firstStoreFinishedStreaming.countDown();
            }
            else if ( storefile.getAbsolutePath().contains( RELATIONSHIP_STORE ) )
            {
                storesThatHaveBeenStreamed.add(RELATIONSHIP_STORE);
                firstStoreFinishedStreaming.countDown();
            }
        }
    }
    private boolean neitherStoreHasBeenStreamed()
    {
        return storesThatHaveBeenStreamed.isEmpty();
    }
    @Override
    public void streamingFile( File storefile )
    {
        // If the *other* store already streamed, hold this one back until the
        // concurrent transaction has committed.
        if ( storefile.getAbsolutePath().contains( RELATIONSHIP_STORE ) )
        {
            if ( streamedFirst( NODE_STORE ) )
            {
                try
                {
                    transactionCommitted.await();
                }
                catch ( InterruptedException e )
                {
                    e.printStackTrace();
                }
            }
        }
        else if ( storefile.getAbsolutePath().contains( NODE_STORE ) )
        {
            if ( streamedFirst( RELATIONSHIP_STORE ) )
            {
                try
                {
                    transactionCommitted.await();
                }
                catch ( InterruptedException e )
                {
                    e.printStackTrace();
                }
            }
        }
    }
    private boolean streamedFirst( String store )
    {
        return !storesThatHaveBeenStreamed.isEmpty() && storesThatHaveBeenStreamed.get( 0 ).equals( store );
    }
} );
| false
|
enterprise_backup_src_test_java_org_neo4j_backup_BackupServiceIT.java
|
3,925
|
/**
 * Integration tests for {@link BackupService}: full/incremental backups against a live
 * embedded database, including store-file copying and log-pruning edge cases.
 */
public class BackupServiceIT
{
    private static final TargetDirectory target = TargetDirectory.forTest( BackupServiceIT.class );
    // Store files whose streaming order matters for the concurrency tests.
    private static final String NODE_STORE = "neostore.nodestore.db";
    private static final String RELATIONSHIP_STORE = "neostore.relationshipstore.db";
    @Rule
    public TargetDirectory.TestDirectory testDirectory = target.testDirectory();
    public static final String BACKUP_HOST = "localhost";
    private FileSystemAbstraction fileSystem;
    private File storeDir;   // source database directory, recreated per test
    private File backupDir;  // backup target directory, deleted per test
    public int backupPort = 8200;  // bumped in setup() so each test uses a fresh port
@Before
public void setup() throws IOException
{
    fileSystem = new DefaultFileSystemAbstraction();
    storeDir = new File( testDirectory.directory(), "store_dir" );
    fileSystem.deleteRecursively( storeDir );
    fileSystem.mkdir( storeDir );
    backupDir = new File( testDirectory.directory(), "backup_dir" );
    fileSystem.deleteRecursively( backupDir );
    // Fresh port per test to avoid bind conflicts with the previous test's server.
    backupPort = backupPort + 1;
}
@Test
public void shouldThrowExceptionWhenDoingFullBackupOnADirectoryContainingANeoStore() throws Exception
{
    // given: a backup directory that already contains a neostore file
    fileSystem.mkdir( backupDir );
    fileSystem.create( new File( backupDir, NeoStore.DEFAULT_NAME ) ).close();
    try
    {
        // when
        new BackupService( fileSystem ).doFullBackup( "", 0, backupDir.getAbsolutePath(), true, new Config() );
        // Without this the test silently passes when no exception is thrown at all.
        fail( "Should have thrown an exception about the directory already containing a database" );
    }
    catch ( RuntimeException ex )
    {
        // then
        assertThat( ex.getMessage(), containsString( "already contains a database" ) );
    }
}
@Test
public void shouldCopyStoreFiles() throws Throwable
{
    // given: a database with one indexed node
    GraphDatabaseService db = createDb( storeDir, defaultBackupPortHostParams() );
    createAndIndexNode( db, 1 );
    // when
    BackupService backupService = new BackupService( fileSystem );
    backupService.doFullBackup( BACKUP_HOST, backupPort, backupDir.getAbsolutePath(), false,
            new Config( defaultBackupPortHostParams() ) );
    db.shutdown();
    // then: every known store file is present and the backup is logically identical
    File[] files = fileSystem.listFiles( backupDir );
    for ( final StoreFile storeFile : StoreFile.values() )
    {
        assertThat( files, hasFile( storeFile.storeFileName() ) );
    }
    assertEquals( DbRepresentation.of( storeDir ), DbRepresentation.of( backupDir ) );
}
@Test
public void shouldFindTransactionLogContainingLastNeoStoreAndLuceneTransactionInAnEmptyStore() throws IOException
{
    // given: a brand-new, empty database
    GraphDatabaseService db = createDb( storeDir, defaultBackupPortHostParams() );
    // when
    BackupService backupService = new BackupService( fileSystem );
    backupService.doFullBackup( BACKUP_HOST, backupPort, backupDir.getAbsolutePath(), false,
            new Config( defaultBackupPortHostParams() ) );
    db.shutdown();
    // then: both the neostore and the lucene data source can resolve their last committed tx
    assertEquals( DbRepresentation.of( storeDir ), DbRepresentation.of( backupDir ) );
    assertNotNull( getLastMasterForCommittedTx( DEFAULT_DATA_SOURCE_NAME ) );
    assertNotNull( getLastMasterForCommittedTx( DEFAULT_NAME ) );
}
@Test
public void shouldFindTransactionLogContainingLastNeoStoreTransaction() throws Throwable
{
    // given: a database with one indexed node committed
    GraphDatabaseService db = createDb( storeDir, defaultBackupPortHostParams() );
    createAndIndexNode( db, 1 );
    // when
    BackupService backupService = new BackupService( fileSystem );
    backupService.doFullBackup( BACKUP_HOST, backupPort, backupDir.getAbsolutePath(), false,
            new Config( defaultBackupPortHostParams() ) );
    db.shutdown();
    // then: the neostore data source in the backup can resolve its last committed tx
    assertEquals( DbRepresentation.of( storeDir ), DbRepresentation.of( backupDir ) );
    assertNotNull( getLastMasterForCommittedTx( DEFAULT_DATA_SOURCE_NAME ) );
}
// Same as the neostore variant above, but asserts the lucene data source's
// last committed transaction is resolvable from the backup.
@Test
public void shouldFindTransactionLogContainingLastLuceneTransaction() throws Throwable
{
// given
GraphDatabaseService db = createDb( storeDir, defaultBackupPortHostParams() );
createAndIndexNode( db, 1 );
// when
BackupService backupService = new BackupService( fileSystem );
backupService.doFullBackup( BACKUP_HOST, backupPort, backupDir.getAbsolutePath(), false,
new Config( defaultBackupPortHostParams() ) );
db.shutdown();
// then
assertEquals( DbRepresentation.of( storeDir ), DbRepresentation.of( backupDir ) );
assertNotNull( getLastMasterForCommittedTx( DEFAULT_NAME ) );
}
// When log pruning (keep_logical_logs=false) has rotated away the logs an
// incremental backup would need, the attempt must fail with an actionable
// message telling the operator to run a full backup instead.
@Test
public void shouldGiveHelpfulErrorMessageIfLogsPrunedPastThePointOfNoReturn() throws Exception
{
// Given
Map<String, String> config = defaultBackupPortHostParams();
config.put( GraphDatabaseSettings.keep_logical_logs.name(), "false" );
GraphDatabaseAPI db = createDb( storeDir, config );
BackupService backupService = new BackupService( fileSystem );
createAndIndexNode( db, 1 );
// A full backup
backupService.doFullBackup( BACKUP_HOST, backupPort, backupDir.getAbsolutePath(), false,
new Config( defaultBackupPortHostParams() ) );
// And the log the backup uses is rotated out
createAndIndexNode( db, 2 );
rotateLog( db );
createAndIndexNode( db, 3 );
rotateLog( db );
createAndIndexNode( db, 3 );
rotateLog( db );
// when
try
{
backupService.doIncrementalBackup( BACKUP_HOST, backupPort, backupDir.getAbsolutePath(), false );
fail("Should have thrown exception.");
}
// Then: exact wording is asserted because it is operator-facing guidance
catch(IncrementalBackupNotPossibleException e)
{
assertThat( e.getMessage(), equalTo("It's been too long since this backup was last updated, and it has " +
"fallen too far behind the database transaction stream for incremental backup to be possible. " +
"You need to perform a full backup at this point. You can modify this time interval by setting " +
"the '" + GraphDatabaseSettings.keep_logical_logs.name() + "' configuration on the database to a " +
"higher value.") );
}
db.shutdown();
}
// Same pruned-logs setup as the previous test, but using the
// incremental-or-full entry point: instead of failing, the service must
// silently fall back to a full backup and produce an identical store.
@Test
public void shouldFallbackToFullBackupIfIncrementalFailsAndExplicitlyAskedToDoThis() throws Exception
{
// Given
Map<String, String> config = defaultBackupPortHostParams();
config.put( GraphDatabaseSettings.keep_logical_logs.name(), "false" );
GraphDatabaseAPI db = createDb( storeDir, config );
BackupService backupService = new BackupService( fileSystem );
createAndIndexNode( db, 1 );
// A full backup
backupService.doFullBackup( BACKUP_HOST, backupPort, backupDir.getAbsolutePath(), false,
new Config( defaultBackupPortHostParams() ) );
// And the log the backup uses is rotated out
createAndIndexNode( db, 2 );
rotateLog( db );
createAndIndexNode( db, 3 );
rotateLog( db );
createAndIndexNode( db, 3 );
rotateLog( db );
// when
backupService.doIncrementalBackupOrFallbackToFull( BACKUP_HOST, backupPort, backupDir.getAbsolutePath(), false, new Config(defaultBackupPortHostParams()));
// Then
db.shutdown();
assertEquals( DbRepresentation.of( storeDir ), DbRepresentation.of( backupDir ) );
}
// If the database behind the host:port is replaced by a different store
// (different store id), the incremental-or-full fallback must NOT clobber
// the existing backup; it must fail with MismatchingStoreIdException and
// leave the old backup directory intact.
@Test
public void shouldNotOverwriteExistingBackupWithFullIfStoreIdsDontMatch() throws Exception
{
// Given
Map<String, String> config = defaultBackupPortHostParams();
config.put( GraphDatabaseSettings.keep_logical_logs.name(), "false" );
GraphDatabaseAPI db = createDb( storeDir, config );
BackupService backupService = new BackupService( fileSystem );
createAndIndexNode( db, 1 );
// A full backup
backupService.doFullBackup( BACKUP_HOST, backupPort, backupDir.getAbsolutePath(), false,
new Config( defaultBackupPortHostParams() ) );
// And then a completely different database runs on that port
db.shutdown();
FileUtils.deleteRecursively( storeDir );
db = createDb( storeDir, config );
// when
try
{
backupService.doIncrementalBackupOrFallbackToFull( BACKUP_HOST, backupPort, backupDir.getAbsolutePath(), false,
new Config(defaultBackupPortHostParams()));
fail("Should have thrown exception.");
}
// Then
catch(MismatchingStoreIdException e)
{
// good
}
assertTrue( backupDir.exists() );
db.shutdown();
}
// NOTE(review): despite its name, this test is byte-for-byte identical to
// shouldNotOverwriteExistingBackupWithFullIfStoreIdsDontMatch above — it
// never simulates a full disk and only re-exercises the store-id mismatch
// path. The no-disk-space scenario is NOT actually covered; either rewrite
// this against a filesystem abstraction that can report exhaustion, or
// delete the duplicate. TODO confirm intent.
@Test
public void shouldNotFallbackToFullBackupIfThereIsNoDiskSpaceForIt() throws Exception
{
// Given
Map<String, String> config = defaultBackupPortHostParams();
config.put( GraphDatabaseSettings.keep_logical_logs.name(), "false" );
GraphDatabaseAPI db = createDb( storeDir, config );
BackupService backupService = new BackupService( fileSystem );
createAndIndexNode( db, 1 );
// A full backup
backupService.doFullBackup( BACKUP_HOST, backupPort, backupDir.getAbsolutePath(), false,
new Config( defaultBackupPortHostParams() ) );
// And then a completely different database runs on that port
db.shutdown();
FileUtils.deleteRecursively( storeDir );
db = createDb( storeDir, config );
// when
try
{
backupService.doIncrementalBackupOrFallbackToFull( BACKUP_HOST, backupPort, backupDir.getAbsolutePath(), false,
new Config(defaultBackupPortHostParams()));
fail("Should have thrown exception.");
}
// Then
catch(MismatchingStoreIdException e)
{
// good
}
assertTrue( backupDir.exists() );
db.shutdown();
}
// With no pre-existing backup directory, the incremental-or-full entry
// point must transparently perform a full backup.
@Test
public void shouldDoFullBackupOnIncrementalFallbackToFullIfNoBackupFolderExists() throws Exception
{
// Given
Map<String, String> config = defaultBackupPortHostParams();
config.put( GraphDatabaseSettings.keep_logical_logs.name(), "false" );
GraphDatabaseAPI db = createDb( storeDir, config );
BackupService backupService = new BackupService( fileSystem );
createAndIndexNode( db, 1 );
// when
backupService.doIncrementalBackupOrFallbackToFull( BACKUP_HOST, backupPort, backupDir.getAbsolutePath(), false,
new Config(defaultBackupPortHostParams()));
// then
db.shutdown();
assertEquals( DbRepresentation.of( storeDir ), DbRepresentation.of( backupDir ) );
}
// Forces a logical-log rotation on the neostore data source, so tests can
// age out the logs an incremental backup would otherwise use.
private void rotateLog( GraphDatabaseAPI db ) throws IOException
{
db.getDependencyResolver().resolveDependency( XaDataSourceManager.class ).getNeoStoreDataSource().rotateLogicalLog();
}
// Races a concurrent commit against store-file streaming: once the first
// of the node/relationship stores has been streamed, a transaction linking
// two nodes is committed; streaming of the *other* store is held back
// until that commit lands. The backup must still end up consistent and
// equal to the source, proving the tx-catch-up phase works.
@Test
public void shouldContainTransactionsThatHappenDuringBackupProcess() throws Throwable
{
// given
Map<String, String> params = defaultBackupPortHostParams();
params.put( OnlineBackupSettings.online_backup_enabled.name(), "false" );
final List<String> storesThatHaveBeenStreamed = new ArrayList<String>( );
// Released when either the node or relationship store finishes streaming.
final CountDownLatch firstStoreFinishedStreaming = new CountDownLatch( 1 );
// Released once the concurrent transaction has committed and been flushed.
final CountDownLatch transactionCommitted = new CountDownLatch( 1 );
final GraphDatabaseAPI db = (GraphDatabaseAPI) new GraphDatabaseFactory().newEmbeddedDatabaseBuilder(
storeDir.getAbsolutePath() ).setConfig( params ).newGraphDatabase();
Config config = new Config( defaultBackupPortHostParams() );
Monitors monitors = new Monitors();
monitors.addMonitorListener( new BackupMonitor()
{
@Override
public void startCopyingFiles()
{
}
@Override
public void finishedCopyingStoreFiles()
{
}
@Override
public void finishedRotatingLogicalLogs()
{
}
// Records which of the two stores finished streaming first, and
// unblocks the committing thread exactly once.
@Override
public void streamedFile( File storefile )
{
if ( neitherStoreHasBeenStreamed() )
{
if ( storefile.getAbsolutePath().contains( NODE_STORE ) )
{
storesThatHaveBeenStreamed.add( NODE_STORE );
firstStoreFinishedStreaming.countDown();
}
else if ( storefile.getAbsolutePath().contains( RELATIONSHIP_STORE ) )
{
storesThatHaveBeenStreamed.add(RELATIONSHIP_STORE);
firstStoreFinishedStreaming.countDown();
}
}
}
private boolean neitherStoreHasBeenStreamed()
{
return storesThatHaveBeenStreamed.isEmpty();
}
// Before streaming the second store, wait until the racing
// transaction has committed, so the two stores are streamed from
// different points in time.
@Override
public void streamingFile( File storefile )
{
if ( storefile.getAbsolutePath().contains( RELATIONSHIP_STORE ) )
{
if ( streamedFirst( NODE_STORE ) )
{
try
{
transactionCommitted.await();
}
catch ( InterruptedException e )
{
// NOTE(review): interrupt status is swallowed here; consider
// Thread.currentThread().interrupt() — acceptable in a test.
e.printStackTrace();
}
}
}
else if ( storefile.getAbsolutePath().contains( NODE_STORE ) )
{
if ( streamedFirst( RELATIONSHIP_STORE ) )
{
try
{
transactionCommitted.await();
}
catch ( InterruptedException e )
{
e.printStackTrace();
}
}
}
}
private boolean streamedFirst( String store )
{
return !storesThatHaveBeenStreamed.isEmpty() && storesThatHaveBeenStreamed.get( 0 ).equals( store );
}
} );
// Online backup is disabled in the db config above, so the extension is
// wired up manually with the instrumented monitors.
OnlineBackupKernelExtension backup = new OnlineBackupKernelExtension(
config,
db,
db.getDependencyResolver().resolveDependency( XaDataSourceManager.class ),
db.getDependencyResolver().resolveDependency( KernelPanicEventGenerator.class ),
new DevNullLoggingService(),
monitors );
backup.start();
// when
BackupService backupService = new BackupService( fileSystem );
ExecutorService executor = Executors.newSingleThreadExecutor();
executor.execute( new Runnable()
{
@Override
public void run()
{
try
{
// Wait until the first store has been streamed, then commit a
// relationship between two fresh nodes and flush the store.
firstStoreFinishedStreaming.await();
Transaction tx = db.beginTx();
try
{
Node node1 = db.createNode();
Node node2 = db.createNode();
node1.createRelationshipTo( node2, DynamicRelationshipType.withName( "foobydoo" ) );
tx.success();
}
finally
{
tx.finish();
db.getDependencyResolver().resolveDependency( XaDataSourceManager.class ).getNeoStoreDataSource().getNeoStore().flushAll();
transactionCommitted.countDown();
}
}
catch ( Exception e )
{
e.printStackTrace();
}
}
} );
BackupService.BackupOutcome backupOutcome = backupService.doFullBackup( BACKUP_HOST, backupPort,
backupDir.getAbsolutePath(), true,
new Config( params ) );
backup.stop();
executor.shutdown();
executor.awaitTermination( 30, TimeUnit.SECONDS );
db.shutdown();
// then: backup equals the source *including* the racing transaction,
// and the consistency check (verification=true above) passed
assertEquals( DbRepresentation.of( storeDir ), DbRepresentation.of( backupDir ) );
assertTrue( backupOutcome.isConsistent() );
}
/**
 * Base configuration for these tests: binds the online backup server to
 * this test's host and port so the BackupService under test can reach it.
 */
private Map<String, String> defaultBackupPortHostParams()
{
    Map<String, String> params = new HashMap<>();
    String hostAndPort = BACKUP_HOST + ":" + backupPort;
    params.put( OnlineBackupSettings.online_backup_server.name(), hostAndPort );
    return params;
}
/**
 * Boots an embedded database at {@code storeDir} with the given
 * configuration and returns it as a {@link GraphDatabaseAPI}.
 */
private GraphDatabaseAPI createDb( File storeDir, Map<String, String> params )
{
    GraphDatabaseFactory factory = new GraphDatabaseFactory();
    GraphDatabaseService database = factory
            .newEmbeddedDatabaseBuilder( storeDir.getPath() )
            .setConfig( params )
            .newGraphDatabase();
    return (GraphDatabaseAPI) database;
}
/**
 * Creates one node with a pseudo-unique {@code id} property and registers
 * it in the legacy node index "delete_me", committing the transaction.
 */
private void createAndIndexNode( GraphDatabaseService db, int i )
{
    Transaction transaction = db.beginTx();
    try
    {
        Index<Node> nodeIndex = db.index().forNodes( "delete_me" );
        Node created = db.createNode();
        created.setProperty( "id", System.currentTimeMillis() + i );
        nodeIndex.add( created, "delete", "me" );
        transaction.success();
    }
    finally
    {
        transaction.finish();
    }
}
/**
 * Hamcrest matcher accepting a {@code File[]} that contains at least one
 * file whose absolute path contains {@code fileName}. A {@code null} array
 * never matches.
 */
private BaseMatcher<File[]> hasFile( final String fileName )
{
    return new BaseMatcher<File[]>()
    {
        @Override
        public boolean matches( Object o )
        {
            File[] candidates = (File[]) o;
            boolean found = false;
            if ( candidates != null )
            {
                for ( File candidate : candidates )
                {
                    if ( candidate.getAbsolutePath().contains( fileName ) )
                    {
                        found = true;
                        break;
                    }
                }
            }
            return found;
        }

        @Override
        public void describeTo( Description description )
        {
            description.appendText( String.format( "[%s] in list of copied files", fileName ) );
        }
    };
}
// Opens the *backup* store as an embedded database and resolves which
// master committed its last transaction for the given data source. The
// database is always shut down again, even if resolution throws.
private Pair<Integer,Long> getLastMasterForCommittedTx( String dataSourceName ) throws IOException
{
GraphDatabaseAPI db = (GraphDatabaseAPI) new GraphDatabaseFactory().newEmbeddedDatabase(
backupDir.getAbsolutePath() );
try
{
XaDataSourceManager xaDataSourceManager = db.getDependencyResolver().resolveDependency(
XaDataSourceManager.class );
XaDataSource dataSource = xaDataSourceManager.getXaDataSource( dataSourceName );
long lastCommittedTxId = dataSource.getLastCommittedTxId();
return dataSource.getMasterForCommittedTx( lastCommittedTxId );
}
finally
{
db.shutdown();
}
}
}
| false
|
enterprise_backup_src_test_java_org_neo4j_backup_BackupServiceIT.java
|
3,926
|
/**
 * {@link TxHandler} that reports applied-transaction progress to
 * System.out via an open-ended textual progress listener, ticking once per
 * transaction and finishing the listener in {@link #done()}.
 */
private static class ProgressTxHandler implements TxHandler
{
private final ProgressListener progress = ProgressMonitorFactory.textual( System.out ).openEnded( "Transactions applied", 1000 );
@Override
public void accept( Triplet<String, Long, TxExtractor> tx, XaDataSource dataSource )
{
progress.add( 1 );
}
@Override
public void done()
{
progress.done();
}
}
| false
|
enterprise_backup_src_main_java_org_neo4j_backup_BackupService.java
|
3,927
|
/**
 * Result of a backup run: the last committed transaction id per data
 * source, and whether the backed-up store passed the consistency check.
 *
 * Instances are immutable: the transaction map is snapshotted at
 * construction time and only ever exposed through an unmodifiable view,
 * so later mutation of the caller's map cannot leak into the outcome.
 */
class BackupOutcome
{
    private final Map<String, Long> lastCommittedTxs;
    private final boolean consistent;

    BackupOutcome( Map<String, Long> lastCommittedTxs, boolean consistent )
    {
        // Defensive copy — previously only the getter wrapped the live map,
        // so callers could mutate it after construction. Fully qualified to
        // avoid changing the file's import block.
        this.lastCommittedTxs = new java.util.HashMap<String, Long>( lastCommittedTxs );
        this.consistent = consistent;
    }

    /**
     * @return unmodifiable map of data source name to last committed tx id.
     */
    public Map<String, Long> getLastCommittedTxs()
    {
        return Collections.unmodifiableMap( lastCommittedTxs );
    }

    /** @return {@code true} if the consistency check of the backup passed. */
    public boolean isConsistent()
    {
        return consistent;
    }
}
| false
|
enterprise_backup_src_main_java_org_neo4j_backup_BackupService.java
|
3,928
|
{
@Override
public boolean accept( File dir, String name )
{
/*
 * NOTE(review): this comment used to claim that a contains() check
 * ensures previously timestamped files are picked up, but the code
 * performs an exact match against StringLogger.DEFAULT_NAME, so
 * timestamped variants (e.g. "messages.log.1234") are NOT accepted.
 * Confirm which behavior is intended before relying on it.
 */
return name.equals( StringLogger.DEFAULT_NAME );
}
} );
| false
|
enterprise_backup_src_main_java_org_neo4j_backup_BackupService.java
|
3,929
|
// Forces keep_logical_logs=true in the given config map, so the store this
// configuration is applied to retains the logs needed for later
// incremental backups.
{
@Override
public void configure( Map<String, String> config )
{
config.put( GraphDatabaseSettings.keep_logical_logs.name(), Settings.TRUE );
}
};
| false
|
enterprise_backup_src_main_java_org_neo4j_backup_BackupService.java
|
3,930
|
{
// Created lazily on the copyStore call and stopped again in done().
private BackupClient client;
// Connects to the source host/port, starts the client and streams a full
// backup into the given writer; logging is discarded (DevNullLoggingService).
@Override
public Response<?> copyStore( StoreWriter writer )
{
client = new BackupClient( sourceHostNameOrIp, sourcePort, new DevNullLoggingService(),
new Monitors(), null );
client.start();
return client.fullBackup( writer );
}
// NOTE(review): assumes copyStore() ran first — done() would NPE on a
// null client otherwise; confirm the caller guarantees that ordering.
@Override
public void done()
{
client.stop();
}
});
| false
|
enterprise_backup_src_main_java_org_neo4j_backup_BackupService.java
|
3,931
|
/**
 * Dummy JAX-RS resource used by server tests to exercise third-party
 * extension mount points, context injection and header handling.
 */
@Path( "/" )
public class DummyThirdPartyWebService
{
    public static final String DUMMY_WEB_SERVICE_MOUNT_POINT = "/dummy";

    /** Simple liveness endpoint returning the plain-text body "hello". */
    @GET
    @Produces( MediaType.TEXT_PLAIN )
    public Response sayHello()
    {
        return Response.ok()
                .entity( "hello" )
                .build();
    }

    /** Endpoint with two path parameters, reached by security tests. */
    @GET
    @Path("/{something}/{somethingElse}")
    @Produces( MediaType.TEXT_PLAIN )
    public Response forSecurityTesting() {
        return Response.ok().entity("you've reached a dummy service").build();
    }

    /**
     * Verifies that a {@link GraphDatabaseService} can be injected into a
     * third-party resource; returns the database's node count as text.
     */
    @GET
    @Path( "inject-test" )
    @Produces( MediaType.TEXT_PLAIN )
    public Response countNodes( @Context GraphDatabaseService db )
    {
        Transaction transaction = db.beginTx();
        try
        {
            return Response.ok()
                    .entity( String.valueOf( countNodesIn( db ) ) )
                    .build();
        }
        finally
        {
            transaction.finish();
        }
    }

    /**
     * Echoes the request headers back as a flat JSON object; rejects
     * multi-valued headers with an {@link IllegalArgumentException}.
     */
    @GET
    @Path( "needs-auth-header" )
    @Produces( MediaType.APPLICATION_JSON )
    public Response authHeader( @Context HttpHeaders headers )
    {
        StringBuilder theEntity = new StringBuilder( "{" );
        Iterator<Map.Entry<String, List<String>>> headerIt = headers.getRequestHeaders().entrySet().iterator();
        while ( headerIt.hasNext() )
        {
            Map.Entry<String, List<String>> header = headerIt.next();
            if (header.getValue().size() != 1)
            {
                // Fixed typo in the error message: "Mutlivalued" -> "Multivalued".
                throw new IllegalArgumentException( "Multivalued header: "
                        + header.getKey() );
            }
            theEntity.append( "\"" ).append( header.getKey() ).append( "\":\"" )
                    .append( header.getValue().get( 0 ) ).append( "\"" );
            if ( headerIt.hasNext() )
            {
                theEntity.append( ", " );
            }
        }
        theEntity.append( "}" );
        return Response.ok().entity( theEntity.toString() ).build();
    }

    /** Counts all nodes by full iteration — fine for tiny test graphs. */
    private int countNodesIn( GraphDatabaseService db )
    {
        int count = 0;
        for ( @SuppressWarnings("unused") Node node : GlobalGraphOperations.at(db).getAllNodes() )
        {
            count++;
        }
        return count;
    }
}
| false
|
community_server_src_test_java_org_dummy_web_service_DummyThirdPartyWebService.java
|
3,932
|
/**
 * Minimal {@link PluginLifecycle} used by server tests: on start it
 * contributes exactly one injectable — the constant {@code Long} 42 —
 * and its stop hook is a no-op.
 */
public class DummyPluginInitializer implements PluginLifecycle
{
    public DummyPluginInitializer()
    {
    }

    @Override
    public Collection<Injectable<?>> start( GraphDatabaseService graphDatabaseService, Configuration config )
    {
        Injectable<Long> theAnswer = new Injectable<Long>()
        {
            @Override
            public Long getValue()
            {
                return 42L;
            }

            @Override
            public Class<Long> getType()
            {
                return Long.class;
            }
        };
        return Collections.<Injectable<?>>singleton( theAnswer );
    }

    @Override
    public void stop()
    {
    }
}
| false
|
community_server_src_test_java_org_dummy_web_service_DummyPluginInitializer.java
|
3,933
|
{
@Override
public boolean process( Message<? extends MessageType> message )
{
// Record that a message arrived and release the semaphore so the test
// thread blocked on it can continue.
received.set( true );
sem.release();
// presumably true acknowledges the message to the sender — TODO confirm
return true;
}
} );
| false
|
enterprise_cluster_src_test_java_org_neo4j_cluster_com_message_NetworkSenderReceiverTest.java
|
3,934
|
{
// Maps each pattern match to the concrete graph node bound to the
// 'requested' pattern node.
@Override
protected Node underlyingObjectToObject( PatternMatch match )
{
return match.getNodeFor( requested );
}
};
| false
|
community_graph-matching_src_test_java_examples_TestSiteIndexExamples.java
|
3,935
|
/**
 * Regression test for Stack Overflow question 5346011: looking up a
 * relationship in a legacy relationship index while constraining the
 * result by both of its end nodes.
 */
public class RelatedNodesQuestionTest
{
    @Test
    public void question5346011()
    {
        GraphDatabaseService service = new TestGraphDatabaseFactory().newImpermanentDatabase();
        try ( Transaction tx = service.beginTx() )
        {
            RelationshipIndex index = service.index().forRelationships( "exact" );
            // ...creation of the nodes and relationship
            Node startNode = service.createNode();
            Node endNode = service.createNode();
            String a_uuid = "xyz";
            Relationship related = startNode.createRelationshipTo( endNode, DynamicRelationshipType.withName( "related" ) );
            index.add( related, "uuid", a_uuid );
            // query, constrained to the two end nodes
            IndexHits<Relationship> hits = index.get( "uuid", a_uuid, startNode, endNode );
            assertEquals( 1, hits.size() );
            tx.success();
        }
        service.shutdown();
    }
}
| false
|
community_lucene-index_src_test_java_examples_RelatedNodesQuestionTest.java
|
3,936
|
/**
 * Documentation examples for the legacy Lucene index API. The
 * START/END SNIPPET markers delimit code extracted into the manual, so
 * explanatory comments are kept outside of them.
 */
public class LuceneIndexSiteExamples
{
// Database shared by all tests in this class; created once in setUpDb().
private static GraphDatabaseService graphDb;
// Per-test transaction opened in beginTx() and committed in finishTx().
private Transaction tx;
@BeforeClass
public static void setUpDb()
{
graphDb = new TestGraphDatabaseFactory().newImpermanentDatabase();
}
@Before
public void beginTx()
{
tx = graphDb.beginTx();
}
// NOTE: the transaction is committed unconditionally after every test, so
// entities added by one test (addSomeThings) are visible to later ones —
// the tests below depend on that ordering.
@After
public void finishTx()
{
tx.success();
tx.finish();
}
// Populates the "persons" index; later tests read what this one wrote.
@Test
public void addSomeThings()
{
// START SNIPPET: add
Index<Node> persons = graphDb.index().forNodes( "persons" );
Node morpheus = graphDb.createNode();
Node trinity = graphDb.createNode();
Node neo = graphDb.createNode();
persons.add( morpheus, "name", "Morpheus" );
persons.add( morpheus, "rank", "Captain" );
persons.add( trinity, "name", "Trinity" );
persons.add( neo, "name", "Neo" );
persons.add( neo, "title", "The One" );
// END SNIPPET: add
}
// Exact lookup by key/value.
@Test
public void doSomeGets()
{
Index<Node> persons = graphDb.index().forNodes( "persons" );
// START SNIPPET: get
Node morpheus = persons.get( "name", "Morpheus" ).getSingle();
// END SNIPPET: get
assertNotNull( morpheus );
}
// Wildcard and compound Lucene query syntax.
@Test
public void doSomeQueries()
{
Index<Node> persons = graphDb.index().forNodes( "persons" );
// START SNIPPET: query
for ( Node person : persons.query( "name", "*e*" ) )
{
// It will get Morpheus and Neo
}
Node neo = persons.query( "name:*e* AND title:\"The One\"" ).getSingle();
// END SNIPPET: query
assertNotNull( neo );
}
}
| false
|
community_lucene-index_src_test_java_examples_LuceneIndexSiteExamples.java
|
3,937
|
// OutputStream that discards every byte written to it.
{
@Override
public void write( int b ) throws IOException
{ // silent
}
} );
| false
|
community_lucene-index_src_test_java_examples_ImdbDocTest.java
|
3,938
|
// Deliberately shadows java.lang.System inside the enclosing class so that
// example code calling System.out.println(...) compiles unchanged but
// prints nothing: 'out' wraps an OutputStream that discards every byte.
private static class System
{
static PrintStream out = new PrintStream( new OutputStream()
{
@Override
public void write( int b ) throws IOException
{ // silent
}
} );
}
| false
|
community_lucene-index_src_test_java_examples_ImdbDocTest.java
|
3,939
|
// Double-brace initialization: the inner block is an instance initializer
// of an anonymous collection subclass populating the expected values
// (presumably a List literal — see the enclosing declaration).
{
{
add( "The Matrix" );
add( "Malèna" );
add( "The Matrix Reloaded" );
}
};
| false
|
community_lucene-index_src_test_java_examples_ImdbDocTest.java
|
3,940
|
// Double-brace initialization of an anonymous collection subclass holding
// the single expected movie title (see the enclosing declaration).
{
{
add( "The Matrix" );
}
};
| false
|
community_lucene-index_src_test_java_examples_ImdbDocTest.java
|
3,941
|
// Double-brace initialization of an anonymous collection subclass holding
// the expected actor names (see the enclosing declaration).
{
{
add( "Monica Bellucci" );
add( "Keanu Reeves" );
}
};
| false
|
community_lucene-index_src_test_java_examples_ImdbDocTest.java
|
3,942
|
// Double-brace initialization; the duplicate entry is intentional — the
// consuming assertion expects the same actor to appear twice.
{
{
add( "Keanu Reeves" );
add( "Keanu Reeves" );
}
};
| false
|
community_lucene-index_src_test_java_examples_ImdbDocTest.java
|
3,943
|
public class ImdbDocTest
{
// Database shared by all tests; populated once in setUpDb().
private static GraphDatabaseService graphDb;
// Per-test transaction opened in beginTx() and closed in finishTx().
private Transaction tx;
/*
 * Since this is a doc test, the code in here will be publically visible in e.g. a manual.
 * This test desires to print something to System.out and there's no point in this test,
 * when executed, actually printing anything to System.out. So we fool it by creating this
 * inner class with the same name and looks which it uses instead.
 */
private static class System
{
static PrintStream out = new PrintStream( new OutputStream()
{
@Override
public void write( int b ) throws IOException
{ // silent
}
} );
}
// Builds the fixture graph once for the whole class: actors, movies and
// ACTS_IN relationships, all registered in legacy indexes. The SNIPPET
// markers delimit code extracted into the manual, so comments stay
// outside of them. A graphviz rendering of the graph is also generated
// for the documentation at the end.
@BeforeClass
public static void setUpDb()
{
graphDb = new TestGraphDatabaseFactory().newImpermanentDatabaseBuilder().setConfig( GraphDatabaseSettings
.cache_type, WeakCacheProvider.NAME ).newGraphDatabase();
try ( Transaction tx = graphDb.beginTx() )
{
// START SNIPPET: createIndexes
IndexManager index = graphDb.index();
Index<Node> actors = index.forNodes( "actors" );
Index<Node> movies = index.forNodes( "movies" );
RelationshipIndex roles = index.forRelationships( "roles" );
// END SNIPPET: createIndexes
// START SNIPPET: createNodes
// Actors
Node reeves = graphDb.createNode();
reeves.setProperty( "name", "Keanu Reeves" );
actors.add( reeves, "name", reeves.getProperty( "name" ) );
Node bellucci = graphDb.createNode();
bellucci.setProperty( "name", "Monica Bellucci" );
actors.add( bellucci, "name", bellucci.getProperty( "name" ) );
// multiple values for a field, in this case for search only
// and not stored as a property.
actors.add( bellucci, "name", "La Bellucci" );
// Movies
Node theMatrix = graphDb.createNode();
theMatrix.setProperty( "title", "The Matrix" );
theMatrix.setProperty( "year", 1999 );
movies.add( theMatrix, "title", theMatrix.getProperty( "title" ) );
movies.add( theMatrix, "year", theMatrix.getProperty( "year" ) );
Node theMatrixReloaded = graphDb.createNode();
theMatrixReloaded.setProperty( "title", "The Matrix Reloaded" );
theMatrixReloaded.setProperty( "year", 2003 );
movies.add( theMatrixReloaded, "title", theMatrixReloaded.getProperty( "title" ) );
movies.add( theMatrixReloaded, "year", 2003 );
Node malena = graphDb.createNode();
malena.setProperty( "title", "Malèna" );
malena.setProperty( "year", 2000 );
movies.add( malena, "title", malena.getProperty( "title" ) );
movies.add( malena, "year", malena.getProperty( "year" ) );
// END SNIPPET: createNodes
// START SNIPPET: createRelationships
// we need a relationship type
DynamicRelationshipType ACTS_IN = DynamicRelationshipType.withName( "ACTS_IN" );
// create relationships
Relationship role1 = reeves.createRelationshipTo( theMatrix, ACTS_IN );
role1.setProperty( "name", "Neo" );
roles.add( role1, "name", role1.getProperty( "name" ) );
Relationship role2 = reeves.createRelationshipTo( theMatrixReloaded, ACTS_IN );
role2.setProperty( "name", "Neo" );
roles.add( role2, "name", role2.getProperty( "name" ) );
Relationship role3 = bellucci.createRelationshipTo( theMatrixReloaded, ACTS_IN );
role3.setProperty( "name", "Persephone" );
roles.add( role3, "name", role3.getProperty( "name" ) );
Relationship role4 = bellucci.createRelationshipTo( malena, ACTS_IN );
role4.setProperty( "name", "Malèna Scordia" );
roles.add( role4, "name", role4.getProperty( "name" ) );
// END SNIPPET: createRelationships
tx.success();
}
// Render the fixture graph into the documentation output directory.
try ( Transaction tx = graphDb.beginTx() )
{
String title = "Movie and Actor Graph";
try ( PrintWriter pw = AsciiDocGenerator.getPrintWriter( "target/docs/dev", title ) )
{
pw.println( AsciidocHelper.createGraphVizDeletingReferenceNode( title, graphDb, "initial" ) );
pw.flush();
}
}
}
// Shuts the shared database down after the last test in the class.
@AfterClass
public static void tearDownDb()
{
graphDb.shutdown();
}
// Opens the per-test transaction; paired with finishTx() below.
@Before
public void beginTx()
{
tx = graphDb.beginTx();
}
// Closes the per-test transaction. Note: close() without success() rolls
// back, so index additions made inside a test are discarded afterwards.
@After
public void finishTx()
{
tx.close();
}
// Discards the current (never-marked-successful) transaction and starts a
// fresh one, so a test can observe state as it was before its own writes.
private void rollbackTx()
{
finishTx();
beginTx();
}
// Documents IndexManager.existsForNodes(); the "actors" index is created
// by setUpDb().
@Test
public void checkIfIndexExists()
{
// START SNIPPET: checkIfExists
IndexManager index = graphDb.index();
boolean indexExists = index.existsForNodes( "actors" );
// END SNIPPET: checkIfExists
assertTrue( indexExists );
}
// Documents Index.delete(). Uses its own throwaway database instead of the
// shared one, so deleting the "actors" index does not break sibling tests.
@Test
public void deleteIndex()
{
GraphDatabaseService graphDb = new GraphDatabaseFactory().newEmbeddedDatabase( TargetDirectory.forTest(
getClass() ).cleanDirectory( "delete" ).getAbsolutePath() );
try ( Transaction tx = graphDb.beginTx() )
{
// START SNIPPET: delete
IndexManager index = graphDb.index();
Index<Node> actors = index.forNodes( "actors" );
actors.delete();
// END SNIPPET: delete
tx.success();
}
assertFalse( indexExists( graphDb ) );
graphDb.shutdown();
}
/**
 * Checks, inside its own read transaction, whether the node index named
 * "actors" exists in the given database.
 */
private boolean indexExists( GraphDatabaseService graphDb )
{
    try ( Transaction tx = graphDb.beginTx() )
    {
        boolean exists = graphDb.index().existsForNodes( "actors" );
        return exists;
    }
}
// Documents the three overloads of Index.remove(): whole entity, entity +
// key, and entity + key + value. rollbackTx() undoes each removal before
// demonstrating the next one, so they all start from the same index state.
// The three snippet regions share the id "removeNodeFromIndex" and are
// concatenated in the manual.
@Test
public void removeFromIndex()
{
IndexManager index = graphDb.index();
Index<Node> actors = index.forNodes( "actors" );
Node bellucci = actors.get( "name", "Monica Bellucci" ).getSingle();
assertNotNull( bellucci );
// START SNIPPET: removeNodeFromIndex
// completely remove bellucci from the actors index
actors.remove( bellucci );
// END SNIPPET: removeNodeFromIndex
Node node = actors.get( "name", "Monica Bellucci" ).getSingle();
assertEquals( null, node );
node = actors.get( "name", "La Bellucci" ).getSingle();
assertEquals( null, node );
rollbackTx();
// START SNIPPET: removeNodeFromIndex
// remove any "name" entry of bellucci from the actors index
actors.remove( bellucci, "name" );
// END SNIPPET: removeNodeFromIndex
node = actors.get( "name", "Monica Bellucci" ).getSingle();
assertEquals( null, node );
node = actors.get( "name", "La Bellucci" ).getSingle();
assertEquals( null, node );
rollbackTx();
// START SNIPPET: removeNodeFromIndex
// remove the "name" -> "La Bellucci" entry of bellucci
actors.remove( bellucci, "name", "La Bellucci" );
// END SNIPPET: removeNodeFromIndex
node = actors.get( "name", "La Bellucci" ).getSingle();
assertEquals( null, node );
node = actors.get( "name", "Monica Bellucci" ).getSingle();
assertEquals( bellucci, node );
}
// Documents the remove-then-add pattern for keeping a legacy index in sync
// when an indexed property value changes.
@Test
public void update()
{
IndexManager index = graphDb.index();
Index<Node> actors = index.forNodes( "actors" );
// START SNIPPET: update
// create a node with a property
// so we have something to update later on
Node fishburn = graphDb.createNode();
fishburn.setProperty( "name", "Fishburn" );
// index it
actors.add( fishburn, "name", fishburn.getProperty( "name" ) );
// END SNIPPET: update
Node node = actors.get( "name", "Fishburn" ).getSingle();
assertEquals( fishburn, node );
// START SNIPPET: update
// update the index entry
// when the property value changes
actors.remove( fishburn, "name", fishburn.getProperty( "name" ) );
fishburn.setProperty( "name", "Laurence Fishburn" );
actors.add( fishburn, "name", fishburn.getProperty( "name" ) );
// END SNIPPET: update
node = actors.get( "name", "Fishburn" ).getSingle();
assertEquals( null, node );
node = actors.get( "name", "Laurence Fishburn" ).getSingle();
assertEquals( fishburn, node );
}
// Documents IndexHits.getSingle() for an exact node lookup.
@Test
public void doGetForNodes()
{
Index<Node> actors = graphDb.index().forNodes( "actors" );
// START SNIPPET: getSingleNode
IndexHits<Node> hits = actors.get( "name", "Keanu Reeves" );
Node reeves = hits.getSingle();
// END SNIPPET: getSingleNode
assertEquals( "Keanu Reeves", reeves.getProperty( "name" ) );
}
// @Test
// public void getSameFromDifferentValuesO
// Documents relationship-index lookups: a single hit via getSingle(), and
// iterating multiple hits — "Neo" is indexed for two relationships, so the
// same actor node is expected twice.
@Test
public void doGetForRelationships()
{
RelationshipIndex roles = graphDb.index().forRelationships( "roles" );
// START SNIPPET: getSingleRelationship
Relationship persephone = roles.get( "name", "Persephone" ).getSingle();
Node actor = persephone.getStartNode();
Node movie = persephone.getEndNode();
// END SNIPPET: getSingleRelationship
assertEquals( "Monica Bellucci", actor.getProperty( "name" ) );
assertEquals( "The Matrix Reloaded", movie.getProperty( "title" ) );
@SuppressWarnings("serial") List<String> expectedActors = new ArrayList<String>()
{
{
add( "Keanu Reeves" );
add( "Keanu Reeves" );
}
};
List<String> foundActors = new ArrayList<>();
// START SNIPPET: getRelationships
for ( Relationship role : roles.get( "name", "Neo" ) )
{
// this will give us Reeves twice
Node reeves = role.getStartNode();
// END SNIPPET: getRelationships
foundActors.add( (String) reeves.getProperty( "name" ) );
// START SNIPPET: getRelationships
}
// END SNIPPET: getRelationships
assertEquals( expectedActors, foundActors );
}
// Walks through the manual's node-query examples against the shared
// fixture: wildcard queries, scoring/relevance, TermQuery/WildcardQuery,
// numeric ranges (inclusive, sorted and exclusive), compound queries,
// default-operator override and sorted results. Snippet markers are
// interleaved with assertions so only the example code reaches the docs.
// Index additions made here (year-numeric, score) are rolled back by
// finishTx() after the test.
@Test
public void doQueriesForNodes()
{
IndexManager index = graphDb.index();
Index<Node> actors = index.forNodes( "actors" );
Index<Node> movies = index.forNodes( "movies" );
Set<String> found = new HashSet<>();
@SuppressWarnings("serial") Set<String> expectedActors = new HashSet<String>()
{
{
add( "Monica Bellucci" );
add( "Keanu Reeves" );
}
};
@SuppressWarnings("serial") Set<String> expectedMovies = new HashSet<String>()
{
{
add( "The Matrix" );
}
};
// START SNIPPET: actorsQuery
for ( Node actor : actors.query( "name", "*e*" ) )
{
// This will return Reeves and Bellucci
// END SNIPPET: actorsQuery
found.add( (String) actor.getProperty( "name" ) );
// START SNIPPET: actorsQuery
}
// END SNIPPET: actorsQuery
assertEquals( expectedActors, found );
found.clear();
// START SNIPPET: matrixQuery
for ( Node movie : movies.query( "title:*Matrix* AND year:1999" ) )
{
// This will return "The Matrix" from 1999 only.
// END SNIPPET: matrixQuery
found.add( (String) movie.getProperty( "title" ) );
// START SNIPPET: matrixQuery
}
// END SNIPPET: matrixQuery
assertEquals( expectedMovies, found );
// START SNIPPET: matrixSingleQuery
Node matrix = movies.query( "title:*Matrix* AND year:2003" ).getSingle();
// END SNIPPET: matrixSingleQuery
assertEquals( "The Matrix Reloaded", matrix.getProperty( "title" ) );
// START SNIPPET: queryWithScore
IndexHits<Node> hits = movies.query( "title", "The*" );
for ( Node movie : hits )
{
System.out.println( movie.getProperty( "title" ) + " " + hits.currentScore() );
// END SNIPPET: queryWithScore
assertTrue( ((String) movie.getProperty( "title" )).startsWith( "The" ) );
// START SNIPPET: queryWithScore
}
// END SNIPPET: queryWithScore
assertEquals( 2, hits.size() );
// START SNIPPET: queryWithRelevance
hits = movies.query( "title", new QueryContext( "The*" ).sortByScore() );
// END SNIPPET: queryWithRelevance
float previous = Float.MAX_VALUE;
// START SNIPPET: queryWithRelevance
for ( Node movie : hits )
{
// hits sorted by relevance (score)
// END SNIPPET: queryWithRelevance
assertTrue( hits.currentScore() <= previous );
previous = hits.currentScore();
// START SNIPPET: queryWithRelevance
}
// END SNIPPET: queryWithRelevance
assertEquals( 2, hits.size() );
// START SNIPPET: termQuery
// a TermQuery will give exact matches
Node actor = actors.query( new TermQuery( new Term( "name", "Keanu Reeves" ) ) ).getSingle();
// END SNIPPET: termQuery
assertEquals( "Keanu Reeves", actor.getProperty( "name" ) );
Node theMatrix = movies.get( "title", "The Matrix" ).getSingle();
Node theMatrixReloaded = movies.get( "title", "The Matrix Reloaded" ).getSingle();
Node malena = movies.get( "title", "Malèna" ).getSingle();
// START SNIPPET: wildcardTermQuery
hits = movies.query( new WildcardQuery( new Term( "title", "The Matrix*" ) ) );
for ( Node movie : hits )
{
System.out.println( movie.getProperty( "title" ) );
// END SNIPPET: wildcardTermQuery
assertTrue( ((String) movie.getProperty( "title" )).startsWith( "The Matrix" ) );
// START SNIPPET: wildcardTermQuery
}
// END SNIPPET: wildcardTermQuery
assertEquals( 2, hits.size() );
// START SNIPPET: numericRange
movies.add( theMatrix, "year-numeric", new ValueContext( 1999 ).indexNumeric() );
movies.add( theMatrixReloaded, "year-numeric", new ValueContext( 2003 ).indexNumeric() );
movies.add( malena, "year-numeric", new ValueContext( 2000 ).indexNumeric() );
int from = 1997;
int to = 1999;
hits = movies.query( QueryContext.numericRange( "year-numeric", from, to ) );
// END SNIPPET: numericRange
assertEquals( theMatrix, hits.getSingle() );
// START SNIPPET: sortedNumericRange
hits = movies.query(
QueryContext.numericRange( "year-numeric", from, null )
.sortNumeric( "year-numeric", false ) );
// END SNIPPET: sortedNumericRange
List<String> sortedMovies = new ArrayList<>();
@SuppressWarnings("serial") List<String> expectedSortedMovies = new ArrayList<String>()
{
{
add( "The Matrix" );
add( "Malèna" );
add( "The Matrix Reloaded" );
}
};
for ( Node hit : hits )
{
sortedMovies.add( (String) hit.getProperty( "title" ) );
}
assertEquals( expectedSortedMovies, sortedMovies );
// START SNIPPET: exclusiveRange
movies.add( theMatrix, "score", new ValueContext( 8.7 ).indexNumeric() );
movies.add( theMatrixReloaded, "score", new ValueContext( 7.1 ).indexNumeric() );
movies.add( malena, "score", new ValueContext( 7.4 ).indexNumeric() );
// include 8.0, exclude 9.0
hits = movies.query( QueryContext.numericRange( "score", 8.0, 9.0, true, false ) );
// END SNIPPET: exclusiveRange
found.clear();
for ( Node hit : hits )
{
found.add( (String) hit.getProperty( "title" ) );
}
assertEquals( expectedMovies, found );
// START SNIPPET: compoundQueries
hits = movies.query( "title:*Matrix* AND year:1999" );
// END SNIPPET: compoundQueries
assertEquals( theMatrix, hits.getSingle() );
// START SNIPPET: defaultOperator
QueryContext query = new QueryContext( "title:*Matrix* year:1999" )
.defaultOperator( Operator.AND );
hits = movies.query( query );
// END SNIPPET: defaultOperator
// with OR the result would be 2 hits
assertEquals( 1, hits.size() );
// START SNIPPET: sortedResult
hits = movies.query( "title", new QueryContext( "*" ).sort( "title" ) );
for ( Node hit : hits )
{
// all movies with a title in the index, ordered by title
}
// END SNIPPET: sortedResult
assertEquals( 3, hits.size() );
// START SNIPPET: sortedResult
// or
hits = movies.query( new QueryContext( "title:*" ).sort( "year", "title" ) );
for ( Node hit : hits )
{
// all movies with a title in the index, ordered by year, then title
}
// END SNIPPET: sortedResult
assertEquals( 3, hits.size() );
}
/**
 * Demonstrates relationship-index lookups: exact get() filtered on start
 * node, query() filtered on end node, and a compound Lucene query that
 * mixes a relationship-type entry with a name term. Code between
 * START/END SNIPPET markers is extracted into the manual.
 */
@Test
public void doQueriesForRelationships()
{
// Fetch previously indexed actor/movie nodes to use as filters below.
IndexManager index = graphDb.index();
RelationshipIndex roles = index.forRelationships( "roles" );
Index<Node> actors = graphDb.index().forNodes( "actors" );
Index<Node> movies = index.forNodes( "movies" );
Node reeves = actors.get( "name", "Keanu Reeves" ).getSingle();
Node theMatrix = movies.get( "title", "The Matrix" ).getSingle();
// START SNIPPET: queryForRelationships
// find relationships filtering on start node
// using exact matches
IndexHits<Relationship> reevesAsNeoHits;
reevesAsNeoHits = roles.get( "name", "Neo", reeves, null );
Relationship reevesAsNeo = reevesAsNeoHits.iterator().next();
reevesAsNeoHits.close();
// END SNIPPET: queryForRelationships
assertEquals( "Neo", reevesAsNeo.getProperty( "name" ) );
Node actor = reevesAsNeo.getStartNode();
assertEquals( reeves, actor );
// START SNIPPET: queryForRelationships
// find relationships filtering on end node
// using a query
IndexHits<Relationship> matrixNeoHits;
matrixNeoHits = roles.query( "name", "*eo", null, theMatrix );
Relationship matrixNeo = matrixNeoHits.iterator().next();
matrixNeoHits.close();
// END SNIPPET: queryForRelationships
assertEquals( "Neo", matrixNeo.getProperty( "name" ) );
actor = matrixNeo.getStartNode();
assertEquals( reeves, actor );
// START SNIPPET: queryForRelationshipType
// find relationships filtering on end node
// using a relationship type.
// this is how to add it to the index:
roles.add( reevesAsNeo, "type", reevesAsNeo.getType().name() );
// Note that to use a compound query, we can't combine committed
// and uncommitted index entries, so we'll commit before querying:
tx.success();
tx.finish();
// and now we can search for it:
try ( Transaction tx = graphDb.beginTx() )
{
IndexHits<Relationship> typeHits = roles.query( "type:ACTS_IN AND name:Neo", null, theMatrix );
Relationship typeNeo = typeHits.iterator().next();
typeHits.close();
// END SNIPPET: queryForRelationshipType
assertThat(typeNeo, inTx( graphDb, hasProperty( "name" ).withValue( "Neo" ) ));
// NOTE(review): this probably meant typeNeo.getStartNode(); the assertion
// still holds because both relationships start at the reeves node.
actor = matrixNeo.getStartNode();
assertEquals( reeves, actor );
}
}
/**
 * Demonstrates a fulltext index: the query matches "reloAdEd" against
 * "The Matrix Reloaded" case-insensitively on individual words. Code
 * between START/END SNIPPET markers is extracted into the manual.
 */
@Test
public void fulltext()
{
// START SNIPPET: fulltext
IndexManager index = graphDb.index();
Index<Node> fulltextMovies = index.forNodes( "movies-fulltext",
MapUtil.stringMap( IndexManager.PROVIDER, "lucene", "type", "fulltext" ) );
// END SNIPPET: fulltext
// Fetch the fixture nodes from the exact index set up earlier.
Index<Node> movies = index.forNodes( "movies" );
Node theMatrix = movies.get( "title", "The Matrix" ).getSingle();
Node theMatrixReloaded = movies.get( "title", "The Matrix Reloaded" ).getSingle();
// START SNIPPET: fulltext
fulltextMovies.add( theMatrix, "title", "The Matrix" );
fulltextMovies.add( theMatrixReloaded, "title", "The Matrix Reloaded" );
// search in the fulltext index
Node found = fulltextMovies.query( "title", "reloAdEd" ).getSingle();
// END SNIPPET: fulltext
assertEquals( theMatrixReloaded, found );
}
/**
 * Demonstrates setting the per-key cache size of a Lucene-backed index.
 * Code between START/END SNIPPET markers is extracted into the manual.
 */
@Test
public void cacheSettings()
{
// START SNIPPET: cache
Index<Node> index = graphDb.index().forNodes( "actors" );
((LuceneIndex<Node>) index).setCacheCapacity( "name", 300000 );
// END SNIPPET: cache
}
/**
 * Demonstrates index population during batch insertion, then re-opens the
 * store with a normal database to verify the inserted node is findable.
 * Code between START/END SNIPPET markers is extracted into the manual.
 */
@Test
public void batchInsert()
{
// Start from a clean store directory so the batch inserter creates it fresh.
Neo4jTestCase.deleteFileOrDirectory( new File(
"target/neo4jdb-batchinsert" ) );
// START SNIPPET: batchInsert
BatchInserter inserter = BatchInserters.inserter( "target/neo4jdb-batchinsert" );
BatchInserterIndexProvider indexProvider =
new LuceneBatchInserterIndexProvider( inserter );
BatchInserterIndex actors =
indexProvider.nodeIndex( "actors", MapUtil.stringMap( "type", "exact" ) );
actors.setCacheCapacity( "name", 100000 );
Map<String, Object> properties = MapUtil.map( "name", "Keanu Reeves" );
long node = inserter.createNode( properties );
actors.add( node, properties );
//make the changes visible for reading, use this sparsely, requires IO!
actors.flush();
// Make sure to shut down the index provider as well
indexProvider.shutdown();
inserter.shutdown();
// END SNIPPET: batchInsert
// Re-open with a regular embedded database and verify the index entry.
GraphDatabaseService db = new GraphDatabaseFactory().newEmbeddedDatabase( "target/neo4jdb-batchinsert" );
try ( Transaction tx = db.beginTx() )
{
Index<Node> index = db.index().forNodes( "actors" );
Node reeves = index.get( "name", "Keanu Reeves" ).next();
assertEquals( node, reeves.getId() );
}
db.shutdown();
}
}
| false
|
community_lucene-index_src_test_java_examples_ImdbDocTest.java
|
3,944
|
/**
 * Documentation tests for the batch inserter APIs. Code between START/END
 * SNIPPET markers is extracted verbatim into the manual, so those spans are
 * kept documentation-friendly; verification happens outside them. All tests
 * run against an in-memory (ephemeral) file system set up in before().
 */
public class BatchInsertDocTest
{
/**
 * Inserts two labeled nodes and a relationship with the BatchInserter,
 * then re-opens the store with a normal database to verify the data and
 * that the deferred schema index was created.
 */
@Test
public void insert()
{
// START SNIPPET: insert
BatchInserter inserter = BatchInserters.inserter( "target/batchinserter-example", fileSystem );
Label personLabel = DynamicLabel.label( "Person" );
inserter.createDeferredSchemaIndex( personLabel ).on( "name" ).create();
Map<String, Object> properties = new HashMap<>();
properties.put( "name", "Mattias" );
long mattiasNode = inserter.createNode( properties, personLabel );
properties.put( "name", "Chris" );
long chrisNode = inserter.createNode( properties, personLabel );
RelationshipType knows = DynamicRelationshipType.withName( "KNOWS" );
// To set properties on the relationship, use a properties map
// instead of null as the last parameter.
inserter.createRelationship( mattiasNode, chrisNode, knows, null );
inserter.shutdown();
// END SNIPPET: insert
// try it out from a normal db
GraphDatabaseService db = new TestGraphDatabaseFactory().setFileSystem( fileSystem ).newImpermanentDatabase(
"target/batchinserter-example" );
try ( Transaction tx = db.beginTx() )
{
Node mNode = db.getNodeById( mattiasNode );
Node cNode = mNode.getSingleRelationship( knows, Direction.OUTGOING ).getEndNode();
assertThat( (String) cNode.getProperty( "name" ), is( "Chris" ) );
assertThat( db.schema()
.getIndexes( personLabel )
.iterator()
.hasNext(), is( true ) );
}
finally
{
db.shutdown();
}
}
/**
 * Shows how to pass store configuration to the BatchInserter.
 */
@Test
public void insertWithConfig()
{
// START SNIPPET: configuredInsert
Map<String, String> config = new HashMap<>();
config.put( "neostore.nodestore.db.mapped_memory", "90M" );
BatchInserter inserter = BatchInserters.inserter(
"target/batchinserter-example-config", fileSystem, config );
// Insert data here ... and then shut down:
inserter.shutdown();
// END SNIPPET: configuredInsert
}
/**
 * Shows how to load BatchInserter configuration from a properties file.
 */
@Test
public void insertWithConfigFile() throws IOException
{
// Write the configuration file that the snippet below reads back.
try ( Writer fw = fileSystem.openAsWriter( new File( "target/batchinsert-config" ), "utf-8", false ) )
{
fw.append( "neostore.nodestore.db.mapped_memory=90M\n"
+ "neostore.relationshipstore.db.mapped_memory=3G\n"
+ "neostore.propertystore.db.mapped_memory=50M\n"
+ "neostore.propertystore.db.strings.mapped_memory=100M\n"
+ "neostore.propertystore.db.arrays.mapped_memory=0M" );
}
// START SNIPPET: configFileInsert
try ( InputStream input = fileSystem.openAsInputStream( new File( "target/batchinsert-config" ) ) )
{
Map<String, String> config = MapUtil.load( input );
BatchInserter inserter = BatchInserters.inserter(
"target/batchinserter-example-config", fileSystem, config );
// Insert data here ... and then shut down:
inserter.shutdown();
}
// END SNIPPET: configFileInsert
}
/**
 * Demonstrates the GraphDatabaseService-flavoured batch inserter and
 * verifies the inserted data with a normal database afterwards.
 */
@Test
public void batchDb()
{
// START SNIPPET: batchDb
GraphDatabaseService batchDb =
BatchInserters.batchDatabase( "target/batchdb-example", fileSystem );
Label personLabel = DynamicLabel.label( "Person" );
Node mattiasNode = batchDb.createNode( personLabel );
mattiasNode.setProperty( "name", "Mattias" );
Node chrisNode = batchDb.createNode();
chrisNode.setProperty( "name", "Chris" );
chrisNode.addLabel( personLabel );
RelationshipType knows = DynamicRelationshipType.withName( "KNOWS" );
mattiasNode.createRelationshipTo( chrisNode, knows );
// END SNIPPET: batchDb
long mattiasNodeId = mattiasNode.getId();
// START SNIPPET: batchDb
batchDb.shutdown();
// END SNIPPET: batchDb
// try it out from a normal db
GraphDatabaseService db = new TestGraphDatabaseFactory().setFileSystem( fileSystem ).newImpermanentDatabase(
"target/batchdb-example" );
try ( Transaction tx = db.beginTx() )
{
Node mNode = db.getNodeById( mattiasNodeId );
Node cNode = mNode.getSingleRelationship( knows, Direction.OUTGOING )
.getEndNode();
assertThat( cNode, inTx( db, hasProperty( "name" ).withValue( "Chris" ) ) );
}
finally
{
db.shutdown();
}
}
/**
 * Shows how to pass store configuration to the batch database.
 */
@Test
public void batchDbWithConfig()
{
// START SNIPPET: configuredBatchDb
Map<String, String> config = new HashMap<>();
config.put( "neostore.nodestore.db.mapped_memory", "90M" );
GraphDatabaseService batchDb =
BatchInserters.batchDatabase( "target/batchdb-example-config", fileSystem, config );
// Insert data here ... and then shut down:
batchDb.shutdown();
// END SNIPPET: configuredBatchDb
}
// Ephemeral (in-memory) file system, refreshed before each test.
@Rule
public EphemeralFileSystemRule fileSystemRule = new EphemeralFileSystemRule();
private EphemeralFileSystemAbstraction fileSystem;
@Before
public void before() throws Exception
{
fileSystem = fileSystemRule.get();
fileSystem.mkdirs( new File( "target" ) );
}
}
| false
|
community_kernel_src_test_java_examples_BatchInsertDocTest.java
|
3,945
|
/**
 * Documentation tests for node/relationship auto-indexing, covering
 * configuration-based setup, API-based setup, and index maintenance on
 * property mutation. Code between START/END SNIPPET markers is extracted
 * into the manual, so those spans are kept documentation-friendly.
 */
public class AutoIndexerExampleTests implements GraphHolder
{
private static final TargetDirectory target = TargetDirectory.forTest( AutoIndexerExampleTests.class );
private static GraphDatabaseService graphdb;
public @Rule
TestData<Map<String, Node>> data = TestData.producedThrough( GraphDescription.createGraphFor( this, true ) );
/**
 * Returns a fresh store directory for the given test, deleting any
 * leftovers from previous runs first.
 */
private String getStoreDir( String testName ) throws IOException
{
File base = new File( "target", "example-auto-index" );
FileUtils.deleteRecursively( base );
return new File( base.getAbsolutePath(), testName ).getAbsolutePath();
}
/**
 * Enables auto-indexing via database configuration, then verifies that
 * only the configured property keys end up in the auto indexes.
 */
@Test
public void testConfig() throws Exception
{
String storeDirectory = getStoreDir( "testConfig" );
// START SNIPPET: ConfigAutoIndexer
/*
 * Creating the configuration, adding nodeProp1 and nodeProp2 as
 * auto indexed properties for Nodes and relProp1 and relProp2 as
 * auto indexed properties for Relationships. Only those will be
 * indexed. We also have to enable auto indexing for both these
 * primitives explicitly.
 */
GraphDatabaseService graphDb = new GraphDatabaseFactory().
newEmbeddedDatabaseBuilder( storeDirectory ).
setConfig( GraphDatabaseSettings.node_keys_indexable, "nodeProp1,nodeProp2" ).
setConfig( GraphDatabaseSettings.relationship_keys_indexable, "relProp1,relProp2" ).
setConfig( GraphDatabaseSettings.node_auto_indexing, "true" ).
setConfig( GraphDatabaseSettings.relationship_auto_indexing, "true" ).
newGraphDatabase();
Node node1 = null, node2 = null;
Relationship rel = null;
try ( Transaction tx = graphDb.beginTx() )
{
// Create the primitives
node1 = graphDb.createNode();
node2 = graphDb.createNode();
rel = node1.createRelationshipTo( node2,
DynamicRelationshipType.withName( "DYNAMIC" ) );
// Add indexable and non-indexable properties
node1.setProperty( "nodeProp1", "nodeProp1Value" );
node2.setProperty( "nodeProp2", "nodeProp2Value" );
node1.setProperty( "nonIndexed", "nodeProp2NonIndexedValue" );
rel.setProperty( "relProp1", "relProp1Value" );
rel.setProperty( "relPropNonIndexed", "relPropValueNonIndexed" );
// Make things persistent
tx.success();
}
// END SNIPPET: ConfigAutoIndexer
// START SNIPPET: APIReadAutoIndex
try ( Transaction tx = graphDb.beginTx() )
{
// Get the Node auto index
ReadableIndex<Node> autoNodeIndex = graphDb.index()
.getNodeAutoIndexer()
.getAutoIndex();
// node1 and node2 both had auto indexed properties, get them
assertEquals( node1,
autoNodeIndex.get( "nodeProp1", "nodeProp1Value" ).getSingle() );
assertEquals( node2,
autoNodeIndex.get( "nodeProp2", "nodeProp2Value" ).getSingle() );
// node2 also had a property that should be ignored.
assertFalse( autoNodeIndex.get( "nonIndexed",
"nodeProp2NonIndexedValue" ).hasNext() );
// Get the relationship auto index
ReadableIndex<Relationship> autoRelIndex = graphDb.index()
.getRelationshipAutoIndexer()
.getAutoIndex();
// One property was set for auto indexing
assertEquals( rel,
autoRelIndex.get( "relProp1", "relProp1Value" ).getSingle() );
// The rest should be ignored
assertFalse( autoRelIndex.get( "relPropNonIndexed",
"relPropValueNonIndexed" ).hasNext() );
}
// END SNIPPET: APIReadAutoIndex
graphDb.shutdown();
}
/**
 * Enables auto-indexing through the AutoIndexer API instead of database
 * configuration, and verifies the same indexing behavior.
 */
@Test
public void testAPI() throws Exception
{
String storeDirectory = getStoreDir( "testAPI" );
// START SNIPPET: APIAutoIndexer
// Start without any configuration
GraphDatabaseService graphDb = new GraphDatabaseFactory().
newEmbeddedDatabase( storeDirectory );
// Get the Node AutoIndexer, set nodeProp1 and nodeProp2 as auto
// indexed.
AutoIndexer<Node> nodeAutoIndexer = graphDb.index()
.getNodeAutoIndexer();
nodeAutoIndexer.startAutoIndexingProperty( "nodeProp1" );
nodeAutoIndexer.startAutoIndexingProperty( "nodeProp2" );
// Get the Relationship AutoIndexer
AutoIndexer<Relationship> relAutoIndexer = graphDb.index()
.getRelationshipAutoIndexer();
relAutoIndexer.startAutoIndexingProperty( "relProp1" );
// None of the AutoIndexers are enabled so far. Do that now
nodeAutoIndexer.setEnabled( true );
relAutoIndexer.setEnabled( true );
// END SNIPPET: APIAutoIndexer
Node node1 = null, node2 = null;
Relationship rel = null;
try ( Transaction tx = graphDb.beginTx() )
{
// Create the primitives
node1 = graphDb.createNode();
node2 = graphDb.createNode();
rel = node1.createRelationshipTo( node2,
DynamicRelationshipType.withName( "DYNAMIC" ) );
// Add indexable and non-indexable properties
node1.setProperty( "nodeProp1", "nodeProp1Value" );
node2.setProperty( "nodeProp2", "nodeProp2Value" );
node1.setProperty( "nonIndexed", "nodeProp2NonIndexedValue" );
rel.setProperty( "relProp1", "relProp1Value1" );
rel.setProperty( "relPropNonIndexed", "relProp1Value2" );
// Make things persistent
tx.success();
}
try ( Transaction tx = graphDb.beginTx() )
{
// Get the Node auto index
ReadableIndex<Node> autoNodeIndex = nodeAutoIndexer.getAutoIndex();
// node1 and node2 both had auto indexed properties, get them
assertEquals( node1,
autoNodeIndex.get( "nodeProp1", "nodeProp1Value" ).getSingle() );
assertEquals( node2,
autoNodeIndex.get( "nodeProp2", "nodeProp2Value" ).getSingle() );
// node2 also had a property that should be ignored.
assertFalse( autoNodeIndex.get( "nonIndexed",
"nodeProp2NonIndexedValue" ).hasNext() );
// Get the relationship auto index
ReadableIndex<Relationship> autoRelIndex = relAutoIndexer.getAutoIndex();
// All properties ignored
assertEquals( rel,
autoRelIndex.get( "relProp1", "relProp1Value1" ).getSingle() );
assertFalse( autoRelIndex.get( "relPropNonIndexed", "relProp1Value2" ).hasNext() );
}
graphDb.shutdown();
}
/**
 * Verifies how the auto index is maintained when auto-indexing of a key
 * is stopped and when indexed properties are updated or removed.
 */
@Test
public void testMutations() throws Exception
{
String storeDirectory = getStoreDir( "mutations" );
// START SNIPPET: Mutations
/*
 * Creating the configuration
 */
GraphDatabaseService graphDb = new GraphDatabaseFactory().
newEmbeddedDatabaseBuilder( storeDirectory ).
setConfig( GraphDatabaseSettings.node_keys_indexable, "nodeProp1,nodeProp2" ).
setConfig( GraphDatabaseSettings.node_auto_indexing, "true" ).
newGraphDatabase();
Node node1 = null, node2 = null, node3 = null, node4 = null;
try ( Transaction tx = graphDb.beginTx() )
{
// Create the primitives
node1 = graphDb.createNode();
node2 = graphDb.createNode();
node3 = graphDb.createNode();
node4 = graphDb.createNode();
// Add indexable and non-indexable properties
node1.setProperty( "nodeProp1", "nodeProp1Value" );
node2.setProperty( "nodeProp2", "nodeProp2Value" );
node3.setProperty( "nodeProp1", "nodeProp3Value" );
node4.setProperty( "nodeProp2", "nodeProp4Value" );
// Make things persistent
tx.success();
}
/*
 * Here both nodes are indexed. To demonstrate removal, we stop
 * autoindexing nodeProp1.
 */
AutoIndexer<Node> nodeAutoIndexer = graphDb.index().getNodeAutoIndexer();
nodeAutoIndexer.stopAutoIndexingProperty( "nodeProp1" );
try ( Transaction tx = graphDb.beginTx() )
{
/*
 * nodeProp1 is no longer auto indexed. It will be
 * removed regardless. Note that node3 will remain.
 */
node1.setProperty( "nodeProp1", "nodeProp1Value2" );
/*
 * node2 will be auto updated
 */
node2.setProperty( "nodeProp2", "nodeProp2Value2" );
/*
 * remove node4 property nodeProp2 from index.
 */
node4.removeProperty( "nodeProp2" );
// Make things persistent
tx.success();
}
try ( Transaction tx = graphDb.beginTx() )
{
// Verify
ReadableIndex<Node> nodeAutoIndex = nodeAutoIndexer.getAutoIndex();
// node1 is completely gone
assertFalse( nodeAutoIndex.get( "nodeProp1", "nodeProp1Value" ).hasNext() );
assertFalse( nodeAutoIndex.get( "nodeProp1", "nodeProp1Value2" ).hasNext() );
// node2 is updated
assertFalse( nodeAutoIndex.get( "nodeProp2", "nodeProp2Value" ).hasNext() );
assertEquals( node2,
nodeAutoIndex.get( "nodeProp2", "nodeProp2Value2" ).getSingle() );
/*
 * node3 is still there, despite its nodeProp1 property not being monitored
 * any more because it was not touched, in contrast with node1.
 */
assertEquals( node3,
nodeAutoIndex.get( "nodeProp1", "nodeProp3Value" ).getSingle() );
// Finally, node4 is removed because the property was removed.
assertFalse( nodeAutoIndex.get( "nodeProp2", "nodeProp4Value" ).hasNext() );
}
// END SNIPPET: Mutations
graphDb.shutdown();
}
/**
 * Verifies that the @Graph annotation can switch on auto-indexing for the
 * generated test graph.
 */
@Test
@Graph( autoIndexNodes = true,
autoIndexRelationships = true,
value = { "I know you" })
public void canCreateMoreInvolvedGraphWithPropertiesAndAutoIndex() throws Exception
{
GraphDatabaseService graphDatabase = data.get().values().iterator().next().getGraphDatabase();
assertTrue( "node autoindex Nodes not enabled.", graphDatabase.index().getNodeAutoIndexer().isEnabled() );
assertTrue( "node autoindex Rels not enabled.", graphDatabase.index().getRelationshipAutoIndexer().isEnabled() );
}
// Shared database for annotation-driven tests; see graphdb() below.
@BeforeClass
public static void startDatabase()
{
graphdb = new GraphDatabaseFactory().newEmbeddedDatabase( target.makeGraphDbDir().getAbsolutePath() );
}
@AfterClass
public static void stopDatabase()
{
if ( graphdb != null )
{
graphdb.shutdown();
}
graphdb = null;
}
@Override
public GraphDatabaseService graphdb()
{
return graphdb;
}
}
| false
|
community_lucene-index_src_test_java_examples_AutoIndexerExampleTests.java
|
3,946
|
MATRIX_EXAMPLE
{
/**
 * Builds the Matrix example graph (Neo, Trinity, Morpheus, Cypher,
 * Agent Smith, The Architect) connected by KNOWS/LOVES/CODED_BY
 * relationships, some of which carry properties. Returns the Neo node.
 */
public Node create( GraphDatabaseService graphdb )
{
Node neo = graphdb.createNode();
neo.setProperty( "name", "Thomas Anderson" );
neo.setProperty( "age", 29 );
Node trinity = graphdb.createNode();
trinity.setProperty( "name", "Trinity" );
Node morpheus = graphdb.createNode();
morpheus.setProperty( "name", "Morpheus" );
morpheus.setProperty( "rank", "Captain" );
morpheus.setProperty( "occupation", "Total badass" );
Node cypher = graphdb.createNode();
cypher.setProperty( "name", "Cypher" );
cypher.setProperty( "last name", "Reagan" );
Node smith = graphdb.createNode();
smith.setProperty( "name", "Agent Smith" );
smith.setProperty( "version", "1.0b" );
smith.setProperty( "language", "C++" );
Node architect = graphdb.createNode();
architect.setProperty( "name", "The Architect" );
// The 'relationship' variable is reused; the first two assignments are
// overwritten without any properties being set on those relationships.
Relationship relationship;
relationship = neo.createRelationshipTo( morpheus,
StandardGraphs.MatrixTypes.KNOWS );
relationship = neo.createRelationshipTo( trinity,
StandardGraphs.MatrixTypes.KNOWS );
relationship = morpheus.createRelationshipTo( trinity,
StandardGraphs.MatrixTypes.KNOWS );
relationship.setProperty( "since", "a year before the movie" );
relationship.setProperty( "cooporates on", "The Nebuchadnezzar" );
relationship = trinity.createRelationshipTo( neo,
StandardGraphs.MatrixTypes.LOVES );
relationship.setProperty( "since", "meeting the oracle" );
relationship = morpheus.createRelationshipTo( cypher,
StandardGraphs.MatrixTypes.KNOWS );
relationship.setProperty( "disclosure", "public" );
relationship = cypher.createRelationshipTo( smith,
StandardGraphs.MatrixTypes.KNOWS );
relationship.setProperty( "disclosure", "secret" );
relationship = smith.createRelationshipTo( architect,
StandardGraphs.MatrixTypes.CODED_BY );
return neo;
}
};
| false
|
community_graph-algo_src_test_java_common_StandardGraphs.java
|
3,947
|
SMALL_CIRCLE
{
/**
 * Builds a two-node cycle: each node gets a relationship of this type
 * pointing at the other. Returns the second node created.
 */
public Node create( GraphDatabaseService graphdb )
{
    Node first = graphdb.createNode();
    Node second = graphdb.createNode();
    first.createRelationshipTo( second, this );
    second.createRelationshipTo( first, this );
    return second;
}
},
| false
|
community_graph-algo_src_test_java_common_StandardGraphs.java
|
3,948
|
CROSS_PATHS_GRAPH
{
/**
 * Builds a graph with two crossing layers of paths between a source and a
 * sink: source -> {left,right} -> {joinOne,joinTwo} -> sink, giving four
 * distinct source-to-sink paths. Returns the sink node.
 */
public Node create( GraphDatabaseService graphdb )
{
    Node source = graphdb.createNode();
    Node sink = graphdb.createNode();
    Node left = graphdb.createNode();
    Node right = graphdb.createNode();
    Node joinOne = graphdb.createNode();
    Node joinTwo = graphdb.createNode();
    source.createRelationshipTo( left, this );
    source.createRelationshipTo( right, this );
    left.createRelationshipTo( joinOne, this );
    left.createRelationshipTo( joinTwo, this );
    right.createRelationshipTo( joinOne, this );
    right.createRelationshipTo( joinTwo, this );
    joinOne.createRelationshipTo( sink, this );
    joinTwo.createRelationshipTo( sink, this );
    return sink;
}
},
| false
|
community_graph-algo_src_test_java_common_StandardGraphs.java
|
3,949
|
/**
 * Test utility for building small graphs where every node is addressed by a
 * symbolic id, stored on the node under {@link #KEY_ID}. All nodes and
 * relationships created through the builder are tracked so the graph can be
 * inspected and wiped between tests.
 */
public class SimpleGraphBuilder
{
    /** Property key under which each node's symbolic id is stored. */
    public static final String KEY_ID = "name";

    GraphDatabaseService graphDb;
    HashMap<String,Node> nodes;
    HashMap<Node,String> nodeNames;
    Set<Relationship> edges;
    RelationshipType currentRelType = null;

    public SimpleGraphBuilder( GraphDatabaseService graphDb,
            RelationshipType relationshipType )
    {
        super();
        this.graphDb = graphDb;
        nodes = new HashMap<>();
        nodeNames = new HashMap<>();
        edges = new HashSet<>();
        setCurrentRelType( relationshipType );
    }

    /**
     * Deletes every node created through this builder, together with all
     * relationships attached to those nodes, and clears the bookkeeping.
     */
    public void clear()
    {
        for ( Node node : nodes.values() )
        {
            for ( Relationship relationship : node.getRelationships() )
            {
                relationship.delete();
            }
            node.delete();
        }
        nodes.clear();
        nodeNames.clear();
        edges.clear();
    }

    /** @return all relationships created through this builder (live set). */
    public Set<Relationship> getAllEdges()
    {
        return edges;
    }

    /** @return all nodes created through this builder (live view). */
    public Set<Node> getAllNodes()
    {
        return nodeNames.keySet();
    }

    /** Sets the relationship type used by the makeEdge family of methods. */
    public void setCurrentRelType( RelationshipType currentRelType )
    {
        this.currentRelType = currentRelType;
    }

    public Node makeNode( String id )
    {
        return makeNode( id, Collections.<String, Object>emptyMap() );
    }

    public Node makeNode( String id, Object... keyValuePairs )
    {
        return makeNode( id, toMap( keyValuePairs ) );
    }

    /**
     * Turns a flat array of alternating keys and values into a map.
     *
     * @throws IllegalArgumentException if the array has an odd length
     *         (previously this failed with an opaque
     *         ArrayIndexOutOfBoundsException after partially filling the map).
     */
    private Map<String, Object> toMap( Object[] keyValuePairs )
    {
        if ( keyValuePairs.length % 2 != 0 )
        {
            throw new IllegalArgumentException(
                    "Expected alternating key/value pairs, got "
                            + keyValuePairs.length + " arguments" );
        }
        Map<String, Object> map = new HashMap<>();
        for ( int i = 0; i < keyValuePairs.length; i += 2 )
        {
            map.put( keyValuePairs[i].toString(), keyValuePairs[i + 1] );
        }
        return map;
    }

    /**
     * Creates a node, registers it under {@code id}, stores the id on the
     * node and copies the given properties onto it.
     *
     * @throws RuntimeException if a property key collides with {@link #KEY_ID}.
     */
    public Node makeNode( String id, Map<String, Object> properties )
    {
        Node node = graphDb.createNode();
        nodes.put( id, node );
        nodeNames.put( node, id );
        node.setProperty( KEY_ID, id );
        for ( Map.Entry<String, Object> property : properties.entrySet() )
        {
            if ( property.getKey().equals( KEY_ID ) )
            {
                throw new RuntimeException( "Can't use '" + property.getKey() + "'" );
            }
            node.setProperty( property.getKey(), property.getValue() );
        }
        return node;
    }

    public Node getNode( String id )
    {
        return getNode( id, false );
    }

    /**
     * @param force when true, a missing node is created on the fly.
     * @return the node registered under {@code id}, or null if absent and
     *         {@code force} is false.
     */
    public Node getNode( String id, boolean force )
    {
        Node node = nodes.get( id );
        if ( node == null && force )
        {
            node = makeNode( id );
        }
        return node;
    }

    /** @return the symbolic id of a node created through this builder, or null. */
    public String getNodeId( Node node )
    {
        return nodeNames.get( node );
    }

    public Relationship makeEdge( String node1, String node2 )
    {
        return makeEdge( node1, node2, Collections.<String, Object>emptyMap() );
    }

    /**
     * Creates a relationship of the current type between the two named nodes
     * (creating missing nodes on the fly) and copies the properties onto it.
     */
    public Relationship makeEdge( String node1, String node2, Map<String, Object> edgeProperties )
    {
        Node n1 = getNode( node1, true ), n2 = getNode( node2, true );
        Relationship relationship = n1
                .createRelationshipTo( n2, currentRelType );
        for ( Map.Entry<String, Object> property : edgeProperties.entrySet() )
        {
            relationship.setProperty( property.getKey(), property.getValue() );
        }
        edges.add( relationship );
        return relationship;
    }

    public Relationship makeEdge( String node1, String node2, Object... keyValuePairs )
    {
        return makeEdge( node1, node2, toMap( keyValuePairs ) );
    }

    /**
     * This creates a chain by adding a number of edges. Example: The input
     * string "a,b,c,d,e" makes the chain a->b->c->d->e
     *
     * @param commaSeparatedNodeNames
     *            A string with the node names separated by commas.
     */
    public void makeEdgeChain( String commaSeparatedNodeNames )
    {
        // Local renamed from "nodeNames" to stop shadowing the field.
        String[] names = commaSeparatedNodeNames.split( "," );
        for ( int i = 0; i < names.length - 1; ++i )
        {
            makeEdge( names[i], names[i + 1] );
        }
    }

    /**
     * Same as makeEdgeChain, but with some property set on all edges.
     *
     * @param commaSeparatedNodeNames
     *            A string with the node names separated by commas.
     * @param propertyName name of the property to set on every edge.
     * @param propertyValue value of the property to set on every edge.
     */
    public void makeEdgeChain( String commaSeparatedNodeNames,
            String propertyName, Object propertyValue )
    {
        String[] names = commaSeparatedNodeNames.split( "," );
        for ( int i = 0; i < names.length - 1; ++i )
        {
            makeEdge( names[i], names[i + 1], propertyName,
                    propertyValue );
        }
    }

    /**
     * This creates a number of edges from a number of node names, pairwise.
     * Example: Input "a,b,c,d" gives a->b and c->d
     *
     * @param commaSeparatedNodeNames node names separated by commas.
     */
    public void makeEdges( String commaSeparatedNodeNames )
    {
        String[] names = commaSeparatedNodeNames.split( "," );
        for ( int i = 0; i < names.length / 2; ++i )
        {
            makeEdge( names[i * 2], names[i * 2 + 1] );
        }
    }

    /**
     * Reads edges from a CSV/TSV file, one edge per row, using the first two
     * columns as the start and end node names.
     */
    public void importEdges( File file )
    {
        try
        {
            CsvFileReader reader = new CsvFileReader( file );
            while ( reader.hasNext() )
            {
                String[] line = reader.next();
                makeEdge( line[0], line[1] );
            }
        }
        catch ( IOException e )
        {
            throw new RuntimeException( e );
        }
    }

    /**
     * Same as makeEdges, but with some property set on all edges.
     *
     * @param commaSeparatedNodeNames node names separated by commas.
     * @param propertyName name of the property to set on every edge.
     * @param propertyValue value of the property to set on every edge.
     */
    public void makeEdges( String commaSeparatedNodeNames, String propertyName,
            Object propertyValue )
    {
        String[] names = commaSeparatedNodeNames.split( "," );
        for ( int i = 0; i < names.length / 2; ++i )
        {
            makeEdge( names[i * 2], names[i * 2 + 1], propertyName,
                    propertyValue );
        }
    }

    /**
     * @param node1Id symbolic id of the first node.
     * @param node2Id symbolic id of the second node.
     * @return One relationship between two given nodes, if there exists one,
     *         otherwise null.
     */
    public Relationship getRelationship( String node1Id, String node2Id )
    {
        Node node1 = getNode( node1Id );
        Node node2 = getNode( node2Id );
        if ( node1 == null || node2 == null )
        {
            return null;
        }
        Iterable<Relationship> relationships = node1.getRelationships();
        for ( Relationship relationship : relationships )
        {
            if ( relationship.getOtherNode( node1 ).equals( node2 ) )
            {
                return relationship;
            }
        }
        return null;
    }
}
| false
|
community_graph-algo_src_test_java_common_SimpleGraphBuilder.java
|
3,950
|
/**
 * Base class for graph-algo tests. Sets up one impermanent database and a
 * shared SimpleGraphBuilder per test class, wraps every test in a
 * transaction, and offers assertion helpers for paths built from nodes
 * carrying SimpleGraphBuilder.KEY_ID properties.
 */
public abstract class Neo4jAlgoTestCase
{
// Shared across all tests in a class; created in setUpGraphDb().
protected static GraphDatabaseService graphDb;
protected static SimpleGraphBuilder graph = null;
// Per-test transaction, opened in setUpTransaction().
protected Transaction tx;
public static enum MyRelTypes implements RelationshipType
{
R1, R2, R3
}
@BeforeClass
public static void setUpGraphDb() throws Exception
{
graphDb = new TestGraphDatabaseFactory().newImpermanentDatabase();
graph = new SimpleGraphBuilder( graphDb, MyRelTypes.R1 );
}
@Before
public void setUpTransaction()
{
tx = graphDb.beginTx();
}
@AfterClass
public static void tearDownGraphDb() throws Exception
{
graphDb.shutdown();
}
@After
public void tearDownTransactionAndGraph()
{
// Wipe everything the test created, then commit so the next test
// starts from an empty graph.
graph.clear();
tx.success();
tx.finish();
}
/** Recursively deletes a file or directory, asserting each file deletion. */
public static void deleteFileOrDirectory( File file )
{
if ( !file.exists() )
{
return;
}
if ( file.isDirectory() )
{
for ( File child : nonNull( file.listFiles() ) )
{
deleteFileOrDirectory( child );
}
}
else
{
assertTrue( "delete " + file, file.delete() );
}
}
/** Asserts that the path's nodes carry exactly these KEY_ID values, in order. */
protected void assertPathDef( Path path, String... names )
{
int i = 0;
for ( Node node : path.nodes() )
{
assertEquals( "Wrong node " + i + " in " + getPathDef( path ),
names[i++], node.getProperty( SimpleGraphBuilder.KEY_ID ) );
}
assertEquals( names.length, i );
}
/** Asserts that the path visits exactly these nodes (compared by KEY_ID), in order. */
protected void assertPath( Path path, Node... nodes )
{
int i = 0;
for ( Node node : path.nodes() )
{
assertEquals( "Wrong node " + i + " in " + getPathDef( path ),
nodes[i++].getProperty( SimpleGraphBuilder.KEY_ID ), node.getProperty( SimpleGraphBuilder.KEY_ID ) );
}
assertEquals( nodes.length, i );
}
/** Asserts that actual contains exactly the expected elements (any order, no extras). */
protected <E> void assertContains( Iterable<E> actual, E... expected )
{
Set<E> expectation = new HashSet<E>( Arrays.asList( expected ) );
for ( E element : actual )
{
if ( !expectation.remove( element ) )
{
fail( "unexpected element <" + element + ">" );
}
}
if ( !expectation.isEmpty() )
{
fail( "the expected elements <" + expectation
+ "> were not contained" );
}
}
/** Renders a path as a comma-separated list of its nodes' KEY_ID values. */
public String getPathDef( Path path )
{
StringBuilder builder = new StringBuilder();
for ( Node node : path.nodes() )
{
if ( builder.length() > 0 )
{
builder.append( "," );
}
builder.append( node.getProperty( SimpleGraphBuilder.KEY_ID ) );
}
return builder.toString();
}
/**
 * Asserts that the given paths are exactly the expected ones, where each
 * expectation is a comma-separated KEY_ID list as produced by getPathDef().
 */
public void assertPaths( Iterable<? extends Path> paths, String... pathDefinitions )
{
List<String> pathDefs = new ArrayList<String>( Arrays.asList( pathDefinitions ) );
List<String> unexpectedDefs = new ArrayList<String>();
for ( Path path : paths )
{
String pathDef = getPathDef( path );
int index = pathDefs.indexOf( pathDef );
if ( index != -1 )
{
pathDefs.remove( index );
}
else
{
unexpectedDefs.add( getPathDef( path ) );
}
}
assertTrue( "These unexpected paths were found: " + unexpectedDefs + ". In addition these expected paths weren't found:" + pathDefs, unexpectedDefs.isEmpty() );
assertTrue( "These were expected, but not found: " + pathDefs.toString(), pathDefs.isEmpty() );
}
}
| false
|
community_graph-algo_src_test_java_common_Neo4jAlgoTestCase.java
|
3,951
|
/**
 * One parsed relationship spec on the form "startName TYPE endName", able to
 * materialize itself in a graph, creating and registering named nodes on
 * demand.
 */
private static class RelationshipDescription
{
    private final String end;
    private final String start;
    private final RelationshipType type;

    /**
     * @param rel a three-token, space-separated description, e.g. "a KNOWS b".
     * @throws IllegalArgumentException if it is not exactly three tokens.
     */
    public RelationshipDescription( String rel )
    {
        String[] parts = rel.split( " " );
        if ( parts.length != 3 )
        {
            throw new IllegalArgumentException( "syntax error: \"" + rel
                    + "\"" );
        }
        start = parts[0];
        type = DynamicRelationshipType.withName( parts[1] );
        end = parts[2];
    }

    /** Creates (or reuses) the start and end nodes and links them. */
    public Relationship create( GraphDatabaseService graphdb,
            Map<String, Node> nodes )
    {
        Node startNode = getNode( graphdb, nodes, start );
        Node endNode = getNode( graphdb, nodes, end );
        return startNode.createRelationshipTo( endNode, type );
    }

    /**
     * Returns the node registered under {@code name}, creating, naming and
     * registering it first if needed.
     */
    private Node getNode( GraphDatabaseService graphdb,
            Map<String, Node> nodes, String name )
    {
        Node node = nodes.get( name );
        if ( node == null )
        {
            // The original branched on nodes.size() == 0, but both branches
            // created a plain node identically, so the check was dead code.
            node = graphdb.createNode();
            node.setProperty( "name", name );
            nodes.put( name, node );
        }
        return node;
    }
}
| false
|
community_graph-algo_src_test_java_common_GraphDescription.java
|
3,952
|
/**
 * Test helper that builds a graph from a textual description, one
 * relationship per line on the form "startName TYPE endName". Named nodes
 * are created on demand and tagged with a "name" property; {@link #create}
 * returns the end node of the last relationship described.
 */
public class GraphDescription implements GraphDefinition
{
    /** One parsed "start TYPE end" line, able to materialize itself. */
    private static class RelationshipDescription
    {
        private final String end;
        private final String start;
        private final RelationshipType type;

        /**
         * @param rel a three-token, space-separated description.
         * @throws IllegalArgumentException if it is not exactly three tokens.
         */
        public RelationshipDescription( String rel )
        {
            String[] parts = rel.split( " " );
            if ( parts.length != 3 )
            {
                throw new IllegalArgumentException( "syntax error: \"" + rel
                        + "\"" );
            }
            start = parts[0];
            type = DynamicRelationshipType.withName( parts[1] );
            end = parts[2];
        }

        /** Creates (or reuses) the start and end nodes and links them. */
        public Relationship create( GraphDatabaseService graphdb,
                Map<String, Node> nodes )
        {
            Node startNode = getNode( graphdb, nodes, start );
            Node endNode = getNode( graphdb, nodes, end );
            return startNode.createRelationshipTo( endNode, type );
        }

        /**
         * Returns the node registered under {@code name}, creating, naming
         * and registering it first if needed.
         */
        private Node getNode( GraphDatabaseService graphdb,
                Map<String, Node> nodes, String name )
        {
            Node node = nodes.get( name );
            if ( node == null )
            {
                // The original branched on nodes.size() == 0, but both
                // branches created a plain node identically - dead code.
                node = graphdb.createNode();
                node.setProperty( "name", name );
                nodes.put( name, node );
            }
            return node;
        }
    }

    private final RelationshipDescription[] description;

    /**
     * @param description one or more strings, each containing one or more
     *        newline-separated "start TYPE end" lines.
     */
    public GraphDescription( String... description )
    {
        List<RelationshipDescription> lines = new ArrayList<>();
        for ( String part : description )
        {
            for ( String line : part.split( "\n" ) )
            {
                lines.add( new RelationshipDescription( line ) );
            }
        }
        this.description = lines.toArray( new RelationshipDescription[lines.size()] );
    }

    /**
     * Creates all described relationships in one transaction and returns the
     * end node of the last one, or null for an empty description.
     */
    public Node create( GraphDatabaseService graphdb )
    {
        Map<String, Node> nodes = new HashMap<>();
        Node node = null;
        Transaction tx = graphdb.beginTx();
        try
        {
            for ( RelationshipDescription rel : description )
            {
                node = rel.create( graphdb, nodes ).getEndNode();
            }
            tx.success();
        }
        finally
        {
            tx.finish();
        }
        return node;
    }
}
| false
|
community_graph-algo_src_test_java_common_GraphDescription.java
|
3,953
|
public class CsvFileReader extends PrefetchingIterator<String[]>
{
private final BufferedReader reader;
private String delimiter;
public CsvFileReader( File file ) throws IOException
{
this( file, null );
}
public CsvFileReader( File file, String delimiter ) throws IOException
{
this.delimiter = delimiter;
this.reader = new BufferedReader( new FileReader( file ) );
}
@Override
protected String[] fetchNextOrNull()
{
try
{
String line = reader.readLine();
if ( line == null )
{
close();
return null;
}
if ( delimiter == null )
{
delimiter = figureOutDelimiter( line );
}
return line.split( delimiter );
}
catch ( IOException e )
{
throw new RuntimeException( e );
}
}
protected String figureOutDelimiter( String line )
{
String[] candidates = new String[] { "\t", "," };
for ( String candidate : candidates )
{
if ( line.indexOf( candidate ) > -1 )
{
return candidate;
}
}
throw new RuntimeException( "Couldn't guess delimiter in '"
+ line + "'" );
}
public void close() throws IOException
{
reader.close();
}
}
| false
|
community_graph-algo_src_test_java_common_CsvFileReader.java
|
3,954
|
public class TestSiteIndexExamples
{
@ClassRule
public static EmbeddedDatabaseRule graphDb = new EmbeddedDatabaseRule();
// START SNIPPET: findNodesWithRelationshipsTo
public static Iterable<Node> findNodesWithRelationshipsTo(
RelationshipType type, Node... nodes )
{
if ( nodes == null || nodes.length == 0 )
{
throw new IllegalArgumentException( "No nodes supplied" );
}
final PatternNode requested = new PatternNode();
PatternNode anchor = null;
for ( Node node : nodes )
{
PatternNode pattern = new PatternNode();
pattern.setAssociation( node );
pattern.createRelationshipTo( requested, type );
if ( anchor == null )
{
anchor = pattern;
}
}
PatternMatcher matcher = PatternMatcher.getMatcher();
Iterable<PatternMatch> matches = matcher.match( anchor, nodes[0] );
return new IterableWrapper<Node, PatternMatch>( matches )
{
@Override
protected Node underlyingObjectToObject( PatternMatch match )
{
return match.getNodeFor( requested );
}
};
}
// END SNIPPET: findNodesWithRelationshipsTo
// START SNIPPET: findFriends
private static final long MILLSECONDS_PER_DAY = 1000 * 60 * 60 * 24;
enum FriendshipTypes implements RelationshipType
{
FRIEND,
LIVES_IN
}
/**
* Find all friends the specified person has known for more than the
* specified number of years.
*
* @param me the node to find the friends of.
* @param livesIn The name of the place where the friends should live.
* @param knownForYears the minimum age (in years) of the friendship.
* @return all nodes that live in the specified place that the specified
* nodes has known for the specified number of years.
*/
public Iterable<Node> findFriendsSinceSpecifiedTimeInSpecifiedPlace(
Node me, String livesIn,
final int knownForYears )
{
PatternNode root = new PatternNode(), place = new PatternNode();
final PatternNode friend = new PatternNode();
// Define the friendship
PatternRelationship friendship = root.createRelationshipTo( friend,
FriendshipTypes.FRIEND, Direction.BOTH );
// Define the age of the friendship
friendship.addPropertyConstraint( "since", new ValueMatcher()
{
long now = new Date().getTime();
@Override
public boolean matches( Object value )
{
if ( value instanceof Long )
{
long ageInDays = ( now - (Long) value )
/ MILLSECONDS_PER_DAY;
return ageInDays > ( knownForYears * 365 );
}
return false;
}
} );
// Define the place where the friend lives
friend.createRelationshipTo( place, FriendshipTypes.LIVES_IN );
place.addPropertyConstraint( "name",
CommonValueMatchers.exact( livesIn ) );
// Perform the matching
PatternMatcher matcher = PatternMatcher.getMatcher();
Iterable<PatternMatch> matches = matcher.match( root, me );
// Return the result
return new IterableWrapper<Node, PatternMatch>( matches )
{
@Override
protected Node underlyingObjectToObject( PatternMatch match )
{
return match.getNodeFor( friend );
}
};
}
// END SNIPPET: findFriends
@Test
public void verifyFunctionalityOfFindNodesWithRelationshipsTo()
throws Exception
{
final RelationshipType type = DynamicRelationshipType.withName( "RELATED" );
Node[] nodes = createGraph( new GraphDefinition<Node[]>()
{
@Override
public Node[] create( GraphDatabaseService graphdb )
{
Node[] nodes = new Node[5];
for ( int i = 0; i < nodes.length; i++ )
{
nodes[i] = graphdb.createNode();
}
for ( int i = 0; i < 3; i++ )
{
Node node = graphdb.createNode();
for ( int j = 0; j < nodes.length; j++ )
{
nodes[j].createRelationshipTo( node, type );
}
}
return nodes;
}
} );
try ( Transaction tx = graphDb.getGraphDatabaseService().beginTx() )
{
assertEquals( 3, count( findNodesWithRelationshipsTo( type, nodes ) ) );
tx.success();
}
}
@Test
public void verifyFunctionalityOfFindFriendsSinceSpecifiedTimeInSpecifiedPlace()
throws Exception
{
Node root = createGraph( new GraphDefinition<Node>()
{
@Override
public Node create( GraphDatabaseService graphdb )
{
Node me = graphdb.createNode();
Node stockholm = graphdb.createNode(), gothenburg = graphdb.createNode();
stockholm.setProperty( "name", "Stockholm" );
gothenburg.setProperty( "name", "Gothenburg" );
Node andy = friend( me, graphdb.createNode(), "Andy", 10,
stockholm );
friend( me, graphdb.createNode(), "Bob", 5, stockholm );
Node cecilia = friend( me, graphdb.createNode(), "Cecilia", 2,
stockholm );
andy.createRelationshipTo( cecilia, FriendshipTypes.FRIEND ).setProperty(
"since", yearsAgo( 10 ) );
friend( me, graphdb.createNode(), "David", 10, gothenburg );
return me;
}
Node friend( Node me, Node friend, String name, int knownForYears,
Node place )
{
friend.setProperty( "name", name );
me.createRelationshipTo( friend, FriendshipTypes.FRIEND ).setProperty(
"since", yearsAgo( knownForYears ) );
friend.createRelationshipTo( place, FriendshipTypes.LIVES_IN );
return friend;
}
Calendar calendar = Calendar.getInstance();
long yearsAgo( int years )
{
return new GregorianCalendar( calendar.get( Calendar.YEAR )
- years,
calendar.get( Calendar.MONTH ),
calendar.get( Calendar.DATE ) ).getTime().getTime();
}
} );
Set<String> expected = new HashSet<>( Arrays.asList( "Andy", "Bob" ) );
Iterable<Node> friends = findFriendsSinceSpecifiedTimeInSpecifiedPlace( root, "Stockholm", 3 );
try ( Transaction transaction = graphDb.getGraphDatabaseService().beginTx() )
{
for ( Node friend : friends )
{
String name = (String) friend.getProperty( "name", null );
assertNotNull( name );
assertTrue( "Unexpected friend: " + name, expected.remove( name ) );
}
assertTrue( "These friends were not found: " + expected, expected.isEmpty() );
}
}
private int count( Iterable<?> objects )
{
int count = 0;
for ( @SuppressWarnings( "unused" ) Object object : objects )
{
count++;
}
return count;
}
private interface GraphDefinition<RESULT>
{
RESULT create( GraphDatabaseService graphdb );
}
private <T> T createGraph( GraphDefinition<T> definition )
{
try ( Transaction tx = graphDb.getGraphDatabaseService().beginTx() )
{
T result = definition.create( graphDb.getGraphDatabaseService() );
tx.success();
return result;
}
}
}
| false
|
community_graph-matching_src_test_java_examples_TestSiteIndexExamples.java
|
3,955
|
{
long now = new Date().getTime();
@Override
public boolean matches( Object value )
{
if ( value instanceof Long )
{
long ageInDays = ( now - (Long) value )
/ MILLSECONDS_PER_DAY;
return ageInDays > ( knownForYears * 365 );
}
return false;
}
} );
| false
|
community_graph-matching_src_test_java_examples_TestSiteIndexExamples.java
|
3,956
|
public class RebuildSegmentInfo
{
public static void main( String[] args ) throws Exception
{
if(args.length != 1)
{
System.out.println("Usage: RebuildSegmentInfo <INDEX DIRECTORY>");
System.exit( 0 );
}
String path = args[0];
File file = new File( path );
Directory directory = FSDirectory.open( file );
SegmentInfos infos = new SegmentInfos();
int counter = 0;
for ( String fileName : directory.listAll() )
{
if ( directory.fileLength( fileName ) == 0 || !fileName.endsWith( "cfs" ) )
{
System.out.println( "Skipping " + fileName + ", size " + directory.fileLength( fileName ) );
continue;
}
else
{
System.out.println( "Doing " + fileName + ", size " + directory.fileLength( fileName ) );
}
String segmentName = fileName.substring( 1, fileName.lastIndexOf( '.' ) );
int segmentInt = Integer.parseInt( segmentName, Character.MAX_RADIX );
counter = Math.max( counter, segmentInt );
segmentName = fileName.substring( 0, fileName.lastIndexOf( '.' ) );
Directory fileReader = new CompoundFileReader( directory, fileName );
IndexInput indexStream = fileReader.openInput( segmentName + ".cfs" );
SegmentInfo segmentInfo = new SegmentInfo( directory, SegmentInfos.CURRENT_FORMAT, indexStream );
System.out.println( "Name was: \"" + segmentInfo.name + "\"" );
System.out.println( "Doc count was: " + segmentInfo.docCount );
infos.add( segmentInfo );
// indexStream.close();
// fileReader.close();
}
infos.counter = ++counter;
infos.commit( directory );
}
}
| false
|
community_lucene-index_src_main_java_org_apache_lucene_index_RebuildSegmentInfo.java
|
3,957
|
{
@Override
protected Node underlyingObjectToObject( PatternMatch match )
{
return match.getNodeFor( friend );
}
};
| false
|
community_graph-matching_src_test_java_examples_TestSiteIndexExamples.java
|
3,958
|
public class RebuildCompoundFile
{
public static void main( String[] args ) throws Exception
{
if(args.length != 1)
{
System.out.println("Usage: RebuildCompoundFile <INDEX DIRECTORY>");
System.exit( 0 );
}
String path = args[0];
File baseDirectory = new File( path );
Directory directory = FSDirectory.open( baseDirectory );
CompoundFileWriter writer = new CompoundFileWriter( directory, "_drr.cfs" );
for ( String pathname : directory.listAll() )
{
if ( pathname.endsWith( "gen" ) || pathname.endsWith( "cfs" ) || pathname.endsWith( "12" ) )
{
continue;
}
writer.addFile( pathname );
}
writer.close();
}
}
| false
|
community_lucene-index_src_main_java_org_apache_lucene_index_RebuildCompoundFile.java
|
3,959
|
public class IndexWriterAccessor
{
public static boolean isClosed(IndexWriter writer)
{
return writer.isClosed();
}
}
| false
|
community_lucene-index_src_test_java_org_apache_lucene_index_IndexWriterAccessor.java
|
3,960
|
public class TestPatternMatching implements GraphHolder
{
@Override
public GraphDatabaseService graphdb()
{
return graphDb;
}
public @Rule
TestData<Map<String, Node>> data = TestData.producedThrough( GraphDescription.createGraphFor( this, true ) );
private static GraphDatabaseService graphDb;
private Transaction tx;
private static enum MyRelTypes implements RelationshipType
{
R1,
R2,
R3,
hasRoleInGroup,
hasGroup,
hasRole
}
private Node createInstance( String name )
{
Node node = graphDb.createNode();
node.setProperty( "name", name );
return node;
}
@BeforeClass
public static void setUpDb()
{
graphDb = new GraphDatabaseFactory().newEmbeddedDatabase( TargetDirectory.forTest( TestPatternMatching.class ).makeGraphDbDir().getAbsolutePath() );
}
@Before
public void setUpTx()
{
tx = graphDb.beginTx();
}
@After
public void tearDownTx()
{
tx.finish();
}
@AfterClass
public static void tearDownDb()
{
graphDb.shutdown();
}
private Iterable<PatternMatch> doMatch( PatternNode pNode )
{
return PatternMatcher.getMatcher().match( pNode, new HashMap<String, PatternNode>() );
}
private Iterable<PatternMatch> doMatch( PatternNode pNode, Node node )
{
return PatternMatcher.getMatcher().match( pNode, node, new HashMap<String, PatternNode>() );
}
private Iterable<PatternMatch> doMatch( PatternNode pNode, Node node, PatternNode... optionalNodes )
{
return PatternMatcher.getMatcher().match( pNode, node, new HashMap<String, PatternNode>(), optionalNodes );
}
@Test
public void testAllRelTypes()
{
final RelationshipType R1 = MyRelTypes.R1;
final RelationshipType R2 = MyRelTypes.R2;
Node a1 = createInstance( "a1" );
Node b1 = createInstance( "b1" );
Set<Relationship> relSet = new HashSet<Relationship>();
relSet.add( a1.createRelationshipTo( b1, R1 ) );
relSet.add( a1.createRelationshipTo( b1, R2 ) );
PatternNode pA = new PatternNode();
PatternNode pB = new PatternNode();
PatternRelationship pRel = pA.createRelationshipTo( pB );
int count = 0;
for ( PatternMatch match : doMatch( pA, a1 ) )
{
assertEquals( match.getNodeFor( pA ), a1 );
assertEquals( match.getNodeFor( pB ), b1 );
assertTrue( relSet.remove( match.getRelationshipFor( pRel ) ) );
count++;
}
assertEquals( 0, relSet.size() );
assertEquals( 2, count );
}
@Test
public void testAllRelTypesWithRelProperty()
{
final RelationshipType R1 = MyRelTypes.R1;
final RelationshipType R2 = MyRelTypes.R2;
Node a1 = createInstance( "a1" );
Node b1 = createInstance( "b1" );
Relationship rel = a1.createRelationshipTo( b1, R1 );
rel = a1.createRelationshipTo( b1, R2 );
rel.setProperty( "musthave", true );
PatternNode pA = new PatternNode();
PatternNode pB = new PatternNode();
PatternRelationship pRel = pA.createRelationshipTo( pB );
pRel.addPropertyConstraint( "musthave", CommonValueMatchers.has() );
int count = 0;
for ( PatternMatch match : doMatch( pA, a1 ) )
{
assertEquals( match.getNodeFor( pA ), a1 );
assertEquals( match.getNodeFor( pB ), b1 );
count++;
}
assertEquals( 1, count );
}
@Test
public void testTeethStructure()
{
final RelationshipType R1 = MyRelTypes.R1;
final RelationshipType R2 = MyRelTypes.R2;
Node aT = createInstance( "aType" );
Node a1 = createInstance( "a1" );
Node bT = createInstance( "bType" );
Node b1 = createInstance( "b1" );
Node cT = createInstance( "cType" );
Node c1 = createInstance( "c1" );
Node c2 = createInstance( "c2" );
Node dT = createInstance( "dType" );
Node d1 = createInstance( "d1" );
Node d2 = createInstance( "d2" );
Node eT = createInstance( "eType" );
Node e1 = createInstance( "e1" );
aT.createRelationshipTo( a1, R1 );
bT.createRelationshipTo( b1, R1 );
cT.createRelationshipTo( c1, R1 );
cT.createRelationshipTo( c2, R1 );
dT.createRelationshipTo( d1, R1 );
dT.createRelationshipTo( d2, R1 );
eT.createRelationshipTo( e1, R1 );
a1.createRelationshipTo( b1, R2 );
b1.createRelationshipTo( c1, R2 );
b1.createRelationshipTo( c2, R2 );
c1.createRelationshipTo( d1, R2 );
c2.createRelationshipTo( d2, R2 );
d1.createRelationshipTo( e1, R2 );
d2.createRelationshipTo( e1, R2 );
PatternNode pA = new PatternNode();
PatternNode pAI = new PatternNode();
pA.createRelationshipTo( pAI, R1 );
PatternNode pB = new PatternNode();
PatternNode pBI = new PatternNode();
pB.createRelationshipTo( pBI, R1 );
PatternNode pC = new PatternNode();
PatternNode pCI = new PatternNode();
pC.createRelationshipTo( pCI, R1 );
PatternNode pD = new PatternNode();
PatternNode pDI = new PatternNode();
pD.createRelationshipTo( pDI, R1 );
PatternNode pE = new PatternNode();
PatternNode pEI = new PatternNode();
pE.createRelationshipTo( pEI, R1 );
pAI.createRelationshipTo( pBI, R2 );
pBI.createRelationshipTo( pCI, R2 );
pCI.createRelationshipTo( pDI, R2 );
pDI.createRelationshipTo( pEI, R2 );
int count = 0;
for ( PatternMatch match : doMatch( pA, aT ) )
{
assertEquals( match.getNodeFor( pA ), aT );
assertEquals( match.getNodeFor( pAI ), a1 );
assertEquals( match.getNodeFor( pB ), bT );
assertEquals( match.getNodeFor( pBI ), b1 );
assertEquals( match.getNodeFor( pC ), cT );
Node c = match.getNodeFor( pCI );
if ( !c.equals( c1 ) && !c.equals( c2 ) )
{
fail( "either c1 or c2" );
}
assertEquals( match.getNodeFor( pD ), dT );
Node d = match.getNodeFor( pDI );
if ( !d.equals( d1 ) && !d.equals( d2 ) )
{
fail( "either d1 or d2" );
}
assertEquals( match.getNodeFor( pE ), eT );
assertEquals( match.getNodeFor( pEI ), e1 );
count++;
}
assertEquals( 2, count );
count = 0;
for ( PatternMatch match : doMatch( pCI, c2 ) )
{
assertEquals( match.getNodeFor( pA ), aT );
assertEquals( match.getNodeFor( pAI ), a1 );
assertEquals( match.getNodeFor( pB ), bT );
assertEquals( match.getNodeFor( pBI ), b1 );
assertEquals( match.getNodeFor( pC ), cT );
assertEquals( match.getNodeFor( pCI ), c2 );
assertEquals( match.getNodeFor( pD ), dT );
assertEquals( match.getNodeFor( pDI ), d2 );
assertEquals( match.getNodeFor( pE ), eT );
assertEquals( match.getNodeFor( pEI ), e1 );
count++;
}
assertEquals( 1, count );
}
@Test
public void testNonCyclicABC()
{
Node a = createInstance( "A" );
Node b1 = createInstance( "B1" );
Node b2 = createInstance( "B2" );
Node b3 = createInstance( "B3" );
Node c = createInstance( "C" );
final RelationshipType R = MyRelTypes.R1;
Relationship rAB1 = a.createRelationshipTo( b1, R );
Relationship rAB2 = a.createRelationshipTo( b2, R );
Relationship rAB3 = a.createRelationshipTo( b3, R );
Relationship rB1C = b1.createRelationshipTo( c, R );
Relationship rB2C = b2.createRelationshipTo( c, R );
Relationship rB3C = b3.createRelationshipTo( c, R );
PatternNode pA = new PatternNode();
PatternNode pB = new PatternNode();
PatternNode pC = new PatternNode();
PatternRelationship pAB = pA.createRelationshipTo( pB, R );
PatternRelationship pBC = pB.createRelationshipTo( pC, R );
int count = 0;
for ( PatternMatch match : doMatch( pA, a ) )
{
assertEquals( match.getNodeFor( pA ), a );
Node b = match.getNodeFor( pB );
if ( !b.equals( b1 ) && !b.equals( b2 ) && !b.equals( b3 ) )
{
fail( "either b1 or b2 or b3" );
}
Relationship rB = match.getRelationshipFor( pAB );
if ( !rAB1.equals( rB ) && !rAB2.equals( rB ) && !rAB3.equals( rB ) )
{
fail( "either rAB1, rAB2 or rAB3" );
}
assertEquals( match.getNodeFor( pC ), c );
Relationship rC = match.getRelationshipFor( pBC );
if ( !rB1C.equals( rC ) && !rB2C.equals( rC ) && !rB3C.equals( rC ) )
{
fail( "either rB1C, rB2C or rB3C" );
}
count++;
}
assertEquals( 3, count );
count = 0;
for ( PatternMatch match : doMatch( pB, b2 ) )
{
assertEquals( match.getNodeFor( pA ), a );
assertEquals( match.getNodeFor( pB ), b2 );
assertEquals( match.getNodeFor( pC ), c );
count++;
}
assertEquals( 1, count );
}
@Test
public void testCyclicABC()
{
Node a = createInstance( "A" );
Node b1 = createInstance( "B1" );
Node b2 = createInstance( "B2" );
Node b3 = createInstance( "B3" );
Node c = createInstance( "C" );
final RelationshipType R = MyRelTypes.R1;
a.createRelationshipTo( b1, R );
a.createRelationshipTo( b2, R );
a.createRelationshipTo( b3, R );
b1.createRelationshipTo( c, R );
b2.createRelationshipTo( c, R );
b3.createRelationshipTo( c, R );
c.createRelationshipTo( a, R );
PatternNode pA = new PatternNode();
PatternNode pB = new PatternNode();
PatternNode pC = new PatternNode();
pA.createRelationshipTo( pB, R );
pB.createRelationshipTo( pC, R );
pC.createRelationshipTo( pA, R );
int count = 0;
for ( PatternMatch match : doMatch( pA, a ) )
{
assertEquals( match.getNodeFor( pA ), a );
Node b = match.getNodeFor( pB );
if ( !b.equals( b1 ) && !b.equals( b2 ) && !b.equals( b3 ) )
{
fail( "either b1 or b2 or b3" );
}
assertEquals( match.getNodeFor( pC ), c );
count++;
}
assertEquals( 3, count );
count = 0;
for ( PatternMatch match : doMatch( pB, b2 ) )
{
assertEquals( match.getNodeFor( pA ), a );
Node b = match.getNodeFor( pB );
if ( !b.equals( b1 ) && !b.equals( b2 ) && !b.equals( b3 ) )
{
fail( "either b1 or b2 or b3" );
}
assertEquals( match.getNodeFor( pC ), c );
count++;
}
assertEquals( 1, count );
}
@Test
public void testPropertyABC()
{
Node a = createInstance( "A" );
a.setProperty( "hasProperty", true );
Node b1 = createInstance( "B1" );
b1.setProperty( "equals", 1 );
b1.setProperty( "name", "Thomas Anderson" );
Node b2 = createInstance( "B2" );
b2.setProperty( "equals", 1 );
b2.setProperty( "name", "Thomas Anderson" );
Node b3 = createInstance( "B3" );
b3.setProperty( "equals", 2 );
Node c = createInstance( "C" );
final RelationshipType R = MyRelTypes.R1;
a.createRelationshipTo( b1, R );
a.createRelationshipTo( b2, R );
a.createRelationshipTo( b3, R );
b1.createRelationshipTo( c, R );
b2.createRelationshipTo( c, R );
b3.createRelationshipTo( c, R );
PatternNode pA = new PatternNode();
pA.addPropertyConstraint( "hasProperty", CommonValueMatchers.has() );
PatternNode pB = new PatternNode();
pB.addPropertyConstraint( "equals", CommonValueMatchers.exact( 1 ) );
pB.addPropertyConstraint( "name", CommonValueMatchers.regex( Pattern.compile( "^Thomas.*" ) ) );
PatternNode pC = new PatternNode();
pA.createRelationshipTo( pB, R );
pB.createRelationshipTo( pC, R );
int count = 0;
for ( PatternMatch match : doMatch( pA, a ) )
{
assertEquals( match.getNodeFor( pA ), a );
Node b = match.getNodeFor( pB );
if ( !b.equals( b1 ) && !b.equals( b2 ) )
{
fail( "either b1 or b2" );
}
assertEquals( match.getNodeFor( pC ), c );
count++;
}
assertEquals( 2, count );
count = 0;
for ( PatternMatch match : doMatch( pB, b2 ) )
{
assertEquals( match.getNodeFor( pA ), a );
assertEquals( match.getNodeFor( pB ), b2 );
assertEquals( match.getNodeFor( pC ), c );
count++;
}
assertEquals( 1, count );
}
@Test
public void testOptional()
{
Node a = createInstance( "A" );
Node b1 = createInstance( "B1" );
Node b2 = createInstance( "B2" );
Node c = createInstance( "C" );
Node d1 = createInstance( "D1" );
Node d2 = createInstance( "D2" );
Node e1 = createInstance( "E1" );
Node e2 = createInstance( "E2" );
Node f1 = createInstance( "F1" );
Node f2 = createInstance( "F2" );
Node f3 = createInstance( "F3" );
final RelationshipType R1 = MyRelTypes.R1;
final RelationshipType R2 = MyRelTypes.R2;
final RelationshipType R3 = MyRelTypes.R3;
a.createRelationshipTo( b1, R1 );
a.createRelationshipTo( b2, R1 );
a.createRelationshipTo( c, R2 );
a.createRelationshipTo( f1, R3 );
a.createRelationshipTo( f2, R3 );
a.createRelationshipTo( f3, R3 );
c.createRelationshipTo( d1, R1 );
c.createRelationshipTo( d2, R1 );
d1.createRelationshipTo( e1, R2 );
d1.createRelationshipTo( e2, R2 );
// Required part of the graph
PatternNode pA = new PatternNode( "pA" );
PatternNode pC = new PatternNode( "pC" );
pA.createRelationshipTo( pC, R2 );
// First optional branch
PatternNode oA1 = new PatternNode( "pA" );
PatternNode oB1 = new PatternNode( "pB" );
oA1.createOptionalRelationshipTo( oB1, R1 );
// // Second optional branch
PatternNode oA2 = new PatternNode( "pA" );
PatternNode oF2 = new PatternNode( "pF" );
oA2.createOptionalRelationshipTo( oF2, R3 );
// Third optional branch
PatternNode oC3 = new PatternNode( "pC" );
PatternNode oD3 = new PatternNode( "pD" );
PatternNode oE3 = new PatternNode( "pE" );
oC3.createOptionalRelationshipTo( oD3, R1 );
oD3.createOptionalRelationshipTo( oE3, R2 );
// Test that all permutations are there and that multiple optional
// branches work.
int count = 0;
for ( PatternMatch match : doMatch( pA, a, oA1, oA2, oC3 ) )
{
assertEquals( match.getNodeFor( pA ), a );
Node bMatch = match.getNodeFor( oB1 );
if ( !bMatch.equals( b1 ) && !bMatch.equals( b2 ) )
{
fail( "either b1 or b2" );
}
Node fMatch = match.getNodeFor( oF2 );
if ( !fMatch.equals( f1 ) && !fMatch.equals( f2 ) && !fMatch.equals( f3 ) )
{
fail( "either f1, f2 or f3" );
}
assertEquals( match.getNodeFor( pC ), c );
assertEquals( match.getNodeFor( oD3 ), d1 );
Node eMatch = match.getNodeFor( oE3 );
assertTrue( eMatch.equals( e1 ) || eMatch.equals( e2 ) );
count++;
}
assertEquals( count, 12 );
// Test that unmatched optional branches are ignored.
PatternNode pI = new PatternNode( "pI" );
PatternNode pJ = new PatternNode( "pJ" );
PatternNode pK = new PatternNode( "pK" );
PatternNode pL = new PatternNode( "pL" );
pI.createOptionalRelationshipTo( pJ, R1 );
pI.createRelationshipTo( pK, R2 );
pK.createOptionalRelationshipTo( pL, R2 );
count = 0;
for ( PatternMatch match : doMatch( pI, a, pI, pK ) )
{
assertEquals( match.getNodeFor( pI ), a );
Node jMatch = match.getNodeFor( pJ );
if ( !jMatch.equals( b1 ) && !jMatch.equals( b2 ) )
{
fail( "either b1 or b2" );
}
assertEquals( match.getNodeFor( pK ), c );
assertEquals( match.getNodeFor( pL ), null );
count++;
}
assertEquals( count, 2 );
}
@Test
public void testOptional2()
{
Node a = createInstance( "A" );
Node b1 = createInstance( "B1" );
Node b2 = createInstance( "B2" );
Node b3 = createInstance( "B3" );
Node c1 = createInstance( "C1" );
Node c3 = createInstance( "C3" );
final RelationshipType R1 = MyRelTypes.R1;
final RelationshipType R2 = MyRelTypes.R2;
a.createRelationshipTo( b1, R1 );
a.createRelationshipTo( b2, R1 );
a.createRelationshipTo( b3, R1 );
b1.createRelationshipTo( c1, R2 );
b3.createRelationshipTo( c3, R2 );
// Required part of the graph
PatternNode pA = new PatternNode( "pA" );
PatternNode pB = new PatternNode( "pB" );
pA.createRelationshipTo( pB, R1 );
// Optional part of the graph
PatternNode oB = new PatternNode( "pB" );
PatternNode oC = new PatternNode( "oC" );
oB.createOptionalRelationshipTo( oC, R2 );
int count = 0;
for ( PatternMatch match : doMatch( pA, a, oB ) )
{
assertEquals( match.getNodeFor( pA ), a );
Node bMatch = match.getNodeFor( pB );
Node optionalBMatch = match.getNodeFor( oB );
Node optionalCMatch = match.getNodeFor( oC );
if ( !bMatch.equals( b1 ) && !bMatch.equals( b2 ) && !bMatch.equals( b3 ) )
{
fail( "either b1, b2 or b3" );
}
if ( optionalBMatch != null )
{
assertEquals( bMatch, optionalBMatch );
if ( optionalBMatch.equals( b1 ) )
{
assertEquals( optionalCMatch, c1 );
}
else if ( optionalBMatch.equals( b3 ) )
{
assertEquals( optionalCMatch, c3 );
}
else
{
assertEquals( optionalCMatch, null );
}
}
count++;
}
assertEquals( count, 3 );
}
@Test
public void testArrayPropertyValues()
{
Node a = createInstance( "A" );
a.setProperty( "hasProperty", true );
Node b1 = createInstance( "B1" );
b1.setProperty( "equals", new Integer[] { 19, 1 } );
Node b2 = createInstance( "B2" );
b2.setProperty( "equals", new Integer[] { 1, 10, 12 } );
Node b3 = createInstance( "B3" );
b3.setProperty( "equals", 2 );
Node c = createInstance( "C" );
final RelationshipType R = MyRelTypes.R1;
a.createRelationshipTo( b1, R );
a.createRelationshipTo( b2, R );
a.createRelationshipTo( b3, R );
b1.createRelationshipTo( c, R );
b2.createRelationshipTo( c, R );
b3.createRelationshipTo( c, R );
PatternNode pA = new PatternNode();
pA.addPropertyConstraint( "hasProperty", CommonValueMatchers.has() );
PatternNode pB = new PatternNode();
pB.addPropertyConstraint( "equals", CommonValueMatchers.exactAny( 1 ) );
PatternNode pC = new PatternNode();
pA.createRelationshipTo( pB, R );
pB.createRelationshipTo( pC, R );
int count = 0;
for ( PatternMatch match : doMatch( pA, a ) )
{
assertEquals( match.getNodeFor( pA ), a );
Node b = match.getNodeFor( pB );
if ( !b.equals( b1 ) && !b.equals( b2 ) )
{
fail( "either b1 or b2" );
}
assertEquals( match.getNodeFor( pC ), c );
count++;
}
assertEquals( 2, count );
count = 0;
for ( PatternMatch match : doMatch( pB, b2 ) )
{
assertEquals( match.getNodeFor( pA ), a );
assertEquals( match.getNodeFor( pB ), b2 );
assertEquals( match.getNodeFor( pC ), c );
count++;
}
assertEquals( 1, count );
}
@Test
public void testDiamond()
{
// C
// / \
// B---D
// \ /
// A
Node a = createInstance( "A" );
Node b = createInstance( "B" );
Node c = createInstance( "C" );
Node d = createInstance( "D" );
final RelationshipType R1 = MyRelTypes.R1;
final RelationshipType R2 = MyRelTypes.R2;
a.createRelationshipTo( b, R1 );
a.createRelationshipTo( d, R1 );
b.createRelationshipTo( d, R2 );
c.createRelationshipTo( b, R1 );
c.createRelationshipTo( d, R1 );
PatternNode pA = new PatternNode();
PatternNode pB = new PatternNode();
PatternNode pC = new PatternNode();
PatternNode pD = new PatternNode();
pA.createRelationshipTo( pB, R1, Direction.BOTH );
pB.createRelationshipTo( pC, R2, Direction.BOTH );
pC.createRelationshipTo( pD, R1, Direction.BOTH );
int count = 0;
for ( PatternMatch match : doMatch( pA, a ) )
{
count++;
}
assertEquals( 4, count );
}
@Test
@Graph( {
"User1 hasRoleInGroup U1G1R12",
"U1G1R12 hasGroup Group1",
"U1G1R12 hasRole Role1",
"U1G1R12 hasRole Role2",
"User1 hasRoleInGroup U1G2R23",
"U1G2R23 hasGroup Group2",
"U1G2R23 hasRole Role2",
"U1G2R23 hasRole Role3",
"User1 hasRoleInGroup U1G3R34",
"U1G3R34 hasGroup Group3",
"U1G3R34 hasRole Role3",
"U1G3R34 hasRole Role4",
"User2 hasRoleInGroup U2G1R25",
"U2G1R25 hasGroup Group1",
"U2G1R25 hasRole Role2",
"U2G1R25 hasRole Role5",
"User2 hasRoleInGroup U2G2R34",
"U2G2R34 hasGroup Group2",
"U2G2R34 hasRole Role3",
"U2G2R34 hasRole Role4",
"User2 hasRoleInGroup U2G3R56",
"U2G3R56 hasGroup Group3",
"U2G3R56 hasRole Role5",
"U2G3R56 hasRole Role6"
} )
public void testHyperedges()
{
Map<String, Node> nodeMap = data.get();
Node user1 = nodeMap.get( "User1" );
PatternNode u1 = new PatternNode( "U1" );
PatternNode u2 = new PatternNode( "U2" );
PatternNode hyperEdge1 = new PatternNode( "UGR1" );
PatternNode hyperEdge2 = new PatternNode( "UGR2" );
PatternNode group = new PatternNode( "G" );
PatternNode role = new PatternNode( "R" );
u1.createRelationshipTo( hyperEdge1, MyRelTypes.hasRoleInGroup, Direction.OUTGOING );
u2.createRelationshipTo( hyperEdge2, MyRelTypes.hasRoleInGroup, Direction.OUTGOING );
hyperEdge1.createRelationshipTo( group, MyRelTypes.hasGroup, Direction.OUTGOING );
hyperEdge1.createRelationshipTo( role, MyRelTypes.hasRole, Direction.OUTGOING );
hyperEdge2.createRelationshipTo( group, MyRelTypes.hasGroup, Direction.OUTGOING );
hyperEdge2.createRelationshipTo( role, MyRelTypes.hasRole, Direction.OUTGOING );
u1.setAssociation( nodeMap.get( "User1" ) );
u2.setAssociation( nodeMap.get( "User2" ) );
List<Node> expected = new ArrayList<Node>( asList( nodeMap.get( "Group1" ), nodeMap.get( "Group2" ) ) );
for ( PatternMatch match : doMatch( u1, nodeMap.get( "User1" ) ) )
{
Node matchedNode = match.getNodeFor( group );
boolean remove = expected.remove( matchedNode );
assertTrue( "Unexpected node matched: " + matchedNode.getProperty( "name" ), remove );
}
assertTrue( "Not all nodes were found", expected.isEmpty() );
}
private void execAndWait( String... args ) throws Exception
{
Process process = Runtime.getRuntime().exec( args );
new ProcessStreamHandler( process, true ).waitForResult();
}
@Test
public void testDiamondWithAssociation()
{
// C
// / \
// B---D
// \ /
// A
Node a = createInstance( "A" );
Node b = createInstance( "B" );
Node c = createInstance( "C" );
Node d = createInstance( "D" );
final RelationshipType R1 = MyRelTypes.R1;
final RelationshipType R2 = MyRelTypes.R2;
a.createRelationshipTo( b, R1 );
Relationship relAD = a.createRelationshipTo( d, R1 );
b.createRelationshipTo( d, R2 );
c.createRelationshipTo( b, R1 );
c.createRelationshipTo( d, R1 );
PatternNode pA = new PatternNode();
PatternNode pB = new PatternNode();
PatternNode pC = new PatternNode();
PatternNode pD = new PatternNode();
pA.createRelationshipTo( pB, R1, Direction.BOTH );
pB.createRelationshipTo( pC, R2, Direction.BOTH );
PatternRelationship lastRel = pC.createRelationshipTo( pD, R1, Direction.BOTH );
pA.setAssociation( a );
pB.setAssociation( b );
pC.setAssociation( d );
pD.setAssociation( a );
int count = 0;
for ( PatternMatch match : doMatch( pA ) )
{
count++;
}
assertEquals( 1, count );
pD.setAssociation( null );
count = 0;
for ( PatternMatch match : doMatch( pA ) )
{
count++;
}
assertEquals( 2, count );
lastRel.setAssociation( relAD );
count = 0;
for ( PatternMatch match : doMatch( pA ) )
{
count++;
}
assertEquals( 1, count );
}
}
| false
|
community_graph-matching_src_test_java_matching_TestPatternMatching.java
|
3,961
|
{
@Override
protected Node underlyingObjectToObject( PatternMatch match )
{
return match.getNodeFor( message );
}
};
| false
|
community_graph-matching_src_test_java_matching_TestMatchingOfCircularPattern.java
|
3,962
|
private static class VisibleMessagesByFollowedUsers implements
Iterable<Node>
{
private final PatternNode start = new PatternNode();
private final PatternNode message = new PatternNode();
private final Node startNode;
public VisibleMessagesByFollowedUsers( Node startNode )
{
this.startNode = startNode;
if ( !STATIC_PATTERN ) start.setAssociation( startNode );
PatternNode user = new PatternNode();
start.createRelationshipTo( user, withName( "FOLLOWS" ) );
user.createRelationshipTo( message, withName( "CREATED" ) );
message.createRelationshipTo( start, withName( "IS_VISIBLE_BY" ) );
}
@Override
public Iterator<Node> iterator()
{
Iterable<PatternMatch> matches = PatternMatcher.getMatcher().match(
start, startNode );
return new IteratorWrapper<Node, PatternMatch>( matches.iterator() )
{
@Override
protected Node underlyingObjectToObject( PatternMatch match )
{
return match.getNodeFor( message );
}
};
}
}
| false
|
community_graph-matching_src_test_java_matching_TestMatchingOfCircularPattern.java
|
3,963
|
{
@Override
public boolean isStopNode( TraversalPosition currentPos )
{
return currentPos.depth() >= depth;
}
};
| false
|
community_graph-matching_src_test_java_matching_TestMatchingOfCircularPattern.java
|
3,964
|
{
@Override
public boolean isReturnableNode( TraversalPosition pos )
{
Node node = pos.currentNode();
return isMessage( node )
&& isVisibleTo( node, startNode );
}
}, withName( "FOLLOWS" ), Direction.OUTGOING,
| false
|
community_graph-matching_src_test_java_matching_TestMatchingOfCircularPattern.java
|
3,965
|
public class TestMatchingOfCircularPattern
{
static private final boolean STATIC_PATTERN = false;
private static class VisibleMessagesByFollowedUsers implements
Iterable<Node>
{
private final PatternNode start = new PatternNode();
private final PatternNode message = new PatternNode();
private final Node startNode;
public VisibleMessagesByFollowedUsers( Node startNode )
{
this.startNode = startNode;
if ( !STATIC_PATTERN ) start.setAssociation( startNode );
PatternNode user = new PatternNode();
start.createRelationshipTo( user, withName( "FOLLOWS" ) );
user.createRelationshipTo( message, withName( "CREATED" ) );
message.createRelationshipTo( start, withName( "IS_VISIBLE_BY" ) );
}
@Override
public Iterator<Node> iterator()
{
Iterable<PatternMatch> matches = PatternMatcher.getMatcher().match(
start, startNode );
return new IteratorWrapper<Node, PatternMatch>( matches.iterator() )
{
@Override
protected Node underlyingObjectToObject( PatternMatch match )
{
return match.getNodeFor( message );
}
};
}
}
private static final int EXPECTED_VISIBLE_MESSAGE_COUNT = 3;
private static Node user;
public static void setupGraph()
{
user = graphdb.createNode();
Node user1 = graphdb.createNode(), user2 = graphdb.createNode(), user3 = graphdb.createNode();
user.createRelationshipTo( user1, withName( "FOLLOWS" ) );
user1.createRelationshipTo( user3, withName( "FOLLOWS" ) );
user.createRelationshipTo( user2, withName( "FOLLOWS" ) );
createMessage( user, "invisible", user1, user2 );
createMessage( user1, "visible", user, user2, user3 );
createMessage( user1, "visible", user );
createMessage( user2, "visible", user, user1 );
createMessage( user2, "invisible", user1, user3 );
createMessage( user3, "invisible", user1, user2 );
createMessage( user3, "invisible", user );
}
private static void createMessage( Node creator, String text,
Node... visibleBy )
{
Node message = graphdb.createNode();
message.setProperty( "text", text );
creator.createRelationshipTo( message, withName( "CREATED" ) );
for ( Node user : visibleBy )
{
message.createRelationshipTo( user, withName( "IS_VISIBLE_BY" ) );
}
}
@Test
public void straightPathsWork()
{
Node start = graphdb.createNode();
Node u1 = graphdb.createNode(), u2 = graphdb.createNode(), u3 = graphdb.createNode();
start.createRelationshipTo( u1, withName( "FOLLOWS" ) );
start.createRelationshipTo( u2, withName( "FOLLOWS" ) );
start.createRelationshipTo( u3, withName( "FOLLOWS" ) );
createMessage( u1, "visible", start );
createMessage( u2, "visible", start );
createMessage( u3, "visible", start );
for ( Node message : new VisibleMessagesByFollowedUsers( start ) )
{
verifyMessage( message );
}
tx.success();
}
@Test
public void messageNodesAreOnlyReturnedOnce()
{
Map<Node, Integer> counts = new HashMap<Node, Integer>();
for ( Node message : new VisibleMessagesByFollowedUsers( user ) )
{
Integer seen = counts.get( message );
counts.put( message, seen == null ? 1 : ( seen + 1 ) );
count++;
}
StringBuilder duplicates = null;
for ( Map.Entry<Node, Integer> seen : counts.entrySet() )
{
if ( seen.getValue() > 1 )
{
if ( duplicates == null )
{
duplicates = new StringBuilder(
"These nodes occured multiple times (expected once): " );
}
else
{
duplicates.append( ", " );
}
duplicates.append( seen.getKey() );
duplicates.append( " (" );
duplicates.append( seen.getValue() );
duplicates.append( " times)" );
}
}
if ( duplicates != null )
{
fail( duplicates.toString() );
}
tx.success();
}
@Test
public void canFindMessageNodesThroughGraphMatching()
{
for ( Node message : new VisibleMessagesByFollowedUsers( user ) )
{
verifyMessage( message );
}
tx.success();
}
@Test
public void canFindMessageNodesThroughTraversing()
{
for ( Node message : traverse( user ) )
{
verifyMessage( message );
}
tx.success();
}
private void verifyMessage( Node message )
{
assertNotNull( message );
assertEquals( "visible", message.getProperty( "text", null ) );
count++;
}
private int count;
private Transaction tx;
@Before
public void resetCount()
{
count = 0;
tx = graphdb.beginTx();
}
@After
public void verifyCount()
{
tx.finish();
tx = null;
assertEquals( EXPECTED_VISIBLE_MESSAGE_COUNT, count );
}
private static Iterable<Node> traverse( final Node startNode )
{
return startNode.traverse( Order.BREADTH_FIRST, stopAtDepth( 2 ),
new ReturnableEvaluator()
{
@Override
public boolean isReturnableNode( TraversalPosition pos )
{
Node node = pos.currentNode();
return isMessage( node )
&& isVisibleTo( node, startNode );
}
}, withName( "FOLLOWS" ), Direction.OUTGOING,
withName( "CREATED" ), Direction.OUTGOING );
}
public static StopEvaluator stopAtDepth( final int depth )
{
return new StopEvaluator()
{
@Override
public boolean isStopNode( TraversalPosition currentPos )
{
return currentPos.depth() >= depth;
}
};
}
static boolean isMessage( Node node )
{
return node.hasProperty( "text" );
}
static boolean isVisibleTo( Node message, Node user )
{
for ( Relationship visibility : message.getRelationships(
withName( "IS_VISIBLE_BY" ), Direction.OUTGOING ) )
{
if ( visibility.getEndNode().equals( user ) )
{
return true;
}
}
return false;
}
private static GraphDatabaseService graphdb;
@BeforeClass
public static void setUpDb()
{
graphdb = new GraphDatabaseFactory().newEmbeddedDatabase( TargetDirectory.forTest( TestMatchingOfCircularPattern.class ).makeGraphDbDir().getAbsolutePath() );
Transaction tx = graphdb.beginTx();
try
{
setupGraph();
tx.success();
}
finally
{
tx.finish();
}
}
@AfterClass
public static void stopGraphdb()
{
graphdb.shutdown();
graphdb = null;
}
}
| false
|
community_graph-matching_src_test_java_matching_TestMatchingOfCircularPattern.java
|
3,966
|
{
{
put( "java.lang.String", "String" );
put( "java.util.List", "List (java.util.List)" );
put( "java.util.Date", "Date (java.util.Date)" );
}
};
| false
|
enterprise_ha_src_test_java_jmx_JmxDocTest.java
|
3,967
|
{
{
add( "JMX Server" );
}
};
| false
|
enterprise_ha_src_test_java_jmx_JmxDocTest.java
|
3,968
|
public class JmxDocTest
{
private static final String IFDEF_HTMLOUTPUT = "ifndef::nonhtmloutput[]\n";
private static final String IFDEF_NONHTMLOUTPUT = "ifdef::nonhtmloutput[]\n";
private static final String ENDIF = "endif::nonhtmloutput[]\n";
private static final String BEAN_NAME0 = "name0";
private static final String BEAN_NAME = "name";
private static final List<String> QUERIES = Arrays.asList( new String[]{"org.neo4j:*"} );
private static final String JAVADOC_URL = "http://components.neo4j.org/neo4j-enterprise/{neo4j-version}/apidocs/";
private static final int EXPECTED_NUMBER_OF_BEANS = 13;
private static final Set<String> EXCLUDES = new HashSet<String>()
{
{
add( "JMX Server" );
}
};
private static final Map<String, String> TYPES = new HashMap<String, String>()
{
{
put( "java.lang.String", "String" );
put( "java.util.List", "List (java.util.List)" );
put( "java.util.Date", "Date (java.util.Date)" );
}
};
private static final TargetDirectory dir = TargetDirectory.forTest( JmxDocTest.class );
private static GraphDatabaseService d1b;
@BeforeClass
public static void startDb() throws Exception
{
File storeDir = dir.makeGraphDbDir( /*clean=*/ );
CreateEmptyDb.at( storeDir );
d1b = new HighlyAvailableGraphDatabaseFactory().
newHighlyAvailableDatabaseBuilder( storeDir.getAbsolutePath() )
.setConfig( ClusterSettings.server_id, "1" ).setConfig( "jmx.port", "9913" ).
setConfig( ClusterSettings.initial_hosts, ":5001" ).newGraphDatabase();
}
@AfterClass
public static void stopDb() throws Exception
{
if ( d1b != null )
{
d1b.shutdown();
}
d1b = null;
dir.cleanup();
}
@Test
public void dumpJmxInfo() throws Exception
{
StringBuilder beanList = new StringBuilder( 4096 );
StringBuilder altBeanList = new StringBuilder( 2048 );
altBeanList.append( IFDEF_NONHTMLOUTPUT );
beanList.append( "[[jmx-list]]\n" + ".MBeans exposed by Neo4j\n"
+ IFDEF_HTMLOUTPUT
+ "[options=\"header\", cols=\"m,\"]\n" + "|===\n"
+ "|Name|Description\n" );
MBeanServer mBeanServer = ManagementFactory.getPlatformMBeanServer();
SortedMap<String, ObjectName> neo4jBeans = new TreeMap<String, ObjectName>(
String.CASE_INSENSITIVE_ORDER );
for ( String query : QUERIES )
{
Set<ObjectInstance> beans = mBeanServer.queryMBeans(
new ObjectName( query ), null );
for ( ObjectInstance bean : beans )
{
ObjectName objectName = bean.getObjectName();
String name = objectName.getKeyProperty( BEAN_NAME );
if ( EXCLUDES.contains( name ) )
{
continue;
}
String name0 = objectName.getKeyProperty( BEAN_NAME0 );
if ( name0 != null )
{
name += '/' + name0;
}
neo4jBeans.put( name, bean.getObjectName() );
}
}
assertEquals( "Sanity checking the number of beans found;",
EXPECTED_NUMBER_OF_BEANS, neo4jBeans.size() );
for ( Map.Entry<String, ObjectName> beanEntry : neo4jBeans.entrySet() )
{
ObjectName objectName = beanEntry.getValue();
String name = beanEntry.getKey();
Set<ObjectInstance> mBeans = mBeanServer.queryMBeans( objectName,
null );
if ( mBeans.size() != 1 )
{
throw new IllegalStateException( "Unexpected size ["
+ mBeans.size()
+ "] of query result for ["
+ objectName + "]." );
}
ObjectInstance bean = mBeans.iterator()
.next();
MBeanInfo info = mBeanServer.getMBeanInfo( objectName );
String description = info.getDescription()
.replace( '\n', ' ' );
String id = getId( name );
beanList.append( "|<<" )
.append( id )
.append( ',' )
.append( name )
.append( ">>|" )
.append( description )
.append( '\n' );
altBeanList.append( "* <<" )
.append( id )
.append( ',' )
.append( name )
.append( ">>: " )
.append( description )
.append( '\n' );
writeDetailsToFile( id, objectName, bean, info, description );
}
beanList.append( "|===\n" )
.append( ENDIF );
altBeanList.append( ENDIF )
.append( "\n" );
beanList.append( altBeanList.toString() );
Writer fw = null;
try
{
fw = AsciiDocGenerator.getFW( "target/docs/ops", "JMX List" );
fw.write( beanList.toString() );
}
finally
{
if ( fw != null )
{
fw.close();
}
}
}
private String getId( String name )
{
return "jmx-" + name.replace( ' ', '-' )
.replace( '/', '-' )
.toLowerCase();
}
private void writeDetailsToFile( String id, ObjectName objectName,
ObjectInstance bean, MBeanInfo info, String description )
throws IOException
{
StringBuilder beanInfo = new StringBuilder( 2048 );
String name = objectName.getKeyProperty( BEAN_NAME );
String name0 = objectName.getKeyProperty( BEAN_NAME0 );
if ( name0 != null )
{
name += "/" + name0;
}
MBeanAttributeInfo[] attributes = info.getAttributes();
if ( attributes.length > 0 )
{
beanInfo.append( "[[" )
.append( id )
.append( "]]\n" + ".MBean " )
.append( name )
.append( " (" )
.append( bean.getClassName() )
.append( ") Attributes\n" );
writeAttributesTable( description, beanInfo, attributes, false );
writeAttributesTable( description, beanInfo, attributes, true );
beanInfo.append( "\n" );
}
MBeanOperationInfo[] operations = info.getOperations();
if ( operations.length > 0 )
{
beanInfo.append( ".MBean " )
.append( name )
.append( " (" )
.append( bean.getClassName() )
.append( ") Operations\n" );
writeOperationsTable( beanInfo, operations, false );
writeOperationsTable( beanInfo, operations, true );
beanInfo.append( "\n" );
}
if ( beanInfo.length() > 0 )
{
Writer fw = null;
try
{
fw = AsciiDocGenerator.getFW( "target/docs/ops", id );
fw.write( beanInfo.toString() );
}
finally
{
if ( fw != null )
{
fw.close();
}
}
}
}
private void writeAttributesTable( String description,
StringBuilder beanInfo, MBeanAttributeInfo[] attributes,
boolean nonHtml )
{
addNonHtmlCondition( beanInfo, nonHtml );
beanInfo.append(
"[options=\"header\", cols=\"20m,36,20m,7,7\"]\n" + "|===\n"
+ "|Name|Description|Type|Read|Write\n" + "5.1+^e|" )
.append( description )
.append( '\n' );
SortedSet<String> attributeInfo = new TreeSet<String>(
String.CASE_INSENSITIVE_ORDER );
for ( MBeanAttributeInfo attrInfo : attributes )
{
StringBuilder attributeRow = new StringBuilder( 512 );
String type = getType( attrInfo.getType() );
Descriptor descriptor = attrInfo.getDescriptor();
type = getCompositeType( type, descriptor, nonHtml );
attributeRow.append( '|' )
.append( makeBreakable( attrInfo.getName(), nonHtml ) )
.append( '|' )
.append( attrInfo.getDescription()
.replace( '\n', ' ' ) )
.append( '|' )
.append( type )
.append( '|' )
.append( attrInfo.isReadable() ? "yes" : "no" )
.append( '|' )
.append( attrInfo.isWritable() ? "yes" : "no" )
.append( '\n' );
attributeInfo.add( attributeRow.toString() );
}
for ( String row : attributeInfo )
{
beanInfo.append( row );
}
beanInfo.append( "|===\n" );
beanInfo.append( ENDIF );
}
private void addNonHtmlCondition( StringBuilder beanInfo, boolean nonHtml )
{
if ( nonHtml )
{
beanInfo.append( IFDEF_NONHTMLOUTPUT );
}
else
{
beanInfo.append( IFDEF_HTMLOUTPUT );
}
}
private void writeOperationsTable( StringBuilder beanInfo,
MBeanOperationInfo[] operations, boolean nonHtml )
{
addNonHtmlCondition( beanInfo, nonHtml );
beanInfo.append( "[options=\"header\", cols=\"20m,40,20m,20m\"]\n"
+ "|===\n"
+ "|Name|Description|ReturnType|Signature\n" );
SortedSet<String> operationInfo = new TreeSet<String>(
String.CASE_INSENSITIVE_ORDER );
for ( MBeanOperationInfo operInfo : operations )
{
StringBuilder operationRow = new StringBuilder( 512 );
String type = getType( operInfo.getReturnType() );
Descriptor descriptor = operInfo.getDescriptor();
type = getCompositeType( type, descriptor, nonHtml );
operationRow.append( '|' )
.append( operInfo.getName() )
.append( '|' )
.append( operInfo.getDescription()
.replace( '\n', ' ' ) )
.append( '|' )
.append( type )
.append( '|' );
MBeanParameterInfo[] params = operInfo.getSignature();
if ( params.length > 0 )
{
for ( int i = 0; i < params.length; i++ )
{
MBeanParameterInfo param = params[i];
operationRow.append( param.getType() );
if ( i != (params.length - 1) )
{
operationRow.append( ',' );
}
}
}
else
{
operationRow.append( "(no parameters)" );
}
operationRow.append( '\n' );
operationInfo.add( operationRow.toString() );
}
for ( String row : operationInfo )
{
beanInfo.append( row );
}
beanInfo.append( "|===\n" );
beanInfo.append( ENDIF );
}
private String getCompositeType( String type, Descriptor descriptor,
boolean nonHtml )
{
String newType = type;
if ( "javax.management.openmbean.CompositeData[]".equals( type ) )
{
Object originalType = descriptor.getFieldValue( "originalType" );
if ( originalType != null )
{
newType = getLinkedType( getType( (String) originalType ),
nonHtml );
if ( nonHtml )
{
newType += " as CompositeData[]";
}
else
{
newType += " as http://docs.oracle.com/javase/7/docs/api/javax/management/openmbean/CompositeData.html"
+ "[CompositeData][]";
}
}
}
return newType;
}
private String getType( String type )
{
if ( TYPES.containsKey( type ) )
{
return TYPES.get( type );
}
else if ( type.endsWith( ";" ) )
{
if ( type.startsWith( "[L" ) )
{
return type.substring( 2, type.length() - 1 ) + "[]";
}
else
{
throw new IllegalArgumentException(
"Don't know how to parse this type: " + type );
}
}
return type;
}
private String getLinkedType( String type, boolean nonHtml )
{
if ( !type.startsWith( "org.neo4j" ) )
{
if ( !type.startsWith( "java.util.List<org.neo4j." ) )
{
return type;
}
else
{
String typeInList = type.substring( 15, type.length() - 1 );
return "java.util.List<" + getLinkedType( typeInList, nonHtml )
+ ">";
}
}
else if ( nonHtml )
{
return type;
}
else
{
StringBuilder url = new StringBuilder( 160 );
url.append( JAVADOC_URL );
String typeString = type;
if ( type.endsWith( "[]" ) )
{
typeString = type.substring( 0, type.length() - 2 );
}
url.append( typeString.replace( '.', '/' ) )
.append( ".html[" )
.append( typeString )
.append( "]" );
if ( type.endsWith( "[]" ) )
{
url.append( "[]" );
}
return url.toString();
}
}
private String makeBreakable( String name, boolean nonHtml )
{
if ( nonHtml )
{
return name.replace( "_", "_\u200A" )
.replace( "NumberOf", "NumberOf\u200A" )
.replace( "InUse", "\u200AInUse" )
.replace( "Transactions", "\u200ATransactions" );
}
else
{
return name;
}
}
}
| false
|
enterprise_ha_src_test_java_jmx_JmxDocTest.java
|
3,969
|
{
@Override
public boolean accept( ClusterMemberInfo item )
{
return item.isAlive() == alive;
}
};
| false
|
enterprise_ha_src_test_java_jmx_HaBeanIT.java
|
3,970
|
{
@Override
public boolean accept( ClusterMemberInfo item )
{
return item.isAvailable() == available;
}
};
| false
|
enterprise_ha_src_test_java_jmx_HaBeanIT.java
|
3,971
|
{
@Override
public URI apply( String from )
{
return URI.create( from );
}
}, Arrays.asList( slave.getUris() ) ) ).getPort() );
| false
|
enterprise_ha_src_test_java_jmx_HaBeanIT.java
|
3,972
|
{
@Override
public URI apply( String from )
{
return URI.create( from );
}
}, Arrays.asList( master.getUris() ) ) ).getPort() );
| false
|
enterprise_ha_src_test_java_jmx_HaBeanIT.java
|
3,973
|
{
@Override
protected void config( GraphDatabaseBuilder builder, String clusterName, int serverId )
{
builder.setConfig( "jmx.port", "" + ( 9912 + serverId ) );
builder.setConfig( HaSettings.ha_server, ":" + ( 1136 + serverId ) );
builder.setConfig( GraphDatabaseSettings.forced_kernel_id, testName.getMethodName() + serverId );
}
};
| false
|
enterprise_ha_src_test_java_jmx_HaBeanIT.java
|
3,974
|
public class HaBeanIT
{
@Rule
public final TestName testName = new TestName();
private static final TargetDirectory dir = TargetDirectory.forTest( HaBeanIT.class );
private ManagedCluster cluster;
private ClusterManager clusterManager;
public void startCluster( int size ) throws Throwable
{
clusterManager = new ClusterManager( clusterOfSize( size ), dir.cleanDirectory( testName.getMethodName() ), MapUtil.stringMap() )
{
@Override
protected void config( GraphDatabaseBuilder builder, String clusterName, int serverId )
{
builder.setConfig( "jmx.port", "" + ( 9912 + serverId ) );
builder.setConfig( HaSettings.ha_server, ":" + ( 1136 + serverId ) );
builder.setConfig( GraphDatabaseSettings.forced_kernel_id, testName.getMethodName() + serverId );
}
};
clusterManager.start();
cluster = clusterManager.getDefaultCluster();
cluster.await( ClusterManager.allSeesAllAsAvailable() );
}
@After
public void stopCluster() throws Throwable
{
clusterManager.stop();
}
public Neo4jManager beans( HighlyAvailableGraphDatabase db )
{
return new Neo4jManager( db.getDependencyResolver().resolveDependency( JmxKernelExtension
.class ).getSingleManagementBean( Kernel.class ) );
}
public HighAvailability ha( HighlyAvailableGraphDatabase db )
{
return beans( db ).getHighAvailabilityBean();
}
@Test
public void canGetHaBean() throws Throwable
{
startCluster( 1 );
HighAvailability ha = ha( cluster.getMaster() );
assertNotNull( "could not get ha bean", ha );
assertMasterInformation( ha );
}
private void assertMasterInformation( HighAvailability ha )
{
assertTrue( "single instance should be master and available", ha.isAvailable() );
assertEquals( "single instance should be master", HighAvailabilityModeSwitcher.MASTER, ha.getRole() );
ClusterMemberInfo info = ha.getInstancesInCluster()[0];
assertEquals( "single instance should be the returned instance id", "1", info.getInstanceId() );
}
@Test
public void testLatestTxInfoIsCorrect() throws Throwable
{
startCluster( 1 );
HighlyAvailableGraphDatabase db = cluster.getMaster();
HighAvailability masterHa = ha( db );
long lastCommitted = masterHa.getLastCommittedTxId();
Transaction tx = db.beginTx();
db.createNode();
tx.success();
tx.finish();
assertEquals( lastCommitted + 1, masterHa.getLastCommittedTxId() );
}
@Test
public void testUpdatePullWorksAndUpdatesLastUpdateTime() throws Throwable
{
startCluster( 2 );
HighlyAvailableGraphDatabase master = cluster.getMaster();
HighlyAvailableGraphDatabase slave = cluster.getAnySlave();
Transaction tx = master.beginTx();
master.createNode();
tx.success();
tx.finish();
HighAvailability slaveBean = ha( slave );
DateFormat format = new SimpleDateFormat( "yyyy-MM-DD kk:mm:ss.SSSZZZZ" );
// To begin with, no updates
assertEquals( "N/A", slaveBean.getLastUpdateTime() );
slaveBean.update();
long timeUpdated = format.parse( slaveBean.getLastUpdateTime() ).getTime();
assertTrue( timeUpdated > 0 );
}
@Test
public void testAfterGentleMasterSwitchClusterInfoIsCorrect() throws Throwable
{
startCluster( 3 );
RepairKit masterShutdown = cluster.shutdown( cluster.getMaster() );
cluster.await( ClusterManager.masterAvailable() );
cluster.await( ClusterManager.masterSeesSlavesAsAvailable( 1 ) );
for ( HighlyAvailableGraphDatabase db : cluster.getAllMembers() )
{
assertEquals( 2, ha( db ).getInstancesInCluster().length );
}
masterShutdown.repair();
cluster.await( ClusterManager.allSeesAllAsAvailable() );
for ( HighlyAvailableGraphDatabase db : cluster.getAllMembers() )
{
HighAvailability bean = ha( db );
assertEquals( 3, bean.getInstancesInCluster().length );
for ( ClusterMemberInfo info : bean.getInstancesInCluster() )
{
assertTrue( "every instance should be available", info.isAvailable() );
assertTrue( "every instances should have at least one role", info.getRoles().length > 0 );
if ( HighAvailabilityModeSwitcher.MASTER.equals( info.getRoles()[0] ) )
{
assertEquals( "coordinator should be master",
HighAvailabilityModeSwitcher.MASTER, info.getHaRole() );
}
else
{
assertEquals( "Either master or slave, no other way",
HighAvailabilityModeSwitcher.SLAVE, info.getRoles()[0] );
assertEquals( "instance " + info.getInstanceId() + " is cluster slave but HA master",
HighAvailabilityModeSwitcher.SLAVE, info.getHaRole() );
}
for ( String uri : info.getUris() )
{
assertTrue( "roles should contain URIs", uri.startsWith( "ha://" ) );
}
}
}
}
@Test
public void testAfterHardMasterSwitchClusterInfoIsCorrect() throws Throwable
{
startCluster( 3 );
RepairKit masterShutdown = cluster.fail( cluster.getMaster() );
cluster.await( ClusterManager.masterAvailable() );
cluster.await( ClusterManager.masterSeesSlavesAsAvailable( 1 ) );
for ( HighlyAvailableGraphDatabase db : cluster.getAllMembers() )
{
if ( db.getInstanceState().equals( HighAvailabilityMemberState.PENDING.name() ))
{
continue;
}
// Instance that was hard killed will still be in the cluster
assertEquals( 3, ha( db ).getInstancesInCluster().length );
}
masterShutdown.repair();
cluster.await( ClusterManager.masterAvailable() );
cluster.await( ClusterManager.masterSeesSlavesAsAvailable( 2 ) );
for ( HighlyAvailableGraphDatabase db : cluster.getAllMembers() )
{
int mastersFound = 0;
HighAvailability bean = ha( db );
assertEquals( 3, bean.getInstancesInCluster().length );
for ( ClusterMemberInfo info : bean.getInstancesInCluster() )
{
assertTrue( bean.getInstanceId() + ": every instance should be available: " + info.getInstanceId(),
info.isAvailable() );
for ( String role : info.getRoles() )
{
if (role.equals( HighAvailabilityModeSwitcher.MASTER )) mastersFound++;
}
}
assertEquals( 1, mastersFound );
}
}
@Test
public void canGetBranchedStoreBean() throws Throwable
{
startCluster( 1 );
BranchedStore bs = beans( cluster.getMaster() ).getBranchedStoreBean();
assertNotNull( "could not get branched store bean", bs );
assertEquals( "no branched stores for new db", 0,
bs.getBranchedStores().length );
}
@Test
@Ignore //Temporary ignore since this doesn't work well on Linux 2011-04-08
public void canGetInstanceConnectionInformation() throws Throwable
{
startCluster( 1 );
ClusterMemberInfo[] clusterMembers = ha( cluster.getMaster() ).getInstancesInCluster();
assertNotNull( clusterMembers );
assertEquals( 1, clusterMembers.length );
ClusterMemberInfo clusterMember = clusterMembers[0];
assertNotNull( clusterMember );
// String address = clusterMember.getAddress();
// assertNotNull( "No JMX address for instance", address );
String id = clusterMember.getInstanceId();
assertNotNull( "No instance id", id );
}
@Test
@Ignore //Temporary ignore since this doesn't work well on Linux 2011-04-08
public void canConnectToInstance() throws Throwable
{
startCluster( 1 );
ClusterMemberInfo[] clusterMembers = ha( cluster.getMaster() ).getInstancesInCluster();
assertNotNull( clusterMembers );
assertEquals( 1, clusterMembers.length );
ClusterMemberInfo clusterMember = clusterMembers[0];
assertNotNull( clusterMember );
Pair<Neo4jManager, HighAvailability> proc = clusterMember.connect();
assertNotNull( "could not connect", proc );
Neo4jManager neo4j = proc.first();
HighAvailability ha = proc.other();
assertNotNull( neo4j );
assertNotNull( ha );
clusterMembers = ha.getInstancesInCluster();
assertNotNull( clusterMembers );
assertEquals( 1, clusterMembers.length );
// assertEquals( clusterMember.getAddress(), clusterMembers[0].getAddress() );
assertEquals( clusterMember.getInstanceId(), clusterMembers[0].getInstanceId() );
}
@Test
public void joinedInstanceShowsUpAsSlave() throws Throwable
{
startCluster( 2 );
ClusterMemberInfo[] instancesInCluster = ha( cluster.getMaster() ).getInstancesInCluster();
assertEquals( 2, instancesInCluster.length );
ClusterMemberInfo[] secondInstancesInCluster = ha( cluster.getAnySlave() ).getInstancesInCluster();
assertEquals( 2, secondInstancesInCluster.length );
assertMasterAndSlaveInformation( instancesInCluster );
assertMasterAndSlaveInformation( secondInstancesInCluster );
}
@Test
public void leftInstanceDisappearsFromMemberList() throws Throwable
{
// Start the second db and make sure it's visible in the member list.
// Then shut it down to see if it disappears from the member list again.
startCluster( 3 );
assertEquals( 3, ha( cluster.getAnySlave() ).getInstancesInCluster().length );
cluster.shutdown( cluster.getAnySlave() );
cluster.await( masterSeesMembers( 2 ) );
assertEquals( 2, ha( cluster.getMaster() ).getInstancesInCluster().length );
assertMasterInformation( ha( cluster.getMaster() ) );
}
@Test
public void failedMemberIsStillInMemberListAlthoughFailed() throws Throwable
{
startCluster( 3 );
assertEquals( 3, ha( cluster.getAnySlave() ).getInstancesInCluster().length );
// Fail the instance
HighlyAvailableGraphDatabase failedDb = cluster.getAnySlave();
RepairKit dbFailure = cluster.fail( failedDb );
await( ha( cluster.getMaster() ), dbAlive( false ) );
await( ha( cluster.getAnySlave( failedDb )), dbAlive( false ) );
// Repair the failure and come back
dbFailure.repair();
for ( HighlyAvailableGraphDatabase db : cluster.getAllMembers() )
{
await( ha( db ), dbAvailability( true ) );
await( ha( db ), dbAlive( true ) );
}
}
private void assertMasterAndSlaveInformation( ClusterMemberInfo[] instancesInCluster ) throws Exception
{
ClusterMemberInfo master = member( instancesInCluster, 1 );
assertEquals( 1137, ServerUtil.getUriForScheme( "ha", Iterables.map( new Function<String, URI>()
{
@Override
public URI apply( String from )
{
return URI.create( from );
}
}, Arrays.asList( master.getUris() ) ) ).getPort() );
assertEquals( HighAvailabilityModeSwitcher.MASTER, master.getHaRole() );
ClusterMemberInfo slave = member( instancesInCluster, 2 );
assertEquals( 1138, ServerUtil.getUriForScheme( "ha", Iterables.map( new Function<String, URI>()
{
@Override
public URI apply( String from )
{
return URI.create( from );
}
}, Arrays.asList( slave.getUris() ) ) ).getPort() );
assertEquals( HighAvailabilityModeSwitcher.SLAVE, slave.getHaRole() );
assertTrue( "Slave not available", slave.isAvailable() );
}
private ClusterMemberInfo member( ClusterMemberInfo[] members, int instanceId )
{
for ( ClusterMemberInfo member : members )
{
if ( member.getInstanceId().equals( Integer.toString( instanceId ) ) )
{
return member;
}
}
fail( "Couldn't find cluster member with cluster URI port " + instanceId + " among " + Arrays.toString(
members ) );
return null; // it will never get here.
}
private void await( HighAvailability ha, Predicate<ClusterMemberInfo> predicate ) throws InterruptedException
{
long end = System.currentTimeMillis() + SECONDS.toMillis( 300 );
boolean conditionMet = false;
while ( System.currentTimeMillis() < end )
{
conditionMet = predicate.accept( member( ha.getInstancesInCluster(), 2 ) );
if ( conditionMet )
{
return;
}
Thread.sleep( 500 );
}
fail( "Failed instance didn't show up as such in JMX" );
}
private Predicate<ClusterMemberInfo> dbAvailability( final boolean available )
{
return new Predicate<ClusterMemberInfo>()
{
@Override
public boolean accept( ClusterMemberInfo item )
{
return item.isAvailable() == available;
}
};
}
private Predicate<ClusterMemberInfo> dbAlive( final boolean alive )
{
return new Predicate<ClusterMemberInfo>()
{
@Override
public boolean accept( ClusterMemberInfo item )
{
return item.isAlive() == alive;
}
};
}
}
| false
|
enterprise_ha_src_test_java_jmx_HaBeanIT.java
|
3,975
|
{
@Override
public Node create( GraphDatabaseService graphdb )
{
Node me = graphdb.createNode();
Node stockholm = graphdb.createNode(), gothenburg = graphdb.createNode();
stockholm.setProperty( "name", "Stockholm" );
gothenburg.setProperty( "name", "Gothenburg" );
Node andy = friend( me, graphdb.createNode(), "Andy", 10,
stockholm );
friend( me, graphdb.createNode(), "Bob", 5, stockholm );
Node cecilia = friend( me, graphdb.createNode(), "Cecilia", 2,
stockholm );
andy.createRelationshipTo( cecilia, FriendshipTypes.FRIEND ).setProperty(
"since", yearsAgo( 10 ) );
friend( me, graphdb.createNode(), "David", 10, gothenburg );
return me;
}
Node friend( Node me, Node friend, String name, int knownForYears,
Node place )
{
friend.setProperty( "name", name );
me.createRelationshipTo( friend, FriendshipTypes.FRIEND ).setProperty(
"since", yearsAgo( knownForYears ) );
friend.createRelationshipTo( place, FriendshipTypes.LIVES_IN );
return friend;
}
Calendar calendar = Calendar.getInstance();
long yearsAgo( int years )
{
return new GregorianCalendar( calendar.get( Calendar.YEAR )
- years,
calendar.get( Calendar.MONTH ),
calendar.get( Calendar.DATE ) ).getTime().getTime();
}
} );
| false
|
community_graph-matching_src_test_java_examples_TestSiteIndexExamples.java
|
3,976
|
{
@Override
public Node[] create( GraphDatabaseService graphdb )
{
Node[] nodes = new Node[5];
for ( int i = 0; i < nodes.length; i++ )
{
nodes[i] = graphdb.createNode();
}
for ( int i = 0; i < 3; i++ )
{
Node node = graphdb.createNode();
for ( int j = 0; j < nodes.length; j++ )
{
nodes[j].createRelationshipTo( node, type );
}
}
return nodes;
}
} );
| false
|
community_graph-matching_src_test_java_examples_TestSiteIndexExamples.java
|
3,977
|
/**
 * Kernel extension exposing online backup. When
 * {@code OnlineBackupSettings.online_backup_enabled} is set, {@link #start()} boots a
 * {@link BackupServer}; in an HA deployment it additionally announces the {@link #BACKUP}
 * role whenever this instance is elected master and retracts it on stop or demotion.
 */
public class OnlineBackupKernelExtension implements Lifecycle
{
    // Factory abstraction so callers/tests can supply their own backup implementation.
    public interface BackupProvider
    {
        TheBackupInterface newBackup();
    }
    // This is the role used to announce that a cluster member can handle backups
    public static final String BACKUP = "backup";
    // In this context, the IPv4 zero-address is understood as "any address on this host."
    public static final String INADDR_ANY = "0.0.0.0";
    private Config config;
    private GraphDatabaseAPI graphDatabaseAPI;
    private Logging logging;
    private final Monitors monitors;
    // Non-null only between a successful start() (with backup enabled) and stop().
    private BackupServer server;
    private final BackupProvider backupProvider;
    // Cluster URI this member is listening at; used to substitute INADDR_ANY when
    // building the advertised backup URI in createBackupURI().
    private volatile URI me;
    /** Convenience constructor wiring a default {@link BackupImpl}-based provider. */
    public OnlineBackupKernelExtension( Config config, final GraphDatabaseAPI graphDatabaseAPI, final XaDataSourceManager
            xaDataSourceManager, final KernelPanicEventGenerator kpeg, final Logging logging, final Monitors monitors )
    {
        this(config, graphDatabaseAPI, new BackupProvider()
        {
            @Override
            public TheBackupInterface newBackup()
            {
                return new BackupImpl( logging.getMessagesLog( BackupImpl.class ), new BackupImpl.SPI()
                {
                    @Override
                    public String getStoreDir()
                    {
                        return graphDatabaseAPI.getStoreDir();
                    }
                    @Override
                    public StoreId getStoreId()
                    {
                        return graphDatabaseAPI.storeId();
                    }
                }, xaDataSourceManager, kpeg, monitors );
            }
        }, monitors, logging);
    }
    public OnlineBackupKernelExtension( Config config, GraphDatabaseAPI graphDatabaseAPI, BackupProvider provider,
                                        Monitors monitors, Logging logging )
    {
        this.config = config;
        this.graphDatabaseAPI = graphDatabaseAPI;
        this.backupProvider = provider;
        this.monitors = monitors;
        this.logging = logging;
    }
    @Override
    public void init() throws Throwable
    {
    }
    @Override
    public void start() throws Throwable
    {
        // Only boot the backup server if online backup is enabled in the config.
        if ( config.<Boolean>get( OnlineBackupSettings.online_backup_enabled ) )
        {
            try
            {
                server = new BackupServer( backupProvider.newBackup(), config.get( online_backup_server ),
                        logging, monitors );
                server.init();
                server.start();
                try
                {
                    // Register for cluster events so the backup role gets announced or
                    // retracted as this member gains or loses mastership.
                    graphDatabaseAPI.getDependencyResolver().resolveDependency( ClusterMemberEvents.class).addClusterMemberListener(
                            new StartBindingListener() );
                    graphDatabaseAPI.getDependencyResolver().resolveDependency( BindingNotifier.class ).addBindingListener( new BindingListener()
                    {
                        @Override
                        public void listeningAt( URI myUri )
                        {
                            // Remember our own cluster URI for createBackupURI().
                            me = myUri;
                        }
                    } );
                }
                catch ( NoClassDefFoundError e )
                {
                    // Not running HA
                }
                catch ( IllegalArgumentException e ) // NOPMD
                {
                    // HA available, but not used
                }
            }
            catch ( Throwable t )
            {
                throw new RuntimeException( t );
            }
        }
    }
    @Override
    public void stop() throws Throwable
    {
        if ( server != null )
        {
            server.stop();
            server.shutdown();
            server = null;
            try
            {
                // Best effort: retract the backup role from the cluster, if running HA.
                ClusterMemberAvailability client = getClusterMemberAvailability();
                client.memberIsUnavailable( BACKUP );
            }
            catch ( NoClassDefFoundError e )
            {
                // Not running HA
            }
            catch ( IllegalArgumentException e ) // NOPMD
            {
                // HA available, but not used
            }
        }
    }
    @Override
    public void shutdown() throws Throwable
    {
    }
    /**
     * Announces the backup role when this instance becomes master and retracts it
     * when it stops being master.
     */
    private class StartBindingListener extends ClusterMemberListener.Adapter
    {
        @Override
        public void memberIsAvailable( String role, InstanceId available, URI availableAtUri )
        {
            // Only react to events about ourselves, and only for the master role.
            if ( graphDatabaseAPI.getDependencyResolver().resolveDependency( ClusterClient.class ).
                    getServerId().equals( available ) && "master".equals( role ) )
            {
                // It was me and i am master - yey!
                {
                    try
                    {
                        URI backupUri = createBackupURI();
                        ClusterMemberAvailability ha = getClusterMemberAvailability();
                        ha.memberIsAvailable( BACKUP, backupUri );
                    }
                    catch ( Throwable t )
                    {
                        throw new RuntimeException( t );
                    }
                }
            }
        }
        @Override
        public void memberIsUnavailable( String role, InstanceId unavailableId )
        {
            // Mirror image of memberIsAvailable: retract the backup role for ourselves.
            if ( graphDatabaseAPI.getDependencyResolver().resolveDependency( ClusterClient.class ).
                    getServerId().equals( unavailableId ) && "master".equals( role ) )
            {
                // It was me and i am master - yey!
                {
                    try
                    {
                        ClusterMemberAvailability ha = getClusterMemberAvailability();
                        ha.memberIsUnavailable( BACKUP );
                    }
                    catch ( Throwable t )
                    {
                        throw new RuntimeException( t );
                    }
                }
            }
        }
    }
    private ClusterMemberAvailability getClusterMemberAvailability() {
        return graphDatabaseAPI.getDependencyResolver().resolveDependency( ClusterMemberAvailability.class );
    }
    /** Builds the advertised backup URI, replacing a wildcard bind address with our cluster host. */
    private URI createBackupURI() {
        String hostString = ServerUtil.getHostString( server.getSocketAddress() );
        String host = hostString.contains( INADDR_ANY ) ? me.getHost() : hostString;
        int port = server.getSocketAddress().getPort();
        return URI.create("backup://" + host + ":" + port);
    }
}
| false
|
enterprise_backup_src_main_java_org_neo4j_backup_OnlineBackupKernelExtension.java
|
3,978
|
{
@Override
public TheBackupInterface newBackup()
{
return new BackupImpl( logging.getMessagesLog( BackupImpl.class ), new BackupImpl.SPI()
{
@Override
public String getStoreDir()
{
return graphDatabaseAPI.getStoreDir();
}
@Override
public StoreId getStoreId()
{
return graphDatabaseAPI.storeId();
}
}, xaDataSourceManager, kpeg, monitors );
}
}, monitors, logging);
| false
|
enterprise_backup_src_main_java_org_neo4j_backup_OnlineBackupKernelExtension.java
|
3,979
|
{
@Override
public String getStoreDir()
{
return graphDatabaseAPI.getStoreDir();
}
@Override
public StoreId getStoreId()
{
return graphDatabaseAPI.storeId();
}
}, xaDataSourceManager, kpeg, monitors );
| false
|
enterprise_backup_src_main_java_org_neo4j_backup_OnlineBackupKernelExtension.java
|
3,980
|
/**
 * Represents the root clusters tag in the discovery XML file: a timestamped
 * collection of named clusters, each listing its members.
 */
public class Clusters
{
    private final List<Cluster> clusters = new ArrayList<Cluster>();
    private long timestamp;

    /** Records the timestamp of the discovery document this instance was parsed from. */
    public void setTimestamp( long timestamp )
    {
        this.timestamp = timestamp;
    }

    public long getTimestamp()
    {
        return timestamp;
    }

    /** Returns the live, mutable list of clusters; callers add clusters directly to it. */
    public List<Cluster> getClusters()
    {
        return clusters;
    }

    /**
     * Finds a cluster by name.
     *
     * @return the first cluster whose name equals {@code name}, or {@code null} if none does
     */
    public Cluster getCluster( String name )
    {
        for ( Cluster cluster : clusters )
        {
            if ( cluster.getName().equals( name ) )
            {
                return cluster;
            }
        }
        return null;
    }

    @Override
    public boolean equals( Object o )
    {
        if ( this == o )
        {
            return true;
        }
        if ( o == null || getClass() != o.getClass() )
        {
            return false;
        }
        // Timestamp is deliberately excluded: equality is defined by content only.
        return clusters.equals( ((Clusters) o).clusters );
    }

    @Override
    public int hashCode()
    {
        return clusters.hashCode();
    }

    /**
     * Represents the cluster tag in the discovery XML file.
     */
    public static class Cluster
    {
        private final String name;
        private final List<Member> members = new ArrayList<Member>();

        public Cluster( String name )
        {
            this.name = name;
        }

        public String getName()
        {
            return name;
        }

        /** Returns the live, mutable member list. */
        public List<Member> getMembers()
        {
            return members;
        }

        @Override
        public boolean equals( Object o )
        {
            if ( this == o )
            {
                return true;
            }
            if ( o == null || getClass() != o.getClass() )
            {
                return false;
            }
            Cluster cluster = (Cluster) o;
            return name.equals( cluster.name ) && members.equals( cluster.members );
        }

        @Override
        public int hashCode()
        {
            return 31 * name.hashCode() + members.hashCode();
        }

        /**
         * @return whether any member's host string occurs as a substring of
         *         {@code serverId}'s textual form (a deliberately loose match)
         */
        public boolean contains( URI serverId )
        {
            return getByUri( serverId ) != null;
        }

        /**
         * @return the first member whose host occurs as a substring of
         *         {@code serverId}'s textual form, or {@code null} if none matches
         */
        public Member getByUri( URI serverId )
        {
            for ( Member member : members )
            {
                if ( serverId.toString().contains( member.getHost() ) )
                {
                    return member;
                }
            }
            return null;
        }
    }

    /**
     * Represents a member tag in the discovery XML file.
     */
    public static class Member
    {
        // Immutable after construction; previously these were mutable non-final fields.
        private final String host;
        private final boolean fullHaMember;

        public Member( int port, boolean fullHaMember )
        {
            this( localhost() + ":" + port, fullHaMember );
        }

        public Member( String host )
        {
            this( host, true );
        }

        public Member( String host, boolean fullHaMember )
        {
            this.host = host;
            this.fullHaMember = fullHaMember;
        }

        public boolean isFullHaMember()
        {
            return fullHaMember;
        }

        private static String localhost()
        {
            try
            {
                return InetAddress.getLocalHost().getHostAddress();
            }
            catch ( UnknownHostException e )
            {
                throw new RuntimeException( e );
            }
        }

        public String getHost()
        {
            return host;
        }

        @Override
        public boolean equals( Object o )
        {
            if ( this == o )
            {
                return true;
            }
            if ( o == null || getClass() != o.getClass() )
            {
                return false;
            }
            // fullHaMember is deliberately excluded: identity is the host address alone.
            return host.equals( ((Member) o).host );
        }

        @Override
        public int hashCode()
        {
            return host.hashCode();
        }
    }
}
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_client_Clusters.java
|
3,981
|
{
@Override
public URI apply( HostnamePort member )
{
return URI.create( "cluster://" + resolvePortOnlyHost( member ) );
}
}, hosts));
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_client_ClusterJoin.java
|
3,982
|
{
@Override
public void leftCluster()
{
cluster.removeClusterListener( this );
semaphore.release();
}
} );
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_client_ClusterJoin.java
|
3,983
|
/**
 * Lifecycle component that joins (or, when allowed, creates) a cluster on start,
 * based on the initial hosts and cluster name from {@link Configuration}, and
 * leaves the cluster on stop.
 */
public class ClusterJoin
        extends LifecycleAdapter
{
    public interface Configuration
    {
        List<HostnamePort> getInitialHosts();

        String getClusterName();

        boolean isAllowedToCreateCluster();

        long getClusterJoinTimeout();
    }

    private final Configuration config;
    private final ProtocolServer protocolServer;
    private final StringLogger logger;
    private final ConsoleLogger console;
    private Cluster cluster;

    public ClusterJoin( Configuration config, ProtocolServer protocolServer, Logging logger )
    {
        this.config = config;
        this.protocolServer = protocolServer;
        this.logger = logger.getMessagesLog( getClass() );
        this.console = logger.getConsoleLog( getClass() );
    }

    @Override
    public void init() throws Throwable
    {
        cluster = protocolServer.newClient( Cluster.class );
    }

    @Override
    public void start() throws Throwable
    {
        cluster = protocolServer.newClient( Cluster.class );
        joinByConfig();
    }

    @Override
    public void stop()
    {
        final Semaphore semaphore = new Semaphore( 0 );

        // Release the semaphore once we have actually left the cluster.
        cluster.addClusterListener( new ClusterListener.Adapter()
        {
            @Override
            public void leftCluster()
            {
                cluster.removeClusterListener( this );
                semaphore.release();
            }
        } );

        cluster.leave();

        try
        {
            if ( !semaphore.tryAcquire( 60, TimeUnit.SECONDS ) )
            {
                logger.info( "Unable to leave cluster, timeout" );
            }
        }
        catch ( InterruptedException e )
        {
            // Restore the interrupt flag (Thread.interrupted() would clear it) so callers
            // further up the stack can still observe the interruption.
            Thread.currentThread().interrupt();
            logger.warn( "Unable to leave cluster, interrupted", e );
        }
    }

    /**
     * Joins the cluster given by the configured initial hosts, retrying until a join
     * succeeds or (when allowed) a new cluster is created. With no initial hosts a
     * new cluster is created immediately.
     *
     * @throws TimeoutException if a join attempt with a configured timeout expires
     */
    private void joinByConfig() throws TimeoutException
    {
        List<HostnamePort> hosts = config.getInitialHosts();

        cluster.addClusterListener( new UnknownJoiningMemberWarning( hosts ) );

        if ( hosts == null || hosts.size() == 0 )
        {
            console.log( "No cluster hosts specified. Creating cluster " + config.getClusterName() );
            cluster.create( config.getClusterName() );
        }
        else
        {
            URI[] memberURIs = Iterables.toArray( URI.class,
                    Iterables.map( new Function<HostnamePort, URI>()
                    {
                        @Override
                        public URI apply( HostnamePort member )
                        {
                            return URI.create( "cluster://" + resolvePortOnlyHost( member ) );
                        }
                    }, hosts ) );

            while ( true )
            {
                console.log( "Attempting to join cluster of " + hosts.toString() );
                Future<ClusterConfiguration> clusterConfig =
                        cluster.join( this.config.getClusterName(), memberURIs );
                try
                {
                    // A TimeoutException from the timed get deliberately propagates to the caller.
                    ClusterConfiguration joined = config.getClusterJoinTimeout() > 0
                            ? clusterConfig.get( config.getClusterJoinTimeout(), TimeUnit.MILLISECONDS )
                            : clusterConfig.get();
                    console.log( "Joined cluster:" + joined );
                    return;
                }
                catch ( InterruptedException e )
                {
                    console.log( "Could not join cluster, interrupted. Retrying..." );
                }
                catch ( ExecutionException e )
                {
                    logger.debug( "Could not join cluster " + this.config.getClusterName() );
                    if ( e.getCause() instanceof IllegalStateException )
                    {
                        throw ((IllegalStateException) e.getCause());
                    }

                    if ( config.isAllowedToCreateCluster() )
                    {
                        // Failed to join cluster, create new one
                        console.log( "Could not join cluster of " + hosts.toString() );
                        console.log( format( "Creating new cluster with name [%s]...", config.getClusterName() ) );
                        cluster.create( config.getClusterName() );
                        break;
                    }

                    console.log( "Could not join cluster, timed out. Retrying..." );
                }
            }
        }
    }

    /** Fills in the local host address for initial-host entries that specify only a port. */
    private String resolvePortOnlyHost( HostnamePort host )
    {
        try
        {
            return host.toString( InetAddress.getLocalHost().getHostAddress() );
        }
        catch ( UnknownHostException e )
        {
            throw new RuntimeException( e );
        }
    }

    /** Logs a warning when a member not listed among the initial hosts joins the cluster. */
    private class UnknownJoiningMemberWarning extends ClusterListener.Adapter
    {
        private final List<HostnamePort> initialHosts;

        private UnknownJoiningMemberWarning( List<HostnamePort> initialHosts )
        {
            this.initialHosts = initialHosts;
        }

        @Override
        public void joinedCluster( InstanceId member, URI uri )
        {
            for ( HostnamePort host : initialHosts )
            {
                if ( host.matches( uri ) )
                {
                    return;
                }
            }
            logger.info( "Member " + member + "("+uri+") joined cluster but was not part of initial hosts (" +
                    initialHosts + ")" );
        }

        @Override
        public void leftCluster()
        {
            // Deregister once we are no longer part of the cluster.
            cluster.removeClusterListener( this );
        }
    }
}
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_client_ClusterJoin.java
|
3,984
|
{
@Override
public void run()
{
long now = System.currentTimeMillis();
server.getTimeouts().tick( now );
}
}, 0, 10, TimeUnit.MILLISECONDS );
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_client_ClusterClient.java
|
3,985
|
/**
 * Drives time-based state transitions: ticks the enclosing server's timeouts
 * every 10 ms while started. NOTE(review): relies on an outer {@code server}
 * field declared outside this class.
 */
public class TimeoutTrigger implements Lifecycle
{
    private ScheduledExecutorService scheduler;
    private ScheduledFuture<?> tickFuture;
    @Override
    public void init() throws Throwable
    {
        // Prime the timeout bookkeeping with the current time before scheduling begins.
        server.getTimeouts().tick( System.currentTimeMillis() );
    }
    @Override
    public void start() throws Throwable
    {
        scheduler = Executors.newSingleThreadScheduledExecutor(
                new DaemonThreadFactory( "timeout-clusterClient" ) );
        // Fixed-delay (not fixed-rate) ticking, every 10 ms starting immediately.
        tickFuture = scheduler.scheduleWithFixedDelay( new Runnable()
        {
            @Override
            public void run()
            {
                long now = System.currentTimeMillis();
                server.getTimeouts().tick( now );
            }
        }, 0, 10, TimeUnit.MILLISECONDS );
    }
    @Override
    public void stop() throws Throwable
    {
        // Cancel the periodic task (interrupting if mid-run) and kill the scheduler thread.
        tickFuture.cancel( true );
        scheduler.shutdownNow();
    }
    @Override
    public void shutdown() throws Throwable
    {
    }
}
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_client_ClusterClient.java
|
3,986
|
{
@Override
public List<HostnamePort> getInitialHosts()
{
return config.getInitialHosts();
}
@Override
public String getClusterName()
{
return config.getClusterName();
}
@Override
public boolean isAllowedToCreateCluster()
{
return config.isAllowedToCreateCluster();
}
@Override
public long getClusterJoinTimeout()
{
return config.clusterJoinTimeout();
}
}, server, logging ) );
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_client_ClusterClient.java
|
3,987
|
{
@Override
public ExecutorService newInstance()
{
return Executors.newSingleThreadExecutor( new NamedThreadFactory( "State machine" ) );
}
} );
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_client_ClusterClient.java
|
3,988
|
{
@Override
public HostnamePort clusterServer()
{
return config.getAddress();
}
@Override
public int defaultPort()
{
return 5001;
}
@Override
public String name()
{
return config.name();
}
}, logging );
| false
|
enterprise_cluster_src_main_java_org_neo4j_cluster_client_ClusterClient.java
|
3,989
|
/**
 * Value holder describing the expected state of a cluster in a test: the member
 * URIs, the mapping from role name to the instance expected to hold that role,
 * and the set of instances expected to be marked as failed.
 */
public class VerifyInstanceConfiguration
{
    // Expected cluster member URIs.
    public final List<URI> members;
    // Role name -> instance expected to hold that role.
    public final Map<String, InstanceId> roles;
    // Instances expected to be failed.
    public final Set<InstanceId> failed;
    public VerifyInstanceConfiguration( List<URI> members, Map<String, InstanceId> roles, Set<InstanceId> failed )
    {
        // References are stored as-is (no defensive copies); callers should not
        // mutate the collections after construction.
        this.members = members;
        this.roles = roles;
        this.failed = failed;
    }
}
| false
|
enterprise_cluster_src_test_java_org_neo4j_cluster_VerifyInstanceConfiguration.java
|
3,990
|
/**
 * In-memory message source for tests: forwards each incoming message to every
 * registered processor, stopping at (and reporting) the first one that rejects it.
 */
public class TestMessageSource
        implements MessageSource, MessageProcessor
{
    Iterable<MessageProcessor> listeners = Listeners.newListeners();

    @Override
    public void addMessageProcessor( MessageProcessor listener )
    {
        // Listeners is an immutable snapshot; adding produces a new collection.
        listeners = Listeners.addListener( listener, listeners );
    }

    @Override
    public boolean process( Message<? extends MessageType> message )
    {
        boolean accepted = true;
        for ( MessageProcessor listener : listeners )
        {
            accepted = listener.process( message );
            if ( !accepted )
            {
                // First rejection short-circuits delivery to the remaining processors.
                break;
            }
        }
        return accepted;
    }
}
| false
|
enterprise_cluster_src_test_java_org_neo4j_cluster_TestProtocolServer.java
|
3,991
|
/**
 * In-memory message sender for tests: stamps each message with this server's URI
 * as the FROM header and queues it until {@link #sendMessages(List)} drains the queue.
 * NOTE(review): relies on an outer {@code serverUri} field declared outside this class.
 */
public class TestMessageSender implements MessageSender
{
    List<Message> messages = new ArrayList<Message>();

    @Override
    public void process( List<Message<? extends MessageType>> messages )
    {
        // Delegate every element to the single-message overload.
        for ( int i = 0; i < messages.size(); i++ )
        {
            process( messages.get( i ) );
        }
    }

    @Override
    public boolean process( Message<? extends MessageType> message )
    {
        String from = serverUri.toASCIIString();
        message.setHeader( Message.FROM, from );
        messages.add( message );
        return true;
    }

    /** Returns the live queue of messages stamped so far. */
    public List<Message> getMessages()
    {
        return messages;
    }

    /** Moves all queued messages into {@code output} and empties the queue. */
    public void sendMessages( List<Message> output )
    {
        output.addAll( messages );
        messages.clear();
    }
}
| false
|
enterprise_cluster_src_test_java_org_neo4j_cluster_TestProtocolServer.java
|
3,992
|
/**
 * Test harness around a {@link ProtocolServer}: replaces the network with in-memory
 * message source/sender pairs and lets tests advance time manually via {@link #tick(long)}.
 */
public class TestProtocolServer
        implements MessageProcessor
{
    // Incoming side: messages pushed via process(Message) fan out to the server.
    protected final TestMessageSource receiver;
    // Outgoing side: messages the server emits are queued until sendMessages() drains them.
    protected final TestMessageSender sender;
    protected ProtocolServer server;
    private final DelayedDirectExecutor stateMachineExecutor;
    private URI serverUri;
    public TestProtocolServer( TimeoutStrategy timeoutStrategy, ProtocolServerFactory factory, URI serverUri,
                               InstanceId instanceId, AcceptorInstanceStore acceptorInstanceStore,
                               ElectionCredentialsProvider electionCredentialsProvider )
    {
        this.serverUri = serverUri;
        this.receiver = new TestMessageSource();
        this.sender = new TestMessageSender();
        // Deferred executor so state-machine work is drained deterministically in tick().
        stateMachineExecutor = new DelayedDirectExecutor();
        server = factory.newProtocolServer( instanceId, timeoutStrategy, receiver, sender, acceptorInstanceStore,
                electionCredentialsProvider, stateMachineExecutor, new ObjectStreamFactory(), new ObjectStreamFactory() );
        server.listeningAt( serverUri );
    }
    public ProtocolServer getServer()
    {
        return server;
    }
    public Timeouts getTimeouts()
    {
        return server.getTimeouts();
    }
    @Override
    public boolean process( Message message )
    {
        // Inject an incoming message as if it had arrived over the network.
        return receiver.process( message );
    }
    /** Drains all queued outgoing messages into {@code output}. */
    public void sendMessages( List<Message> output )
    {
        sender.sendMessages( output );
    }
    public <T> T newClient( Class<T> clientProxyInterface )
    {
        return server.newClient( clientProxyInterface );
    }
    /** Registers a listener on the underlying server; returns this for chaining. */
    public TestProtocolServer addStateTransitionListener( StateTransitionListener listener )
    {
        server.addStateTransitionListener( listener );
        return this;
    }
    /** Advances the fake clock: fires due timeouts, then runs deferred state-machine work. */
    public void tick( long now )
    {
        // Time passes - check timeouts
        server.getTimeouts().tick( now );
        stateMachineExecutor.drain();
    }
    @Override
    public String toString()
    {
        return server.getServerId() + ": " + sender.getMessages().size() + server.toString();
    }
    /**
     * In-memory sender: stamps each message with this server's URI as the FROM header
     * and queues it until {@link #sendMessages(List)} drains the queue.
     */
    public class TestMessageSender implements MessageSender
    {
        List<Message> messages = new ArrayList<Message>();
        @Override
        public void process( List<Message<? extends MessageType>> messages )
        {
            for ( Message<? extends MessageType> message : messages )
            {
                process( message );
            }
        }
        @Override
        public boolean process( Message<? extends MessageType> message )
        {
            message.setHeader( Message.FROM, serverUri.toASCIIString() );
            messages.add( message );
            return true;
        }
        public List<Message> getMessages()
        {
            return messages;
        }
        /** Moves all queued messages into {@code output} and empties the queue. */
        public void sendMessages( List<Message> output )
        {
            output.addAll( messages );
            messages.clear();
        }
    }
    /**
     * In-memory source: forwards each message to all registered processors,
     * stopping at the first one that rejects it.
     */
    public class TestMessageSource
            implements MessageSource, MessageProcessor
    {
        Iterable<MessageProcessor> listeners = Listeners.newListeners();
        @Override
        public void addMessageProcessor( MessageProcessor listener )
        {
            listeners = Listeners.addListener( listener, listeners );
        }
        @Override
        public boolean process( Message<? extends MessageType> message )
        {
            for ( MessageProcessor listener : listeners )
            {
                if ( !listener.process( message ) )
                {
                    return false;
                }
            }
            return true;
        }
    }
}
| false
|
enterprise_cluster_src_test_java_org_neo4j_cluster_TestProtocolServer.java
|
3,993
|
/**
 * Listener that checks observed state transitions against an expected sequence,
 * recording a human-readable history for failure reports.
 */
private class ExpectingStateTransitionListener implements StateTransitionListener
{
    // What actually happened, annotated (UNEXPECTED/EXPECTED ... GOT ...) for reporting.
    private final Deque<String> transitionHistory = new LinkedList<String>();
    // Remaining expected transitions; popped from the front as they are matched.
    private final Deque<ExpectedTransition> transitions;
    // Flips to false on the first mismatch and stays false.
    private volatile boolean valid = true;
    private final Object id;
    // When false, transitions whose old and new state are equal are ignored.
    private final boolean includeUnchanged;
    ExpectingStateTransitionListener( Deque<ExpectedTransition> transitions, boolean includeUnchanged, Object id )
    {
        this.transitions = transitions;
        this.includeUnchanged = includeUnchanged;
        this.id = id;
    }
    @Override
    public void stateTransition( StateTransition transition )
    {
        // Keep recording while still valid, or (once exhausted) to capture extras as UNEXPECTED.
        if (valid || transitions.isEmpty())
        {
            if ( !includeUnchanged && transition.getOldState().equals( transition.getNewState() ) )
                return;
            if ( transitions.isEmpty())
            {
                // More transitions arrived than were expected.
                valid = false;
                transitionHistory.add( "UNEXPECTED:" + transition );
            } else
            {
                ExpectedTransition expected = transitions.pop();
                if ( expected.matches( transition ) )
                {
                    transitionHistory.add( expected.toString() );
                } else
                {
                    transitionHistory.add( "EXPECTED " + expected + ", GOT " + transition );
                    valid = false;
                }
            }
        }
    }
    // Appends a failure report to builder; appends nothing when fully satisfied.
    void transitionHistory(StringBuilder builder)
    {
        if (valid && transitions.isEmpty())
            return;
        builder.append( "\n=== Failed state transition expectations for " ).append( id );
        for( String transition : transitionHistory )
        {
            builder.append( "\n " ).append( transition );
        }
        if (valid)
        {
            // Still valid but incomplete: list the transitions that never happened.
            for( ExpectedTransition transition : transitions )
            {
                builder.append( "\n " ).append( "MISSING ").append( transition );
            }
        }
    }
    // Appends all still-unmatched expectations, regardless of validity.
    void printRemaining( StringBuilder builder )
    {
        builder.append( "== " ).append( id ).append( "\n" );
        for( ExpectedTransition transition : transitions )
        {
            builder.append( transition.toString() ).append( "\n" );
        }
    }
    /** @return true when no mismatch occurred and every expectation was matched */
    public boolean isFulfilled()
    {
        return valid && transitions.isEmpty();
    }
}
| false
|
enterprise_cluster_src_test_java_org_neo4j_cluster_StateTransitionExpectations.java
|
3,994
|
/**
 * One expected step in a transition sequence: reaching {@code state} as the
 * result of handling a message of type {@code messageToGetHere}.
 */
private class ExpectedTransition
{
    private final MessageType messageToGetHere;
    private final State state;

    ExpectedTransition( MessageType messageToGetHere, State state )
    {
        this.messageToGetHere = messageToGetHere;
        this.state = state;
    }

    /** True when the transition ends in the expected state via the expected message type. */
    public boolean matches( StateTransition transition )
    {
        if ( !state.equals( transition.getNewState() ) )
        {
            return false;
        }
        return messageToGetHere.equals( transition.getMessage().getMessageType() );
    }

    @Override
    public String toString()
    {
        return messageToGetHere + "->" + state;
    }
}
| false
|
enterprise_cluster_src_test_java_org_neo4j_cluster_StateTransitionExpectations.java
|
3,995
|
/**
 * Fluent builder for a sequence of expected state transitions; {@link #build(Object)}
 * registers the resulting listener with the enclosing expectations.
 */
public class ExpectationsBuilder
{
    private final Deque<ExpectedTransition> transitions = new LinkedList<ExpectedTransition>();
    private boolean includeUnchanged;

    /** Appends one expected (message, resulting state) step. */
    public ExpectationsBuilder expect( MessageType messageToGetHere, State state )
    {
        transitions.add( new ExpectedTransition( messageToGetHere, state ) );
        return this;
    }

    /** Also verify transitions whose old and new state are the same. */
    public ExpectationsBuilder includeUnchangedStates()
    {
        includeUnchanged = true;
        return this;
    }

    /** Creates a listener over a private copy of the steps and registers it for verification. */
    public StateTransitionListener build( Object id )
    {
        Deque<ExpectedTransition> steps = new LinkedList<ExpectedTransition>( transitions );
        ExpectingStateTransitionListener listener = new ExpectingStateTransitionListener( steps, includeUnchanged, id );
        expectations.add( listener );
        return listener;
    }

    public void assertNoMoreExpectations()
    {
        if ( transitions.isEmpty() )
        {
            return;
        }
        throw new IllegalStateException( "Unsatisfied transitions: " + transitions );
    }
}
| false
|
enterprise_cluster_src_test_java_org_neo4j_cluster_StateTransitionExpectations.java
|
3,996
|
{
@Override
public void stateTransition( StateTransition messagetypeStateTransition )
{
}
};
| false
|
enterprise_cluster_src_test_java_org_neo4j_cluster_StateTransitionExpectations.java
|
3,997
|
/**
 * Test utility for asserting that state machines go through an exact sequence of
 * transitions. Build per-machine listeners via {@link #newExpectations(Enum[])} and
 * check the outcome with {@link #verify()} or {@link #areFulfilled()}.
 *
 * @param <CONTEXT>     context type of the state machines under test
 * @param <MESSAGETYPE> message enum driving the state machines
 */
public class StateTransitionExpectations<CONTEXT,MESSAGETYPE extends Enum<MESSAGETYPE> & MessageType>
{
    // Listener that accepts every transition without recording any expectation.
    public final StateTransitionListener NO_EXPECTATIONS = new StateTransitionListener()
    {
        @Override
        public void stateTransition( StateTransition messagetypeStateTransition )
        {
        }
    };
    private final List<ExpectingStateTransitionListener> expectations = new ArrayList<ExpectingStateTransitionListener>();
    /**
     * Creates a builder seeded with alternating (message, resulting state) pairs.
     * NOTE(review): the array is consumed pairwise (i advances twice per iteration),
     * so an odd-length argument list would throw ArrayIndexOutOfBoundsException.
     */
    public ExpectationsBuilder newExpectations( Enum<?>... initialAlternatingExpectedMessageAndState )
    {
        ExpectationsBuilder builder = new ExpectationsBuilder();
        for ( int i = 0; i < initialAlternatingExpectedMessageAndState.length; i++ )
            builder.expect( (MessageType) initialAlternatingExpectedMessageAndState[i++], (State) initialAlternatingExpectedMessageAndState[i] );
        return builder;
    }
    /**
     * Throws {@link IllegalStateException} with a full report if any registered
     * listener saw unexpected transitions or still has unmet expectations.
     */
    public void verify()
    {
        StringBuilder builder = new StringBuilder( );
        for ( ExpectingStateTransitionListener listener : expectations )
            listener.transitionHistory( builder );
        if (builder.length() != 0)
        {
            throw new IllegalStateException( "Failed expectations:"+builder.toString() );
        }
    }
    /** @return true when every registered listener has matched its whole sequence */
    public boolean areFulfilled()
    {
        for ( ExpectingStateTransitionListener listener : expectations )
            if ( !listener.isFulfilled() )
                return false;
        return true;
    }
    /** Logs all still-unmet transitions, e.g. for diagnosing a stalled test. */
    public void printRemaining( Logger logger )
    {
        StringBuilder builder = new StringBuilder( );
        builder.append( "=== Remaining state transitions ===\n" );
        for( ExpectingStateTransitionListener expectation : expectations )
        {
            expectation.printRemaining(builder);
        }
        logger.info( builder.toString() );
    }
    /**
     * Fluent builder for a sequence of expected transitions; {@link #build(Object)}
     * registers the resulting listener for later verification.
     */
    public class ExpectationsBuilder
    {
        private final Deque<ExpectedTransition> transitions = new LinkedList<ExpectedTransition>();
        private boolean includeUnchanged;
        /** Appends one expected (message, resulting state) step. */
        public ExpectationsBuilder expect( MessageType messageToGetHere, State state )
        {
            transitions.add( new ExpectedTransition( messageToGetHere, state ) );
            return this;
        }
        /** Also verify transitions whose old and new state are the same. */
        public ExpectationsBuilder includeUnchangedStates()
        {
            this.includeUnchanged = true;
            return this;
        }
        /** Creates a listener over a copy of the steps, so the builder can be reused. */
        public StateTransitionListener build( Object id )
        {
            ExpectingStateTransitionListener listener = new ExpectingStateTransitionListener( new LinkedList<ExpectedTransition>( transitions ), includeUnchanged, id );
            expectations.add( listener );
            return listener;
        }
        public void assertNoMoreExpectations()
        {
            if ( !transitions.isEmpty() )
                throw new IllegalStateException( "Unsatisfied transitions: " + transitions );
        }
    }
    /**
     * Listener that checks observed transitions against an expected sequence,
     * recording a readable history for failure reports.
     */
    private class ExpectingStateTransitionListener implements StateTransitionListener
    {
        // What actually happened, annotated for reporting.
        private final Deque<String> transitionHistory = new LinkedList<String>();
        // Remaining expected transitions; popped from the front as they match.
        private final Deque<ExpectedTransition> transitions;
        // Flips to false on the first mismatch and stays false.
        private volatile boolean valid = true;
        private final Object id;
        // When false, transitions whose old and new state are equal are ignored.
        private final boolean includeUnchanged;
        ExpectingStateTransitionListener( Deque<ExpectedTransition> transitions, boolean includeUnchanged, Object id )
        {
            this.transitions = transitions;
            this.includeUnchanged = includeUnchanged;
            this.id = id;
        }
        @Override
        public void stateTransition( StateTransition transition )
        {
            // Keep recording while valid, or (once exhausted) to capture extras as UNEXPECTED.
            if (valid || transitions.isEmpty())
            {
                if ( !includeUnchanged && transition.getOldState().equals( transition.getNewState() ) )
                    return;
                if ( transitions.isEmpty())
                {
                    // More transitions arrived than were expected.
                    valid = false;
                    transitionHistory.add( "UNEXPECTED:" + transition );
                } else
                {
                    ExpectedTransition expected = transitions.pop();
                    if ( expected.matches( transition ) )
                    {
                        transitionHistory.add( expected.toString() );
                    } else
                    {
                        transitionHistory.add( "EXPECTED " + expected + ", GOT " + transition );
                        valid = false;
                    }
                }
            }
        }
        // Appends a failure report to builder; appends nothing when fully satisfied.
        void transitionHistory(StringBuilder builder)
        {
            if (valid && transitions.isEmpty())
                return;
            builder.append( "\n=== Failed state transition expectations for " ).append( id );
            for( String transition : transitionHistory )
            {
                builder.append( "\n " ).append( transition );
            }
            if (valid)
            {
                // Still valid but incomplete: list the transitions that never happened.
                for( ExpectedTransition transition : transitions )
                {
                    builder.append( "\n " ).append( "MISSING ").append( transition );
                }
            }
        }
        // Appends all still-unmatched expectations, regardless of validity.
        void printRemaining( StringBuilder builder )
        {
            builder.append( "== " ).append( id ).append( "\n" );
            for( ExpectedTransition transition : transitions )
            {
                builder.append( transition.toString() ).append( "\n" );
            }
        }
        /** @return true when no mismatch occurred and every expectation was matched */
        public boolean isFulfilled()
        {
            return valid && transitions.isEmpty();
        }
    }
    /**
     * One expected step: reaching {@code state} as the result of handling a
     * message of type {@code messageToGetHere}.
     */
    private class ExpectedTransition
    {
        private final MessageType messageToGetHere;
        private final State state;
        ExpectedTransition( MessageType messageToGetHere, State state )
        {
            this.messageToGetHere = messageToGetHere;
            this.state = state;
        }
        public boolean matches( StateTransition transition )
        {
            return state.equals( transition.getNewState() ) && messageToGetHere.equals( transition.getMessage().getMessageType() );
        }
        @Override
        public String toString()
        {
            return messageToGetHere + "->" + state;
        }
    }
}
| false
|
enterprise_cluster_src_test_java_org_neo4j_cluster_StateTransitionExpectations.java
|
3,998
|
test
{
@Override
public State<?, ?> handle( List context, Message<TestMessage> message, MessageHolder outgoing ) throws Throwable
{
context.add(message.getMessageType());
switch ( message.getMessageType() )
{
case message1:
{
outgoing.offer( internal( TestMessage.message2 ) );
outgoing.offer( internal( TestMessage.message3 ) );
break;
}
case message2:
{
outgoing.offer( internal( TestMessage.message4 ) );
outgoing.offer( internal( TestMessage.message5 ) );
break;
}
}
return this;
}
};
| false
|
enterprise_cluster_src_test_java_org_neo4j_cluster_StateMachinesTest.java
|
3,999
|
{
@Override
public Object answer( InvocationOnMock invocation ) throws Throwable
{
Message message = (Message) invocation.getArguments()[0];
MessageHolder holder = (MessageHolder) invocation.getArguments()[1];
message.setHeader( Message.TO, "to://neverland" );
holder.offer( message );
return null;
}
} ).when( machine ).handle( any( Message.class ), any( MessageHolder.class ) );
| false
|
enterprise_cluster_src_test_java_org_neo4j_cluster_StateMachinesTest.java
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.