Unnamed: 0
int64 0
6.7k
| func
stringlengths 12
89.6k
| target
bool 2
classes | project
stringlengths 45
151
|
|---|---|---|---|
4,400
|
/**
 * An abstract pool of resources of type {@code R} where each thread holds at most one
 * resource at a time. Acquired resources are tracked per thread in {@code current};
 * released resources go back onto the {@code unused} free list, which is capped at a
 * periodically re-evaluated target size.
 *
 * Subclasses supply resource lifecycle behaviour via {@link #create()},
 * {@link #dispose(Object)} and {@link #isAlive(Object)}.
 */
public abstract class ResourcePool<R>
{
    /**
     * Observer of pool events: resource creation, acquisition and disposal, plus
     * updates to the measured peak size and the recomputed target size.
     */
    public interface Monitor<R>
    {
        public void updatedCurrentPeakSize( int currentPeakSize );
        public void updatedTargetSize( int targetSize );
        public void created( R resource );
        public void acquired( R resource );
        public void disposed( R resource );

        /**
         * No-op {@link Monitor} implementation, meant to be subclassed when only a
         * few callbacks are of interest.
         */
        public class Adapter<R> implements Monitor<R>
        {
            @Override
            public void updatedCurrentPeakSize( int currentPeakSize )
            {
            }
            @Override
            public void updatedTargetSize( int targetSize )
            {
            }
            @Override
            public void created( R resource )
            {
            }
            @Override
            public void acquired( R resource )
            {
            }
            @Override
            public void disposed( R resource )
            {
            }
        }
    }

    /**
     * Decides when the pool should re-evaluate its target size.
     */
    public interface CheckStrategy
    {
        public boolean shouldCheck();

        /**
         * Signals a check once more than {@code interval} milliseconds (measured on
         * the given clock) have elapsed since the previous signalled check.
         */
        public class TimeoutCheckStrategy implements CheckStrategy
        {
            private final long interval;
            private long lastCheckTime;
            private final Clock clock;

            public TimeoutCheckStrategy( long interval, Clock clock )
            {
                this.interval = interval;
                this.lastCheckTime = clock.currentTimeMillis();
                this.clock = clock;
            }

            @Override
            public boolean shouldCheck()
            {
                long currentTime = clock.currentTimeMillis();
                // Strictly greater-than: a check fires only after MORE than
                // 'interval' ms have passed since the last one.
                if ( currentTime > lastCheckTime + interval )
                {
                    lastCheckTime = currentTime;
                    return true;
                }
                return false;
            }
        }
    }

    /** Default target-size re-check interval: one minute, in milliseconds. */
    public static final int DEFAULT_CHECK_INTERVAL = 60 * 1000;

    // Released resources waiting to be reused. Guarded by synchronizing on itself.
    private final LinkedList<R> unused = new LinkedList<R>();
    // The resource (at most one) currently held by each thread.
    private final Map<Thread, R> current = new ConcurrentHashMap<Thread, R>();
    // NOTE(review): raw Monitor/CheckStrategy uses here and in the constructors;
    // presumably Monitor<R> was intended — confirm before tightening the generics.
    private final Monitor monitor;
    private final int minSize;
    private final CheckStrategy checkStrategy;
    // Guarded by nothing. Those are estimates, losing some values doesn't matter much
    private int currentPeakSize;
    private int targetSize;

    protected ResourcePool( int minSize )
    {
        this( minSize, new CheckStrategy.TimeoutCheckStrategy( DEFAULT_CHECK_INTERVAL, SYSTEM_CLOCK ), new Monitor.Adapter() );
    }

    protected ResourcePool( int minSize, CheckStrategy strategy, Monitor monitor )
    {
        this.minSize = minSize;
        this.currentPeakSize = 0;
        this.targetSize = minSize;
        this.checkStrategy = strategy;
        this.monitor = monitor;
    }

    /** Creates a new resource; called when no live unused resource is available. */
    protected abstract R create();

    /** Hook for cleaning up a resource that is being discarded. No-op by default. */
    protected void dispose( R resource )
    {
    }

    /** @return the number of resources currently held by threads (excludes the free list). */
    protected int currentSize()
    {
        return current.size();
    }

    /**
     * Liveness check applied to free-list resources before they are handed out.
     * The default assumes every resource stays usable forever.
     */
    protected boolean isAlive( R resource )
    {
        return true;
    }

    /**
     * Acquires a resource for the calling thread. If the thread already holds one it
     * is returned again; otherwise a live resource is taken from the free list, or a
     * new one is created. Dead free-list resources encountered are disposed of.
     */
    public final R acquire()
    {
        Thread thread = Thread.currentThread();
        R resource = current.get( thread );
        if ( resource == null )
        {
            // Dead resources found while scanning the free list; disposed of
            // outside the lock, further down.
            List<R> garbage = null;
            synchronized ( unused )
            {
                for (; ; )
                {
                    resource = unused.poll();
                    if ( resource == null )
                    {
                        break;
                    }
                    if ( isAlive( resource ) )
                    {
                        break;
                    }
                    if ( garbage == null )
                    {
                        garbage = new LinkedList<R>();
                    }
                    garbage.add( resource );
                }
            }
            if ( resource == null )
            {
                resource = create();
                monitor.created( resource );
            }
            current.put( thread, resource );
            monitor.acquired( resource );
            if ( garbage != null )
            {
                for ( R dead : garbage )
                {
                    dispose( dead );
                    monitor.disposed( dead );
                }
            }
        }
        // Peak/target bookkeeping is intentionally unsynchronized (see field comment).
        currentPeakSize = Math.max( currentPeakSize, current.size() );
        if ( checkStrategy.shouldCheck() )
        {
            // New target = peak usage observed since the last check, floored at minSize.
            targetSize = Math.max( minSize, currentPeakSize );
            monitor.updatedCurrentPeakSize( currentPeakSize );
            currentPeakSize = 0;
            monitor.updatedTargetSize( targetSize );
        }
        return resource;
    }

    /**
     * Releases the calling thread's resource back to the free list, or disposes of it
     * if the free list is already at the target size. No-op if the thread holds none.
     */
    public final void release()
    {
        Thread thread = Thread.currentThread();
        R resource = current.remove( thread );
        if ( resource != null )
        {
            boolean dead = false;
            synchronized ( unused )
            {
                if ( unused.size() < targetSize )
                {
                    unused.add( resource );
                }
                else
                {
                    dead = true;
                }
            }
            // Disposal is done outside the free-list lock.
            if ( dead )
            {
                dispose( resource );
                monitor.disposed( resource );
            }
        }
    }

    /**
     * Disposes of all free-list resources and, if {@code force} is true, also the
     * resources currently held by threads.
     * NOTE(review): unlike acquire()/release(), this does not call monitor.disposed()
     * for the disposed resources — confirm whether that is intentional.
     */
    public final void close( boolean force )
    {
        List<R> dead = new LinkedList<R>();
        synchronized ( unused )
        {
            dead.addAll( unused );
            unused.clear();
        }
        if ( force )
        {
            dead.addAll( current.values() );
        }
        for ( R resource : dead )
        {
            dispose( resource );
        }
    }
}
| false
|
enterprise_com_src_main_java_org_neo4j_com_ResourcePool.java
|
4,401
|
/**
 * Immutable value identifying the last applied transaction of a single data source:
 * a data source name paired with a transaction id.
 */
public static class Tx
{
    private final String dataSourceName;
    private final long txId;

    public Tx( String dataSourceName, long txId )
    {
        this.dataSourceName = dataSourceName;
        this.txId = txId;
    }

    public String getDataSourceName()
    {
        return dataSourceName;
    }

    public long getTxId()
    {
        return txId;
    }

    @Override
    public String toString()
    {
        // Rendered as "<dataSourceName>/<txId>".
        return String.format( "%s/%d", dataSourceName, txId );
    }
}
| false
|
enterprise_com_src_main_java_org_neo4j_com_RequestContext.java
|
4,402
|
public final class RequestContext
{
public static class Tx
{
private final String dataSourceName;
private final long txId;
public Tx( String dataSourceName, long txId )
{
this.dataSourceName = dataSourceName;
this.txId = txId;
}
public String getDataSourceName()
{
return dataSourceName;
}
public long getTxId()
{
return txId;
}
@Override
public String toString()
{
return dataSourceName + "/" + txId;
}
}
public static Tx lastAppliedTx( String dataSourceName, long txId )
{
return new Tx( dataSourceName, txId );
}
private final int machineId;
private final Tx[] lastAppliedTransactions;
private final int eventIdentifier;
private final int hashCode;
private final long epoch;
private final int masterId;
private final long checksum;
public RequestContext( long epoch, int machineId, int eventIdentifier,
Tx[] lastAppliedTransactions, int masterId, long checksum )
{
this.epoch = epoch;
this.machineId = machineId;
this.eventIdentifier = eventIdentifier;
this.lastAppliedTransactions = lastAppliedTransactions;
this.masterId = masterId;
this.checksum = checksum;
long hash = epoch;
hash = ( 31 * hash ) ^ eventIdentifier;
hash = ( 31 * hash ) ^ machineId;
this.hashCode = (int) ( ( hash >>> 32 ) ^ hash );
}
public int machineId()
{
return machineId;
}
public Tx[] lastAppliedTransactions()
{
return lastAppliedTransactions;
}
public int getEventIdentifier()
{
return eventIdentifier;
}
public long getEpoch()
{
return epoch;
}
public int getMasterId()
{
return masterId;
}
public long getChecksum()
{
return checksum;
}
@Override
public String toString()
{
return "RequestContext[session: " + epoch + ", ID:" + machineId + ", eventIdentifier:" + eventIdentifier
+ ", " + Arrays.asList( lastAppliedTransactions ) + "]";
}
@Override
public boolean equals( Object obj )
{
if ( !( obj instanceof RequestContext ) )
{
return false;
}
RequestContext o = (RequestContext) obj;
return o.eventIdentifier == eventIdentifier && o.machineId == machineId && o.epoch == epoch;
}
@Override
public int hashCode()
{
return this.hashCode;
}
public static final RequestContext EMPTY = new RequestContext( -1, -1, -1, new Tx[0], -1, -1 );
public static RequestContext anonymous( Tx[] lastAppliedTransactions )
{
return new RequestContext( EMPTY.epoch, EMPTY.machineId, EMPTY.eventIdentifier,
lastAppliedTransactions, EMPTY.masterId, EMPTY.checksum );
}
}
| false
|
enterprise_com_src_main_java_org_neo4j_com_RequestContext.java
|
4,403
|
{
@Override
public Channel getChannel()
{
return RecordingChannel.this;
}
@Override
public boolean isDone()
{
return true;
}
@Override
public boolean isCancelled()
{
return false;
}
@Override
public boolean isSuccess()
{
return true;
}
@Override
public Throwable getCause()
{
return null;
}
@Override
public boolean cancel()
{
return false;
}
@Override
public boolean setSuccess()
{
return true;
}
@Override
public boolean setFailure( Throwable cause )
{
return false;
}
@Override
public boolean setProgress( long amount, long current, long total )
{
return false;
}
@Override
public void addListener( ChannelFutureListener listener )
{
try
{
listener.operationComplete( this );
}
catch ( Exception e )
{
throw new RuntimeException( e );
}
}
@Override
public void removeListener( ChannelFutureListener listener )
{
}
@Override
public ChannelFuture rethrowIfFailed() throws Exception
{
return null;
}
@Override
public ChannelFuture sync() throws InterruptedException
{
return null;
}
@Override
public ChannelFuture syncUninterruptibly()
{
return null;
}
@Override
public ChannelFuture await() throws InterruptedException
{
return null;
}
@Override
public ChannelFuture awaitUninterruptibly()
{
return null;
}
@Override
public boolean await( long timeout, TimeUnit unit ) throws InterruptedException
{
return false;
}
@Override
public boolean await( long timeoutMillis ) throws InterruptedException
{
return false;
}
@Override
public boolean awaitUninterruptibly( long timeout, TimeUnit unit )
{
return false;
}
@Override
public boolean awaitUninterruptibly( long timeoutMillis )
{
return false;
}
};
| false
|
enterprise_com_src_test_java_org_neo4j_com_RecordingChannel.java
|
4,404
|
{
@Override
public ChannelBuffer read() throws IOException, InterruptedException
{
return recievedMessages.poll();
}
@Override
public ChannelBuffer read( long timeout, TimeUnit unit ) throws IOException, InterruptedException
{
return read();
}
};
| false
|
enterprise_com_src_test_java_org_neo4j_com_RecordingChannel.java
|
4,405
|
/**
 * Test stub of a Netty {@link Channel} that records every {@link ChannelBuffer}
 * written to it, so tests can later read the messages back through
 * {@link #asBlockingReadHandler()}. Status queries pretend the channel is healthy
 * and connected; all other channel plumbing throws UnsupportedOperationException.
 */
public class RecordingChannel implements Channel
{
    // NOTE(review): "recieved" is a misspelling of "received"; identifier kept as-is
    // in this doc-only pass.
    private Queue<ChannelBuffer> recievedMessages = new LinkedList<ChannelBuffer>();

    @Override
    public ChannelFuture write( Object message )
    {
        // Record a duplicate so later reads are unaffected by the caller's reader index.
        // Non-buffer messages are silently ignored.
        if(message instanceof ChannelBuffer )
        {
            ChannelBuffer buffer = (ChannelBuffer)message;
            recievedMessages.offer( buffer.duplicate() );
        }
        return immediateFuture;
    }

    @Override
    public ChannelFuture write( Object message, SocketAddress remoteAddress )
    {
        // The remote address is irrelevant to the recording; delegate to write(Object).
        write(message);
        return immediateFuture;
    }

    // Channel plumbing not exercised by tests: unsupported.
    @Override
    public Integer getId()
    {
        throw new UnsupportedOperationException( );
    }

    @Override
    public ChannelFactory getFactory()
    {
        throw new UnsupportedOperationException( );
    }

    @Override
    public Channel getParent()
    {
        throw new UnsupportedOperationException( );
    }

    @Override
    public ChannelConfig getConfig()
    {
        throw new UnsupportedOperationException( );
    }

    @Override
    public ChannelPipeline getPipeline()
    {
        throw new UnsupportedOperationException( );
    }

    // Pretend to be a healthy, connected channel.
    @Override
    public boolean isOpen()
    {
        return true;
    }

    @Override
    public boolean isBound()
    {
        return true;
    }

    @Override
    public boolean isConnected()
    {
        return true;
    }

    @Override
    public SocketAddress getLocalAddress()
    {
        throw new UnsupportedOperationException( );
    }

    @Override
    public SocketAddress getRemoteAddress()
    {
        throw new UnsupportedOperationException( );
    }

    @Override
    public ChannelFuture bind( SocketAddress localAddress )
    {
        throw new UnsupportedOperationException( );
    }

    @Override
    public ChannelFuture connect( SocketAddress remoteAddress )
    {
        throw new UnsupportedOperationException( );
    }

    @Override
    public ChannelFuture disconnect()
    {
        throw new UnsupportedOperationException( );
    }

    @Override
    public ChannelFuture unbind()
    {
        throw new UnsupportedOperationException( );
    }

    // NOTE(review): returns null rather than a completed future — presumably callers
    // in these tests ignore the result; confirm before relying on it.
    @Override
    public ChannelFuture close()
    {
        return null;
    }

    @Override
    public ChannelFuture getCloseFuture()
    {
        throw new UnsupportedOperationException( );
    }

    @Override
    public int getInterestOps()
    {
        throw new UnsupportedOperationException( );
    }

    @Override
    public boolean isReadable()
    {
        return false;
    }

    @Override
    public boolean isWritable()
    {
        return true;
    }

    @Override
    public ChannelFuture setInterestOps( int interestOps )
    {
        throw new UnsupportedOperationException( );
    }

    @Override
    public ChannelFuture setReadable( boolean readable )
    {
        throw new UnsupportedOperationException( );
    }

    @Override
    public Object getAttachment()
    {
        throw new UnsupportedOperationException( );
    }

    @Override
    public void setAttachment( Object attachment )
    {
        throw new UnsupportedOperationException( );
    }

    // All recording channels compare as equal; ordering is irrelevant to tests.
    @Override
    public int compareTo( Channel o )
    {
        return 0;
    }

    // This is due to a tight coupling of the netty pipeline and message deserialization;
    // we can't deserialize without this pipeline item yet. We should refactor the
    // serialization/deserialization code appropriately such that it is not tied like
    // this to components it should not be aware of.
    /**
     * @return a handler whose read() drains the messages recorded by write(), in
     * FIFO order (null when exhausted). The timeout variant ignores the timeout.
     */
    public BlockingReadHandler<ChannelBuffer> asBlockingReadHandler()
    {
        return new BlockingReadHandler<ChannelBuffer>()
        {
            @Override
            public ChannelBuffer read() throws IOException, InterruptedException
            {
                return recievedMessages.poll();
            }

            @Override
            public ChannelBuffer read( long timeout, TimeUnit unit ) throws IOException, InterruptedException
            {
                return read();
            }
        };
    }

    // A ChannelFuture that is already completed successfully: listeners are invoked
    // immediately, waits return at once, and cancellation/failure are rejected.
    private ChannelFuture immediateFuture = new ChannelFuture()
    {
        @Override
        public Channel getChannel()
        {
            return RecordingChannel.this;
        }

        @Override
        public boolean isDone()
        {
            return true;
        }

        @Override
        public boolean isCancelled()
        {
            return false;
        }

        @Override
        public boolean isSuccess()
        {
            return true;
        }

        @Override
        public Throwable getCause()
        {
            return null;
        }

        @Override
        public boolean cancel()
        {
            return false;
        }

        @Override
        public boolean setSuccess()
        {
            return true;
        }

        @Override
        public boolean setFailure( Throwable cause )
        {
            return false;
        }

        @Override
        public boolean setProgress( long amount, long current, long total )
        {
            return false;
        }

        @Override
        public void addListener( ChannelFutureListener listener )
        {
            // Already complete, so notify the listener synchronously.
            try
            {
                listener.operationComplete( this );
            }
            catch ( Exception e )
            {
                throw new RuntimeException( e );
            }
        }

        @Override
        public void removeListener( ChannelFutureListener listener )
        {
        }

        @Override
        public ChannelFuture rethrowIfFailed() throws Exception
        {
            return null;
        }

        @Override
        public ChannelFuture sync() throws InterruptedException
        {
            return null;
        }

        @Override
        public ChannelFuture syncUninterruptibly()
        {
            return null;
        }

        @Override
        public ChannelFuture await() throws InterruptedException
        {
            return null;
        }

        @Override
        public ChannelFuture awaitUninterruptibly()
        {
            return null;
        }

        @Override
        public boolean await( long timeout, TimeUnit unit ) throws InterruptedException
        {
            return false;
        }

        @Override
        public boolean await( long timeoutMillis ) throws InterruptedException
        {
            return false;
        }

        @Override
        public boolean awaitUninterruptibly( long timeout, TimeUnit unit )
        {
            return false;
        }

        @Override
        public boolean awaitUninterruptibly( long timeoutMillis )
        {
            return false;
        }
    };
}
| false
|
enterprise_com_src_test_java_org_neo4j_com_RecordingChannel.java
|
4,406
|
/**
 * Deserializer for a stream of files: reads (pathLength, path, hasData, [data])
 * records until a zero path length terminates the stream, handing each file over to
 * the supplied StoreWriter and signalling completion via done().
 */
public static class FileStreamsDeserializer implements Deserializer<Void>
{
    private final StoreWriter writer;

    public FileStreamsDeserializer( StoreWriter writer )
    {
        this.writer = writer;
    }

    // NOTICE: this assumes a "smart" ChannelBuffer that continues to next chunk
    public Void read( ChannelBuffer buffer, ByteBuffer temporaryBuffer ) throws IOException
    {
        // A path length of zero marks the end of the file stream.
        for ( int pathLength = buffer.readUnsignedShort(); pathLength != 0;
                pathLength = buffer.readUnsignedShort() )
        {
            String path = readString( buffer, pathLength );
            boolean hasData = buffer.readByte() == 1;
            writer.write( path, hasData ? new BlockLogReader( buffer ) : null, temporaryBuffer, hasData );
        }
        writer.done();
        return null;
    }
}
| false
|
enterprise_com_src_main_java_org_neo4j_com_Protocol.java
|
4,407
|
{
public void write( ChannelBuffer buffer ) throws IOException
{
}
};
| false
|
enterprise_com_src_main_java_org_neo4j_com_Protocol.java
|
4,408
|
{
public Void read( ChannelBuffer buffer, ByteBuffer temporaryBuffer ) throws IOException
{
return null;
}
};
| false
|
enterprise_com_src_main_java_org_neo4j_com_Protocol.java
|
4,409
|
{
public Integer read( ChannelBuffer buffer, ByteBuffer temporaryBuffer ) throws IOException
{
return buffer.readInt();
}
};
| false
|
enterprise_com_src_main_java_org_neo4j_com_Protocol.java
|
4,410
|
{
public void write( Void responseObject, ChannelBuffer result ) throws IOException
{
}
};
| false
|
enterprise_com_src_main_java_org_neo4j_com_Protocol.java
|
4,411
|
{
@SuppressWarnings( "boxing" )
public void write( Long responseObject, ChannelBuffer result ) throws IOException
{
result.writeLong( responseObject );
}
};
| false
|
enterprise_com_src_main_java_org_neo4j_com_Protocol.java
|
4,412
|
/**
 * Test Monitor implementation that remembers the latest reported pool sizes and
 * counts lifecycle events, exposed as atomics so assertions can be made from other
 * threads.
 */
private class StatefulMonitor implements ResourcePool.Monitor<Something>
{
    // Latest sizes reported by the pool; -1 until the first report arrives.
    public AtomicInteger currentPeakSize = new AtomicInteger(-1);
    public AtomicInteger targetSize = new AtomicInteger( -1 );
    // Event counters.
    public AtomicInteger created = new AtomicInteger( 0 );
    public AtomicInteger acquired = new AtomicInteger( 0 );
    public AtomicInteger disposed = new AtomicInteger( 0 );

    @Override
    public void updatedCurrentPeakSize( int currentPeakSize )
    {
        this.currentPeakSize.set( currentPeakSize );
    }

    @Override
    public void updatedTargetSize( int targetSize )
    {
        this.targetSize.set( targetSize );
    }

    @Override
    public void created( Something something )
    {
        this.created.incrementAndGet();
    }

    @Override
    public void acquired( Something something )
    {
        this.acquired.incrementAndGet();
    }

    @Override
    public void disposed( Something something )
    {
        this.disposed.incrementAndGet();
    }
}
| false
|
enterprise_com_src_test_java_org_neo4j_com_ResourcePoolTest.java
|
4,413
|
{
public void release()
{
// What it says on the box
}
};
| false
|
enterprise_com_src_main_java_org_neo4j_com_ResourceReleaser.java
|
4,414
|
/**
 * The outcome of a request/response round trip: wraps the response payload together
 * with the store id it came from, a stream of transactions to apply, and a releaser
 * for the server-side resources backing that stream.
 */
public class Response<T> implements AutoCloseable
{
    /** An empty response: no payload, an all -1 store id, no transactions. */
    public static final Response<Void> EMPTY = new Response<Void>( null, new StoreId( -1, -1, -1 ),
            TransactionStream.EMPTY, ResourceReleaser.NO_OP );

    private final T response;
    private final StoreId storeId;
    private final TransactionStream transactions;
    private final ResourceReleaser releaser;

    public Response( T response, StoreId storeId,
            TransactionStream transactions, ResourceReleaser releaser )
    {
        this.response = response;
        this.storeId = storeId;
        this.transactions = transactions;
        this.releaser = releaser;
    }

    public T response() throws ServerFailureException
    {
        return response;
    }

    public StoreId getStoreId()
    {
        return storeId;
    }

    public TransactionStream transactions()
    {
        return transactions;
    }

    /**
     * Closes the transaction stream and then — whether or not that close threw —
     * releases the associated server-side resources.
     */
    @Override
    public void close()
    {
        try
        {
            transactions.close();
        }
        finally
        {
            releaser.release();
        }
    }
}
| false
|
enterprise_com_src_main_java_org_neo4j_com_Response.java
|
4,415
|
{
@Override
public ReadableByteChannel extract()
{
InMemoryLogBuffer buffer = new InMemoryLogBuffer();
extract( buffer );
return buffer;
}
@Override
public void extract( LogBuffer buffer )
{
try
{
long extractedTxId = finalLogExtractor.extractNext( buffer );
if ( extractedTxId == -1 )
{
throw new RuntimeException(
"Transaction "
+ finalTxId
+ " is missing and can't be extracted from "
+ dataSource.getName()
+ ". Was about to extract "
+ startTxId + " to "
+ endTxId );
}
if ( extractedTxId != finalTxId )
{
throw new RuntimeException(
"Expected txId " + finalTxId
+ ", but was "
+ extractedTxId );
}
}
catch ( IOException e )
{
throw new RuntimeException( e );
}
}
};
| false
|
enterprise_com_src_main_java_org_neo4j_com_ServerUtil.java
|
4,416
|
/**
 * Minimal in-memory FileSystemAbstraction: it only tracks which files have been
 * created through it, so that open()/fileExists() can react accordingly. Every
 * other operation is a no-op returning a dummy value.
 */
private class StubFileSystemAbstraction implements FileSystemAbstraction
{
    // Files "created" through this stub; membership drives open() and fileExists().
    private final List<File> files = new ArrayList<>();

    @Override
    public StoreChannel open( File fileName, String mode ) throws IOException
    {
        if ( files.contains( fileName ) )
        {
            // A channel supporting nothing but close(); enough for the code under test.
            return new AbstractStoreChannel() {
                @Override
                public void close() throws IOException
                {
                }
            };
        }
        throw new FileNotFoundException( fileName.getPath() );
    }

    @Override
    public OutputStream openAsOutputStream( File fileName, boolean append ) throws IOException
    {
        return null;
    }

    @Override
    public InputStream openAsInputStream( File fileName ) throws IOException
    {
        return null;
    }

    @Override
    public Reader openAsReader( File fileName, String encoding ) throws IOException
    {
        return null;
    }

    @Override
    public Writer openAsWriter( File fileName, String encoding, boolean append ) throws IOException
    {
        return null;
    }

    @Override
    public FileLock tryLock( File fileName, StoreChannel channel ) throws IOException
    {
        return null;
    }

    // Registers the file as existing; no channel is actually provided.
    @Override
    public StoreChannel create( File fileName ) throws IOException
    {
        files.add( fileName );
        return null;
    }

    @Override
    public boolean fileExists( File fileName )
    {
        return files.contains( fileName );
    }

    @Override
    public boolean mkdir( File fileName )
    {
        return false;
    }

    @Override
    public void mkdirs( File fileName ) throws IOException
    {
    }

    @Override
    public long getFileSize( File fileName )
    {
        return 0;
    }

    // NOTE(review): removes the file but always reports false — confirm that callers
    // ignore the return value.
    @Override
    public boolean deleteFile( File fileName )
    {
        files.remove( fileName );
        return false;
    }

    @Override
    public void deleteRecursively( File directory ) throws IOException
    {
    }

    @Override
    public boolean renameFile( File from, File to ) throws IOException
    {
        return false;
    }

    @Override
    public File[] listFiles( File directory )
    {
        return new File[0];
    }

    @Override
    public boolean isDirectory( File file )
    {
        return false;
    }

    @Override
    public void moveToDirectory( File file, File toDirectory ) throws IOException
    {
    }

    @Override
    public void copyFile( File from, File to ) throws IOException
    {
    }

    @Override
    public void copyRecursively( File fromDirectory, File toDirectory ) throws IOException
    {
    }

    @Override
    public <K extends ThirdPartyFileSystem> K getOrCreateThirdPartyFileSystem( Class<K> clazz, Function<Class<K>,
            K> creator )
    {
        return null;
    }
}
| false
|
enterprise_com_src_test_java_org_neo4j_com_ServerUtilTest.java
|
4,417
|
/**
 * ResourceIterator over a fixed list of file names that creates each file on the
 * (stub) file system as it is returned, optionally deleting selected files again
 * right away (see {@link #deleteBeforeCopy(String)}) to simulate files vanishing
 * between being listed and being copied.
 */
private static class FileResourceIterator implements ResourceIterator<File>
{
    private final FileSystemAbstraction fs;
    private final TargetDirectory.TestDirectory testDirectory;
    // Remaining file names to serve.
    private final Queue<String> files;
    // Name pulled by the latest hasNext() call, consumed by next().
    private String nextFilePath;
    // Files to delete immediately after creation, before being handed out.
    private final List<String> filesToDelete = new ArrayList<>();

    public FileResourceIterator( FileSystemAbstraction fs, TargetDirectory.TestDirectory testDirectory,
            String... files )
    {
        this.fs = fs;
        this.testDirectory = testDirectory;
        // ArrayBlockingQueue forbids capacity 0, hence the minimum capacity of 1.
        this.files = new ArrayBlockingQueue<>( files.length == 0 ? 1 : files.length, true, Arrays.asList( files ) );
    }

    @Override
    public void close()
    {
    }

    // NOTE: hasNext() advances the iterator; calling it twice skips an element.
    @Override
    public boolean hasNext()
    {
        nextFilePath = files.poll();
        return nextFilePath != null;
    }

    @Override
    public File next()
    {
        File file = new File( String.format( "%s/%s", testDirectory.directory(), nextFilePath ) );
        try
        {
            fs.create( file );
        }
        catch ( IOException e )
        {
            // Creation failures are unexpected in these tests; log and carry on.
            e.printStackTrace();
        }
        if ( filesToDelete.contains( nextFilePath ) )
        {
            fs.deleteFile( file );
        }
        return file;
    }

    @Override
    public void remove()
    {
    }

    /** Marks a file to be deleted right after creation, simulating a missing file. */
    public void deleteBeforeCopy( String filePath )
    {
        filesToDelete.add( filePath );
    }
}
| false
|
enterprise_com_src_test_java_org_neo4j_com_ServerUtilTest.java
|
4,418
|
/**
 * Tests for ServerUtil.rotateLogsAndStreamStoreFiles(): which files get streamed to
 * the StoreWriter, and how missing files are handled, against a stubbed file system
 * and mocked XA data source machinery.
 */
public class ServerUtilTest
{
    @Rule
    public TargetDirectory.TestDirectory testDirectory = TargetDirectory.testDirForTest( getClass() );

    /**
     * With includeLogicalLogs == false, store files are streamed but logical logs
     * are not.
     */
    @Test
    public void shouldIgnoreLogicalLogsWhenCopyingFilesForBackup() throws IOException
    {
        // given
        final FileSystemAbstraction fs = new StubFileSystemAbstraction();
        XaDataSource dataSource = mock( XaDataSource.class );
        FileResourceIterator storeFiles = new FileResourceIterator( fs, testDirectory, "neostore.nodestore.db" );
        FileResourceIterator logicalLogs = new FileResourceIterator( fs, testDirectory, "nioneo_logical.log.v0" );
        when( dataSource.listStoreFiles() ).thenReturn( storeFiles );
        when( dataSource.listLogicalLogs() ).thenReturn( logicalLogs );
        when( dataSource.getBranchId() ).thenReturn( "branch".getBytes() );
        when( dataSource.getName() ).thenReturn( "branch" );
        XaContainer xaContainer = mock( XaContainer.class );
        when( dataSource.getXaContainer() ).thenReturn( xaContainer );
        XaLogicalLog xaLogicalLog = mock( XaLogicalLog.class );
        when( xaContainer.getLogicalLog() ).thenReturn( xaLogicalLog );
        XaResourceManager xaResourceManager = mock( XaResourceManager.class );
        when( xaContainer.getResourceManager() ).thenReturn( xaResourceManager );
        XaDataSourceManager dsManager = new XaDataSourceManager( StringLogger.DEV_NULL );
        dsManager.registerDataSource( dataSource );
        KernelPanicEventGenerator kernelPanicEventGenerator = mock( KernelPanicEventGenerator.class );
        StoreWriter storeWriter = mock( StoreWriter.class );
        // when (includeLogicalLogs = false)
        ServerUtil.rotateLogsAndStreamStoreFiles( testDirectory.absolutePath(), dsManager, kernelPanicEventGenerator,
                StringLogger.DEV_NULL, false, storeWriter, fs, BackupMonitor.NONE );
        // then
        verify( storeWriter ).write( eq( "neostore.nodestore.db" ), any( ReadableByteChannel.class ),
                any( ByteBuffer.class ), any( Boolean.class ) );
        verify( storeWriter, never() ).write( eq( "nioneo_logical.log.v0" ), any( ReadableByteChannel.class ),
                any( ByteBuffer.class ), any( Boolean.class ) );
    }

    /**
     * With includeLogicalLogs == true, logical log files are streamed as well.
     */
    @Test
    public void shouldCopyLogicalLogFile() throws IOException
    {
        // given
        final FileSystemAbstraction fs = new StubFileSystemAbstraction();
        XaDataSource dataSource = mock( XaDataSource.class );
        FileResourceIterator storeFiles = new FileResourceIterator( fs, testDirectory );
        FileResourceIterator logicalLogs = new FileResourceIterator( fs, testDirectory, "nioneo_logical.log.v0" );
        when( dataSource.listStoreFiles() ).thenReturn( storeFiles );
        when( dataSource.listLogicalLogs() ).thenReturn( logicalLogs );
        when( dataSource.getBranchId() ).thenReturn( "branch".getBytes() );
        when( dataSource.getName() ).thenReturn( "branch" );
        XaContainer xaContainer = mock( XaContainer.class );
        when( dataSource.getXaContainer() ).thenReturn( xaContainer );
        XaLogicalLog xaLogicalLog = mock( XaLogicalLog.class );
        when( xaContainer.getLogicalLog() ).thenReturn( xaLogicalLog );
        XaResourceManager xaResourceManager = mock( XaResourceManager.class );
        when( xaContainer.getResourceManager() ).thenReturn( xaResourceManager );
        XaDataSourceManager dsManager = new XaDataSourceManager( StringLogger.DEV_NULL );
        dsManager.registerDataSource( dataSource );
        KernelPanicEventGenerator kernelPanicEventGenerator = mock( KernelPanicEventGenerator.class );
        StoreWriter storeWriter = mock( StoreWriter.class );
        // when (includeLogicalLogs = true)
        ServerUtil.rotateLogsAndStreamStoreFiles( testDirectory.absolutePath(), dsManager, kernelPanicEventGenerator,
                StringLogger.DEV_NULL, true, storeWriter, fs, BackupMonitor.NONE );
        // then
        verify( storeWriter ).write( eq( "nioneo_logical.log.v0" ), any( ReadableByteChannel.class ),
                any( ByteBuffer.class ), any( Boolean.class ) );
    }

    /**
     * A logical log file that disappears before being copied is skipped silently;
     * the remaining store files are still streamed.
     */
    @Test
    public void shouldNotThrowFileNotFoundExceptionWhenTryingToCopyAMissingLogicalLogFile() throws IOException
    {
        // given
        final FileSystemAbstraction fs = new StubFileSystemAbstraction();
        XaDataSource dataSource = mock( XaDataSource.class );
        FileResourceIterator storeFiles = new FileResourceIterator( fs, testDirectory, "neostore.nodestore.db" );
        FileResourceIterator logicalLogs = new FileResourceIterator( fs, testDirectory, "nioneo_logical.log.v0" );
        logicalLogs.deleteBeforeCopy( "nioneo_logical.log.v0" );
        when( dataSource.listStoreFiles() ).thenReturn( storeFiles );
        when( dataSource.listLogicalLogs() ).thenReturn( logicalLogs );
        when( dataSource.getBranchId() ).thenReturn( "branch".getBytes() );
        when( dataSource.getName() ).thenReturn( "branch" );
        XaContainer xaContainer = mock( XaContainer.class );
        when( dataSource.getXaContainer() ).thenReturn( xaContainer );
        XaResourceManager xaResourceManager = mock( XaResourceManager.class );
        when( xaContainer.getResourceManager() ).thenReturn( xaResourceManager );
        XaDataSourceManager dsManager = new XaDataSourceManager( StringLogger.DEV_NULL );
        dsManager.registerDataSource( dataSource );
        KernelPanicEventGenerator kernelPanicEventGenerator = mock( KernelPanicEventGenerator.class );
        StoreWriter storeWriter = mock( StoreWriter.class );
        // when
        ServerUtil.rotateLogsAndStreamStoreFiles( testDirectory.absolutePath(), dsManager, kernelPanicEventGenerator,
                StringLogger.DEV_NULL, true, storeWriter, fs, BackupMonitor.NONE );
        // then
        verify( storeWriter ).write( eq( "neostore.nodestore.db" ), any( ReadableByteChannel.class ),
                any( ByteBuffer.class ), any( Boolean.class ) );
    }

    /**
     * A missing store file, in contrast, is fatal: the failure surfaces as a
     * ServerFailureException caused by FileNotFoundException.
     */
    @Test
    public void shouldThrowFileNotFoundExceptionWhenTryingToCopyAStoreFileWhichDoesNotExist() throws IOException
    {
        // given
        final FileSystemAbstraction fs = new StubFileSystemAbstraction();
        XaDataSource dataSource = mock( XaDataSource.class );
        FileResourceIterator storeFiles = new FileResourceIterator( fs, testDirectory, "neostore.nodestore.db" );
        storeFiles.deleteBeforeCopy( "neostore.nodestore.db" );
        FileResourceIterator logicalLogs = new FileResourceIterator( fs, testDirectory );
        when( dataSource.listStoreFiles() ).thenReturn( storeFiles );
        when( dataSource.listLogicalLogs() ).thenReturn( logicalLogs );
        when( dataSource.getBranchId() ).thenReturn( "branch".getBytes() );
        when( dataSource.getName() ).thenReturn( "branch" );
        XaContainer xaContainer = mock( XaContainer.class );
        when( dataSource.getXaContainer() ).thenReturn( xaContainer );
        XaResourceManager xaResourceManager = mock( XaResourceManager.class );
        when( xaContainer.getResourceManager() ).thenReturn( xaResourceManager );
        XaDataSourceManager dsManager = new XaDataSourceManager( StringLogger.DEV_NULL );
        dsManager.registerDataSource( dataSource );
        KernelPanicEventGenerator kernelPanicEventGenerator = mock( KernelPanicEventGenerator.class );
        StoreWriter storeWriter = mock( StoreWriter.class );
        // when
        try
        {
            ServerUtil.rotateLogsAndStreamStoreFiles( testDirectory.absolutePath(), dsManager,
                    kernelPanicEventGenerator,
                    StringLogger.DEV_NULL, true, storeWriter, fs, BackupMonitor.NONE );
            fail( "should have thrown exception" );
        }
        catch ( ServerFailureException e )
        {
            // then
            assertEquals( java.io.FileNotFoundException.class, e.getCause().getClass() );
        }
    }

    /**
     * ResourceIterator over a fixed list of file names that creates each file on the
     * stub file system as it is returned, optionally deleting selected files again
     * right away (deleteBeforeCopy) to simulate files vanishing before being copied.
     */
    private static class FileResourceIterator implements ResourceIterator<File>
    {
        private final FileSystemAbstraction fs;
        private final TargetDirectory.TestDirectory testDirectory;
        // Remaining file names to serve.
        private final Queue<String> files;
        // Name pulled by the latest hasNext() call, consumed by next().
        private String nextFilePath;
        // Files to delete immediately after creation, before being handed out.
        private final List<String> filesToDelete = new ArrayList<>();

        public FileResourceIterator( FileSystemAbstraction fs, TargetDirectory.TestDirectory testDirectory,
                String... files )
        {
            this.fs = fs;
            this.testDirectory = testDirectory;
            // ArrayBlockingQueue forbids capacity 0, hence the minimum capacity of 1.
            this.files = new ArrayBlockingQueue<>( files.length == 0 ? 1 : files.length, true, Arrays.asList( files ) );
        }

        @Override
        public void close()
        {
        }

        // NOTE: hasNext() advances the iterator; calling it twice skips an element.
        @Override
        public boolean hasNext()
        {
            nextFilePath = files.poll();
            return nextFilePath != null;
        }

        @Override
        public File next()
        {
            File file = new File( String.format( "%s/%s", testDirectory.directory(), nextFilePath ) );
            try
            {
                fs.create( file );
            }
            catch ( IOException e )
            {
                // Creation failures are unexpected in these tests; log and carry on.
                e.printStackTrace();
            }
            if ( filesToDelete.contains( nextFilePath ) )
            {
                fs.deleteFile( file );
            }
            return file;
        }

        @Override
        public void remove()
        {
        }

        /** Marks a file to be deleted right after creation, simulating a missing file. */
        public void deleteBeforeCopy( String filePath )
        {
            filesToDelete.add( filePath );
        }
    }

    /**
     * Minimal in-memory FileSystemAbstraction: it only tracks which files have been
     * created through it, so open()/fileExists() can react accordingly. Every other
     * operation is a no-op returning a dummy value.
     */
    private class StubFileSystemAbstraction implements FileSystemAbstraction
    {
        // Files "created" through this stub; membership drives open() and fileExists().
        private final List<File> files = new ArrayList<>();

        @Override
        public StoreChannel open( File fileName, String mode ) throws IOException
        {
            if ( files.contains( fileName ) )
            {
                // A channel supporting nothing but close(); enough for the code under test.
                return new AbstractStoreChannel() {
                    @Override
                    public void close() throws IOException
                    {
                    }
                };
            }
            throw new FileNotFoundException( fileName.getPath() );
        }

        @Override
        public OutputStream openAsOutputStream( File fileName, boolean append ) throws IOException
        {
            return null;
        }

        @Override
        public InputStream openAsInputStream( File fileName ) throws IOException
        {
            return null;
        }

        @Override
        public Reader openAsReader( File fileName, String encoding ) throws IOException
        {
            return null;
        }

        @Override
        public Writer openAsWriter( File fileName, String encoding, boolean append ) throws IOException
        {
            return null;
        }

        @Override
        public FileLock tryLock( File fileName, StoreChannel channel ) throws IOException
        {
            return null;
        }

        // Registers the file as existing; no channel is actually provided.
        @Override
        public StoreChannel create( File fileName ) throws IOException
        {
            files.add( fileName );
            return null;
        }

        @Override
        public boolean fileExists( File fileName )
        {
            return files.contains( fileName );
        }

        @Override
        public boolean mkdir( File fileName )
        {
            return false;
        }

        @Override
        public void mkdirs( File fileName ) throws IOException
        {
        }

        @Override
        public long getFileSize( File fileName )
        {
            return 0;
        }

        // NOTE(review): removes the file but always reports false — confirm callers
        // ignore the return value.
        @Override
        public boolean deleteFile( File fileName )
        {
            files.remove( fileName );
            return false;
        }

        @Override
        public void deleteRecursively( File directory ) throws IOException
        {
        }

        @Override
        public boolean renameFile( File from, File to ) throws IOException
        {
            return false;
        }

        @Override
        public File[] listFiles( File directory )
        {
            return new File[0];
        }

        @Override
        public boolean isDirectory( File file )
        {
            return false;
        }

        @Override
        public void moveToDirectory( File file, File toDirectory ) throws IOException
        {
        }

        @Override
        public void copyFile( File from, File to ) throws IOException
        {
        }

        @Override
        public void copyRecursively( File fromDirectory, File toDirectory ) throws IOException
        {
        }

        @Override
        public <K extends ThirdPartyFileSystem> K getOrCreateThirdPartyFileSystem( Class<K> clazz, Function<Class<K>,
                K> creator )
        {
            return null;
        }
    }
}
| false
|
enterprise_com_src_test_java_org_neo4j_com_ServerUtilTest.java
|
4,419
|
{
@Override
public boolean accept( URI item )
{
return item.getScheme().equals( scheme );
}
}, uris ) );
| false
|
enterprise_com_src_main_java_org_neo4j_com_ServerUtil.java
|
4,420
|
{
// Anonymous TxHandler for full store copies: the first time each data source
// appears in the stream, rewind its last-committed tx id to just before the
// incoming transaction so the stream is applied from the correct position.
private final Set<String> visitedDataSources = new HashSet<String>();
@Override
public void accept( Triplet<String, Long, TxExtractor> tx, XaDataSource dataSource )
{
if ( visitedDataSources.add( tx.first() ) )
{
dataSource.setLastCommittedTxId( tx.second() - 1 );
}
}
@Override
public void done()
{ // Do nothing
}
};
| false
|
enterprise_com_src_main_java_org_neo4j_com_ServerUtil.java
|
4,421
|
{
// Anonymous no-op TxHandler: transactions are applied without any extra
// per-transaction bookkeeping (compare ServerUtil.NO_ACTION).
@Override
public void accept( Triplet<String, Long, TxExtractor> tx, XaDataSource dataSource )
{ // Do nothing
}
@Override
public void done()
{ // Do nothing
}
};
| false
|
enterprise_com_src_main_java_org_neo4j_com_ServerUtil.java
|
4,422
|
{
// Anonymous Predicate<Long> accepting every transaction id (no filtering).
@Override
public boolean accept( Long item )
{
return true;
}
};
| false
|
enterprise_com_src_main_java_org_neo4j_com_ServerUtil.java
|
4,423
|
{
// Anonymous TransactionStream body: iterates a pre-built list of
// (datasource, txId, extractor) triplets and, on close, releases every
// LogExtractor that produced them (compare ServerUtil.createTransactionStream).
private final Iterator<Triplet<String, Long, TxExtractor>> iterator = stream.iterator();
@Override
protected Triplet<String, Long, TxExtractor> fetchNextOrNull()
{
return iterator.hasNext() ? iterator.next() : null;
}
@Override
public void close()
{
for ( LogExtractor extractor : logExtractors )
{
extractor.close();
}
}
};
| false
|
enterprise_com_src_main_java_org_neo4j_com_ServerUtil.java
|
4,424
|
/**
 * Static helpers shared by the master/slave communication layer: logical-log
 * rotation, store-file streaming (backup / full copy) and packing/applying
 * transaction streams between master and slaves.
 */
public class ServerUtil
{
// Resolves the store directory to a canonical absolute File, falling back
// to the plain absolute form if canonicalisation fails with an IOException.
private static File getBaseDir( String storeDir )
{
File file = new File( storeDir );
try
{
return file.getCanonicalFile().getAbsoluteFile();
}
catch ( IOException e )
{
return file.getAbsoluteFile();
}
}
/**
 * Given a directory and a path under it, return filename of the path
 * relative to the directory.
 *
 * @param baseDir The base directory, containing the storeFile
 * @param storeFile The store file path, must be contained under
 * <code>baseDir</code>
 * @return The relative path of <code>storeFile</code> to
 * <code>baseDir</code>
 * @throws IOException As per {@link File#getCanonicalPath()}
 */
private static String relativePath( File baseDir, File storeFile )
throws IOException
{
String prefix = baseDir.getCanonicalPath();
String path = storeFile.getCanonicalPath();
if ( !path.startsWith( prefix ) )
{
throw new FileNotFoundException();
}
path = path.substring( prefix.length() );
if ( path.startsWith( File.separator ) )
{
return path.substring( 1 );
}
return path;
}
/**
 * Rotates the logical log of every registered data source and returns the
 * last applied transaction per data source. A rotation failure generates a
 * kernel panic event and aborts with {@link ServerFailureException}.
 */
public static Tx[] rotateLogs( XaDataSourceManager dsManager, KernelPanicEventGenerator kernelPanicEventGenerator, StringLogger logger )
{
Collection<XaDataSource> sources = dsManager.getAllRegisteredDataSources();
Tx[] appliedTransactions = new Tx[sources.size()];
int i = 0;
for ( XaDataSource ds : sources )
{
try
{
appliedTransactions[i++] = RequestContext.lastAppliedTx( ds.getName(), ds.rotateLogicalLog() );
}
catch ( IOException e )
{
// TODO: what about error message?
logger.logMessage( "Unable to rotate log for " + ds, e );
// TODO If we do it in rotate() the transaction semantics for such a failure will change
// slightly and that has got to be verified somehow. But to have it in there feels much better.
kernelPanicEventGenerator.generateEvent( ErrorState.TX_MANAGER_NOT_OK, new Throwable() );
throw new ServerFailureException( e );
}
}
return appliedTransactions;
}
/**
 * Rotates all logical logs and then streams every data source's store files
 * (and optionally its logical logs) through the given {@link StoreWriter}.
 * Returns an anonymous {@link RequestContext} capturing the rotation point.
 */
public static RequestContext rotateLogsAndStreamStoreFiles( String storeDir,
XaDataSourceManager dsManager,
KernelPanicEventGenerator kernelPanicEventGenerator,
StringLogger logger,
boolean includeLogicalLogs,
StoreWriter writer,
FileSystemAbstraction fs,
BackupMonitor backupMonitor )
{
File baseDir = getBaseDir( storeDir );
RequestContext context = RequestContext.anonymous( rotateLogs( dsManager, kernelPanicEventGenerator, logger ) );
backupMonitor.finishedRotatingLogicalLogs();
// Reused scratch buffer for copying all files; 1 MiB direct buffer.
ByteBuffer temporaryBuffer = ByteBuffer.allocateDirect( 1024 * 1024 );
for ( XaDataSource ds : dsManager.getAllRegisteredDataSources() )
{
copyStoreFiles( writer, fs, baseDir, temporaryBuffer, ds, backupMonitor );
if ( includeLogicalLogs )
{
copyLogicalLogs( writer, fs, baseDir, temporaryBuffer, ds, backupMonitor );
}
}
return context;
}
// Streams each logical-log file of the data source through the writer.
// A FileNotFoundException for an individual log is deliberately swallowed,
// since log pruning may remove files after they were listed.
private static void copyLogicalLogs( StoreWriter writer, FileSystemAbstraction fs, File baseDir,
ByteBuffer temporaryBuffer, XaDataSource ds, BackupMonitor backupMonitor )
{
try ( ResourceIterator<File> files = ds.listLogicalLogs() )
{
while ( files.hasNext() )
{
File storeFile = files.next();
try
{
copyFile( writer, fs, baseDir, temporaryBuffer, storeFile, backupMonitor );
}
catch ( FileNotFoundException ignored )
{
// swallow this - log pruning may have happened since we got list of files to copy
}
}
}
catch ( IOException e )
{
throw new ServerFailureException( e );
}
}
// Streams each store file of the data source through the writer. Unlike
// logical logs, a missing store file here is a genuine failure.
private static void copyStoreFiles( StoreWriter writer, FileSystemAbstraction fs, File baseDir,
ByteBuffer temporaryBuffer, XaDataSource ds, BackupMonitor backupMonitor )
{
try ( ResourceIterator<File> files = ds.listStoreFiles() )
{
while ( files.hasNext() )
{
File storeFile = files.next();
copyFile( writer, fs, baseDir, temporaryBuffer, storeFile, backupMonitor );
}
}
catch ( IOException e )
{
throw new ServerFailureException( e );
}
}
// Opens the file read-only and hands its channel to the writer under the
// file's path relative to baseDir, notifying the monitor before and after.
private static void copyFile( StoreWriter writer, FileSystemAbstraction fs, File baseDir,
ByteBuffer temporaryBuffer, File storeFile, BackupMonitor backupMonitor ) throws IOException
{
backupMonitor.streamingFile( storeFile );
try ( StoreChannel fileChannel = fs.open( storeFile, "r" ) )
{
writer.write( relativePath( baseDir, storeFile ), fileChannel, temporaryBuffer,
storeFile.length() > 0 );
}
backupMonitor.streamedFile( storeFile );
}
/**
 * For a given {@link XaDataSource} it extracts the transaction stream from
 * startTxId up to endTxId (inclusive) in the provided {@link List} and
 * returns the {@link LogExtractor} used to create the stream.
 *
 * @param dataSource The {@link XaDataSource} from which to extract the
 * transactions
 * @param startTxId The first tx id in the stream
 * @param endTxId The last tx id in the stream
 * @param stream A list to contain the transaction stream - can already
 * contain transactions from other data sources.
 * @return The {@link LogExtractor} used to create the transaction stream.
 */
private static LogExtractor getTransactionStreamForDatasource(
final XaDataSource dataSource, final long startTxId,
final long endTxId,
final List<Triplet<String, Long, TxExtractor>> stream,
Predicate<Long> filter )
{
LogExtractor logExtractor = null;
try
{
final long serverLastTx = dataSource.getLastCommittedTxId();
if ( serverLastTx < endTxId )
{
throw new RuntimeException(
"Was requested to extract transaction ids " + startTxId
+ " to " + endTxId + " from data source "
+ dataSource.getName()
+ " but largest transaction id in server is "
+ serverLastTx );
}
try
{
// TODO check here for startTxId >= endTxId and exit early
logExtractor = dataSource.getLogExtractor( startTxId, endTxId );
}
catch ( IOException ioe )
{
throw new RuntimeException( ioe );
}
final LogExtractor finalLogExtractor = logExtractor;
for ( long txId = startTxId; txId <= endTxId; txId++ )
{
if ( filter.accept( txId ) )
{
final long finalTxId = txId;
// Lazy extractor: the transaction bytes are only pulled from the
// log when the receiving side asks for them.
TxExtractor extractor = new TxExtractor()
{
@Override
public ReadableByteChannel extract()
{
InMemoryLogBuffer buffer = new InMemoryLogBuffer();
extract( buffer );
return buffer;
}
@Override
public void extract( LogBuffer buffer )
{
try
{
long extractedTxId = finalLogExtractor.extractNext( buffer );
if ( extractedTxId == -1 )
{
throw new RuntimeException(
"Transaction "
+ finalTxId
+ " is missing and can't be extracted from "
+ dataSource.getName()
+ ". Was about to extract "
+ startTxId + " to "
+ endTxId );
}
if ( extractedTxId != finalTxId )
{
throw new RuntimeException(
"Expected txId " + finalTxId
+ ", but was "
+ extractedTxId );
}
}
catch ( IOException e )
{
throw new RuntimeException( e );
}
}
};
stream.add( Triplet.of( dataSource.getName(), txId,
extractor ) );
}
}
return logExtractor;
}
catch ( Throwable t )
{
/*
* If there's an error in here then close the log extractors,
* otherwise if we're successful the TransactionStream will close it.
*/
if ( logExtractor != null )
{
logExtractor.close();
}
throw Exceptions.launderedException( t );
}
}
/**
 * After having created the response for a slave, this method compares its
 * context against the local (server's) context and creates a transaction
 * stream containing all the transactions the slave does not currently
 * have. This way every response returned acts as an update for the slave.
 *
 * @param <T> The type of the response
 * @param context The slave context
 * @param response The response being packed
 * @param txFilter A {@link Predicate} to apply on each txid, selecting only
 * those that evaluate to true
 * @return The response, packed with the latest transactions
 */
// TODO update javadoc of ServerUtil.packResponse
public static <T> Response<T> packResponse( StoreId storeId, XaDataSourceManager dsManager,
RequestContext context, T response, Predicate<Long> txFilter )
{
List<Triplet<String, Long, TxExtractor>> stream = new ArrayList<Triplet<String, Long, TxExtractor>>();
Set<String> resourceNames = new HashSet<>();
final List<LogExtractor> logExtractors = new ArrayList<LogExtractor>();
try
{
for ( Tx txEntry : context.lastAppliedTransactions() )
{
String resourceName = txEntry.getDataSourceName();
final XaDataSource dataSource = dsManager.getXaDataSource( resourceName );
if ( dataSource == null )
{
throw new RuntimeException( "No data source '" + resourceName + "' found" );
}
resourceNames.add( resourceName );
final long serverLastTx = dataSource.getLastCommittedTxId();
if ( txEntry.getTxId() >= serverLastTx )
{
// Slave is already up to date for this data source.
continue;
}
LogExtractor logExtractor = getTransactionStreamForDatasource(
dataSource, txEntry.getTxId() + 1, serverLastTx, stream,
txFilter );
logExtractors.add( logExtractor );
}
return new Response<>( response, storeId, createTransactionStream( resourceNames,
stream, logExtractors ), ResourceReleaser.NO_OP );
}
catch ( Throwable t )
{ // If there's an error in here then close the log extractors, otherwise if we're
// successful the TransactionStream will close it.
for ( LogExtractor extractor : logExtractors )
{
extractor.close();
}
throw Exceptions.launderedException( t );
}
}
/**
 * Given a data source name, a start and an end tx, this method extracts
 * these transactions (inclusive) in a transaction stream and encapsulates
 * them in a {@link Response} object, ready to be returned to the slave.
 *
 * @param graphDb The graph database to use
 * @param dataSourceName The name of the data source to extract transactions
 * from
 * @param startTx The first tx in the returned stream
 * @param endTx The last tx in the returned stream
 * @return A {@link Response} object containing a transaction stream with
 * the requested transactions from the specified data source.
 */
public static Response<Void> getTransactions( GraphDatabaseAPI graphDb,
String dataSourceName, long startTx, long endTx )
{
List<Triplet<String, Long, TxExtractor>> stream = new ArrayList<>();
XaDataSourceManager dsManager = dsManager( graphDb );
final XaDataSource dataSource = dsManager.getXaDataSource( dataSourceName );
if ( dataSource == null )
{
throw new RuntimeException( "No data source '" + dataSourceName
+ "' found" );
}
List<LogExtractor> extractors = startTx < endTx ? Collections.singletonList(
getTransactionStreamForDatasource( dataSource, startTx, endTx, stream, ServerUtil.ALL ) ) :
Collections.<LogExtractor>emptyList();
return new Response<>( null, graphDb.storeId(), createTransactionStream(
Collections.singletonList( dataSourceName ), stream,
extractors ), ResourceReleaser.NO_OP );
}
// Fetches the XaDataSourceManager from the database's dependency resolver.
private static XaDataSourceManager dsManager( GraphDatabaseAPI graphDb )
{
return graphDb.getDependencyResolver().resolveDependency( XaDataSourceManager.class );
}
// Wraps the pre-built triplet list in a TransactionStream whose close()
// releases every LogExtractor that produced the triplets.
private static TransactionStream createTransactionStream( Collection<String> resourceNames,
final List<Triplet<String, Long, TxExtractor>> stream,
final List<LogExtractor> logExtractors )
{
return new TransactionStream( resourceNames.toArray( new String[resourceNames.size()] ) )
{
private final Iterator<Triplet<String, Long, TxExtractor>> iterator = stream.iterator();
@Override
protected Triplet<String, Long, TxExtractor> fetchNextOrNull()
{
return iterator.hasNext() ? iterator.next() : null;
}
@Override
public void close()
{
for ( LogExtractor extractor : logExtractors )
{
extractor.close();
}
}
};
}
/** Packs a response with an empty transaction stream (no slave catch-up data). */
public static <T> Response<T> packResponseWithoutTransactionStream( StoreId storeId, T response )
{
return new Response<T>( response, storeId, TransactionStream.EMPTY,
ResourceReleaser.NO_OP );
}
/** A tx-id filter that accepts every transaction (i.e. no filtering). */
public static final Predicate<Long> ALL = new Predicate<Long>()
{
@Override
public boolean accept( Long item )
{
return true;
}
};
/**
 * Applies every transaction in the response's stream to its corresponding
 * data source, invoking the handler before each apply and once when done.
 * The response is always closed, even on failure.
 */
public static <T> void applyReceivedTransactions( Response<T> response, XaDataSourceManager xaDsm,
TxHandler txHandler ) throws IOException
{
try
{
for ( Triplet<String, Long, TxExtractor> tx : IteratorUtil.asIterable( response.transactions() ) )
{
String resourceName = tx.first();
XaDataSource dataSource = xaDsm.getXaDataSource( resourceName );
txHandler.accept( tx, dataSource );
ReadableByteChannel txStream = tx.third().extract();
try
{
dataSource.applyCommittedTransaction( tx.second(), txStream );
}
finally
{
txStream.close();
}
}
txHandler.done();
}
finally
{
response.close();
}
}
/**
 * Callback invoked for each received transaction before it is applied,
 * plus a completion callback (see {@link #applyReceivedTransactions}).
 */
public interface TxHandler
{
void accept( Triplet<String, Long, TxExtractor> tx, XaDataSource dataSource );
void done();
}
/** A TxHandler that does nothing for either callback. */
public static final TxHandler NO_ACTION = new TxHandler()
{
@Override
public void accept( Triplet<String, Long, TxExtractor> tx, XaDataSource dataSource )
{ // Do nothing
}
@Override
public void done()
{ // Do nothing
}
};
/**
 * TxHandler for full store copies: on the first transaction seen for each
 * data source, rewinds the data source's last-committed tx id to just
 * before the incoming transaction so the stream applies from the right spot.
 */
public static TxHandler txHandlerForFullCopy()
{
return new TxHandler()
{
private final Set<String> visitedDataSources = new HashSet<String>();
@Override
public void accept( Triplet<String, Long, TxExtractor> tx, XaDataSource dataSource )
{
if ( visitedDataSources.add( tx.first() ) )
{
dataSource.setLastCommittedTxId( tx.second() - 1 );
}
}
@Override
public void done()
{ // Do nothing
}
};
}
/** Returns the first URI in {@code uris} whose scheme equals {@code scheme}. */
public static URI getUriForScheme( final String scheme, Iterable<URI> uris )
{
return first( filter( new Predicate<URI>()
{
@Override
public boolean accept( URI item )
{
return item.getScheme().equals( scheme );
}
}, uris ) );
}
/**
 * Figure out the host string of a given socket address, similar to the Java 7 InetSocketAddress.getHostString().
 *
 * Calls to this should be replaced once Neo4j is Java 7 only.
 *
 * @param socketAddress the address to derive a host string from
 * @return the host name if unresolved, otherwise the literal IP address
 */
public static String getHostString(InetSocketAddress socketAddress )
{
if (socketAddress.isUnresolved())
{
return socketAddress.getHostName();
}
else
{
return socketAddress.getAddress().getHostAddress();
}
}
}
| false
|
enterprise_com_src_main_java_org_neo4j_com_ServerUtil.java
|
4,425
|
public abstract class Server<T, R> extends SimpleChannelHandler implements ChannelPipelineFactory, Lifecycle
{
private final ByteCounterMonitor byteCounterMonitor;
private final RequestMonitor requestMonitor;
private InetSocketAddress socketAddress;
private static final String INADDR_ANY = "0.0.0.0";
private final Clock clock;
public interface Configuration
{
long getOldChannelThreshold();
int getMaxConcurrentTransactions();
int getChunkSize();
HostnamePort getServerAddress();
}
static final byte INTERNAL_PROTOCOL_VERSION = 2;
// It's ok if there are more transactions, since these worker threads doesn't
// do any actual work themselves, but spawn off other worker threads doing the
// actual work. So this is more like a core Netty I/O pool worker size.
public final static int DEFAULT_MAX_NUMBER_OF_CONCURRENT_TRANSACTIONS = 200;
private ServerBootstrap bootstrap;
private final T requestTarget;
private ChannelGroup channelGroup;
private final Map<Channel, Pair<RequestContext, AtomicLong /*time last heard of*/>> connectedSlaveChannels =
new ConcurrentHashMap<Channel, Pair<RequestContext, AtomicLong>>();
private ExecutorService executor;
private ExecutorService workerExecutor;
private ExecutorService targetCallExecutor;
private final StringLogger msgLog;
private final Map<Channel, PartialRequest> partialRequests =
new ConcurrentHashMap<Channel, PartialRequest>();
private final Configuration config;
private final int frameLength;
private volatile boolean shuttingDown;
// Executor for channels that we know should be finished, but can't due to being
// active at the moment.
private ExecutorService unfinishedTransactionExecutor;
// This is because there's a bug in Netty causing some channelClosed/channelDisconnected
// events to not be sent. This is merely a safety net to catch the remained of the closed
// channels that netty doesn't tell us about.
private ScheduledExecutorService silentChannelExecutor;
private final byte applicationProtocolVersion;
private long oldChannelThresholdMillis;
private final TxChecksumVerifier txVerifier;
private int chunkSize;
public Server( T requestTarget, Configuration config, Logging logging, int frameLength,
byte applicationProtocolVersion, TxChecksumVerifier txVerifier, Clock clock, Monitors monitors )
{
this.requestTarget = requestTarget;
this.config = config;
this.frameLength = frameLength;
this.applicationProtocolVersion = applicationProtocolVersion;
this.msgLog = logging.getMessagesLog( getClass() );
this.txVerifier = txVerifier;
this.clock = clock;
this.byteCounterMonitor = monitors.newMonitor( ByteCounterMonitor.class, getClass() );
this.requestMonitor = monitors.newMonitor( RequestMonitor.class, getClass() );
}
@Override
public void init() throws Throwable
{
}
@Override
public void start() throws Throwable
{
this.oldChannelThresholdMillis = config.getOldChannelThreshold();
chunkSize = config.getChunkSize();
assertChunkSizeIsWithinFrameSize( chunkSize, frameLength );
executor = Executors.newCachedThreadPool( new NamedThreadFactory( "Server receiving" ) );
workerExecutor = Executors.newCachedThreadPool( new NamedThreadFactory( "Server receiving" ) );
targetCallExecutor = Executors.newCachedThreadPool( new NamedThreadFactory( getClass().getSimpleName() + ":"
+ config.getServerAddress().getPort() ) );
unfinishedTransactionExecutor = Executors.newScheduledThreadPool( 2, new NamedThreadFactory( "Unfinished " +
"transactions" ) );
silentChannelExecutor = Executors.newSingleThreadScheduledExecutor( new NamedThreadFactory( "Silent channel " +
"reaper" ) );
silentChannelExecutor.scheduleWithFixedDelay( silentChannelFinisher(), 5, 5, TimeUnit.SECONDS );
bootstrap = new ServerBootstrap( new NioServerSocketChannelFactory(
executor, workerExecutor, config.getMaxConcurrentTransactions() ) );
bootstrap.setPipelineFactory( this );
Channel channel = null;
socketAddress = null;
// Try binding to any port in the port range
int[] ports = config.getServerAddress().getPorts();
ChannelException ex = null;
for ( int port = ports[0]; port <= ports[1]; port++ )
{
if ( config.getServerAddress().getHost() == null || config.getServerAddress().getHost().equals( INADDR_ANY ))
{
socketAddress = new InetSocketAddress( port );
}
else
{
socketAddress = new InetSocketAddress( config.getServerAddress().getHost(), port );
}
try
{
channel = bootstrap.bind( socketAddress );
ex = null;
break;
}
catch ( ChannelException e )
{
ex = e;
}
}
if ( ex != null )
{
msgLog.logMessage( "Failed to bind server to " + socketAddress, ex );
executor.shutdown();
workerExecutor.shutdown();
throw new IOException( ex );
}
channelGroup = new DefaultChannelGroup();
channelGroup.add( channel );
msgLog.logMessage( getClass().getSimpleName() + " communication server started and bound to " + socketAddress );
}
@Override
public void stop() throws Throwable
{
// Close all open connections
shuttingDown = true;
targetCallExecutor.shutdown();
targetCallExecutor.awaitTermination( 10, TimeUnit.SECONDS );
unfinishedTransactionExecutor.shutdown();
unfinishedTransactionExecutor.awaitTermination( 10, TimeUnit.SECONDS );
silentChannelExecutor.shutdown();
silentChannelExecutor.awaitTermination( 10, TimeUnit.SECONDS );
channelGroup.close().awaitUninterruptibly();
bootstrap.releaseExternalResources();
}
@Override
public void shutdown() throws Throwable
{
}
public InetSocketAddress getSocketAddress()
{
return socketAddress;
}
private Runnable silentChannelFinisher()
{
// This poller is here because sometimes Netty doesn't tell us when channels are
// closed or disconnected. Most of the time it does, but this acts as a safety
// net for those we don't get notifications for. When the bug is fixed remove this.
return new Runnable()
{
@Override
public void run()
{
Map<Channel, Boolean/*starting to get old?*/> channels = new HashMap<Channel, Boolean>();
synchronized ( connectedSlaveChannels )
{
for ( Map.Entry<Channel, Pair<RequestContext, AtomicLong>> channel : connectedSlaveChannels
.entrySet() )
{ // Has this channel been silent for a while?
long age = System.currentTimeMillis() - channel.getValue().other().get();
if ( age > oldChannelThresholdMillis )
{
msgLog.logMessage( "Found a silent channel " + channel + ", " + age );
channels.put( channel.getKey(), Boolean.TRUE );
}
else if ( age > oldChannelThresholdMillis / 2 )
{ // Then add it to a list to check
channels.put( channel.getKey(), Boolean.FALSE );
}
}
}
for ( Map.Entry<Channel, Boolean> channel : channels.entrySet() )
{
if ( channel.getValue() || !channel.getKey().isOpen() || !channel.getKey().isConnected() ||
!channel.getKey().isBound() )
{
tryToFinishOffChannel( channel.getKey() );
}
}
}
};
}
/**
* Only exposed so that tests can control it. It's not configurable really.
*/
protected byte getInternalProtocolVersion()
{
return INTERNAL_PROTOCOL_VERSION;
}
@Override
public ChannelPipeline getPipeline() throws Exception
{
ChannelPipeline pipeline = Channels.pipeline();
addLengthFieldPipes( pipeline, frameLength );
pipeline.addLast( "serverHandler", this );
return pipeline;
}
@Override
public void channelOpen( ChannelHandlerContext ctx, ChannelStateEvent e ) throws Exception
{
channelGroup.add( e.getChannel() );
}
@Override
public void messageReceived( ChannelHandlerContext ctx, MessageEvent event )
throws Exception
{
try
{
ChannelBuffer message = (ChannelBuffer) event.getMessage();
handleRequest( message, event.getChannel() );
}
catch ( Throwable e )
{
msgLog.error( "Error handling request", e );
// Attempt to reply to the client
ChunkingChannelBuffer buffer = newChunkingBuffer( event.getChannel() );
buffer.clear( /* failure = */true );
writeFailureResponse( e, buffer );
ctx.getChannel().close();
tryToFinishOffChannel( ctx.getChannel() );
throw Exceptions.launderedException( e );
}
}
@Override
public void writeComplete( ChannelHandlerContext ctx, WriteCompletionEvent e ) throws Exception
{
/*
* This is here to ensure that channels that have stuff written to them for a long time, long transaction
* pulls and store copies (mainly the latter), will not timeout and have their transactions rolled back.
* This is actually not a problem, since both mentioned above have no transaction associated with them
* but it is more sanitary and leaves less exceptions in the logs
* Each time a write completes, simply update the corresponding channel's timestamp.
*/
Pair<RequestContext, AtomicLong> slave = connectedSlaveChannels.get( ctx.getChannel() );
if ( slave != null )
{
slave.other().set( clock.currentTimeMillis() );
super.writeComplete( ctx, e );
}
}
@Override
public void channelClosed( ChannelHandlerContext ctx, ChannelStateEvent e )
throws Exception
{
super.channelClosed( ctx, e );
if ( !ctx.getChannel().isOpen() )
{
tryToFinishOffChannel( ctx.getChannel() );
}
channelGroup.remove( e.getChannel() );
}
@Override
public void channelDisconnected( ChannelHandlerContext ctx, ChannelStateEvent e )
throws Exception
{
super.channelDisconnected( ctx, e );
if ( !ctx.getChannel().isConnected() )
{
tryToFinishOffChannel( ctx.getChannel() );
}
}
@Override
public void exceptionCaught( ChannelHandlerContext ctx, ExceptionEvent e ) throws Exception
{
msgLog.warn( "Exception from Netty", e.getCause() );
}
protected void tryToFinishOffChannel( Channel channel )
{
Pair<RequestContext, AtomicLong> slave = null;
slave = unmapSlave( channel );
if ( slave == null )
{
return;
}
tryToFinishOffChannel( channel, slave.first() );
}
protected void tryToFinishOffChannel( Channel channel, RequestContext slave )
{
try
{
finishOffChannel( channel, slave );
unmapSlave( channel );
}
catch ( Throwable failure ) // Unknown error trying to finish off the tx
{
submitSilent( unfinishedTransactionExecutor, newTransactionFinisher( slave ) );
if ( shouldLogFailureToFinishOffChannel( failure ) )
{
msgLog.logMessage( "Could not finish off dead channel", failure );
}
}
}
protected boolean shouldLogFailureToFinishOffChannel( Throwable failure )
{
return true;
}
private void submitSilent( ExecutorService service, Runnable job )
{
try
{
service.submit( job );
}
catch ( RejectedExecutionException e )
{ // Don't scream and shout if we're shutting down, because a rejected execution
// is expected at that time.
if ( !shuttingDown )
{
throw e;
}
}
}
private Runnable newTransactionFinisher( final RequestContext slave )
{
return new Runnable()
{
@Override
public void run()
{
try
{
finishOffChannel( null, slave );
}
catch ( Throwable e )
{
// Introduce some delay here. it becomes like a busy wait if it never succeeds
sleepNicely( 200 );
unfinishedTransactionExecutor.submit( this );
}
}
private void sleepNicely( int millis )
{
try
{
Thread.sleep( millis );
}
catch ( InterruptedException e )
{
Thread.interrupted();
}
}
};
}
protected void handleRequest( ChannelBuffer buffer, final Channel channel )
{
Byte continuation = readContinuationHeader( buffer, channel );
if ( continuation == null )
{
return;
}
if ( continuation == ChunkingChannelBuffer.CONTINUATION_MORE )
{
PartialRequest partialRequest = partialRequests.get( channel );
if ( partialRequest == null )
{
// This is the first chunk in a multi-chunk request
RequestType<T> type = getRequestContext( buffer.readByte() );
RequestContext context = readContext( buffer );
ChannelBuffer targetBuffer = mapSlave( channel, context );
partialRequest = new PartialRequest( type, context, targetBuffer );
partialRequests.put( channel, partialRequest );
}
partialRequest.add( buffer );
}
else
{
PartialRequest partialRequest = partialRequests.remove( channel );
RequestType<T> type;
RequestContext context;
ChannelBuffer targetBuffer;
ChannelBuffer bufferToReadFrom;
ChannelBuffer bufferToWriteTo;
if ( partialRequest == null )
{
// This is the one and single chunk in the request
type = getRequestContext( buffer.readByte() );
context = readContext( buffer );
targetBuffer = mapSlave( channel, context );
bufferToReadFrom = buffer;
bufferToWriteTo = targetBuffer;
}
else
{
// This is the last chunk in a multi-chunk request
type = partialRequest.type;
context = partialRequest.context;
targetBuffer = partialRequest.buffer;
partialRequest.add( buffer );
bufferToReadFrom = targetBuffer;
bufferToWriteTo = ChannelBuffers.dynamicBuffer();
}
bufferToWriteTo.clear();
final ChunkingChannelBuffer chunkingBuffer = new ChunkingChannelBuffer( bufferToWriteTo, channel, chunkSize,
getInternalProtocolVersion(), applicationProtocolVersion );
submitSilent( targetCallExecutor, targetCaller( type, channel, context, chunkingBuffer,
bufferToReadFrom ) );
}
}
private Byte readContinuationHeader( ChannelBuffer buffer, final Channel channel )
{
byte[] header = new byte[2];
buffer.readBytes( header );
try
{ // Read request header and assert correct internal/application protocol version
assertSameProtocolVersion( header, getInternalProtocolVersion(), applicationProtocolVersion );
}
catch ( final IllegalProtocolVersionException e )
{ // Version mismatch, fail with a good exception back to the client
submitSilent( targetCallExecutor, new Runnable()
{
@Override
public void run()
{
writeFailureResponse( e, newChunkingBuffer( channel ) );
}
});
return null;
}
return (byte) (header[0] & 0x1);
}
protected Runnable targetCaller( final RequestType<T> type, final Channel channel, final RequestContext context,
final ChunkingChannelBuffer targetBuffer, final ChannelBuffer bufferToReadFrom )
{
return new Runnable()
{
@Override
@SuppressWarnings("unchecked")
public void run()
{
Map<String, String> requestContext = new HashMap<String, String>();
requestContext.put( "type", type.toString() );
requestContext.put( "remoteClient", channel.getRemoteAddress().toString() );
requestContext.put( "slaveContext", context.toString() );
requestMonitor.beginRequest( requestContext );
Response<R> response = null;
Throwable failure = null;
try
{
unmapSlave( channel );
response = type.getTargetCaller().call( requestTarget, context, bufferToReadFrom, targetBuffer );
type.getObjectSerializer().write( response.response(), targetBuffer );
writeStoreId( response.getStoreId(), targetBuffer );
writeTransactionStreams( response.transactions(), targetBuffer, byteCounterMonitor );
targetBuffer.done();
responseWritten( type, channel, context );
}
catch ( Throwable e )
{
failure = e;
targetBuffer.clear( true );
writeFailureResponse( e, targetBuffer );
tryToFinishOffChannel( channel, context );
throw Exceptions.launderedException( e );
}
finally
{
if ( response != null )
{
response.close();
}
requestMonitor.endRequest( failure );
}
}
};
}
protected void writeFailureResponse( Throwable exception, ChunkingChannelBuffer buffer )
{
try
{
ByteArrayOutputStream bytes = new ByteArrayOutputStream();
ObjectOutputStream out = new ObjectOutputStream( bytes );
out.writeObject( exception );
out.close();
buffer.writeBytes( bytes.toByteArray() );
buffer.done();
}
catch ( IOException e )
{
msgLog.logMessage( "Couldn't send cause of error to client", exception );
}
}
protected void responseWritten( RequestType<T> type, Channel channel, RequestContext context )
{
}
private static void writeStoreId( StoreId storeId, ChannelBuffer targetBuffer )
{
targetBuffer.writeBytes( storeId.serialize() );
}
private static void writeTransactionStreams( TransactionStream txStream, ChannelBuffer buffer, ByteCounterMonitor bufferMonitor )
{
if ( !txStream.hasNext() )
{
buffer.writeByte( 0 );
return;
}
String[] datasources = txStream.dataSourceNames();
assert datasources.length <= 255 : "too many data sources";
buffer.writeByte( datasources.length );
Map<String, Integer> datasourceId = new HashMap<String, Integer>();
for ( int i = 0; i < datasources.length; i++ )
{
String datasource = datasources[i];
writeString( buffer, datasource );
datasourceId.put( datasource, i + 1/*0 means "no more transactions"*/ );
}
for ( Triplet<String, Long, TxExtractor> tx : IteratorUtil.asIterable( txStream ) )
{
buffer.writeByte( datasourceId.get( tx.first() ) );
buffer.writeLong( tx.second() );
BlockLogBuffer blockBuffer = new BlockLogBuffer( buffer, bufferMonitor );
tx.third().extract( blockBuffer );
blockBuffer.done();
}
buffer.writeByte( 0/*no more transactions*/ );
}
protected RequestContext readContext( ChannelBuffer buffer )
{
long sessionId = buffer.readLong();
int machineId = buffer.readInt();
int eventIdentifier = buffer.readInt();
int txsSize = buffer.readByte();
Tx[] lastAppliedTransactions = new Tx[txsSize];
Tx neoTx = null;
for ( int i = 0; i < txsSize; i++ )
{
String ds = readString( buffer );
Tx tx = RequestContext.lastAppliedTx( ds, buffer.readLong() );
lastAppliedTransactions[i] = tx;
// Only perform checksum checks on the neo data source.
if ( ds.equals( NeoStoreXaDataSource.DEFAULT_DATA_SOURCE_NAME ) )
{
neoTx = tx;
}
}
int masterId = buffer.readInt();
long checksum = buffer.readLong();
// Only perform checksum checks on the neo data source. If there's none in the request
// then don't perform any such check.
if ( neoTx != null )
{
txVerifier.assertMatch( neoTx.getTxId(), masterId, checksum );
}
return new RequestContext( sessionId, machineId, eventIdentifier, lastAppliedTransactions, masterId, checksum );
}
protected abstract RequestType<T> getRequestContext( byte id );
/**
 * Registers (or refreshes the last-seen timestamp of) the slave behind the given
 * channel, then returns a fresh dynamic buffer for writing the response.
 * Contexts with {@link RequestContext#EMPTY}'s machineId are not tracked.
 */
protected ChannelBuffer mapSlave( Channel channel, RequestContext slave )
{
synchronized ( connectedSlaveChannels )
{
// Checking for machineId -1 excludes the "empty" slave contexts
// which some communication points pass in as context.
if ( slave != null && slave.machineId() != RequestContext.EMPTY.machineId() )
{
Pair<RequestContext, AtomicLong> previous = connectedSlaveChannels.get( channel );
if ( previous != null )
{
// Known channel: just bump its last-activity timestamp.
previous.other().set( System.currentTimeMillis() );
}
else
{
connectedSlaveChannels.put( channel, Pair.of( slave, new AtomicLong( System.currentTimeMillis() )
) );
}
}
}
return ChannelBuffers.dynamicBuffer();
}
/**
 * Stops tracking the slave bound to the given channel.
 *
 * @return the removed (context, last-activity) pair, or {@code null} if the
 * channel was not tracked.
 */
protected Pair<RequestContext, AtomicLong> unmapSlave( Channel channel )
{
synchronized ( connectedSlaveChannels )
{
return connectedSlaveChannels.remove( channel );
}
}
/** @return the target object that incoming requests are dispatched to. */
protected T getRequestTarget()
{
return requestTarget;
}
// Subclass hook: clean up any unfinished server-side work associated with this channel/context.
protected abstract void finishOffChannel( Channel channel, RequestContext context );
/**
 * Returns a snapshot of the currently connected slave channels mapped to the
 * request context each one last supplied. The returned map is a copy, so
 * mutating it does not affect server state.
 */
public Map<Channel, RequestContext> getConnectedSlaveChannels()
{
    Map<Channel, RequestContext> snapshot = new HashMap<Channel, RequestContext>();
    synchronized ( connectedSlaveChannels )
    {
        for ( Map.Entry<Channel, Pair<RequestContext, AtomicLong>> slave : connectedSlaveChannels.entrySet() )
        {
            snapshot.put( slave.getKey(), slave.getValue().first() );
        }
    }
    return snapshot;
}
/**
 * Creates a chunking buffer for the given channel so large responses are
 * streamed in frames of at most {@code chunkSize} bytes, tagged with the
 * internal and application protocol versions.
 */
private ChunkingChannelBuffer newChunkingBuffer( Channel channel )
{
return new ChunkingChannelBuffer( ChannelBuffers.dynamicBuffer(),
channel,
chunkSize, getInternalProtocolVersion(), applicationProtocolVersion );
}
// =====================================================================
// Just some methods which aren't really used when running an HA cluster,
// but exposed so that other tools can reach that information.
// =====================================================================
/**
 * Holds the pieces of a request whose payload arrives split across multiple
 * network chunks; chunks are appended until the request is complete.
 */
private class PartialRequest
{
final RequestContext context;
final ChannelBuffer buffer;
final RequestType<T> type;
public PartialRequest( RequestType<T> type, RequestContext context, ChannelBuffer buffer )
{
this.type = type;
this.context = context;
this.buffer = buffer;
}
// Appends the next received chunk to the accumulated payload.
public void add( ChannelBuffer buffer )
{
this.buffer.writeBytes( buffer );
}
}
}
| false
|
enterprise_com_src_main_java_org_neo4j_com_Server.java
|
4,426
|
{
@Override
protected RequestType<Object> getRequestContext( byte id )
{
return mock(RequestType.class);
}
@Override
protected void finishOffChannel( Channel channel, RequestContext context )
{
}
};
| false
|
enterprise_com_src_test_java_org_neo4j_com_ServerTest.java
|
4,427
|
/**
 * Tests for the com {@code Server}. Uses Mockito mocks for the protocol
 * collaborators and a {@code RecordingChannel} to capture what the server
 * writes back to the client.
 */
public class ServerTest
{
private final Protocol protocol = new Protocol(1024, (byte)0, Server.INTERNAL_PROTOCOL_VERSION);
private final TxChecksumVerifier checksumVerifier = mock( TxChecksumVerifier.class );
private final RequestType reqType = mock( RequestType.class );
private final RecordingChannel channel = new RecordingChannel();
// When checksum verification fails, the failure must both propagate on the server
// side and be serialized back to the client as the same exception.
@Test
public void shouldSendExceptionBackToClientOnInvalidChecksum() throws Exception
{
// Given
Server<Object, Object> server = newServer( checksumVerifier );
RequestContext ctx = new RequestContext( 0, 1, 0, new Tx[]{ new Tx( DEFAULT_DATA_SOURCE_NAME, 1)}, -1, 12 );
doThrow(new IllegalStateException("123")).when(checksumVerifier).assertMatch( anyLong(), anyInt(), anyLong() );
// When
try
{
server.messageReceived( channelCtx( channel ), message( reqType, ctx, channel, EMPTY_SERIALIZER ) );
fail("Should have failed.");
}
catch(IllegalStateException e)
{
// Expected
}
// Then: the client-visible response deserializes into the same exception.
try
{
protocol.deserializeResponse( channel.asBlockingReadHandler(), ByteBuffer.allocateDirect( 1024 ), 1,
VOID_DESERIALIZER, mock( ResourceReleaser.class ) );
fail("Should have failed.");
}
catch(IllegalStateException e)
{
assertThat(e.getMessage(), equalTo("123"));
}
}
// Builds a mocked Netty MessageEvent carrying a fully serialized request.
private MessageEvent message( RequestType reqType, RequestContext ctx, Channel serverToClientChannel, Serializer payloadSerializer ) throws IOException
{
ByteBuffer backingBuffer = ByteBuffer.allocate( 1024 );
protocol.serializeRequest( new RecordingChannel(), new ByteBufferBackedChannelBuffer( backingBuffer ),
reqType, ctx,
payloadSerializer );
MessageEvent event = mock(MessageEvent.class);
when(event.getMessage()).thenReturn( new ByteBufferBackedChannelBuffer( backingBuffer ) );
when(event.getChannel()).thenReturn( serverToClientChannel );
return event;
}
// Wraps a channel in a mocked handler context, as Netty would supply it.
private ChannelHandlerContext channelCtx( Channel channel )
{
ChannelHandlerContext ctx = mock( ChannelHandlerContext.class );
when(ctx.getChannel()).thenReturn( channel );
return ctx;
}
// Minimal concrete Server with the given checksum verifier plugged in.
private Server<Object, Object> newServer( final TxChecksumVerifier checksumVerifier )
{
return new Server<Object, Object>(null, mock( Server.Configuration.class), new DevNullLoggingService(),
Protocol.DEFAULT_FRAME_LENGTH, (byte)0, checksumVerifier, new TickingClock( 0, 1 ), mock( Monitors.class) )
{
@Override
protected RequestType<Object> getRequestContext( byte id )
{
return mock(RequestType.class);
}
@Override
protected void finishOffChannel( Channel channel, RequestContext context )
{
}
};
}
}
| false
|
enterprise_com_src_test_java_org_neo4j_com_ServerTest.java
|
4,428
|
/**
 * Unchecked exception signalling a failure on the server side of the
 * communication channel. Mirrors the four standard {@link RuntimeException}
 * constructors without adding any state of its own.
 */
public class ServerFailureException extends RuntimeException
{
    public ServerFailureException()
    {
        // No message, no cause.
    }

    public ServerFailureException( String message )
    {
        super( message );
    }

    public ServerFailureException( Throwable cause )
    {
        super( cause );
    }

    public ServerFailureException( String message, Throwable cause )
    {
        super( message, cause );
    }
}
| false
|
enterprise_com_src_main_java_org_neo4j_com_ServerFailureException.java
|
4,429
|
/**
 * Accumulator for a request whose payload arrives split across several
 * network chunks; each chunk is appended until the request is complete.
 */
private class PartialRequest
{
    final RequestType<T> type;
    final RequestContext context;
    final ChannelBuffer buffer;

    public PartialRequest( RequestType<T> type, RequestContext context, ChannelBuffer buffer )
    {
        this.buffer = buffer;
        this.context = context;
        this.type = type;
    }

    // Appends the next received chunk to the payload accumulated so far.
    public void add( ChannelBuffer chunk )
    {
        this.buffer.writeBytes( chunk );
    }
}
| false
|
enterprise_com_src_main_java_org_neo4j_com_Server.java
|
4,430
|
{
@Override
@SuppressWarnings("unchecked")
public void run()
{
Map<String, String> requestContext = new HashMap<String, String>();
requestContext.put( "type", type.toString() );
requestContext.put( "remoteClient", channel.getRemoteAddress().toString() );
requestContext.put( "slaveContext", context.toString() );
requestMonitor.beginRequest( requestContext );
Response<R> response = null;
Throwable failure = null;
try
{
unmapSlave( channel );
response = type.getTargetCaller().call( requestTarget, context, bufferToReadFrom, targetBuffer );
type.getObjectSerializer().write( response.response(), targetBuffer );
writeStoreId( response.getStoreId(), targetBuffer );
writeTransactionStreams( response.transactions(), targetBuffer, byteCounterMonitor );
targetBuffer.done();
responseWritten( type, channel, context );
}
catch ( Throwable e )
{
failure = e;
targetBuffer.clear( true );
writeFailureResponse( e, targetBuffer );
tryToFinishOffChannel( channel, context );
throw Exceptions.launderedException( e );
}
finally
{
if ( response != null )
{
response.close();
}
requestMonitor.endRequest( failure );
}
}
};
| false
|
enterprise_com_src_main_java_org_neo4j_com_Server.java
|
4,431
|
{
@Override
public void run()
{
writeFailureResponse( e, newChunkingBuffer( channel ) );
}
});
| false
|
enterprise_com_src_main_java_org_neo4j_com_Server.java
|
4,432
|
{
@Override
public void run()
{
try
{
finishOffChannel( null, slave );
}
catch ( Throwable e )
{
// Introduce some delay here. it becomes like a busy wait if it never succeeds
sleepNicely( 200 );
unfinishedTransactionExecutor.submit( this );
}
}
private void sleepNicely( int millis )
{
try
{
Thread.sleep( millis );
}
catch ( InterruptedException e )
{
Thread.interrupted();
}
}
};
| false
|
enterprise_com_src_main_java_org_neo4j_com_Server.java
|
4,433
|
{
@Override
public void run()
{
Map<Channel, Boolean/*starting to get old?*/> channels = new HashMap<Channel, Boolean>();
synchronized ( connectedSlaveChannels )
{
for ( Map.Entry<Channel, Pair<RequestContext, AtomicLong>> channel : connectedSlaveChannels
.entrySet() )
{ // Has this channel been silent for a while?
long age = System.currentTimeMillis() - channel.getValue().other().get();
if ( age > oldChannelThresholdMillis )
{
msgLog.logMessage( "Found a silent channel " + channel + ", " + age );
channels.put( channel.getKey(), Boolean.TRUE );
}
else if ( age > oldChannelThresholdMillis / 2 )
{ // Then add it to a list to check
channels.put( channel.getKey(), Boolean.FALSE );
}
}
}
for ( Map.Entry<Channel, Boolean> channel : channels.entrySet() )
{
if ( channel.getValue() || !channel.getKey().isOpen() || !channel.getKey().isConnected() ||
!channel.getKey().isBound() )
{
tryToFinishOffChannel( channel.getKey() );
}
}
}
};
| false
|
enterprise_com_src_main_java_org_neo4j_com_Server.java
|
4,434
|
{
@Override
public Object initialState( Path path )
{
return null;
}
public InitialBranchState reverse()
{
return this;
}
};
| false
|
community_kernel_src_main_java_org_neo4j_graphdb_traversal_InitialBranchState.java
|
4,435
|
/**
 * Convenience adapter for {@link InitialBranchState} implementations whose
 * reversed traversal uses the same state provider as the forward one.
 */
abstract class Adapter<STATE> implements InitialBranchState<STATE>
{
@Override
public InitialBranchState<STATE> reverse()
{
// Symmetric by default: reversing yields this same provider.
return this;
}
}
| false
|
community_kernel_src_main_java_org_neo4j_graphdb_traversal_InitialBranchState.java
|
4,436
|
/**
 * Branch state holder carrying one fixed initial state for the forward
 * traversal direction and another for the reversed direction.
 */
class State<STATE> extends Adapter<STATE>
{
    private final STATE forward;
    private final STATE backward;

    public State( STATE initialState, STATE reversedInitialState )
    {
        this.forward = initialState;
        this.backward = reversedInitialState;
    }

    @Override
    public STATE initialState( Path path )
    {
        // Every branch starts from the same state; the path is irrelevant.
        return forward;
    }

    @Override
    public InitialBranchState<STATE> reverse()
    {
        // Swap the two states to obtain the mirrored view.
        return new State<STATE>( backward, forward );
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_graphdb_traversal_InitialBranchState.java
|
4,437
|
/**
 * Worker that repeatedly registers itself as a configuration change listener,
 * applies a small random configuration change, and unregisters again — used to
 * provoke concurrency problems in {@code Config}. Any throwable seen during the
 * run is captured in {@link #failure} for the test to inspect afterwards.
 */
class ConfigHammer implements Runnable, ConfigurationChangeListener
{
private final Config config;
private final Random rand;
// First throwable seen while hammering, or null if the run completed cleanly.
protected Throwable failure;
public ConfigHammer(Config config)
{
this.config = config;
this.rand = new Random( );
}
@Override
public void run()
{
try
{
int times = 500;
while ( times --> 0 )
{
config.addConfigurationChangeListener( this );
// Edit config a bit
Map<String,String> params = config.getParams();
params.put( "asd" + rand.nextInt( 10 ),"dsa" + rand.nextInt( 100000 ) );
config.applyChanges( params );
// Unregister listener
config.removeConfigurationChangeListener( this );
}
} catch(Throwable e)
{
this.failure = e;
}
}
@Override
public void notifyConfigurationChanges( Iterable<ConfigurationChange> change )
{
// Intentionally a no-op; only listener (de)registration under load is exercised.
}
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_configuration_TestConfigConcurrency.java
|
4,438
|
// Test fixture: settings class exposing defaulted settings used by the config tests.
public static class MySettingsWithDefaults
{
public static Setting<String> hello = setting( "hello", STRING, "Hello, World!" );
public static Setting<Boolean> boolSetting = setting( "bool_setting", BOOLEAN, Settings.TRUE );
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_configuration_TestConfig.java
|
4,439
|
{
{
add( new SpecificPropertyMigration( "old", "Old has been replaced by newer!" )
{
@Override
public void setValueWithOldSetting( String value, Map<String, String> rawConfiguration )
{
rawConfiguration.put( newer.name(), value );
}
} );
}
};
| false
|
community_kernel_src_test_java_org_neo4j_kernel_configuration_TestConfig.java
|
4,440
|
// Test fixture: settings class whose migrator moves the legacy "old" property
// over to the "hello" key backing the "newer" setting declared below.
public static class MyMigratingSettings
{
@Migrator
public static ConfigurationMigrator migrator = new BaseConfigurationMigrator()
{
{
add( new SpecificPropertyMigration( "old", "Old has been replaced by newer!" )
{
@Override
public void setValueWithOldSetting( String value, Map<String, String> rawConfiguration )
{
// Carry the legacy value over to the new key.
rawConfiguration.put( newer.name(), value );
}
} );
}
};
public static Setting<String> newer = setting( "hello", STRING, "" );
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_configuration_TestConfig.java
|
4,441
|
{{
put( MySettingsWithDefaults.boolSetting.name(), "asd" );
}},
| false
|
community_kernel_src_test_java_org_neo4j_kernel_configuration_TestConfig.java
|
4,442
|
/**
 * Tests for {@code Config}: defaults, migration of legacy keys, and validation
 * of values both on {@code applyChanges} and in the constructor.
 */
public class TestConfig
{
// Fixture: settings class whose migrator moves "old" over to the "hello" key.
public static class MyMigratingSettings
{
@Migrator
public static ConfigurationMigrator migrator = new BaseConfigurationMigrator()
{
{
add( new SpecificPropertyMigration( "old", "Old has been replaced by newer!" )
{
@Override
public void setValueWithOldSetting( String value, Map<String, String> rawConfiguration )
{
rawConfiguration.put( newer.name(), value );
}
} );
}
};
public static Setting<String> newer = setting( "hello", STRING, "" );
}
// Fixture: settings class with default values.
public static class MySettingsWithDefaults
{
public static Setting<String> hello = setting( "hello", STRING, "Hello, World!" );
public static Setting<Boolean> boolSetting = setting( "bool_setting", BOOLEAN, Settings.TRUE );
}
@Test
public void shouldApplyDefaults()
{
Config config = new Config( new HashMap<String, String>(), MySettingsWithDefaults.class );
assertThat( config.get( MySettingsWithDefaults.hello ), is( "Hello, World!" ) );
}
@Test
public void shouldApplyMigrations()
{
// Supplying the legacy key should surface through the migrated setting.
Map<String, String> params = new HashMap<String, String>();
params.put( "old", "hello!" );
Config config = new Config( params, MyMigratingSettings.class );
assertThat( config.get( MyMigratingSettings.newer ), is( "hello!" ) );
}
@Test
public void shouldNotAllowSettingInvalidValues()
{
Config config = new Config( new HashMap<String, String>(), MySettingsWithDefaults.class );
try
{
Map<String, String> params = config.getParams();
params.put( MySettingsWithDefaults.boolSetting.name(), "asd" );
config.applyChanges( params );
fail( "Expected validation to fail." );
}
catch ( IllegalArgumentException e )
{
// Expected: "asd" is not a valid boolean value.
}
}
@Test
public void shouldNotAllowInvalidValuesInConstructor()
{
try
{
new Config( new HashMap<String, String>()
{{
put( MySettingsWithDefaults.boolSetting.name(), "asd" );
}},
MySettingsWithDefaults.class );
fail( "Expected validation to fail." );
}
catch ( IllegalArgumentException e )
{
// Expected: constructor validates just like applyChanges.
}
}
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_configuration_TestConfig.java
|
4,443
|
/**
 * Tests that {@code SystemPropertiesConfiguration} copies only recognized,
 * valid JVM system properties into the configuration map. Both tests clean up
 * the system properties they set, so they cannot pollute other tests running
 * in the same JVM.
 */
public class SystemPropertiesConfigurationTest
{
    @Test
    public void testThatSetValidSystemPropertiesArePickedUp()
    {
        try
        {
            // Sanity check: with no system property set, read_only=true is not present.
            assertFalse( MapUtil.stringMap( GraphDatabaseSettings.read_only.name(), Settings.TRUE ).equals(
                    new SystemPropertiesConfiguration( GraphDatabaseSettings.class ).apply( MapUtil.stringMap() ) ) );
            // A valid value for a known setting is copied into the configuration.
            System.setProperty( GraphDatabaseSettings.read_only.name(), Settings.TRUE );
            assertEquals( MapUtil.stringMap( GraphDatabaseSettings.read_only.name(), Settings.TRUE ),
                    new SystemPropertiesConfiguration( GraphDatabaseSettings.class )
                            .apply( MapUtil.stringMap() ) );
            // An invalid value for a known setting is silently ignored.
            System.setProperty( GraphDatabaseSettings.read_only.name(), "foo" );
            assertEquals( MapUtil.stringMap(),
                    new SystemPropertiesConfiguration( GraphDatabaseSettings.class )
                            .apply( MapUtil.stringMap() ) );
        }
        finally
        {
            System.clearProperty( GraphDatabaseSettings.read_only.name() );
        }
    }

    @Test
    public void testThatSetInvalidSystemPropertiesAreNotPickedUp()
    {
        try
        {
            // "foo" matches no known setting, so it must not leak into the configuration.
            System.setProperty( "foo", "bar" );
            assertEquals( MapUtil.stringMap(),
                    new SystemPropertiesConfiguration( GraphDatabaseSettings.class ).apply( MapUtil.stringMap() ) );
        }
        finally
        {
            // Previously left set, polluting the JVM for subsequent tests; always clean up.
            System.clearProperty( "foo" );
        }
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_configuration_SystemPropertiesConfigurationTest.java
|
4,444
|
/**
 * Augments a configuration map with values taken from JVM system properties.
 * Only system properties whose key matches a {@code Setting} declared as a
 * public static field in one of the given settings classes, and whose value
 * passes that setting's validation, are copied into the result.
 */
public class SystemPropertiesConfiguration
{
    private final Iterable<Class<?>> settingsClasses;

    public SystemPropertiesConfiguration( Class<?>... settingsClasses )
    {
        this( Arrays.asList( settingsClasses ) );
    }

    public SystemPropertiesConfiguration( Iterable<Class<?>> settingsClasses )
    {
        this.settingsClasses = settingsClasses;
    }

    /**
     * @param config the base configuration; not modified.
     * @return a copy of {@code config} with recognized, valid system properties applied on top.
     */
    public Map<String, String> apply( Map<String, String> config )
    {
        // Validation below resolves values against base config plus all system properties.
        Map<String, String> systemProperties = new HashMap<String, String>( config );
        for ( Map.Entry<Object, Object> prop : System.getProperties().entrySet() )
        {
            systemProperties.put( prop.getKey().toString(), prop.getValue().toString() );
        }
        // For each system property, see if it passes validation; if so, copy it over.
        Map<String, String> result = new HashMap<String, String>( config );
        Function<String, String> systemPropertiesFunction = Functions.map( systemProperties );
        for ( Map.Entry<Object, Object> prop : System.getProperties().entrySet() )
        {
            String key = (String) prop.getKey();
            for ( Class<?> settingsClass : settingsClasses )
            {
                for ( Field field : settingsClass.getFields() )
                {
                    try
                    {
                        @SuppressWarnings("unchecked")
                        Setting<Object> setting = (Setting<Object>) field.get( null );
                        if ( setting.name().equals( key ) )
                        {
                            // Throws if the value is invalid, in which case the
                            // property is skipped by the catch below.
                            setting.apply( systemPropertiesFunction );
                            // Valid setting, copy it from system properties
                            result.put( key, (String) prop.getValue() );
                        }
                    }
                    catch ( Throwable e )
                    {
                        // Non-Setting fields, inaccessible fields and invalid values
                        // are all deliberately ignored.
                    }
                }
            }
        }
        return result;
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_configuration_SystemPropertiesConfiguration.java
|
4,445
|
{
@Override
public boolean accept( String item )
{
return item.startsWith( configurationNamePrefix );
}
}, life );
| false
|
community_kernel_src_main_java_org_neo4j_kernel_configuration_RestartOnChange.java
|
4,446
|
{
@Override
public Predicate<String> apply( Field method )
{
try
{
Setting setting = (Setting) method.get( null );
return Predicates.in( setting.name() );
}
catch ( IllegalAccessException e )
{
return Predicates.not( Predicates.<String>TRUE() );
}
}
}, Arrays.asList( settingsClass.getFields() ) ) ), life );
| false
|
community_kernel_src_main_java_org_neo4j_kernel_configuration_RestartOnChange.java
|
4,447
|
/**
 * Configuration change listener that restarts a {@link Lifecycle} (stop then
 * start) whenever a changed configuration name matches the given specification.
 * The specification can be a predicate, a name prefix, or all settings declared
 * in a settings class.
 */
public class RestartOnChange
implements ConfigurationChangeListener
{
private final Predicate<String> restartSpecification;
private final Lifecycle life;
public RestartOnChange( Class<?> settingsClass, Lifecycle life )
{
// Build one predicate per Setting field and OR them together.
this( or( map( new Function<Field, Predicate<String>>()
{
@Override
public Predicate<String> apply( Field method )
{
try
{
Setting setting = (Setting) method.get( null );
return Predicates.in( setting.name() );
}
catch ( IllegalAccessException e )
{
// Inaccessible field: contribute a predicate that never matches.
return Predicates.not( Predicates.<String>TRUE() );
}
}
}, Arrays.asList( settingsClass.getFields() ) ) ), life );
}
public RestartOnChange( final String configurationNamePrefix, Lifecycle life )
{
this( new Predicate<String>()
{
@Override
public boolean accept( String item )
{
return item.startsWith( configurationNamePrefix );
}
}, life );
}
public RestartOnChange( Predicate<String> restartSpecification, Lifecycle life )
{
this.restartSpecification = restartSpecification;
this.life = life;
}
@Override
public void notifyConfigurationChanges( Iterable<ConfigurationChange> change )
{
// Restart once if any of the changed names matches the specification.
boolean restart = false;
for ( ConfigurationChange configurationChange : change )
{
restart |= restartSpecification.accept( configurationChange.getName() );
}
if ( restart )
{
try
{
life.stop();
life.start();
}
catch ( Throwable throwable )
{
// NOTE(review): failures during restart are only printed, not propagated — confirm intent.
throwable.printStackTrace();
}
}
}
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_configuration_RestartOnChange.java
|
4,448
|
/**
 * Migrates legacy graph database configuration keys to their current
 * replacements. Each migration is registered in the instance initializer below;
 * the deprecation message passed to each {@code SpecificPropertyMigration}
 * documents what the old key was replaced with.
 */
public class GraphDatabaseConfigurationMigrator extends BaseConfigurationMigrator
{
private static final String KEEP_LOGICAL_LOGS = "keep_logical_logs";
{
add( new SpecificPropertyMigration( "enable_online_backup",
"enable_online_backup has been replaced with online_backup_enabled and online_backup_port" )
{
@Override
public void setValueWithOldSetting( String value, Map<String, String> rawConfiguration )
{
if ( value != null )
{
String port = null;
// Backup is configured
if ( value.contains( "=" ) )
{ // Multi-value config, which means we have to parse the port
Args args = parseMapFromConfigValue( "enable_online_backup", value );
port = args.get( "port", "6362" );
port = "0.0.0.0:"+port;
}
else if ( Boolean.parseBoolean( value ) )
{ // Single-value config, true/false
port = "0.0.0.0:6362-6372";
}
if ( port != null )
{
rawConfiguration.put( "online_backup_server", port );
rawConfiguration.put( "online_backup_enabled", Settings.TRUE );
}
}
}
} );
add( new SpecificPropertyMigration( "online_backup_port",
"online_backup_port has been replaced with online_backup_server, which is a hostname:port setting" )
{
@Override
public void setValueWithOldSetting( String value, Map<String, String> rawConfiguration )
{
if ( value != null )
{
rawConfiguration.put( "online_backup_server", "0.0.0.0:"+value );
}
}
} );
add( new SpecificPropertyMigration( "neo4j.ext.udc.disable", "neo4j.ext.udc.disable has been replaced with " +
"neo4j.ext.udc.enabled" )
{
@Override
public void setValueWithOldSetting( String value, Map<String, String> rawConfiguration )
{
// The old key was a negative flag, so the new value is inverted.
if ( "true".equalsIgnoreCase( value ) )
{
rawConfiguration.put( "neo4j.ext.udc.enabled", "false" );
}
else
{
rawConfiguration.put( "neo4j.ext.udc.enabled", "true" );
}
}
} );
add( new SpecificPropertyMigration( "enable_remote_shell",
"enable_remote_shell has been replaced with remote_shell_enabled" )
{
@Override
public void setValueWithOldSetting( String value, Map<String, String> rawConfiguration )
{
// The legacy value was either a boolean or a key=value list with port/name/readonly.
if ( configValueContainsMultipleParameters( value ) )
{
rawConfiguration.put( "remote_shell_enabled", Settings.TRUE );
Args parsed = parseMapFromConfigValue( "enable_remote_shell", value );
Map<String, String> map = new HashMap<String, String>();
map.put( "remote_shell_port", parsed.get( "port", "1337" ) );
map.put( "remote_shell_name", parsed.get( "name", "shell" ) );
map.put( "remote_shell_read_only", parsed.get( "readonly", "false" ) );
rawConfiguration.putAll( map );
}
else
{
rawConfiguration.put( "remote_shell_enabled", Boolean.parseBoolean( value ) ? Settings.TRUE :
Settings.FALSE );
}
}
} );
add( new SpecificPropertyMigration( KEEP_LOGICAL_LOGS, "multi-value configuration of keep_logical_logs" +
" has been removed, any configuration specified will apply to all data sources" )
{
@Override
public boolean appliesTo( Map<String, String> rawConfiguration )
{
// Only applies when the old per-data-source form (ds1=true,ds2=false) was used.
return configValueContainsMultipleParameters( rawConfiguration.get( KEEP_LOGICAL_LOGS ) );
}
@Override
public void setValueWithOldSetting( String value, Map<String, String> rawConfiguration )
{
// Collapse to a single boolean: keep logs if any data source asked to keep them.
boolean keep = false;
Args map = parseMapFromConfigValue( KEEP_LOGICAL_LOGS, value );
for ( Map.Entry<String, String> entry : map.asMap().entrySet() )
{
if ( Boolean.parseBoolean( entry.getValue() ) )
{
keep = true;
break;
}
}
rawConfiguration.put( KEEP_LOGICAL_LOGS, String.valueOf( keep ) );
}
} );
add( new SpecificPropertyMigration( "lucene_writer_cache_size", "cannot configure writers and searchers " +
"individually since they go together" )
{
@Override
public void setValueWithOldSetting( String value, Map<String, String> rawConfiguration )
{
// Deliberately dropped: the old setting has no replacement.
}
} );
add( new ConfigValueChanged( "cache_type", "gcr", "hpc",
"'gcr' cache type has been renamed to 'hpc', High Performance Cache." ));
}
@Deprecated
public static boolean configValueContainsMultipleParameters( String configValue )
{
return configValue != null && configValue.contains( "=" );
}
/**
 * Parses a legacy "key1=value1,key2=value2" configuration value into {@link Args}.
 *
 * @throws RuntimeException if any comma-separated part is not a key=value pair.
 */
@Deprecated
public static Args parseMapFromConfigValue( String name, String configValue )
{
Map<String, String> result = new HashMap<>();
for ( String part : configValue.split( quote( "," ) ) )
{
String[] tokens = part.split( quote( "=" ) );
if ( tokens.length != 2 )
{
throw new RuntimeException( "Invalid configuration value '" + configValue +
"' for " + name + ". The format is [true/false] or [key1=value1,key2=value2...]" );
}
result.put( tokens[0], tokens[1] );
}
return new Args( result );
}
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_configuration_GraphDatabaseConfigurationMigrator.java
|
4,449
|
/**
 * Validates raw configuration maps against the {@code Setting}s declared as
 * public static fields of the given settings classes. Validation works by
 * applying each setting to the raw map; an invalid value makes the setting
 * throw (typically {@link IllegalArgumentException}).
 */
public class ConfigurationValidator
{
    private final AnnotatedFieldHarvester fieldHarvester = new AnnotatedFieldHarvester();
    private final Map<String, Setting<?>> settings;

    public ConfigurationValidator( Iterable<Class<?>> settingsClasses )
    {
        this.settings = getSettingsFrom( settingsClasses );
    }

    /**
     * Applies every known setting to the given raw configuration, letting the
     * offending setting throw if a value is invalid.
     */
    public void validate( Map<String, String> rawConfig )
    {
        for ( Setting<?> setting : settings.values() )
        {
            setting.apply( Functions.map( rawConfig ) );
        }
    }

    @SuppressWarnings("rawtypes")
    private Map<String, Setting<?>> getSettingsFrom( Iterable<Class<?>> settingsClasses )
    {
        // Named differently from the field on purpose, to avoid shadowing it.
        Map<String, Setting<?>> harvested = new HashMap<String, Setting<?>>();
        for ( Class<?> clazz : settingsClasses )
        {
            for ( Pair<Field, Setting> field : fieldHarvester.findStatic( clazz, Setting.class ) )
            {
                harvested.put( field.other().name(), field.other() );
            }
        }
        return harvested;
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_configuration_ConfigurationValidator.java
|
4,450
|
/**
 * Immutable value object describing one configuration parameter change:
 * the parameter name, its previous value and its new value.
 */
public class ConfigurationChange
{
    private final String name;
    private final String oldValue;
    private final String newValue;

    /**
     * @param name the configuration parameter name.
     * @param oldValue the previous value, may be {@code null} if newly added.
     * @param newValue the value after the change.
     */
    public ConfigurationChange( String name, String oldValue, String newValue )
    {
        this.name = name;
        this.oldValue = oldValue;
        this.newValue = newValue;
    }

    public String getName()
    {
        return name;
    }

    public String getOldValue()
    {
        return oldValue;
    }

    public String getNewValue()
    {
        return newValue;
    }

    @Override
    public String toString()
    {
        // Compact "name:old->new" form used in change notifications/logs.
        return name + ":" + oldValue + "->" + newValue;
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_configuration_ConfigurationChange.java
|
4,451
|
/**
 * Static utility for turning {@code ConfigParam} objects into configuration maps.
 */
final class Conversion
{
    private Conversion()
    {
        // Utility class; not meant to be instantiated.
    }

    /**
     * Create a new configuration map from a set of {@link ConfigParam} objects.
     *
     * @param params the parameters to add to the map.
     * @return a map containing the specified configuration parameters.
     */
    public static Map<String, String> create( ConfigParam... params )
    {
        return update( new HashMap<String, String>(), params );
    }

    /**
     * Updates a configuration map with the specified configuration parameters.
     * Null arrays and null elements are tolerated and simply skipped.
     *
     * @param config the map to update.
     * @param params the configuration parameters to update the map with.
     * @return the same configuration map as passed in.
     */
    public static Map<String, String> update( Map<String, String> config, ConfigParam... params )
    {
        if ( params != null )
        {
            for ( ConfigParam param : params )
            {
                if ( param != null )
                {
                    param.configure( config );
                }
            }
        }
        return config;
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_configuration_ConfigParam.java
|
4,452
|
public class ConfigAsciiDocGenerator {
public String generateDocsFor(Class<? extends SettingsResourceBundle> settingsResource)
{
return generateDocsFor(settingsResource.getName());
}
public String generateDocsFor(String settingsResource)
{
ResourceBundle bundle = ResourceBundle.getBundle( settingsResource );
StringBuilder sb = new StringBuilder();
List<String> keys = new ArrayList<String>(bundle.keySet());
Collections.sort(keys);
for( String property : keys )
{
if (property.endsWith( ".description" ))
{
String name = property.substring( 0, property.lastIndexOf( "." ) );
sb.append("."+bundle.getString( name+".title" )+"\n");
String minmax = "";
if (bundle.containsKey( name+".min" ) && bundle.containsKey( name+".max" ))
minmax=",\"minmax\"";
else if (bundle.containsKey( name+".min" ))
minmax=",\"min\"";
else if (bundle.containsKey( name+".max" ))
minmax=",\"max\"";
sb.append( "[\"configsetting\""+minmax+"]\n");
sb.append( "----\n" );
String defaultKey = name + ".default";
if (bundle.containsKey( defaultKey ))
{
sb.append( name+": "+bundle.getString( defaultKey )+"\n");
} else
{
sb.append( name+"\n");
}
sb.append( bundle.getString( property )+"\n");
// Output optional options
String optionsKey = name+".options";
if (bundle.containsKey( optionsKey ))
{
String[] options = bundle.getString( optionsKey ).split( "," );
if (bundle.containsKey( name+".option."+options[0] ))
{
for( String option : options )
{
String description = bundle.getString( name + ".option." + option );
char[] spaces = new char[ option.length() + 2 ];
Arrays.fill( spaces,' ' );
description = description.replace( "\n", " ");
sb.append(option+": "+ description+"\n");
}
} else
{
sb.append(bundle.getString( optionsKey ).replace( ","," \n" )+"\n");
}
}
if (bundle.containsKey( name+".min" ))
sb.append(bundle.getString( name+".min" )+"\n");
if (bundle.containsKey( name+".max" ))
sb.append(bundle.getString( name+".max" )+"\n");
sb.append( "----\n" );
sb.append( "\n" );
}
}
return sb.toString();
}
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_configuration_ConfigAsciiDocGenerator.java
|
4,453
|
{
@Override
public String apply( Map.Entry<String, String> stringStringEntry )
{
return stringStringEntry.getKey() + "=" + stringStringEntry.getValue();
}
}, params.entrySet() ) );
| false
|
community_kernel_src_main_java_org_neo4j_kernel_configuration_Config.java
|
4,454
|
public class Config implements DiagnosticsProvider
{
private final List<ConfigurationChangeListener> listeners = new CopyOnWriteArrayList<ConfigurationChangeListener>();
private final Map<String, String> params = new ConcurrentHashMap<String, String>( );
private final ConfigurationMigrator migrator;
// Messages to this log get replayed into a real logger once logging has been
// instantiated.
private StringLogger log = new BufferingLogger();
private final ConfigurationValidator validator;
private final Function<String, String> settingsFunction;
private final Iterable<Class<?>> settingsClasses;
public Config()
{
this( new HashMap<String, String>(), Collections.<Class<?>>emptyList() );
}
public Config( Map<String, String> inputParams )
{
this( inputParams, Collections.<Class<?>>emptyList() );
}
public Config( Map<String, String> inputParams, Class<?>... settingsClasses )
{
this( inputParams, Arrays.asList( settingsClasses ) );
}
public Config( Map<String, String> inputParams, Iterable<Class<?>> settingsClasses )
{
this.settingsClasses = settingsClasses;
settingsFunction = Functions.map( params );
this.migrator = new AnnotationBasedConfigurationMigrator( settingsClasses );
this.validator = new ConfigurationValidator( settingsClasses );
this.applyChanges( inputParams );
}
// TODO: Get rid of this, to allow us to have something more
// elaborate as internal storage (eg. something that can keep meta data with
// properties).
public Map<String, String> getParams()
{
return new HashMap<String, String>( this.params );
}
/**
* Retrieve a configuration property.
*/
public <T> T get( Setting<T> setting )
{
return setting.apply( settingsFunction );
}
/**
* Replace the current set of configuration parameters with another one.
*/
public synchronized void applyChanges( Map<String, String> newConfiguration )
{
newConfiguration = migrator.apply( newConfiguration, log );
// Make sure all changes are valid
validator.validate( newConfiguration );
// Figure out what changed
if ( listeners.isEmpty() )
{
// Make the change
params.clear();
params.putAll( newConfiguration );
}
else
{
List<ConfigurationChange> configurationChanges = new ArrayList<ConfigurationChange>();
for ( Map.Entry<String, String> stringStringEntry : newConfiguration.entrySet() )
{
String oldValue = params.get( stringStringEntry.getKey() );
String newValue = stringStringEntry.getValue();
if ( !(oldValue == null && newValue == null) &&
(oldValue == null || newValue == null || !oldValue.equals( newValue )) )
{
configurationChanges.add( new ConfigurationChange( stringStringEntry.getKey(), oldValue,
newValue ) );
}
}
// Make the change
params.clear();
params.putAll( newConfiguration );
// Notify listeners
for ( ConfigurationChangeListener listener : listeners )
{
listener.notifyConfigurationChanges( configurationChanges );
}
}
}
public Iterable<Class<?>> getSettingsClasses()
{
return settingsClasses;
}
public void setLogger( StringLogger log )
{
if ( this.log instanceof BufferingLogger )
{
((BufferingLogger) this.log).replayInto( log );
}
this.log = log;
}
public void addConfigurationChangeListener( ConfigurationChangeListener listener )
{
listeners.add( listener );
}
public void removeConfigurationChangeListener( ConfigurationChangeListener listener )
{
listeners.remove( listener );
}
@Override
public String getDiagnosticsIdentifier()
{
return getClass().getName();
}
@Override
public void acceptDiagnosticsVisitor( Object visitor )
{
// nothing visits configuration
}
@Override
public void dump( DiagnosticsPhase phase, StringLogger log )
{
if ( phase.isInitialization() || phase.isExplicitlyRequested() )
{
log.logLongMessage( "Neo4j Kernel properties:", Iterables.map( new Function<Map.Entry<String, String>,
String>()
{
@Override
public String apply( Map.Entry<String, String> stringStringEntry )
{
return stringStringEntry.getKey() + "=" + stringStringEntry.getValue();
}
}, params.entrySet() ) );
}
}
/**
 * Renders all parameters as {@code {key=value, ...}} with keys in natural
 * (sorted) order. A TreeMap copy yields exactly the same representation as
 * sorting the keys and inserting them into an insertion-ordered map.
 */
@Override
public String toString()
{
    return new java.util.TreeMap<String, String>( params ).toString();
}
/**
 * Parses a number that may carry a binary size suffix: "k" (x1024),
 * "m" (x1024^2) or "g" (x1024^3), matched case-insensitively. E.g. "2k" -> 2048.
 *
 * @param numberWithPotentialUnit digits optionally followed by a single-letter unit
 * @return the parsed value scaled by the unit multiplier
 * @throws IllegalArgumentException on an unrecognized unit suffix
 * @throws NumberFormatException if the numeric part is not a valid long
 */
public static long parseLongWithUnit( String numberWithPotentialUnit )
{
    int unitStart = findFirstNonDigit( numberWithPotentialUnit );
    long multiplier = 1;
    if ( unitStart < numberWithPotentialUnit.length() )
    {
        // Validate the unit before parsing the digits, so an illegal unit is
        // reported as IllegalArgumentException even when the digit part is empty.
        multiplier = multiplierFor( numberWithPotentialUnit.substring( unitStart ), numberWithPotentialUnit );
    }
    return Long.parseLong( numberWithPotentialUnit.substring( 0, unitStart ) ) * multiplier;
}

/**
 * Maps a one-letter unit to its binary multiplier, or throws for anything else.
 */
private static long multiplierFor( String unit, String fullInput )
{
    if ( unit.length() == 1 )
    {
        switch ( Character.toLowerCase( unit.charAt( 0 ) ) )
        {
        case 'k':
            return 1L << 10;
        case 'm':
            return 1L << 20;
        case 'g':
            return 1L << 30;
        default:
            break;
        }
    }
    throw new IllegalArgumentException(
            "Illegal unit '" + unit + "' for number '" + fullInput + "'" );
}

/**
 * @return index of first non-digit character in {@code numberWithPotentialUnit}. If all digits then
 * {@code numberWithPotentialUnit.length()} is returned.
 */
private static int findFirstNonDigit( String numberWithPotentialUnit )
{
    for ( int i = 0; i < numberWithPotentialUnit.length(); i++ )
    {
        if ( !Character.isDigit( numberWithPotentialUnit.charAt( i ) ) )
        {
            return i;
        }
    }
    return numberWithPotentialUnit.length();
}
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_configuration_Config.java
|
4,455
|
/**
 * Base for migrations that act on one specific (deprecated) property key: when
 * the key is present it is removed, and {@link #setValueWithOldSetting}
 * decides how its old value maps onto the new configuration.
 */
public static abstract class SpecificPropertyMigration implements Migration
{
    // Assigned once in the constructor; final makes the immutability explicit.
    private final String propertyKey;
    private final String deprecationMessage;

    public SpecificPropertyMigration(String propertyKey, String deprecationMessage)
    {
        this.propertyKey = propertyKey;
        this.deprecationMessage = deprecationMessage;
    }

    /** @return true when the deprecated key is present in the raw configuration. */
    public boolean appliesTo(Map<String, String> rawConfiguration)
    {
        return rawConfiguration.containsKey(propertyKey);
    }

    /** Removes the old key and delegates translation of its value; mutates and returns the map. */
    public Map<String, String> apply(Map<String, String> rawConfiguration)
    {
        String value = rawConfiguration.get(propertyKey);
        rawConfiguration.remove(propertyKey);
        setValueWithOldSetting(value, rawConfiguration);
        return rawConfiguration;
    }

    public String getDeprecationMessage()
    {
        return deprecationMessage;
    }

    /** Translate the old property's value into the new configuration (mutating the map). */
    public abstract void setValueWithOldSetting(String value, Map<String, String> rawConfiguration);
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_configuration_BaseConfigurationMigrator.java
|
4,456
|
/**
 * Migration that moves a value verbatim from a deprecated key to its new key.
 */
public static class PropertyRenamed extends SpecificPropertyMigration
{
    // Set once in the constructor; final documents that.
    private final String newKey;

    public PropertyRenamed(String oldKey, String newKey, String deprecationMessage)
    {
        super(oldKey, deprecationMessage);
        this.newKey = newKey;
    }

    public void setValueWithOldSetting(String value, Map<String, String> rawConfiguration)
    {
        rawConfiguration.put(newKey, value);
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_configuration_BaseConfigurationMigrator.java
|
4,457
|
/**
 * Stress test: hammers a shared {@link Config} from several threads with
 * concurrent listener registration, parameter mutation and change application,
 * asserting that no thread observes an exception.
 */
public class TestConfigConcurrency
{
    /**
     * Hammers config with read/write load on both properties and the listener
     * interface it provides.
     */
    class ConfigHammer implements Runnable, ConfigurationChangeListener
    {
        private final Config config;
        private final Random rand;
        // Any throwable caught on the worker thread; inspected by the test thread
        // after join(), so no synchronization is used here (happens-before via join).
        protected Throwable failure;
        public ConfigHammer(Config config)
        {
            this.config = config;
            this.rand = new Random( );
        }
        @Override
        public void run()
        {
            try
            {
                int times = 500;
                while ( times --> 0 )
                {
                    config.addConfigurationChangeListener( this );
                    // Edit config a bit
                    Map<String,String> params = config.getParams();
                    params.put( "asd" + rand.nextInt( 10 ),"dsa" + rand.nextInt( 100000 ) );
                    config.applyChanges( params );
                    // Unregister listener
                    config.removeConfigurationChangeListener( this );
                }
            } catch(Throwable e)
            {
                // Record instead of rethrowing: the test thread re-throws after join.
                this.failure = e;
            }
        }
        @Override
        public void notifyConfigurationChanges( Iterable<ConfigurationChange> change )
        {
            // Intentionally empty: registration/unregistration churn is the point.
        }
    }
    @Test(timeout = 10000l)
    public void shouldHandleConcurrentLoad() throws Throwable
    {
        // Given
        Config config = new Config();
        List<Thread> threads = new ArrayList<Thread>( );
        List<ConfigHammer> hammers = new ArrayList<ConfigHammer>( );
        // When
        int numThreads = 10;
        while( numThreads --> 0)
        {
            ConfigHammer configHammer = new ConfigHammer( config );
            Thread thread = new Thread( configHammer );
            thread.start();
            threads.add( thread );
            hammers.add( configHammer );
        }
        // Then
        for ( Thread thread : threads )
            thread.join();
        // And no hammer has broken
        for ( ConfigHammer hammer : hammers )
            if(hammer.failure != null)
                throw hammer.failure;
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_configuration_TestConfigConcurrency.java
|
4,458
|
/**
 * Tests for {@link GraphDatabaseConfigurationMigrator}: each case feeds a map
 * with deprecated settings through the migrator and pins the expected migrated
 * map exactly.
 */
public class TestGraphDatabaseConfigurationMigrator
{
    @Test
    public void testNoMigration()
    {
        // Unknown keys must pass through untouched.
        ConfigurationMigrator migrator = new GraphDatabaseConfigurationMigrator( );
        assertThat( migrator.apply( stringMap( "foo", "bar" ), StringLogger.DEV_NULL ), equalTo( stringMap( "foo", "bar" ) ) );
    }
    @Test
    public void testEnableOnlineBackup()
    {
        ConfigurationMigrator migrator = new GraphDatabaseConfigurationMigrator( );
        // Legacy boolean flag expands into enabled-flag plus default server address.
        assertThat( migrator.apply( stringMap( "enable_online_backup", "true" ), StringLogger.DEV_NULL ),
                equalTo( stringMap( "online_backup_enabled", "true", "online_backup_server", "0.0.0.0:6362-6372" ) ) );
        // 1.9
        assertThat( migrator.apply( stringMap( "online_backup_port", "1234" ), StringLogger.DEV_NULL ),
                equalTo( stringMap( "online_backup_server", "0.0.0.0:1234" ) ) );
    }
    @Test
    public void testUdcEnabled()
    {
        ConfigurationMigrator migrator = new GraphDatabaseConfigurationMigrator( );
        // "disable" flag is inverted into the newer "enabled" flag.
        assertThat( migrator.apply( stringMap( "neo4j.ext.udc.disable", "true" ), StringLogger.DEV_NULL ),
                equalTo( stringMap( "neo4j.ext.udc.enabled", "false" ) ) );
        assertThat( migrator.apply( stringMap( "neo4j.ext.udc.disable", "false" ), StringLogger.DEV_NULL ),
                equalTo( stringMap( "neo4j.ext.udc.enabled", "true" ) ) );
    }
    @Test
    public void testEnableRemoteShell()
    {
        ConfigurationMigrator migrator = new GraphDatabaseConfigurationMigrator( );
        assertThat( migrator.apply( stringMap( "enable_remote_shell", "true" ), StringLogger.DEV_NULL ),
                equalTo( stringMap( "remote_shell_enabled", "true" ) ) );
        assertThat( migrator.apply( stringMap( "enable_remote_shell", "false" ), StringLogger.DEV_NULL ),
                equalTo( stringMap( "remote_shell_enabled", "false" ) ) );
        // The old setting could carry embedded options ("port=1234"); these expand
        // into the individual remote_shell_* settings with defaults filled in.
        assertThat( migrator.apply( stringMap( "enable_remote_shell", "port=1234" ), StringLogger.DEV_NULL ),
                equalTo( stringMap( "remote_shell_enabled", "true","remote_shell_port","1234","remote_shell_read_only","false","remote_shell_name","shell" ) ) );
    }
    @Test
    public void testGCRRenamedToHPC()
    {
        ConfigurationMigrator migrator = new GraphDatabaseConfigurationMigrator( );
        TestLogger log = new TestLogger();
        // When & Then
        assertThat( migrator.apply( stringMap( "cache_type", "gcr" ), log ),
                equalTo( stringMap( "cache_type", "hpc" ) ) );
        log.assertAtLeastOnce( warn( "'gcr' cache type has been renamed to 'hpc', High Performance Cache." ) );
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_configuration_TestGraphDatabaseConfigurationMigrator.java
|
4,459
|
/**
 * Base implementation of {@code ConfigurationMigrator}: holds an ordered list
 * of {@link Migration}s and applies every one whose precondition matches the
 * raw configuration, logging one generic deprecation warning plus a specific
 * message per applied migration.
 */
public class BaseConfigurationMigrator implements ConfigurationMigrator {
    /** A single migration step: a precondition, a transformation and a deprecation message. */
    public interface Migration
    {
        boolean appliesTo(Map<String, String> rawConfiguration);
        Map<String, String> apply(Map<String, String> rawConfiguration);
        String getDeprecationMessage();
    }
    /**
     * Base for migrations keyed on one deprecated property: when present the key
     * is removed and {@link #setValueWithOldSetting} translates its value.
     */
    public static abstract class SpecificPropertyMigration implements Migration
    {
        // Assigned once in the constructor; final documents the immutability.
        private final String propertyKey;
        private final String deprecationMessage;
        public SpecificPropertyMigration(String propertyKey, String deprecationMessage)
        {
            this.propertyKey = propertyKey;
            this.deprecationMessage = deprecationMessage;
        }
        public boolean appliesTo(Map<String, String> rawConfiguration)
        {
            return rawConfiguration.containsKey(propertyKey);
        }
        public Map<String, String> apply(Map<String, String> rawConfiguration)
        {
            String value = rawConfiguration.get(propertyKey);
            rawConfiguration.remove(propertyKey);
            setValueWithOldSetting(value, rawConfiguration);
            return rawConfiguration;
        }
        public String getDeprecationMessage()
        {
            return deprecationMessage;
        }
        /** Translate the old property's value into the new configuration (mutating the map). */
        public abstract void setValueWithOldSetting(String value, Map<String, String> rawConfiguration);
    }
    /** Migration that moves a value verbatim from a deprecated key to its new key. */
    public static class PropertyRenamed extends SpecificPropertyMigration
    {
        private final String newKey;
        public PropertyRenamed(String oldKey, String newKey, String deprecationMessage)
        {
            super(oldKey, deprecationMessage);
            this.newKey = newKey;
        }
        public void setValueWithOldSetting(String value, Map<String, String> rawConfiguration)
        {
            rawConfiguration.put(newKey, value);
        }
    }
    /** Migration that rewrites one specific value of a property to a new value. */
    public static class ConfigValueChanged implements Migration
    {
        private final String propertyKey;
        private final String oldValue;
        private final String newValue;
        private final String message;
        public ConfigValueChanged( String propertyKey, String oldValue, String newValue, String message )
        {
            this.propertyKey = propertyKey;
            this.oldValue = oldValue;
            this.newValue = newValue;
            this.message = message;
        }
        @Override
        public boolean appliesTo( Map<String, String> rawConfiguration )
        {
            // Null-safe: the previous containsKey+get combination threw an NPE when
            // the key was mapped to null; such an entry now simply does not match.
            String currentValue = rawConfiguration.get( propertyKey );
            return currentValue != null && currentValue.equalsIgnoreCase( oldValue );
        }
        @Override
        public Map<String, String> apply( Map<String, String> rawConfiguration )
        {
            rawConfiguration.put( propertyKey, newValue );
            return rawConfiguration;
        }
        @Override
        public String getDeprecationMessage()
        {
            return message;
        }
    }
    /** Convenience factory for the common rename-a-property migration. */
    public static Migration propertyRenamed(String oldKey, String newKey, String deprecationMessage)
    {
        return new PropertyRenamed(oldKey, newKey, deprecationMessage);
    }
    private final List<Migration> migrations = new ArrayList<>();
    public void add(Migration migration)
    {
        migrations.add(migration);
    }
    /**
     * Runs all registered migrations in registration order. The first applicable
     * migration triggers a one-time generic warning; each applied migration then
     * logs its own specific deprecation message.
     */
    @Override
    public Map<String, String> apply(Map<String, String> rawConfiguration, StringLogger log)
    {
        boolean printedDeprecationMessage = false;
        for(Migration migration : migrations)
        {
            if(migration.appliesTo(rawConfiguration))
            {
                if(!printedDeprecationMessage)
                {
                    printedDeprecationMessage = true;
                    log.warn( "WARNING! Deprecated configuration options used. See manual for details" );
                }
                rawConfiguration = migration.apply(rawConfiguration);
                log.warn( migration.getDeprecationMessage() );
            }
        }
        return rawConfiguration;
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_configuration_BaseConfigurationMigrator.java
|
4,460
|
/**
 * Service-loadable factory for a kernel extension. {@code DEPENDENCIES} is an
 * interface of getters whose return types are resolved from the kernel's
 * dependency resolver and handed to {@link #newKernelExtension}.
 */
public abstract class KernelExtensionFactory<DEPENDENCIES> extends Service
{
    protected KernelExtensionFactory( String key )
    {
        super( key );
    }
    /**
     * Return the class that contains GraphDatabaseSetting fields that define
     * the properties needed by this extension.
     *
     * @return a class or null if no settings are needed
     */
    public Class getSettingsClass()
    {
        return null;
    }
    /**
     * Create a new instance of this kernel extension.
     *
     * @param dependencies proxy implementing the DEPENDENCIES interface, with each
     *                     getter backed by the kernel's dependency resolver
     * @return the extension's lifecycle object, managed by the kernel
     * @throws Throwable if the extension cannot be created
     */
    public abstract Lifecycle newKernelExtension( DEPENDENCIES dependencies )
            throws Throwable;
    @Override
    public String toString()
    {
        return "KernelExtension:" + getClass().getSimpleName() + getKeys();
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_extension_KernelExtensionFactory.java
|
4,461
|
/**
 * Thrown by the operations-count guard when the number of guarded operations
 * exceeds its configured maximum; carries the count observed at the moment of
 * the violation.
 */
public class GuardOperationsCountException extends GuardException
{
    private final long opsCount;
    public GuardOperationsCountException( final long opsCount )
    {
        super( String.format( "max ops (ops=%d)", opsCount ) );
        this.opsCount = opsCount;
    }
    /** @return the operation count at the time the guard tripped. */
    public long getOpsCount()
    {
        return opsCount;
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_guard_GuardOperationsCountException.java
|
4,462
|
/**
 * Common base for exceptions thrown when an execution guard aborts an
 * operation; unchecked because callers cannot recover inside the guarded code.
 */
public class GuardException extends RuntimeException
{
    protected GuardException( final String message )
    {
        super( message );
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_guard_GuardException.java
|
4,463
|
/**
 * Guard that trips once wall-clock time passes an absolute deadline.
 * Non-static inner class: uses the enclosing Guard's {@code logger}.
 */
public class Timeout implements GuardInternal
{
    // Absolute deadline in epoch millis (not a duration).
    private final long valid;
    // Creation time, kept only so the log line can report the configured span.
    private final long start;
    private Timeout( final long valid )
    {
        this.valid = valid;
        this.start = currentTimeMillis();
    }
    @Override
    public void check()
    {
        if ( valid < currentTimeMillis() )
        {
            final long overtime = currentTimeMillis() - valid;
            logger.logMessage( "guard-timeout:" + (valid - start) + "(+" + overtime + ")ms" );
            throw new GuardTimeoutException( overtime );
        }
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_guard_Guard.java
|
4,464
|
/**
 * Guard that trips after more than {@code max} calls to {@link #check()}.
 * Non-static inner class: uses the enclosing Guard's {@code logger}.
 * Not thread-safe on its own; installed per-thread via the guard's ThreadLocal.
 */
public class OperationsCount implements GuardInternal
{
    private final long max;
    private long opsCount = 0;
    private OperationsCount( final long max )
    {
        this.max = max;
    }
    @Override
    public void check()
    {
        opsCount++;
        if ( max < opsCount )
        {
            // NOTE(review): message says "guard-timeout" although this is the
            // ops-count guard -- presumably a shared log tag; confirm before changing.
            logger.logMessage( "guard-timeout: node-ops: more than " + max );
            throw new GuardOperationsCountException( opsCount );
        }
    }
    /** @return number of check() calls observed so far. */
    public long getOpsCount()
    {
        return opsCount;
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_guard_Guard.java
|
4,465
|
/**
 * Per-thread execution guard: a thread installs a {@link GuardInternal}
 * (operation-count or timeout based), guarded code calls {@link #check()} at
 * strategic points, and the guard aborts the operation by throwing a
 * {@link GuardException} when its limit is exceeded.
 * Thread-safety: the active guard is kept in a ThreadLocal, so each thread
 * manages its own guard independently.
 */
public class Guard
{
    private final ThreadLocal<GuardInternal> threadLocal = new ThreadLocal<GuardInternal>();
    private final StringLogger logger;

    public Guard( final StringLogger logger )
    {
        this.logger = logger;
    }

    /** Consults the calling thread's active guard, if any; may throw a GuardException. */
    public void check()
    {
        GuardInternal guardInternal = currentGuard();
        if ( guardInternal != null )
        {
            guardInternal.check();
        }
    }

    /**
     * @return the calling thread's active guard, or null if none is installed.
     *         The cast is unchecked by design: callers are expected to request
     *         the concrete guard type they themselves started.
     */
    @SuppressWarnings( "unchecked" )
    public <T extends GuardInternal> T currentGuard()
    {
        return (T) threadLocal.get();
    }

    /** Installs an operations-count guard for the calling thread. */
    public void startOperationsCount( final long maxOps )
    {
        start( new OperationsCount( maxOps ) );
    }

    /** Installs a timeout guard valid for the given duration from now. */
    public void startTimeout( final long validForInMilliSeconds )
    {
        final Timeout timeout = new Timeout( validForInMilliSeconds + currentTimeMillis() );
        start( timeout );
    }

    /** Installs the given guard for the calling thread, replacing any previous one. */
    public void start( final GuardInternal guard )
    {
        threadLocal.set( guard );
    }

    /**
     * Uninstalls and returns the calling thread's guard, or returns null when
     * no guard was active.
     */
    public <T extends GuardInternal> T stop()
    {
        T guardInternal = currentGuard();
        if ( guardInternal != null )
        {
            threadLocal.remove();
        }
        return guardInternal;
    }

    public interface GuardInternal
    {
        void check();
    }

    /**
     * Guard that trips after more than {@code max} calls to {@link #check()}.
     * Non-static: uses the enclosing Guard's logger.
     */
    public class OperationsCount implements GuardInternal
    {
        private final long max;
        private long opsCount = 0;

        private OperationsCount( final long max )
        {
            this.max = max;
        }

        @Override
        public void check()
        {
            opsCount++;
            if ( max < opsCount )
            {
                logger.logMessage( "guard-timeout: node-ops: more than " + max );
                throw new GuardOperationsCountException( opsCount );
            }
        }

        /** @return number of check() calls observed so far. */
        public long getOpsCount()
        {
            return opsCount;
        }
    }

    /**
     * Guard that trips once wall-clock time passes an absolute deadline.
     * Non-static: uses the enclosing Guard's logger.
     */
    public class Timeout implements GuardInternal
    {
        // Absolute deadline in epoch millis.
        private final long valid;
        // Creation time, used only to log the configured span on violation.
        private final long start;

        private Timeout( final long valid )
        {
            this.valid = valid;
            this.start = currentTimeMillis();
        }

        @Override
        public void check()
        {
            if ( valid < currentTimeMillis() )
            {
                final long overtime = currentTimeMillis() - valid;
                logger.logMessage( "guard-timeout:" + (valid - start) + "(+" + overtime + ")ms" );
                throw new GuardTimeoutException( overtime );
            }
        }
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_guard_Guard.java
|
4,466
|
{
@Override
public void handle( KernelExtensionFactory kernelExtensionFactory, UnsatisfiedDepencyException e )
{
}
};
| false
|
community_kernel_src_main_java_org_neo4j_kernel_extension_UnsatisfiedDependencyStrategies.java
|
4,467
|
{
@Override
public void handle( KernelExtensionFactory kernelExtensionFactory, UnsatisfiedDepencyException e )
{
throw e;
}
};
| false
|
community_kernel_src_main_java_org_neo4j_kernel_extension_UnsatisfiedDependencyStrategies.java
|
4,468
|
/**
 * Factory methods for the two standard reactions to a kernel extension whose
 * dependencies cannot be resolved: fail fast, or silently skip the extension.
 * Both strategies are stateless, so one shared instance of each is returned.
 */
public class UnsatisfiedDependencyStrategies
{
    private static final UnsatisfiedDependencyStrategy FAIL = new UnsatisfiedDependencyStrategy()
    {
        @Override
        public void handle( KernelExtensionFactory kernelExtensionFactory, UnsatisfiedDepencyException e )
        {
            // Propagate: aborts loading of the offending extension.
            throw e;
        }
    };
    private static final UnsatisfiedDependencyStrategy IGNORE = new UnsatisfiedDependencyStrategy()
    {
        @Override
        public void handle( KernelExtensionFactory kernelExtensionFactory, UnsatisfiedDepencyException e )
        {
            // Intentionally empty: the extension is simply not loaded.
        }
    };

    private UnsatisfiedDependencyStrategies()
    { // utility class; no instances
    }

    /** @return strategy that rethrows the dependency failure. */
    public static UnsatisfiedDependencyStrategy fail()
    {
        return FAIL;
    }

    /** @return strategy that swallows the dependency failure. */
    public static UnsatisfiedDependencyStrategy ignore()
    {
        return IGNORE;
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_extension_UnsatisfiedDependencyStrategies.java
|
4,469
|
/**
 * Thrown when a kernel extension's declared dependency cannot be resolved by
 * the dependency resolver.
 * NOTE(review): the class name misspells "Dependency"; it is part of the
 * visible API here, so it is documented rather than renamed.
 */
static class UnsatisfiedDepencyException extends RuntimeException
{
    public UnsatisfiedDepencyException( Throwable cause )
    {
        super( cause );
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_extension_KernelExtensions.java
|
4,470
|
/**
 * Predicate accepting only objects that are instances of {@code type}.
 * Implements the raw Predicate interface -- presumably the project-local
 * Predicate; its generic signature is not visible here, so the raw form is kept.
 */
private static class TypeFilter<T> implements Predicate
{
    private final Class<T> type;
    public TypeFilter( Class<T> type )
    {
        this.type = type;
    }
    @Override
    public boolean accept( Object extension )
    {
        return type.isInstance( extension );
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_extension_KernelExtensions.java
|
4,471
|
/**
 * InvocationHandler backing the dynamic "dependencies" proxy handed to kernel
 * extension factories: every getter call on the proxy is answered by resolving
 * the getter's return type through the enclosing dependency resolver.
 * Non-static: needs the enclosing instance's {@code dependencyResolver}.
 */
private class KernelExtensionHandler
        implements InvocationHandler
{
    @Override
    public Object invoke( Object proxy, Method method, Object[] args ) throws Throwable
    {
        try
        {
            return dependencyResolver.resolveDependency( method.getReturnType() );
        }
        catch ( IllegalArgumentException e )
        {
            // Translate: lets callers distinguish "dependency missing" from other errors.
            throw new UnsatisfiedDepencyException( e );
        }
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_extension_KernelExtensions.java
|
4,472
|
private class CastFunction<T> implements Function<Object, T>
{
private final Class<T> type;
public CastFunction( Class<T> type )
{
this.type = type;
}
@Override
public T apply( Object o )
{
return type.cast( o );
}
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_extension_KernelExtensions.java
|
4,473
|
{
@Override
public void notify( KernelExtensionListener listener )
{
listener.stoppingKernelExtension( instance );
}
} );
| false
|
community_kernel_src_main_java_org_neo4j_kernel_extension_KernelExtensions.java
|
4,474
|
{
@Override
public void notify( KernelExtensionListener listener )
{
listener.startedKernelExtension( instance );
}
} );
| false
|
community_kernel_src_main_java_org_neo4j_kernel_extension_KernelExtensions.java
|
4,475
|
{
@Override
public void notifyStatusChanged( final Object instance, LifecycleStatus from, LifecycleStatus to )
{
if ( to.equals( LifecycleStatus.STARTED ) )
{
Listeners.notifyListeners( listeners, new Listeners.Notification<KernelExtensionListener>()
{
@Override
public void notify( KernelExtensionListener listener )
{
listener.startedKernelExtension( instance );
}
} );
}
else if ( to.equals( LifecycleStatus.STOPPING ) )
{
Listeners.notifyListeners( listeners, new Listeners.Notification<KernelExtensionListener>()
{
@Override
public void notify( KernelExtensionListener listener )
{
listener.stoppingKernelExtension( instance );
}
} );
}
}
} );
| false
|
community_kernel_src_main_java_org_neo4j_kernel_extension_KernelExtensions.java
|
4,476
|
/**
 * Manages the life cycle of all kernel extensions: instantiates each registered
 * {@link KernelExtensionFactory} with a dynamic-proxy "dependencies" object,
 * adds the resulting Lifecycle to an internal LifeSupport, and fans out
 * start/stop transitions to registered {@link KernelExtensionListener}s. Also
 * acts as a DependencyResolver over the running extension instances.
 */
public class KernelExtensions extends DependencyResolver.Adapter implements Lifecycle
{
    private final List<KernelExtensionFactory<?>> kernelExtensionFactories;
    private final DependencyResolver dependencyResolver;
    private final LifeSupport life = new LifeSupport();
    // Keyed by each factory's key set so an extension can later be removed via its factory.
    private final Map<Iterable<String>, Lifecycle> extensions = new HashMap<Iterable<String>, Lifecycle>();
    private Iterable<KernelExtensionListener> listeners = Listeners.newListeners();
    private final UnsatisfiedDependencyStrategy unsatisfiedDepencyStrategy;
    public KernelExtensions( Iterable<KernelExtensionFactory<?>> kernelExtensionFactories, Config config,
                             DependencyResolver dependencyResolver, UnsatisfiedDependencyStrategy unsatisfiedDepencyStrategy )
    {
        this.unsatisfiedDepencyStrategy = unsatisfiedDepencyStrategy;
        // Defensive copy: factories may be added/removed after construction.
        this.kernelExtensionFactories = Iterables.addAll( new ArrayList<KernelExtensionFactory<?>>(),
                kernelExtensionFactories );
        this.dependencyResolver = dependencyResolver;
        // Forward per-extension lifecycle transitions to the listener set.
        life.addLifecycleListener( new LifecycleListener()
        {
            @Override
            public void notifyStatusChanged( final Object instance, LifecycleStatus from, LifecycleStatus to )
            {
                if ( to.equals( LifecycleStatus.STARTED ) )
                {
                    Listeners.notifyListeners( listeners, new Listeners.Notification<KernelExtensionListener>()
                    {
                        @Override
                        public void notify( KernelExtensionListener listener )
                        {
                            listener.startedKernelExtension( instance );
                        }
                    } );
                }
                else if ( to.equals( LifecycleStatus.STOPPING ) )
                {
                    Listeners.notifyListeners( listeners, new Listeners.Notification<KernelExtensionListener>()
                    {
                        @Override
                        public void notify( KernelExtensionListener listener )
                        {
                            listener.stoppingKernelExtension( instance );
                        }
                    } );
                }
            }
        } );
    }
    /**
     * Instantiates every registered factory and adds each extension to the
     * managed life cycle; a factory whose dependencies cannot be resolved is
     * handled by the configured UnsatisfiedDependencyStrategy instead of failing
     * the whole init (unless that strategy rethrows).
     */
    @Override
    public void init() throws Throwable
    {
        for ( KernelExtensionFactory kernelExtensionFactory : kernelExtensionFactories )
        {
            // "configuration" is the dynamic dependencies proxy for this factory.
            Object configuration = getKernelExtensionDependencies( kernelExtensionFactory );
            try
            {
                extensions.put( kernelExtensionFactory.getKeys(),
                        life.add( kernelExtensionFactory.newKernelExtension( configuration ) ) );
            }
            catch ( UnsatisfiedDepencyException e )
            {
                unsatisfiedDepencyStrategy.handle( kernelExtensionFactory, e );
            }
        }
        life.init();
    }
    @Override
    public void start() throws Throwable
    {
        life.start();
    }
    @Override
    public void stop() throws Throwable
    {
        life.stop();
    }
    @Override
    public void shutdown() throws Throwable
    {
        life.shutdown();
    }
    /** @return true when a factory of (or assignable to) the given class is registered. */
    public boolean isRegistered( Class<?> kernelExtensionFactoryClass )
    {
        for ( KernelExtensionFactory<?> kernelExtensionFactory : kernelExtensionFactories )
        {
            if ( kernelExtensionFactoryClass.isInstance( kernelExtensionFactory ) )
            {
                return true;
            }
        }
        return false;
    }
    /**
     * Registers and instantiates an additional factory at runtime; a duplicate
     * registration is silently ignored. Synchronized against concurrent add/remove.
     */
    public synchronized void addKernelExtension( KernelExtensionFactory kernelExtensionFactory )
    {
        // Check that it is not already registered
        if ( kernelExtensionFactories.contains( kernelExtensionFactory ) )
        {
            return;
        }
        Lifecycle extension = null;
        try
        {
            extension = kernelExtensionFactory.newKernelExtension( getKernelExtensionDependencies(
                    kernelExtensionFactory ) );
            extensions.put( kernelExtensionFactory.getKeys(), extension );
            // Add to list of current factories
            kernelExtensionFactories.add( kernelExtensionFactory );
        }
        catch ( Throwable throwable )
        {
            throw new LifecycleException( extension, LifecycleStatus.NONE, LifecycleStatus.INITIALIZING, throwable );
        }
        life.add( extension );
    }
    /**
     * Removes the extension created from the given factory, if any, taking it
     * out of the managed life cycle. Synchronized against concurrent add/remove.
     */
    public synchronized void removeKernelExtension( KernelExtensionFactory kernelExtensionFactory )
    {
        Lifecycle extension = extensions.remove( kernelExtensionFactory.getKeys() );
        if ( extension != null )
        {
            kernelExtensionFactories.remove( kernelExtensionFactory );
            life.remove( extension );
        }
    }
    /**
     * Registers a listener; if extensions are already running it is immediately
     * told about each started instance so it never misses earlier starts.
     */
    public void addKernelExtensionListener( KernelExtensionListener listener )
    {
        listeners = Listeners.addListener( listener, listeners );
        // Notify listener about already started instances
        if ( life.getStatus().equals( LifecycleStatus.STARTED ) )
        {
            for ( Lifecycle extension : life.getLifecycleInstances() )
            {
                listener.startedKernelExtension( extension );
            }
        }
    }
    public void removeKernelExtensionListener( KernelExtensionListener listener )
    {
        listeners = Listeners.removeListener( listener, listeners );
    }
    /**
     * Resolves a dependency from among the currently managed extension
     * instances, filtering by type and delegating the final pick to the selector.
     */
    @Override
    public <T> T resolveDependency( final Class<T> type, SelectionStrategy selector ) throws IllegalArgumentException
    {
        Iterable<Lifecycle> filtered = filter( new TypeFilter( type ), life.getLifecycleInstances() );
        Iterable<T> casted = map( new CastFunction( type ), filtered );
        return selector.select( type, casted );
    }
    /**
     * Builds the dynamic proxy implementing the factory's DEPENDENCIES type
     * parameter; each getter call is answered by the dependency resolver.
     * NOTE(review): reads the type argument off getGenericSuperclass(), which
     * assumes the factory class directly extends KernelExtensionFactory --
     * an intermediate superclass would break this; confirm before reuse.
     */
    private Object getKernelExtensionDependencies( KernelExtensionFactory<?> factory )
    {
        Class configurationClass = (Class) ((ParameterizedType) factory.getClass().getGenericSuperclass())
                .getActualTypeArguments()[0];
        return Proxy.newProxyInstance( configurationClass.getClassLoader(), new Class[]{configurationClass},
                new KernelExtensionHandler() );
    }
    public Iterable<KernelExtensionFactory<?>> listFactories()
    {
        return kernelExtensionFactories;
    }
    /** Predicate accepting only instances of {@code type}. */
    private static class TypeFilter<T> implements Predicate
    {
        private final Class<T> type;
        public TypeFilter( Class<T> type )
        {
            this.type = type;
        }
        @Override
        public boolean accept( Object extension )
        {
            return type.isInstance( extension );
        }
    }
    /** Resolves each proxied getter's return type through the dependency resolver. */
    private class KernelExtensionHandler
            implements InvocationHandler
    {
        @Override
        public Object invoke( Object proxy, Method method, Object[] args ) throws Throwable
        {
            try
            {
                return dependencyResolver.resolveDependency( method.getReturnType() );
            }
            catch ( IllegalArgumentException e )
            {
                throw new UnsatisfiedDepencyException( e );
            }
        }
    }
    /** Function narrowing an Object to {@code type} via Class.cast. */
    private class CastFunction<T> implements Function<Object, T>
    {
        private final Class<T> type;
        public CastFunction( Class<T> type )
        {
            this.type = type;
        }
        @Override
        public T apply( Object o )
        {
            return type.cast( o );
        }
    }
    /** Thrown when a proxied dependency getter cannot be resolved. */
    static class UnsatisfiedDepencyException extends RuntimeException
    {
        public UnsatisfiedDepencyException( Throwable cause )
        {
            super( cause );
        }
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_extension_KernelExtensions.java
|
4,477
|
/**
 * Diagnostics helper that renders the kernel extensions discoverable on the
 * classpath, for inclusion in startup/error messages.
 */
public abstract class KernelExtensionUtil
{
    private KernelExtensionUtil()
    { // No instances allowed
    }
    /**
     * Scans every classpath entry; for directory entries containing a
     * META-INF/services directory, lists the service files found there and, when
     * a KernelExtensionFactory service file exists, whether each listed extension
     * class is actually loadable.
     *
     * @return a multi-line human-readable report, ending with all classpath entries
     */
    public static String servicesClassPathEntryInformation()
    {
        String separator = System.lineSeparator();
        StringBuilder result = new StringBuilder( "Kernel extensions available on classpath: " );
        StringBuilder classPath = new StringBuilder();
        for ( String entry : getProperty( "java.class.path" ).split( pathSeparator ) )
        {
            classPath.append( separator ).append( " " ).append( entry );
            File entryFile = new File( entry );
            if ( entryFile.isDirectory() )
            { // Might we have a directory containing META-INF/services here?
                File servicesDir = new File( new File( entryFile, "META-INF" ), "services" );
                if ( servicesDir.exists() )
                {
                    result.append( separator ).append(
                            "Listing service files and kernel extensions where possible in " + servicesDir + ":" );
                    // listFiles() returns null on an I/O error or if the directory
                    // vanished since the exists() check; guard against the NPE.
                    File[] serviceFiles = servicesDir.listFiles();
                    if ( serviceFiles != null )
                    {
                        for ( File serviceFile : serviceFiles )
                        {
                            if ( serviceFile.isFile() )
                            {
                                result.append( separator ).append( " " ).append( serviceFile.getName() );
                            }
                        }
                    }
                    File extensionsFile = new File( servicesDir, KernelExtensionFactory.class.getName() );
                    if ( extensionsFile.exists() )
                    {
                        appendKernelExtensionsList( extensionsFile, result, separator + " + " );
                    }
                }
            }
        }
        return result.append( separator ).append( separator )
                .append( "Class path entries:" ).append( classPath ).toString();
    }
    /** Appends each class name listed in {@code file}, noting whether it is loadable. */
    private static void appendKernelExtensionsList( File file, StringBuilder to, String separator )
    {
        try ( BufferedReader reader = new BufferedReader( new FileReader( file ) ) )
        {
            String line;
            while ( (line = reader.readLine()) != null )
            {
                boolean exists = tryLoadClass( line );
                to.append( separator ).append( line ).append( " (" + (exists ? "exists" : "DOES NOT exist") + ")" );
            }
        }
        catch ( IOException e )
        {
            // Best effort: note the failure inline rather than aborting the report.
            to.append( "Couldn't read due to " + e.getMessage() );
        }
    }
    /** @return true when {@code className} is loadable via Class.forName. */
    private static boolean tryLoadClass( String className )
    {
        try
        {
            Class.forName( className );
            return true;
        }
        catch ( ClassNotFoundException e )
        {
            return false;
        }
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_extension_KernelExtensionUtil.java
|
4,478
|
/**
 * No-op implementation of KernelExtensionListener, for subclasses that only
 * care about one of the callbacks.
 */
class Adapter implements KernelExtensionListener
{
    @Override
    public void startedKernelExtension( Object extension )
    {
        // intentionally empty
    }
    @Override
    public void stoppingKernelExtension( Object extension )
    {
        // intentionally empty
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_extension_KernelExtensionListener.java
|
4,479
|
/**
 * Contract test base for kernel extension factories: verifies that a factory
 * class has a public no-arg constructor, is discoverable as a service under its
 * key, has value-equal instances, and loads into a running database.
 */
public abstract class KernelExtensionFactoryContractTest
{
    protected final Class<? extends KernelExtensionFactory<?>> extClass;
    private final String key;
    private final TargetDirectory target;
    public KernelExtensionFactoryContractTest( String key, Class<? extends KernelExtensionFactory<?>> extClass )
    {
        this.target = TargetDirectory.forTest( getClass() );
        this.extClass = extClass;
        this.key = key;
    }
    // NOTE(review): the "name" parameter is not used by the current implementation
    // (an impermanent database is created regardless) -- confirm before relying on it.
    public GraphDatabaseAPI graphdb( String name, int instance )
    {
        Map<String, String> config = configuration( true, instance );
        return (GraphDatabaseAPI) new TestGraphDatabaseFactory().newImpermanentDatabaseBuilder().setConfig( config ).newGraphDatabase();
    }
    /**
     * Override to create default configuration for the {@link org.neo4j.kernel.extension.KernelExtensionFactory}
     * under test.
     *
     * @param shouldLoad <code>true</code> if configuration that makes the
     *            extension load should be created, <code>false</code> if
     *            configuration that makes the extension not load should be
     *            created.
     * @param instance used for differentiating multiple instances that will run
     *            simultaneously.
     * @return configuration for an {@link org.neo4j.kernel.EmbeddedGraphDatabase} that
     *         exercises the extension accordingly; the default is an empty map.
     */
    protected Map<String, String> configuration( boolean shouldLoad, int instance )
    {
        return MapUtil.stringMap();
    }
    static KernelExtensions getExtensions( GraphDatabaseService graphdb )
    {
        return ((GraphDatabaseAPI) graphdb).getDependencyResolver().resolveDependency( KernelExtensions.class );
    }
    @Test
    public void extensionShouldHavePublicNoArgConstructor() throws Exception
    {
        KernelExtensionFactory<?> instance = null;
        try
        {
            instance = newInstance();
        }
        catch ( IllegalArgumentException failure )
        {
            failure.printStackTrace();
            fail( "Contract violation: extension class must have public no-arg constructor (Exception in stderr)" );
        }
        assertNotNull( instance );
    }
    @Test
    public void shouldBeAbleToLoadExtensionAsAServiceProvider() throws Exception
    {
        KernelExtensionFactory<?> instance = null;
        try
        {
            instance = loadInstance();
        }
        catch ( ClassCastException failure )
        {
            failure.printStackTrace();
            fail( "Loaded instance does not match the extension class (Exception in stderr)" );
        }
        assertNotNull( "Could not load the kernel extension with the provided key", instance );
        assertTrue( "Class of the loaded instance is a subclass of the extension class",
                instance.getClass() == extClass );
    }
    @Test
    public void differentInstancesShouldHaveEqualHashCodesAndBeEqual() throws Exception
    {
        // All three combinations must agree: new/new, loaded/loaded, loaded/new.
        KernelExtensionFactory<?> one = newInstance();
        KernelExtensionFactory<?> two = newInstance();
        assertEquals( "new instances have different hash codes", one.hashCode(), two.hashCode() );
        assertEquals( "new instances are not equals", one, two );
        one = loadInstance();
        two = loadInstance();
        assertEquals( "loaded instances have different hash codes", one.hashCode(), two.hashCode() );
        assertEquals( "loaded instances are not equals", one, two );
        one = loadInstance();
        two = newInstance();
        assertEquals( "loaded instance and new instance have different hash codes", one.hashCode(), two.hashCode() );
        assertEquals( "loaded instance and new instance are not equals", one, two );
    }
    @Test
    public void canLoadKernelExtension() throws Exception
    {
        GraphDatabaseService graphdb = graphdb( "graphdb", 0 );
        try
        {
            assertTrue( "Failed to load extension", getExtensions( graphdb ).isRegistered( extClass ) );
        }
        finally
        {
            graphdb.shutdown();
        }
    }
    /** Creates an instance via the public no-arg constructor, wrapping reflection failures. */
    private final KernelExtensionFactory<?> newInstance()
    {
        try
        {
            return extClass.newInstance();
        }
        catch ( Exception cause )
        {
            throw new IllegalArgumentException( "Could not instantiate extension class", cause );
        }
    }
    /** Loads the factory through the Service mechanism using the configured key. */
    protected final KernelExtensionFactory<?> loadInstance()
    {
        return extClass.cast( Service.load( KernelExtensionFactory.class, key ) );
    }
}
| false
|
community_kernel_src_test_java_org_neo4j_kernel_extension_KernelExtensionFactoryContractTest.java
|
4,480
|
/**
 * Migration that rewrites one specific (case-insensitively matched) value of a
 * property to a new value, leaving other values of the same property untouched.
 */
public static class ConfigValueChanged implements Migration
{
    private final String propertyKey;
    private final String oldValue;
    private final String newValue;
    private final String message;

    public ConfigValueChanged( String propertyKey, String oldValue, String newValue, String message )
    {
        this.propertyKey = propertyKey;
        this.oldValue = oldValue;
        this.newValue = newValue;
        this.message = message;
    }

    @Override
    public boolean appliesTo( Map<String, String> rawConfiguration )
    {
        // Null-safe: the previous containsKey+get combination threw an NPE when
        // the key was mapped to null; such an entry now simply does not match.
        String currentValue = rawConfiguration.get( propertyKey );
        return currentValue != null && currentValue.equalsIgnoreCase( oldValue );
    }

    @Override
    public Map<String, String> apply( Map<String, String> rawConfiguration )
    {
        rawConfiguration.put( propertyKey, newValue );
        return rawConfiguration;
    }

    @Override
    public String getDeprecationMessage()
    {
        return message;
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_configuration_BaseConfigurationMigrator.java
|
4,481
|
{
@Override
public Map<String, String> apply( Map<String, String> rawConfiguration, StringLogger log )
{
wasCalled.set( true );
return rawConfiguration;
}
};
| false
|
community_kernel_src_test_java_org_neo4j_kernel_configuration_AnnotationBasedConfigurationMigratorTest.java
|
4,482
|
/**
 * DefinedProperty holding a primitive boolean value; immutable.
 */
final class BooleanProperty extends DefinedProperty
{
    private final boolean value;
    BooleanProperty( int propertyKeyId, boolean value )
    {
        super( propertyKeyId );
        this.value = value;
    }
    @Override
    @SuppressWarnings("UnnecessaryUnboxing")
    public boolean valueEquals( Object other )
    {
        // Only a java.lang.Boolean with the same primitive value matches.
        return other instanceof Boolean && value == ((Boolean) other).booleanValue();
    }
    @Override
    public Boolean value()
    {
        return value;
    }
    @Override
    int valueHash()
    {
        // Custom hash for the property framework (not Boolean.hashCode's 1231/1237).
        return value ? -1 : 0;
    }
    @Override
    boolean hasEqualValue( DefinedProperty that )
    {
        // Unchecked cast: the framework presumably only calls this with a
        // same-typed property -- confirm against DefinedProperty's contract.
        return value == ((BooleanProperty) that).value;
    }
    @Override
    public boolean booleanValue()
    {
        return value;
    }
    @Override
    public boolean booleanValue( boolean defaultValue )
    {
        // A defined boolean property always has a value; the default is ignored.
        return value;
    }
    @Override
    public int sizeOfObjectInBytesIncludingOverhead()
    {
        // 8 bytes of payload plus the framework's object-overhead estimate.
        return withObjectOverhead( 8 );
    }
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_api_properties_BooleanProperty.java
|
4,483
|
FLOAT
{
@Override
int hashCode( Object array )
{
return Arrays.hashCode( (float[]) array );
}
@Override
boolean equals( Object array1, Object array2 )
{
return array2 instanceof float[] && Arrays.equals( (float[]) array1, (float[]) array2 );
}
@Override
Object clone( Object array )
{
return ((float[])array).clone();
}
};
| false
|
community_kernel_src_main_java_org_neo4j_kernel_api_properties_LazyArrayProperty.java
|
4,484
|
SHORT
{
@Override
int hashCode( Object array )
{
return Arrays.hashCode( (short[]) array );
}
@Override
boolean equals( Object array1, Object array2 )
{
return array2 instanceof short[] && Arrays.equals( (short[]) array1, (short[]) array2 );
}
@Override
Object clone( Object array )
{
return ((short[])array).clone();
}
},
| false
|
community_kernel_src_main_java_org_neo4j_kernel_api_properties_LazyArrayProperty.java
|
4,485
|
STRING
{
@Override
int hashCode( Object array )
{
return Arrays.hashCode( (String[]) array );
}
@Override
boolean equals( Object array1, Object array2 )
{
return array2 instanceof String[] && Arrays.equals( (String[]) array1, (String[]) array2 );
}
@Override
Object clone( Object array )
{
return ((String[])array).clone();
}
},
| false
|
community_kernel_src_main_java_org_neo4j_kernel_api_properties_LazyArrayProperty.java
|
4,486
|
DOUBLE
{
@Override
int hashCode( Object array )
{
return Arrays.hashCode( (double[]) array );
}
@Override
boolean equals( Object array1, Object array2 )
{
return array2 instanceof double[] && Arrays.equals( (double[]) array1, (double[]) array2 );
}
@Override
Object clone( Object array )
{
return ((double[])array).clone();
}
},
| false
|
community_kernel_src_main_java_org_neo4j_kernel_api_properties_LazyArrayProperty.java
|
4,487
|
BYTE
{
@Override
int hashCode( Object array )
{
return Arrays.hashCode( (byte[]) array );
}
@Override
boolean equals( Object array1, Object array2 )
{
return array2 instanceof byte[] && Arrays.equals( (byte[]) array1, (byte[]) array2 );
}
@Override
Object clone( Object array )
{
return ((byte[])array).clone();
}
},
| false
|
community_kernel_src_main_java_org_neo4j_kernel_api_properties_LazyArrayProperty.java
|
4,488
|
BOOLEAN
{
@Override
int hashCode( Object array )
{
return Arrays.hashCode( (boolean[]) array );
}
@Override
boolean equals( Object array1, Object array2 )
{
return array2 instanceof boolean[] && Arrays.equals( (boolean[]) array1, (boolean[]) array2 );
}
@Override
Object clone( Object array )
{
return ((boolean[])array).clone();
}
},
| false
|
community_kernel_src_main_java_org_neo4j_kernel_api_properties_LazyArrayProperty.java
|
4,489
|
LONG
{
@Override
int hashCode( Object array )
{
return Arrays.hashCode( (long[]) array );
}
@Override
boolean equals( Object array1, Object array2 )
{
return array2 instanceof long[] && Arrays.equals( (long[]) array1, (long[]) array2 );
}
@Override
Object clone( Object array )
{
return ((long[])array).clone();
}
},
| false
|
community_kernel_src_main_java_org_neo4j_kernel_api_properties_LazyArrayProperty.java
|
4,490
|
INT
{
@Override
int hashCode( Object array )
{
return Arrays.hashCode( (int[]) array );
}
@Override
boolean equals( Object array1, Object array2 )
{
return array2 instanceof int[] && Arrays.equals( (int[]) array1, (int[]) array2 );
}
@Override
Object clone( Object array )
{
return ((int[])array).clone();
}
},
| false
|
community_kernel_src_main_java_org_neo4j_kernel_api_properties_LazyArrayProperty.java
|
4,491
|
class LazyArrayProperty extends LazyProperty<Object>
{
/*
* Access to this field needs synchronization, since it must be safe for use from multiple threads.
* The synchronization of this field is carefully designed to be implicit.
*
*
* assuming: produceValue() is called under synchronization - this is where this field is written.
* produceValue() is called *before* assigning the volatile LazyProperty.value field
* (still under synchronization)
* assuming: value member field is volatile, so accessing it implies the required read barrier.
* type doesn't need to be volatile since any call path to it first reads value,
* it's ALWAYS written before value, implying write barrier, and read after value, implying read barrier.
*/
private Type type;
LazyArrayProperty( int propertyKeyId, final Callable<?> producer )
{
super( propertyKeyId, producer );
}
@Override
protected Object produceValue()
{
// this method is called under synchronization, before assigning LazyProperty.value ...
Object value = super.produceValue();
this.type = Type.from( value ); // ... so assigning type is safe
return value;
}
@Override
public boolean valueEquals( Object value )
{
Object myValue = value(); // value() accesses LazyProperty.value, implying a read barrier ...
return type.equals( myValue, value ); // ... so accessing type is safe
}
@Override
int valueHash()
{
Object myValue = value(); // value() accesses LazyProperty.value, implying a read barrier ...
return type.hashCode( myValue ); // ... so accessing type is safe
}
@Override
protected Object castAndPrepareForReturn( Object value )
{
// this method is invoked after accessing LazyProperty.value, implying a read barrier ...
return type.clone( value ); // ... so accessing type is safe
}
@Override
public int sizeOfObjectInBytesIncludingOverhead()
{
return super.sizeOfObjectInBytesIncludingOverhead() + SizeOfs.REFERENCE_SIZE;
}
private enum Type
{
INT
{
@Override
int hashCode( Object array )
{
return Arrays.hashCode( (int[]) array );
}
@Override
boolean equals( Object array1, Object array2 )
{
return array2 instanceof int[] && Arrays.equals( (int[]) array1, (int[]) array2 );
}
@Override
Object clone( Object array )
{
return ((int[])array).clone();
}
},
LONG
{
@Override
int hashCode( Object array )
{
return Arrays.hashCode( (long[]) array );
}
@Override
boolean equals( Object array1, Object array2 )
{
return array2 instanceof long[] && Arrays.equals( (long[]) array1, (long[]) array2 );
}
@Override
Object clone( Object array )
{
return ((long[])array).clone();
}
},
BOOLEAN
{
@Override
int hashCode( Object array )
{
return Arrays.hashCode( (boolean[]) array );
}
@Override
boolean equals( Object array1, Object array2 )
{
return array2 instanceof boolean[] && Arrays.equals( (boolean[]) array1, (boolean[]) array2 );
}
@Override
Object clone( Object array )
{
return ((boolean[])array).clone();
}
},
BYTE
{
@Override
int hashCode( Object array )
{
return Arrays.hashCode( (byte[]) array );
}
@Override
boolean equals( Object array1, Object array2 )
{
return array2 instanceof byte[] && Arrays.equals( (byte[]) array1, (byte[]) array2 );
}
@Override
Object clone( Object array )
{
return ((byte[])array).clone();
}
},
DOUBLE
{
@Override
int hashCode( Object array )
{
return Arrays.hashCode( (double[]) array );
}
@Override
boolean equals( Object array1, Object array2 )
{
return array2 instanceof double[] && Arrays.equals( (double[]) array1, (double[]) array2 );
}
@Override
Object clone( Object array )
{
return ((double[])array).clone();
}
},
STRING
{
@Override
int hashCode( Object array )
{
return Arrays.hashCode( (String[]) array );
}
@Override
boolean equals( Object array1, Object array2 )
{
return array2 instanceof String[] && Arrays.equals( (String[]) array1, (String[]) array2 );
}
@Override
Object clone( Object array )
{
return ((String[])array).clone();
}
},
SHORT
{
@Override
int hashCode( Object array )
{
return Arrays.hashCode( (short[]) array );
}
@Override
boolean equals( Object array1, Object array2 )
{
return array2 instanceof short[] && Arrays.equals( (short[]) array1, (short[]) array2 );
}
@Override
Object clone( Object array )
{
return ((short[])array).clone();
}
},
CHAR
{
@Override
int hashCode( Object array )
{
return Arrays.hashCode( (char[]) array );
}
@Override
boolean equals( Object array1, Object array2 )
{
return array2 instanceof char[] && Arrays.equals( (char[]) array1, (char[]) array2 );
}
@Override
Object clone( Object array )
{
return ((char[])array).clone();
}
},
FLOAT
{
@Override
int hashCode( Object array )
{
return Arrays.hashCode( (float[]) array );
}
@Override
boolean equals( Object array1, Object array2 )
{
return array2 instanceof float[] && Arrays.equals( (float[]) array1, (float[]) array2 );
}
@Override
Object clone( Object array )
{
return ((float[])array).clone();
}
};
abstract int hashCode( Object array );
abstract boolean equals( Object array1, Object array2 );
abstract Object clone( Object array );
public static Type from( Object array )
{
if ( !array.getClass().isArray() )
{
throw new IllegalArgumentException( array + " is not an array, it's a " + array.getClass() );
}
if ( array instanceof int[] )
{
return INT;
}
if ( array instanceof long[] )
{
return LONG;
}
if ( array instanceof boolean[] )
{
return BOOLEAN;
}
if ( array instanceof byte[] )
{
return BYTE;
}
if ( array instanceof double[] )
{
return DOUBLE;
}
if ( array instanceof String[] )
{
return STRING;
}
if ( array instanceof short[] )
{
return SHORT;
}
if ( array instanceof char[] )
{
return CHAR;
}
if ( array instanceof float[] )
{
return FLOAT;
}
throw new IllegalArgumentException( "Unrecognized array type " + array.getClass().getComponentType() );
}
}
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_api_properties_LazyArrayProperty.java
|
4,492
|
final class IntProperty extends DefinedProperty
{
private final int value;
IntProperty( int propertyKeyId, int value )
{
super(propertyKeyId);
this.value = value;
}
@Override
@SuppressWarnings("UnnecessaryUnboxing")
public boolean valueEquals( Object other )
{
if ( other instanceof Integer )
{
return value == ((Integer)other).intValue();
}
return valueCompare( value, other );
}
@Override
boolean hasEqualValue( DefinedProperty that )
{
return value == ((IntProperty) that).value;
}
@Override
public Integer value()
{
return value;
}
@Override
int valueHash()
{
return value;
}
@Override
public int intValue()
{
return value;
}
@Override
public long longValue()
{
return value;
}
@Override
public int sizeOfObjectInBytesIncludingOverhead()
{
return withObjectOverhead( 8 );
}
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_api_properties_IntProperty.java
|
4,493
|
class IntArrayProperty extends DefinedProperty
{
private final int[] value;
IntArrayProperty( int propertyKeyId, int[] value )
{
super( propertyKeyId );
assert value != null;
this.value = value;
}
@Override
public int[] value()
{
return value.clone();
}
@Override
public boolean valueEquals( Object value )
{
if ( value instanceof int[] )
{
return Arrays.equals( this.value, (int[]) value );
}
return valueCompare( this.value, value );
}
@Override
int valueHash()
{
return Arrays.hashCode( value );
}
@Override
boolean hasEqualValue( DefinedProperty that )
{
return Arrays.equals( this.value, ((IntArrayProperty) that).value );
}
@Override
public int sizeOfObjectInBytesIncludingOverhead()
{
return withObjectOverhead( withReference( withArrayOverhead( value.length*4 ) ) );
}
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_api_properties_IntArrayProperty.java
|
4,494
|
final class FloatProperty extends DefinedProperty
{
private final float value;
FloatProperty( int propertyKeyId, float value )
{
super( propertyKeyId );
this.value = value;
}
@Override
@SuppressWarnings("UnnecessaryUnboxing")
public boolean valueEquals( Object other )
{
if ( other instanceof Float )
{
return value == ((Float) other).floatValue();
}
return valueCompare( value, other );
}
@Override
boolean hasEqualValue( DefinedProperty that )
{
return value == ((FloatProperty) that).value;
}
@Override
public Number value()
{
return value;
}
@Override
int valueHash()
{
return floatToIntBits( value );
}
@Override
public int sizeOfObjectInBytesIncludingOverhead()
{
return withObjectOverhead( 8 );
}
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_api_properties_FloatProperty.java
|
4,495
|
class FloatArrayProperty extends DefinedProperty
{
private final float[] value;
FloatArrayProperty( int propertyKeyId, float[] value )
{
super( propertyKeyId );
assert value != null;
this.value = value;
}
@Override
public float[] value()
{
return value.clone();
}
@Override
public boolean valueEquals( Object value )
{
if ( value instanceof float[] )
{
return Arrays.equals( this.value, (float[]) value );
}
return valueCompare( this.value, value );
}
@Override
int valueHash()
{
return Arrays.hashCode( value );
}
@Override
boolean hasEqualValue( DefinedProperty that )
{
return Arrays.equals( this.value, ((FloatArrayProperty)that).value );
}
@Override
public int sizeOfObjectInBytesIncludingOverhead()
{
return withObjectOverhead( 4 + withReference( sizeOfArray( value ) ) );
}
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_api_properties_FloatArrayProperty.java
|
4,496
|
final class DoubleProperty extends DefinedProperty
{
private final double value;
DoubleProperty( int propertyKeyId, double value )
{
super( propertyKeyId );
this.value = value;
}
@Override
@SuppressWarnings("UnnecessaryUnboxing")
public boolean valueEquals( Object other )
{
if ( other instanceof Double )
{
return value == ((Double) other).doubleValue();
}
return valueCompare( value, other );
}
@Override
public Double value()
{
return value;
}
@Override
int valueHash()
{
long temp = Double.doubleToLongBits( value );
return (int) (temp ^ (temp >>> 32));
}
@Override
boolean hasEqualValue( DefinedProperty that )
{
return Double.compare( this.value, ((DoubleProperty) that).value ) == 0;
}
@Override
public int sizeOfObjectInBytesIncludingOverhead()
{
return withObjectOverhead( 8 );
}
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_api_properties_DoubleProperty.java
|
4,497
|
class DoubleArrayProperty extends DefinedProperty
{
private final double[] value;
DoubleArrayProperty( int propertyKeyId, double[] value )
{
super( propertyKeyId );
assert value != null;
this.value = value;
}
@Override
public double[] value()
{
return value.clone();
}
@Override
public boolean valueEquals( Object value )
{
if ( value instanceof double[] )
{
return Arrays.equals( this.value, (double[]) value );
}
return valueCompare( this.value, value );
}
@Override
int valueHash()
{
return Arrays.hashCode( value );
}
@Override
boolean hasEqualValue( DefinedProperty that )
{
return Arrays.equals( this.value, ((DoubleArrayProperty)that).value );
}
@Override
public int sizeOfObjectInBytesIncludingOverhead()
{
return withObjectOverhead( withReference( sizeOfArray( value ) ) );
}
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_api_properties_DoubleArrayProperty.java
|
4,498
|
{
@Override
public boolean typeEquals( Class<?> firstType, Class<?> otherType )
{ // Not always true, but we won't let type differences affect the outcome at this stage,
// since many types are compatible in this property comparison.
return true;
}
@Override
public boolean itemEquals( Object lhs, Object rhs )
{
return compareValues( lhs, rhs );
}
};
| false
|
community_kernel_src_main_java_org_neo4j_kernel_api_properties_DefinedProperty.java
|
4,499
|
public abstract class DefinedProperty extends Property implements SizeOfObject
{
@Override
public boolean isDefined()
{
return true;
}
@Override
public abstract Object value();
@Override
public Object value( Object defaultValue )
{
return value();
}
@Override
public String toString()
{
return getClass().getSimpleName() + "[propertyKeyId=" + propertyKeyId() + ", value=" + valueAsString() + "]";
}
@Override
public final boolean equals( Object o )
{
if ( this == o )
{
return true;
}
if ( o != null && getClass() == o.getClass() )
{
DefinedProperty that = (DefinedProperty) o;
return this.propertyKeyId == that.propertyKeyId && hasEqualValue( that );
}
return false;
}
@Override
public final int hashCode()
{
return propertyKeyId ^ valueHash();
}
abstract int valueHash();
abstract boolean hasEqualValue( DefinedProperty that );
@Override
public String valueAsString()
{
Object value = value();
if ( value.getClass().isArray() )
{
return ArrayUtil.toString( value );
}
return value.toString();
}
@Override
public String stringValue()
{
Object value = value();
throw new ClassCastException(
String.format( "[%s:%s] is not a String", value, value.getClass().getSimpleName() ) );
}
@Override
public String stringValue( String defaultValue )
{
return stringValue();
}
@Override
public Number numberValue()
{
Object value = value();
throw new ClassCastException(
String.format( "[%s:%s] is not a Number", value, value.getClass().getSimpleName() ) );
}
@Override
public Number numberValue( Number defaultValue )
{
return numberValue();
}
@Override
public int intValue()
{
Object value = value();
throw new ClassCastException(
String.format( "[%s:%s] is not an int", value, value.getClass().getSimpleName() ) );
}
@Override
public int intValue( int defaultValue )
{
return intValue();
}
@Override
public long longValue()
{
Object value = value();
throw new ClassCastException(
String.format( "[%s:%s] is not a long", value, value.getClass().getSimpleName() ) );
}
@Override
public long longValue( long defaultValue )
{
return longValue();
}
@Override
public boolean booleanValue()
{
Object value = value();
throw new ClassCastException(
String.format( "[%s:%s] is not a boolean", value, value.getClass().getSimpleName() ) );
}
@Override
public boolean booleanValue( boolean defaultValue )
{
return booleanValue();
}
DefinedProperty( int propertyKeyId )
{
super( propertyKeyId );
}
protected boolean valueCompare( Object lhs, Object rhs )
{
return compareValues( lhs, rhs );
}
private static boolean compareValues( Object lhs, Object rhs )
{
// COMPARE NUMBERS
if ( lhs instanceof Number && rhs instanceof Number )
{
return compareNumbers( (Number) lhs, (Number) rhs );
}
// COMPARE STRINGS
if ( (lhs instanceof String || lhs instanceof Character) &&
(rhs instanceof String || rhs instanceof Character) )
{
return lhs.toString().equals( rhs.toString() );
}
// COMPARE BOOLEANS
if ( lhs instanceof Boolean && rhs instanceof Boolean )
{
return compareBooleans( (Boolean) lhs, (Boolean) rhs );
}
// COMPARE ARRAYS
if ( lhs.getClass().isArray() && rhs.getClass().isArray() )
{
return ArrayUtil.equals( lhs, rhs, PROPERTY_EQUALITY );
}
return false;
}
private static boolean compareBooleans( Boolean lhs, Boolean rhs )
{
return lhs.equals( rhs );
}
private static boolean compareNumbers( Number aNumber, Number bNumber )
{
// If any of the two are non-integers
if ( aNumber instanceof Float
|| bNumber instanceof Float
|| aNumber instanceof Double
|| bNumber instanceof Double )
{
double b = bNumber.doubleValue();
double a = aNumber.doubleValue();
return a == b;
}
return aNumber.longValue() == bNumber.longValue();
}
private static final ArrayEquality PROPERTY_EQUALITY = new ArrayEquality()
{
@Override
public boolean typeEquals( Class<?> firstType, Class<?> otherType )
{ // Not always true, but we won't let type differences affect the outcome at this stage,
// since many types are compatible in this property comparison.
return true;
}
@Override
public boolean itemEquals( Object lhs, Object rhs )
{
return compareValues( lhs, rhs );
}
};
}
| false
|
community_kernel_src_main_java_org_neo4j_kernel_api_properties_DefinedProperty.java
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.