idx
int64
0
165k
question
stringlengths
73
4.15k
target
stringlengths
5
918
len_question
int64
21
890
len_target
int64
3
255
140,900
public Object command ( final OCommandRequestText iCommand ) { final OCommandExecutor executor = OCommandManager . instance ( ) . getExecutor ( iCommand ) ; executor . setProgressListener ( iCommand . getProgressListener ( ) ) ; executor . parse ( iCommand ) ; return executeCommand ( iCommand , executor ) ; }
Executes the command request and returns the result back .
75
11
140,901
public long pushPosition ( final long iPosition ) throws IOException { final int position = getHoles ( ) * RECORD_SIZE ; file . allocateSpace ( RECORD_SIZE ) ; file . writeLong ( position , iPosition ) ; if ( OLogManager . instance ( ) . isDebugEnabled ( ) ) OLogManager . instance ( ) . debug ( this , "Pushed new hole %s/#%d -> #%d:%d" , owner . getName ( ) , position / RECORD_SIZE , owner . getId ( ) , iPosition ) ; return position ; }
Append the hole to the end of segment
127
9
140,902
public long popLastEntryPosition ( ) throws IOException { // BROWSE IN ASCENDING ORDER UNTIL A GOOD POSITION IS FOUND (!=-1) for ( int pos = getHoles ( ) - 1 ; pos >= 0 ; -- pos ) { final long recycledPosition = file . readLong ( pos * RECORD_SIZE ) ; if ( recycledPosition > - 1 ) { if ( OLogManager . instance ( ) . isDebugEnabled ( ) ) OLogManager . instance ( ) . debug ( this , "Recycled hole %s/#%d -> #%d:%d" , owner . getName ( ) , pos , owner . getId ( ) , recycledPosition ) ; // SHRINK THE FILE file . removeTail ( ( getHoles ( ) - pos ) * RECORD_SIZE ) ; return recycledPosition ; } } return - 1 ; }
Returns and removes the recycled position if any .
189
9
140,903
public boolean removeEntryWithPosition ( final long iPosition ) throws IOException { // BROWSE IN ASCENDING ORDER UNTIL THE REQUESTED POSITION IS FOUND boolean canShrink = true ; for ( int pos = getHoles ( ) - 1 ; pos >= 0 ; -- pos ) { final long recycledPosition = file . readLong ( pos * RECORD_SIZE ) ; if ( recycledPosition == iPosition ) { if ( OLogManager . instance ( ) . isDebugEnabled ( ) ) OLogManager . instance ( ) . debug ( this , "Removing hole #%d containing the position #%d:%d" , pos , owner . getId ( ) , recycledPosition ) ; file . writeLong ( pos * RECORD_SIZE , - 1 ) ; if ( canShrink ) // SHRINK THE FILE file . removeTail ( ( getHoles ( ) - pos ) * RECORD_SIZE ) ; return true ; } else if ( iPosition != - 1 ) // NO NULL ENTRY: CAN'T SHRINK WITHOUT LOST OF ENTRIES canShrink = false ; } return false ; }
Removes a hole . Called on transaction recover to invalidate a delete for a record . Try to shrink the file if the invalidated entry is not in the middle of valid entries .
247
37
140,904
public void requestModificationLock ( ) { lock . readLock ( ) . lock ( ) ; if ( ! veto ) return ; if ( throwException ) { lock . readLock ( ) . unlock ( ) ; throw new OModificationOperationProhibitedException ( "Modification requests are prohibited" ) ; } boolean wasInterrupted = false ; Thread thread = Thread . currentThread ( ) ; waiters . add ( thread ) ; while ( veto ) { LockSupport . park ( this ) ; if ( Thread . interrupted ( ) ) wasInterrupted = true ; } waiters . remove ( thread ) ; if ( wasInterrupted ) thread . interrupt ( ) ; }
Tells the lock that thread is going to perform data modifications in storage . This method allows to perform several data modifications in parallel .
138
26
140,905
public OClass setSuperClass ( final OClass iSuperClass ) { getDatabase ( ) . checkSecurity ( ODatabaseSecurityResources . SCHEMA , ORole . PERMISSION_UPDATE ) ; final String cmd = String . format ( "alter class %s superclass %s" , name , iSuperClass . getName ( ) ) ; getDatabase ( ) . command ( new OCommandSQL ( cmd ) ) . execute ( ) ; setSuperClassInternal ( iSuperClass ) ; return this ; }
Set the super class .
106
5
140,906
private OClass addBaseClasses ( final OClass iBaseClass ) { if ( baseClasses == null ) baseClasses = new ArrayList < OClass > ( ) ; if ( baseClasses . contains ( iBaseClass ) ) return this ; baseClasses . add ( iBaseClass ) ; // ADD CLUSTER IDS OF BASE CLASS TO THIS CLASS AND ALL SUPER-CLASSES OClassImpl currentClass = this ; while ( currentClass != null ) { currentClass . addPolymorphicClusterIds ( ( OClassImpl ) iBaseClass ) ; currentClass = ( OClassImpl ) currentClass . getSuperClass ( ) ; } return this ; }
Adds a base class to the current one . It adds also the base class cluster ids to the polymorphic cluster ids array .
144
27
140,907
public void truncate ( ) throws IOException { getDatabase ( ) . checkSecurity ( ODatabaseSecurityResources . CLASS , ORole . PERMISSION_UPDATE ) ; getDatabase ( ) . getStorage ( ) . callInLock ( new Callable < Object > ( ) { public Object call ( ) throws Exception { for ( int id : clusterIds ) { getDatabase ( ) . getStorage ( ) . getClusterById ( id ) . truncate ( ) ; } for ( OIndex < ? > index : getClassIndexes ( ) ) { index . clear ( ) ; } return null ; } } , true ) ; }
Truncates all the clusters the class uses .
134
10
140,908
private void addPolymorphicClusterIds ( final OClassImpl iBaseClass ) { boolean found ; for ( int i : iBaseClass . polymorphicClusterIds ) { found = false ; for ( int k : polymorphicClusterIds ) { if ( i == k ) { found = true ; break ; } } if ( ! found ) { // ADD IT polymorphicClusterIds = OArrays . copyOf ( polymorphicClusterIds , polymorphicClusterIds . length + 1 ) ; polymorphicClusterIds [ polymorphicClusterIds . length - 1 ] = i ; Arrays . sort ( polymorphicClusterIds ) ; } } }
Add different cluster id to the polymorphic cluster ids array .
151
13
140,909
protected long [ ] allocateSpace ( final int iRecordSize ) throws IOException { // IT'S PREFEREABLE TO FIND SPACE WITHOUT ENLARGE ANY FILES: FIND THE FIRST FILE WITH FREE SPACE TO USE OFile file ; for ( int i = 0 ; i < files . length ; ++ i ) { file = files [ i ] ; if ( file . getFreeSpace ( ) >= iRecordSize ) // FOUND: RETURN THIS OFFSET return new long [ ] { i , file . allocateSpace ( iRecordSize ) } ; } // NOT FOUND: CHECK IF CAN OVERSIZE SOME FILES for ( int i = 0 ; i < files . length ; ++ i ) { file = files [ i ] ; if ( file . canOversize ( iRecordSize ) ) { // FOUND SPACE: ENLARGE IT return new long [ ] { i , file . allocateSpace ( iRecordSize ) } ; } } // TRY TO CREATE A NEW FILE if ( maxSize > 0 && getSize ( ) >= maxSize ) // OUT OF MAX SIZE throw new OStorageException ( "Unable to allocate the requested space of " + iRecordSize + " bytes because the segment is full: max-Size=" + maxSize + ", currentSize=" + getFilledUpTo ( ) ) ; // COPY THE OLD ARRAY TO THE NEW ONE OFile [ ] newFiles = new OFile [ files . length + 1 ] ; for ( int i = 0 ; i < files . length ; ++ i ) newFiles [ i ] = files [ i ] ; files = newFiles ; // CREATE THE NEW FILE AND PUT IT AS LAST OF THE ARRAY file = createNewFile ( ) ; file . allocateSpace ( iRecordSize ) ; config . root . update ( ) ; return new long [ ] { files . length - 1 , 0 } ; }
Find free space for iRecordSize bytes .
406
9
140,910
private void deleteHoleRecords ( ) { listener . onMessage ( "\nDelete temporary records..." ) ; final ORecordId rid = new ORecordId ( ) ; final ODocument doc = new ODocument ( rid ) ; for ( String recId : recordToDelete ) { doc . reset ( ) ; rid . fromString ( recId ) ; doc . delete ( ) ; } listener . onMessage ( "OK (" + recordToDelete . size ( ) + " records)" ) ; }
Delete all the temporary records created to fill the holes and to maintain the same record ID
106
18
140,911
public PrintStream createStatusPrintStream ( ) { return new PrintStream ( new OutputStream ( ) { StringBuffer sb = new StringBuffer ( ) ; @ Override public void write ( int b ) throws IOException { if ( b == ' ' ) { String str = sb . toString ( ) ; sb . delete ( 0 , sb . length ( ) ) ; writeLine ( str ) ; } else { sb . append ( Character . toString ( ( char ) b ) ) ; } } } ) ; }
Returns a new PrintStream that can be used to update the status - text - area on this page .
112
22
140,912
protected void writeLine ( final String line ) { getContainer ( ) . getShell ( ) . getDisplay ( ) . syncExec ( new Runnable ( ) { public void run ( ) { if ( statusTextArea != null ) { statusTextArea . setText ( line + ' ' + statusTextArea . getText ( ) ) ; } } } ) ; }
Writes a line to the status - text - area in the GUI - thread
78
16
140,913
public Object attemptTrade ( String summonerInternalName , int championId ) { return client . sendRpcAndWait ( SERVICE , "attemptTrade" , summonerInternalName , championId , false ) ; }
Attempt to trade with the target player
45
7
140,914
public Object acceptTrade ( String summonerInternalName , int championId ) { return client . sendRpcAndWait ( SERVICE , "attemptTrade" , summonerInternalName , championId , true ) ; }
Accept a trade
45
3
140,915
public MigrationConfiguration getMigrationConfiguration ( String configurationName ) throws IOException , LightblueException { DataFindRequest findRequest = new DataFindRequest ( "migrationConfiguration" , null ) ; findRequest . where ( Query . and ( Query . withValue ( "configurationName" , Query . eq , configurationName ) , Query . withValue ( "consistencyCheckerName" , Query . eq , cfg . getName ( ) ) ) ) ; findRequest . select ( Projection . includeFieldRecursively ( "*" ) ) ; LOGGER . debug ( "Loading configuration:{}" , findRequest . getBody ( ) ) ; return lightblueClient . data ( findRequest , MigrationConfiguration . class ) ; }
Read a configuration from the database whose name matches the given configuration name
154
14
140,916
public MigrationConfiguration loadMigrationConfiguration ( String migrationConfigurationId ) throws IOException , LightblueException { DataFindRequest findRequest = new DataFindRequest ( "migrationConfiguration" , null ) ; findRequest . where ( Query . withValue ( "_id" , Query . eq , migrationConfigurationId ) ) ; findRequest . select ( Projection . includeFieldRecursively ( "*" ) ) ; LOGGER . debug ( "Loading configuration" ) ; return lightblueClient . data ( findRequest , MigrationConfiguration . class ) ; }
Load migration configuration based on its id
113
7
140,917
public void createControllers ( MigrationConfiguration [ ] configurations ) throws Exception { for ( MigrationConfiguration cfg : configurations ) { MigrationProcess process = migrationMap . get ( cfg . get_id ( ) ) ; if ( process == null ) { LOGGER . debug ( "Creating a controller thread for configuration {}: {}" , cfg . get_id ( ) , cfg . getConfigurationName ( ) ) ; MigratorController c = new MigratorController ( this , cfg ) ; if ( c instanceof MonitoredThread ) { ( ( MonitoredThread ) c ) . registerThreadMonitor ( threadMonitor ) ; } AbstractController ccc = getConsistencyCheckerController ( cfg ) ; ; if ( ccc instanceof MonitoredThread ) { ( ( MonitoredThread ) ccc ) . registerThreadMonitor ( threadMonitor ) ; } migrationMap . put ( cfg . get_id ( ) , new MigrationProcess ( cfg , c , ccc ) ) ; c . start ( ) ; if ( ccc != null ) { ccc . start ( ) ; } } else { healthcheck ( cfg ) ; } } }
Creates controller threads for migrators and consistency checkers based on the configuration loaded from the db .
244
20
140,918
public OIdentifiable getKeyAt ( final int iIndex ) { if ( rids != null && rids [ iIndex ] != null ) return rids [ iIndex ] ; final ORecordId rid = itemFromStream ( iIndex ) ; if ( rids != null ) rids [ iIndex ] = rid ; return rid ; }
Lazy unmarshall the RID if not in memory .
73
13
140,919
public void close ( ) { if ( indexManager != null ) indexManager . flush ( ) ; if ( schema != null ) schema . close ( ) ; if ( security != null ) security . close ( ) ; }
Closes internal objects
45
4
140,920
public MuleMessage doHttpSendRequest ( String url , String method , String payload , String contentType ) { Map < String , String > properties = new HashMap < String , String > ( ) ; properties . put ( "http.method" , method ) ; properties . put ( "Content-Type" , contentType ) ; MuleMessage response = send ( url , payload , properties ) ; return response ; }
Perform a HTTP call sending information to the server using POST or PUT
87
15
140,921
public MuleMessage doHttpReceiveRequest ( String url , String method , String acceptConentType , String acceptCharSet ) { Map < String , String > properties = new HashMap < String , String > ( ) ; properties . put ( "http.method" , method ) ; properties . put ( "Accept" , acceptConentType ) ; properties . put ( "Accept-Charset" , acceptCharSet ) ; MuleMessage response = send ( url , null , properties ) ; return response ; }
Perform a HTTP call receiving information from the server using GET or DELETE
109
16
140,922
public byte [ ] toByteArray ( ) { if ( this . pos == this . length ) return this . buffer ; else { byte [ ] res = new byte [ this . pos ] ; System . arraycopy ( this . buffer , 0 , res , 0 , this . pos ) ; return res ; } }
Returns the data written to the stream as a byte array
65
11
140,923
@ Override public void write ( byte [ ] b , int off , int len ) { this . checkIncreaseArray ( len ) ; System . arraycopy ( b , off , this . buffer , this . pos , len ) ; this . pos += len ; }
Writes a byte array content into the stream
55
9
140,924
public void writeTag ( int tagClass , boolean primitive , int tag ) throws AsnException { if ( tag < 0 ) throw new AsnException ( "Tag must not be negative" ) ; if ( tag <= 30 ) { int toEncode = ( tagClass & 0x03 ) << 6 ; toEncode |= ( primitive ? 0 : 1 ) << 5 ; toEncode |= tag & 0x1F ; this . write ( toEncode ) ; } else { int toEncode = ( tagClass & 0x03 ) << 6 ; toEncode |= ( primitive ? 0 : 1 ) << 5 ; toEncode |= 0x1F ; this . write ( toEncode ) ; int byteArr = 8 ; byte [ ] buf = new byte [ byteArr ] ; int pos = byteArr ; while ( true ) { int dd ; if ( tag <= 0x7F ) { dd = tag ; if ( pos != byteArr ) dd = dd | 0x80 ; buf [ -- pos ] = ( byte ) dd ; break ; } else { dd = ( tag & 0x7F ) ; tag >>= 7 ; if ( pos != byteArr ) dd = dd | 0x80 ; buf [ -- pos ] = ( byte ) dd ; } } this . write ( buf , pos , byteArr - pos ) ; } }
Writes a tag field into the stream
296
9
140,925
public void writeLength ( int v ) throws IOException { if ( v == Tag . Indefinite_Length ) { this . write ( 0x80 ) ; return ; } else if ( v > 0x7F ) { int count ; byte [ ] buf = new byte [ 4 ] ; if ( ( v & 0xFF000000 ) > 0 ) { buf [ 0 ] = ( byte ) ( ( v >> 24 ) & 0xFF ) ; buf [ 1 ] = ( byte ) ( ( v >> 16 ) & 0xFF ) ; buf [ 2 ] = ( byte ) ( ( v >> 8 ) & 0xFF ) ; buf [ 3 ] = ( byte ) ( v & 0xFF ) ; count = 4 ; } else if ( ( v & 0x00FF0000 ) > 0 ) { buf [ 0 ] = ( byte ) ( ( v >> 16 ) & 0xFF ) ; buf [ 1 ] = ( byte ) ( ( v >> 8 ) & 0xFF ) ; buf [ 2 ] = ( byte ) ( v & 0xFF ) ; count = 3 ; } else if ( ( v & 0x0000FF00 ) > 0 ) { buf [ 0 ] = ( byte ) ( ( v >> 8 ) & 0xFF ) ; buf [ 1 ] = ( byte ) ( v & 0xFF ) ; count = 2 ; } else { buf [ 0 ] = ( byte ) ( v & 0xFF ) ; count = 1 ; } this . buffer [ pos ] = ( byte ) ( 0x80 | count ) ; for ( int i1 = 0 ; i1 < count ; i1 ++ ) { this . buffer [ pos + i1 + 1 ] = buf [ i1 ] ; } this . pos += count + 1 ; // int posLen = this.pos; // this.write(0); // int count = this.writeIntegerData(v); // this.buffer[posLen] = (byte) (count | 0x80); } else { // short this . write ( v ) ; } }
Write the length field into the stream Use Tag . Indefinite_Length for writing the indefinite length
438
20
140,926
public void FinalizeContent ( int lenPos ) { if ( lenPos == Tag . Indefinite_Length ) { this . write ( 0 ) ; this . write ( 0 ) ; } else { int length = this . pos - lenPos - 1 ; if ( length <= 0x7F ) { this . buffer [ lenPos ] = ( byte ) length ; } else { int count ; byte [ ] buf = new byte [ 4 ] ; if ( ( length & 0xFF000000 ) > 0 ) { buf [ 0 ] = ( byte ) ( ( length >> 24 ) & 0xFF ) ; buf [ 1 ] = ( byte ) ( ( length >> 16 ) & 0xFF ) ; buf [ 2 ] = ( byte ) ( ( length >> 8 ) & 0xFF ) ; buf [ 3 ] = ( byte ) ( length & 0xFF ) ; count = 4 ; } else if ( ( length & 0x00FF0000 ) > 0 ) { buf [ 0 ] = ( byte ) ( ( length >> 16 ) & 0xFF ) ; buf [ 1 ] = ( byte ) ( ( length >> 8 ) & 0xFF ) ; buf [ 2 ] = ( byte ) ( length & 0xFF ) ; count = 3 ; } else if ( ( length & 0x0000FF00 ) > 0 ) { buf [ 0 ] = ( byte ) ( ( length >> 8 ) & 0xFF ) ; buf [ 1 ] = ( byte ) ( length & 0xFF ) ; count = 2 ; } else { buf [ 0 ] = ( byte ) ( length & 0xFF ) ; count = 1 ; } this . checkIncreaseArray ( count ) ; System . arraycopy ( this . buffer , lenPos + 1 , this . buffer , lenPos + 1 + count , length ) ; this . pos += count ; this . buffer [ lenPos ] = ( byte ) ( 0x80 | count ) ; for ( int i1 = 0 ; i1 < count ; i1 ++ ) { this . buffer [ lenPos + i1 + 1 ] = buf [ i1 ] ; } } } }
This method must be invoked after finishing the content writing
449
10
140,927
private static byte _getByte ( int startIndex , BitSetStrictLength set ) throws AsnException { int count = 8 ; byte data = 0 ; // if (set.length() - 1 < startIndex) { // throw new AsnException(); // } while ( count > 0 ) { if ( set . length ( ) - 1 < startIndex ) { break ; } else { boolean lit = set . get ( startIndex ) ; if ( lit ) { data |= ( 0x01 << ( count - 1 ) ) ; } startIndex ++ ; count -- ; } } return data ; }
Attempts to read up to 8 bits and store into byte . If less is found only those are returned
127
23
140,928
public void startup ( ) { underlying . startup ( ) ; OProfiler . getInstance ( ) . registerHookValue ( profilerPrefix + "enabled" , new OProfilerHookValue ( ) { public Object getValue ( ) { return isEnabled ( ) ; } } ) ; OProfiler . getInstance ( ) . registerHookValue ( profilerPrefix + "current" , new OProfilerHookValue ( ) { public Object getValue ( ) { return getSize ( ) ; } } ) ; OProfiler . getInstance ( ) . registerHookValue ( profilerPrefix + "max" , new OProfilerHookValue ( ) { public Object getValue ( ) { return getMaxSize ( ) ; } } ) ; }
All operations running at cache initialization stage
163
7
140,929
public Cipher getCipher ( ) throws GeneralSecurityException { Cipher cipher = Cipher . getInstance ( "Blowfish/ECB/PKCS5Padding" ) ; cipher . init ( Cipher . DECRYPT_MODE , new SecretKeySpec ( key , "Blowfish" ) ) ; return cipher ; }
Creates a cipher for decrypting data with the specified key .
67
13
140,930
public Cipher getEncryptionCipher ( ) throws GeneralSecurityException { Cipher cipher = Cipher . getInstance ( "Blowfish/ECB/PKCS5Padding" ) ; cipher . init ( Cipher . ENCRYPT_MODE , new SecretKeySpec ( key , "Blowfish" ) ) ; return cipher ; }
Creates a cipher for encrypting data with the specified key .
70
13
140,931
public Object execute ( final Map < Object , Object > iArgs ) { if ( className == null ) throw new OCommandExecutionException ( "Cannot execute the command because it has not been parsed yet" ) ; final ODatabaseRecord database = getDatabase ( ) ; final OClass oClass = database . getMetadata ( ) . getSchema ( ) . getClass ( className ) ; if ( oClass == null ) return null ; for ( final OIndex < ? > oIndex : oClass . getClassIndexes ( ) ) { database . getMetadata ( ) . getIndexManager ( ) . dropIndex ( oIndex . getName ( ) ) ; } final OClass superClass = oClass . getSuperClass ( ) ; final int [ ] clustersToIndex = oClass . getPolymorphicClusterIds ( ) ; final String [ ] clusterNames = new String [ clustersToIndex . length ] ; for ( int i = 0 ; i < clustersToIndex . length ; i ++ ) { clusterNames [ i ] = database . getClusterNameById ( clustersToIndex [ i ] ) ; } final int clusterId = oClass . getDefaultClusterId ( ) ; ( ( OSchemaProxy ) database . getMetadata ( ) . getSchema ( ) ) . dropClassInternal ( className ) ; ( ( OSchemaProxy ) database . getMetadata ( ) . getSchema ( ) ) . saveInternal ( ) ; database . getMetadata ( ) . getSchema ( ) . reload ( ) ; deleteDefaultCluster ( clusterId ) ; if ( superClass == null ) return true ; for ( final OIndex < ? > oIndex : superClass . getIndexes ( ) ) { for ( final String clusterName : clusterNames ) oIndex . getInternal ( ) . removeCluster ( clusterName ) ; OLogManager . instance ( ) . info ( "Index %s is used in super class of %s and should be rebuilt." , oIndex . getName ( ) , className ) ; oIndex . rebuild ( ) ; } return true ; }
Execute the DROP CLASS .
448
7
140,932
public int compareTo ( final OCompositeKey otherKey ) { final Iterator < Object > inIter = keys . iterator ( ) ; final Iterator < Object > outIter = otherKey . keys . iterator ( ) ; while ( inIter . hasNext ( ) && outIter . hasNext ( ) ) { final Object inKey = inIter . next ( ) ; final Object outKey = outIter . next ( ) ; if ( outKey instanceof OAlwaysGreaterKey ) return - 1 ; if ( outKey instanceof OAlwaysLessKey ) return 1 ; final int result = comparator . compare ( inKey , outKey ) ; if ( result != 0 ) return result ; } return 0 ; }
Performs partial comparison of two composite keys .
151
9
140,933
public Map < String , Object > getVariables ( ) { final HashMap < String , Object > map = new HashMap < String , Object > ( ) ; if ( inherited != null ) map . putAll ( inherited . getVariables ( ) ) ; if ( variables != null ) map . putAll ( variables ) ; return map ; }
Returns a read - only map with all the variables .
72
11
140,934
@ SuppressWarnings ( "unchecked" ) public < T > T getPropertyValue ( ElementDescriptor < T > property ) { if ( mProperties == null ) { return null ; } return ( T ) mProperties . get ( property ) ; }
Returns the value of a property in this propstat element .
58
12
140,935
public < T > void clear ( ElementDescriptor < T > property ) { if ( mSet != null ) { mSet . remove ( property ) ; } }
Remove a property from the initial values .
35
8
140,936
public long countClass ( final String iClassName ) { final OClass cls = getMetadata ( ) . getSchema ( ) . getClass ( iClassName ) ; if ( cls == null ) throw new IllegalArgumentException ( "Class '" + iClassName + "' not found in database" ) ; return cls . count ( ) ; }
Returns the number of the records of the class iClassName .
78
13
140,937
public OBinarySerializer < ? > getObjectSerializer ( final byte identifier ) { OBinarySerializer < ? > impl = serializerIdMap . get ( identifier ) ; if ( impl == null ) { final Class < ? extends OBinarySerializer < ? > > cls = serializerClassesIdMap . get ( identifier ) ; if ( cls != null ) try { impl = cls . newInstance ( ) ; } catch ( Exception e ) { OLogManager . instance ( ) . error ( this , "Cannot create an instance of class %s invoking the empty constructor" , cls ) ; } } return impl ; }
Obtain OBinarySerializer instance by it s id .
136
12
140,938
public long addRecord ( final ORecordId iRid , final byte [ ] iContent ) throws IOException { if ( iContent . length == 0 ) // AVOID UNUSEFUL CREATION OF EMPTY RECORD: IT WILL BE CREATED AT FIRST UPDATE return - 1 ; final int recordSize = iContent . length + RECORD_FIX_SIZE ; acquireExclusiveLock ( ) ; try { final long [ ] newFilePosition = getFreeSpace ( recordSize ) ; writeRecord ( newFilePosition , iRid . clusterId , iRid . clusterPosition , iContent ) ; return getAbsolutePosition ( newFilePosition ) ; } finally { releaseExclusiveLock ( ) ; } }
Add the record content in file .
151
7
140,939
public byte [ ] getRecord ( final long iPosition ) throws IOException { if ( iPosition == - 1 ) return null ; acquireSharedLock ( ) ; try { final long [ ] pos = getRelativePosition ( iPosition ) ; final OFile file = files [ ( int ) pos [ 0 ] ] ; final int recordSize = file . readInt ( pos [ 1 ] ) ; if ( recordSize <= 0 ) // RECORD DELETED return null ; if ( pos [ 1 ] + RECORD_FIX_SIZE + recordSize > file . getFilledUpTo ( ) ) throw new OStorageException ( "Error on reading record from file '" + file . getName ( ) + "', position " + iPosition + ", size " + OFileUtils . getSizeAsString ( recordSize ) + ": the record size is bigger then the file itself (" + OFileUtils . getSizeAsString ( getFilledUpTo ( ) ) + "). Probably the record is dirty due to a previous crash. It is strongly suggested to restore the database or export and reimport this one." ) ; final byte [ ] content = new byte [ recordSize ] ; file . read ( pos [ 1 ] + RECORD_FIX_SIZE , content , recordSize ) ; return content ; } finally { releaseSharedLock ( ) ; } }
Returns the record content from file .
288
7
140,940
public int getRecordSize ( final long iPosition ) throws IOException { acquireSharedLock ( ) ; try { final long [ ] pos = getRelativePosition ( iPosition ) ; final OFile file = files [ ( int ) pos [ 0 ] ] ; return file . readInt ( pos [ 1 ] ) ; } finally { releaseSharedLock ( ) ; } }
Returns the record size .
79
5
140,941
public long setRecord ( final long iPosition , final ORecordId iRid , final byte [ ] iContent ) throws IOException { acquireExclusiveLock ( ) ; try { long [ ] pos = getRelativePosition ( iPosition ) ; final OFile file = files [ ( int ) pos [ 0 ] ] ; final int recordSize = file . readInt ( pos [ 1 ] ) ; final int contentLength = iContent != null ? iContent . length : 0 ; if ( contentLength == recordSize ) { // USE THE OLD SPACE SINCE SIZE ISN'T CHANGED file . write ( pos [ 1 ] + RECORD_FIX_SIZE , iContent ) ; OProfiler . getInstance ( ) . updateCounter ( PROFILER_UPDATE_REUSED_ALL , + 1 ) ; return iPosition ; } else if ( recordSize - contentLength > RECORD_FIX_SIZE + 50 ) { // USE THE OLD SPACE BUT UPDATE THE CURRENT SIZE. IT'S PREFEREABLE TO USE THE SAME INSTEAD FINDING A BEST SUITED FOR IT TO // AVOID CHANGES TO REF FILE AS WELL. writeRecord ( pos , iRid . clusterId , iRid . clusterPosition , iContent ) ; // CREATE A HOLE WITH THE DIFFERENCE OF SPACE handleHole ( iPosition + RECORD_FIX_SIZE + contentLength , recordSize - contentLength - RECORD_FIX_SIZE ) ; OProfiler . getInstance ( ) . updateCounter ( PROFILER_UPDATE_REUSED_PARTIAL , + 1 ) ; } else { // CREATE A HOLE FOR THE ENTIRE OLD RECORD handleHole ( iPosition , recordSize ) ; // USE A NEW SPACE pos = getFreeSpace ( contentLength + RECORD_FIX_SIZE ) ; writeRecord ( pos , iRid . clusterId , iRid . clusterPosition , iContent ) ; OProfiler . getInstance ( ) . updateCounter ( PROFILER_UPDATE_NOT_REUSED , + 1 ) ; } return getAbsolutePosition ( pos ) ; } finally { releaseExclusiveLock ( ) ; } }
Set the record content in file .
478
7
140,942
public List < ODataHoleInfo > getHolesList ( ) { acquireSharedLock ( ) ; try { final List < ODataHoleInfo > holes = new ArrayList < ODataHoleInfo > ( ) ; final int tot = holeSegment . getHoles ( ) ; for ( int i = 0 ; i < tot ; ++ i ) { final ODataHoleInfo h = holeSegment . getHole ( i ) ; if ( h != null ) holes . add ( h ) ; } return holes ; } finally { releaseSharedLock ( ) ; } }
Returns the list of holes as pair of position & ppos
126
12
140,943
private static boolean betweenLongitudes ( double topLeftLon , double bottomRightLon , double lon ) { if ( topLeftLon <= bottomRightLon ) return lon >= topLeftLon && lon <= bottomRightLon ; else return lon >= topLeftLon || lon <= bottomRightLon ; }
Returns true if and only if lon is between the longitudes topLeftLon and bottomRightLon .
72
23
140,944
public int getPropertyStatus ( ElementDescriptor < ? > descriptor ) { PropStat propStat = mPropStatByProperty . get ( descriptor ) ; if ( propStat == null ) { return STATUS_NONE ; } return propStat . getStatusCode ( ) ; }
Return the status of a specific property .
59
8
140,945
public < T > T getPropertyValue ( ElementDescriptor < T > descriptor ) { PropStat propStat = mPropStatByProperty . get ( descriptor ) ; if ( propStat == null ) { return null ; } return propStat . getPropertyValue ( descriptor ) ; }
Get the value of a specific property .
59
8
140,946
private static synchronized Set < OIndexFactory > getFactories ( ) { if ( FACTORIES == null ) { final Iterator < OIndexFactory > ite = lookupProviderWithOrientClassLoader ( OIndexFactory . class , orientClassLoader ) ; final Set < OIndexFactory > factories = new HashSet < OIndexFactory > ( ) ; while ( ite . hasNext ( ) ) { factories . add ( ite . next ( ) ) ; } FACTORIES = Collections . unmodifiableSet ( factories ) ; } return FACTORIES ; }
Cache a set of all factories . we do not use the service loader directly since it is not concurrent .
122
21
140,947
public void addListener ( final ORecordListener iListener ) { if ( _listeners == null ) _listeners = Collections . newSetFromMap ( new WeakHashMap < ORecordListener , Boolean > ( ) ) ; _listeners . add ( iListener ) ; }
Add a listener to the current document to catch all the supported events .
60
14
140,948
public static String getComponentProjectName ( int componentType , String groupId , String artifactId ) { IModel m = ModelFactory . newModel ( groupId , artifactId , null , null , MuleVersionEnum . MAIN_MULE_VERSION , null , null ) ; String projectFolderName = null ; ComponentEnum compEnum = ComponentEnum . get ( componentType ) ; switch ( compEnum ) { case INTEGRATION_COMPONENT : projectFolderName = m . getIntegrationComponentProject ( ) ; break ; case INTEGRATION_TESTSTUBS_COMPONENT : projectFolderName = m . getTeststubStandaloneProject ( ) ; break ; case SD_SCHEMA_COMPONENT : projectFolderName = m . getSchemaProject ( ) ; break ; } return projectFolderName ; }
public static final int IM_SCHEMA_COMPONENT = 3 ;
188
17
140,949
protected boolean checkConsistency ( Object o1 , Object o2 ) { return checkConsistency ( o1 , o2 , null , null ) ; }
convenience method for unit testing
34
7
140,950
public Object execute ( final Map < Object , Object > iArgs ) { if ( newRecords == null ) throw new OCommandExecutionException ( "Cannot execute the command because it has not been parsed yet" ) ; final OCommandParameters commandParameters = new OCommandParameters ( iArgs ) ; if ( indexName != null ) { final OIndex < ? > index = getDatabase ( ) . getMetadata ( ) . getIndexManager ( ) . getIndex ( indexName ) ; if ( index == null ) throw new OCommandExecutionException ( "Target index '" + indexName + "' not found" ) ; // BIND VALUES Map < String , Object > result = null ; for ( Map < String , Object > candidate : newRecords ) { index . put ( getIndexKeyValue ( commandParameters , candidate ) , getIndexValue ( commandParameters , candidate ) ) ; result = candidate ; } // RETURN LAST ENTRY return new ODocument ( result ) ; } else { // CREATE NEW DOCUMENTS final List < ODocument > docs = new ArrayList < ODocument > ( ) ; for ( Map < String , Object > candidate : newRecords ) { final ODocument doc = className != null ? new ODocument ( className ) : new ODocument ( ) ; OSQLHelper . bindParameters ( doc , candidate , commandParameters ) ; if ( clusterName != null ) { doc . save ( clusterName ) ; } else { doc . save ( ) ; } docs . add ( doc ) ; } if ( docs . size ( ) == 1 ) { return docs . get ( 0 ) ; } else { return docs ; } } }
Execute the INSERT and return the ODocument object created .
351
13
140,951
@ SuppressWarnings ( "unchecked" ) public OGraphEdge link ( final OGraphVertex iTargetVertex , final String iClassName ) { if ( iTargetVertex == null ) throw new IllegalArgumentException ( "Missed the target vertex" ) ; // CREATE THE EDGE BETWEEN ME AND THE TARGET final OGraphEdge edge = new OGraphEdge ( database , iClassName , this , iTargetVertex ) ; getOutEdges ( ) . add ( edge ) ; Set < ODocument > recordEdges = ( ( Set < ODocument > ) document . field ( OGraphDatabase . VERTEX_FIELD_OUT ) ) ; if ( recordEdges == null ) { recordEdges = new HashSet < ODocument > ( ) ; document . field ( OGraphDatabase . VERTEX_FIELD_OUT , recordEdges ) ; } recordEdges . add ( edge . getDocument ( ) ) ; document . setDirty ( ) ; // INSERT INTO THE INGOING EDGES OF TARGET iTargetVertex . getInEdges ( ) . add ( edge ) ; recordEdges = ( ( Set < ODocument > ) iTargetVertex . getDocument ( ) . field ( OGraphDatabase . VERTEX_FIELD_IN ) ) ; if ( recordEdges == null ) { recordEdges = new HashSet < ODocument > ( ) ; iTargetVertex . getDocument ( ) . field ( OGraphDatabase . VERTEX_FIELD_IN , recordEdges ) ; } recordEdges . add ( edge . getDocument ( ) ) ; iTargetVertex . getDocument ( ) . setDirty ( ) ; return edge ; }
Create a link between the current vertex and the target one . The link is of type iClassName .
368
21
140,952
public OGraphVertex unlink ( final OGraphVertex iTargetVertex ) { if ( iTargetVertex == null ) throw new IllegalArgumentException ( "Missed the target vertex" ) ; unlink ( database , document , iTargetVertex . getDocument ( ) ) ; return this ; }
Remove the link between the current vertex and the target one .
66
12
140,953
public boolean hasInEdges ( ) { final Set < ODocument > docs = document . field ( OGraphDatabase . VERTEX_FIELD_IN ) ; return docs != null && ! docs . isEmpty ( ) ; }
Returns true if the vertex has at least one incoming edge otherwise false .
48
14
140,954
public boolean hasOutEdges ( ) { final Set < ODocument > docs = document . field ( OGraphDatabase . VERTEX_FIELD_OUT ) ; return docs != null && ! docs . isEmpty ( ) ; }
Returns true if the vertex has at least one outgoing edge otherwise false .
48
14
140,955
public Set < OGraphEdge > getInEdges ( final String iEdgeLabel ) { Set < OGraphEdge > temp = in != null ? in . get ( ) : null ; if ( temp == null ) { if ( iEdgeLabel == null ) temp = new HashSet < OGraphEdge > ( ) ; in = new SoftReference < Set < OGraphEdge > > ( temp ) ; final Set < Object > docs = document . field ( OGraphDatabase . VERTEX_FIELD_IN ) ; if ( docs != null ) { // TRANSFORM ALL THE ARCS for ( Object o : docs ) { final ODocument doc = ( ODocument ) ( ( OIdentifiable ) o ) . getRecord ( ) ; if ( iEdgeLabel != null && ! iEdgeLabel . equals ( doc . field ( OGraphDatabase . LABEL ) ) ) continue ; temp . add ( ( OGraphEdge ) database . getUserObjectByRecord ( doc , null ) ) ; } } } else if ( iEdgeLabel != null ) { // FILTER THE EXISTENT COLLECTION HashSet < OGraphEdge > filtered = new HashSet < OGraphEdge > ( ) ; for ( OGraphEdge e : temp ) { if ( iEdgeLabel . equals ( e . getLabel ( ) ) ) filtered . add ( e ) ; } temp = filtered ; } return temp ; }
Returns the incoming edges of the current node having the requested label . If there are no edges then an empty set is returned .
293
24
140,956
@ SuppressWarnings ( "unchecked" ) public Set < OGraphVertex > browseOutEdgesVertexes ( ) { final Set < OGraphVertex > resultset = new HashSet < OGraphVertex > ( ) ; Set < OGraphEdge > temp = out != null ? out . get ( ) : null ; if ( temp == null ) { final Set < OIdentifiable > docEdges = ( Set < OIdentifiable > ) document . field ( OGraphDatabase . VERTEX_FIELD_OUT ) ; // TRANSFORM ALL THE EDGES if ( docEdges != null ) for ( OIdentifiable d : docEdges ) { resultset . add ( ( OGraphVertex ) database . getUserObjectByRecord ( ( ODocument ) ( ( ODocument ) d . getRecord ( ) ) . field ( OGraphDatabase . EDGE_FIELD_IN ) , null ) ) ; } } else { for ( OGraphEdge edge : temp ) { resultset . add ( edge . getIn ( ) ) ; } } return resultset ; }
Returns the set of Vertexes from the outgoing edges . It avoids unmarshalling the edges .
231
20
140,957
@ SuppressWarnings ( "unchecked" ) public Set < OGraphVertex > browseInEdgesVertexes ( ) { final Set < OGraphVertex > resultset = new HashSet < OGraphVertex > ( ) ; Set < OGraphEdge > temp = in != null ? in . get ( ) : null ; if ( temp == null ) { final Set < ODocument > docEdges = ( Set < ODocument > ) document . field ( OGraphDatabase . VERTEX_FIELD_IN ) ; // TRANSFORM ALL THE EDGES if ( docEdges != null ) for ( ODocument d : docEdges ) { resultset . add ( ( OGraphVertex ) database . getUserObjectByRecord ( ( ODocument ) d . field ( OGraphDatabase . EDGE_FIELD_OUT ) , null ) ) ; } } else { for ( OGraphEdge edge : temp ) { resultset . add ( edge . getOut ( ) ) ; } } return resultset ; }
Returns the set of Vertexes from the incoming edges . It avoids unmarshalling the edges .
217
20
140,958
public static void unlink ( final ODatabaseGraphTx iDatabase , final ODocument iSourceVertex , final ODocument iTargetVertex ) { if ( iTargetVertex == null ) throw new IllegalArgumentException ( "Missed the target vertex" ) ; if ( iDatabase . existsUserObjectByRID ( iSourceVertex . getIdentity ( ) ) ) { // WORK ALSO WITH IN MEMORY OBJECTS final OGraphVertex vertex = ( OGraphVertex ) iDatabase . getUserObjectByRecord ( iSourceVertex , null ) ; // REMOVE THE EDGE OBJECT if ( vertex . out != null ) { final Set < OGraphEdge > obj = vertex . out . get ( ) ; if ( obj != null ) for ( OGraphEdge e : obj ) if ( e . getIn ( ) . getDocument ( ) . equals ( iTargetVertex ) ) obj . remove ( e ) ; } } if ( iDatabase . existsUserObjectByRID ( iTargetVertex . getIdentity ( ) ) ) { // WORK ALSO WITH IN MEMORY OBJECTS final OGraphVertex vertex = ( OGraphVertex ) iDatabase . getUserObjectByRecord ( iTargetVertex , null ) ; // REMOVE THE EDGE OBJECT FROM THE TARGET VERTEX if ( vertex . in != null ) { final Set < OGraphEdge > obj = vertex . in . get ( ) ; if ( obj != null ) for ( OGraphEdge e : obj ) if ( e . getOut ( ) . getDocument ( ) . equals ( iSourceVertex ) ) obj . remove ( e ) ; } } final List < ODocument > edges2Remove = new ArrayList < ODocument > ( ) ; // REMOVE THE EDGE DOCUMENT ODocument edge = null ; Set < ODocument > docs = iSourceVertex . field ( OGraphDatabase . VERTEX_FIELD_OUT ) ; if ( docs != null ) { // USE A TEMP ARRAY TO AVOID CONCURRENT MODIFICATION TO THE ITERATOR for ( OIdentifiable d : docs ) { final ODocument doc = ( ODocument ) d . getRecord ( ) ; if ( doc . field ( OGraphDatabase . EDGE_FIELD_IN ) . equals ( iTargetVertex ) ) { edges2Remove . add ( doc ) ; edge = doc ; } } for ( ODocument d : edges2Remove ) docs . remove ( ) ; } if ( edge == null ) throw new OGraphException ( "Edge not found between the ougoing edges" ) ; iSourceVertex . setDirty ( ) ; iSourceVertex . save ( ) ; docs = iTargetVertex . field ( OGraphDatabase . 
VERTEX_FIELD_IN ) ; // REMOVE THE EDGE DOCUMENT FROM THE TARGET VERTEX if ( docs != null ) { edges2Remove . clear ( ) ; for ( OIdentifiable d : docs ) { final ODocument doc = ( ODocument ) d . getRecord ( ) ; if ( doc . field ( OGraphDatabase . EDGE_FIELD_IN ) . equals ( iTargetVertex ) ) edges2Remove . add ( doc ) ; } for ( ODocument d : edges2Remove ) docs . remove ( ) ; } iTargetVertex . setDirty ( ) ; iTargetVertex . save ( ) ; edge . delete ( ) ; }
Unlinks all the edges between iSourceVertex and iTargetVertex
738
15
140,959
public SyncCollection limitNumberOfResults ( int limit ) { if ( limit > 0 ) { addLimit ( WebDavSearch . NRESULTS , limit ) ; } else { removeLimit ( WebDavSearch . NRESULTS ) ; } return this ; }
Limit the number of results in the response if supported by the server . A non - positive value will remove the limit .
53
24
140,960
public int getNumberOfResultsLimit ( ) { if ( mLimit == null ) { return 0 ; } Integer limit = ( Integer ) mLimit . get ( WebDavSearch . NRESULTS ) ; return limit == null ? 0 : limit ; }
Returns the limit for the number of results in this request .
52
12
140,961
private < T > void addLimit ( ElementDescriptor < T > descriptor , T limit ) { if ( mLimit == null ) { mLimit = new HashMap < ElementDescriptor < ? > , Object > ( 6 ) ; } mLimit . put ( descriptor , limit ) ; }
Add a limit to the request .
62
7
140,962
public void put ( K k , V v ) { cache . put ( k , v ) ; acquireLock ( k ) . countDown ( ) ; }
Caches the given mapping and releases all waiting locks .
32
11
140,963
public V get ( K k ) throws InterruptedException { await ( k ) ; return cache . get ( k ) ; }
Retrieve the value associated with the given key blocking as long as necessary .
26
15
140,964
public V get ( K k , long timeout , TimeUnit unit ) throws InterruptedException , TimeoutException { await ( k , timeout , unit ) ; return cache . get ( k ) ; }
Retrieve the value associated with the given key blocking as long as necessary up to the specified maximum .
41
20
140,965
public void await ( K k , long timeout , TimeUnit unit ) throws InterruptedException , TimeoutException { if ( ! acquireLock ( k ) . await ( timeout , unit ) ) { throw new TimeoutException ( "Wait time for retrieving value for key " + k + " exceeded " + timeout + " " + unit ) ; } }
Waits until the key has been assigned a value up to the specified maximum .
72
16
140,966
public static void initEndpointDirectories ( MuleContext muleContext , String [ ] serviceNames , String [ ] endpointNames ) throws Exception { // Stop all named services (either Flows or services List < Lifecycle > services = new ArrayList < Lifecycle > ( ) ; for ( String serviceName : serviceNames ) { try { Lifecycle service = muleContext . getRegistry ( ) . lookupObject ( serviceName ) ; // logServiceStatus(service); // service.stop(); // logServiceStatus(service); services . add ( service ) ; } catch ( Exception e ) { logger . error ( "Error '" + e . getMessage ( ) + "' occured while stopping the service " + serviceName + ". Perhaps the service did not exist in the config?" ) ; throw e ; } } // Now init the directory for each named endpoint, one by one for ( String endpointName : endpointNames ) { initEndpointDirectory ( muleContext , endpointName ) ; } // We are done, startup the services again so that the test can begin... for ( @ SuppressWarnings ( "unused" ) Lifecycle service : services ) { // logServiceStatus(service); // service.start(); // logServiceStatus(service); } }
Initializes a list of sftp - endpoint - directories . Ensures that affected services are stopped during the initialization .
266
25
140,967
static protected SftpClient getSftpClient ( MuleContext muleContext , String endpointName ) throws IOException { ImmutableEndpoint endpoint = getImmutableEndpoint ( muleContext , endpointName ) ; try { SftpClient sftpClient = SftpConnectionFactory . createClient ( endpoint ) ; return sftpClient ; } catch ( Exception e ) { throw new RuntimeException ( "Login failed" , e ) ; } /* EndpointURI endpointURI = endpoint.getEndpointURI(); SftpClient sftpClient = new SftpClient(endpointURI.getHost()); SftpConnector sftpConnector = (SftpConnector) endpoint.getConnector(); if (sftpConnector.getIdentityFile() != null) { try { sftpClient.login(endpointURI.getUser(), sftpConnector.getIdentityFile(), sftpConnector.getPassphrase()); } catch (Exception e) { throw new RuntimeException("Login failed", e); } } else { try { sftpClient.login(endpointURI.getUser(), endpointURI.getPassword()); } catch (Exception e) { throw new RuntimeException("Login failed", e); } } return sftpClient; */ }
Returns a SftpClient that is logged in to the sftp server that the endpoint is configured against .
274
23
140,968
static protected void recursiveDelete ( MuleContext muleContext , SftpClient sftpClient , String endpointName , String relativePath ) throws IOException { EndpointURI endpointURI = getImmutableEndpoint ( muleContext , endpointName ) . getEndpointURI ( ) ; String path = endpointURI . getPath ( ) + relativePath ; try { // Ensure that we can delete the current directory and the below // directories (if write is not permitted then delete is either) sftpClient . chmod ( path , 00700 ) ; sftpClient . changeWorkingDirectory ( sftpClient . getAbsolutePath ( path ) ) ; // Delete all sub-directories String [ ] directories = sftpClient . listDirectories ( ) ; for ( String directory : directories ) { recursiveDelete ( muleContext , sftpClient , endpointName , relativePath + "/" + directory ) ; } // Needs to change the directory back after the recursiveDelete sftpClient . changeWorkingDirectory ( sftpClient . getAbsolutePath ( path ) ) ; // Delete all files String [ ] files = sftpClient . listFiles ( ) ; for ( String file : files ) { sftpClient . deleteFile ( file ) ; } // Delete the directory try { sftpClient . deleteDirectory ( path ) ; } catch ( Exception e ) { if ( logger . isDebugEnabled ( ) ) logger . debug ( "Failed delete directory " + path , e ) ; } } catch ( Exception e ) { if ( logger . isDebugEnabled ( ) ) logger . debug ( "Failed to recursivly delete directory " + path , e ) ; } }
Deletes a directory with all its files and sub - directories . The reason it does a chmod 700 before the delete is that some tests change the permissions and thus we have to restore the right to delete it ...
359
43
140,969
public OMVRBTreeEntryPersistent < K , V > save ( ) throws OSerializationException { if ( ! dataProvider . isEntryDirty ( ) ) return this ; final boolean isNew = dataProvider . getIdentity ( ) . isNew ( ) ; // FOR EACH NEW LINK, SAVE BEFORE if ( left != null && left . dataProvider . getIdentity ( ) . isNew ( ) ) { if ( isNew ) { // TEMPORARY INCORRECT SAVE FOR GETTING AN ID. WILL BE SET DIRTY AGAIN JUST AFTER left . dataProvider . save ( ) ; } else left . save ( ) ; } if ( right != null && right . dataProvider . getIdentity ( ) . isNew ( ) ) { if ( isNew ) { // TEMPORARY INCORRECT SAVE FOR GETTING AN ID. WILL BE SET DIRTY AGAIN JUST AFTER right . dataProvider . save ( ) ; } else right . save ( ) ; } if ( parent != null && parent . dataProvider . getIdentity ( ) . isNew ( ) ) { if ( isNew ) { // TEMPORARY INCORRECT SAVE FOR GETTING AN ID. WILL BE SET DIRTY AGAIN JUST AFTER parent . dataProvider . save ( ) ; } else parent . save ( ) ; } dataProvider . save ( ) ; // if (parent != null) // if (!parent.record.getIdentity().equals(parentRid)) // OLogManager.instance().error(this, // "[save]: Tree node %s has parentRid '%s' different by the rid of the assigned parent node: %s", record.getIdentity(), // parentRid, parent.record.getIdentity()); checkEntryStructure ( ) ; if ( pTree . searchNodeInCache ( dataProvider . getIdentity ( ) ) != this ) { // UPDATE THE CACHE pTree . addNodeInMemory ( this ) ; } return this ; }
Assures that all the links versus parent left and right are consistent .
429
14
140,970
public OMVRBTreeEntryPersistent < K , V > delete ( ) throws IOException { if ( dataProvider != null ) { pTree . removeNodeFromMemory ( this ) ; pTree . removeEntry ( dataProvider . getIdentity ( ) ) ; // EARLY LOAD LEFT AND DELETE IT RECURSIVELY if ( getLeft ( ) != null ) ( ( OMVRBTreeEntryPersistent < K , V > ) getLeft ( ) ) . delete ( ) ; // EARLY LOAD RIGHT AND DELETE IT RECURSIVELY if ( getRight ( ) != null ) ( ( OMVRBTreeEntryPersistent < K , V > ) getRight ( ) ) . delete ( ) ; // DELETE MYSELF dataProvider . removeIdentityChangedListener ( this ) ; dataProvider . delete ( ) ; clear ( ) ; } return this ; }
Deletes all the nodes recursively . If they are not loaded in memory , the whole tree is loaded first .
191
20
140,971
protected int disconnect ( final boolean iForceDirty , final int iLevel ) { if ( dataProvider == null ) // DIRTY NODE, JUST REMOVE IT return 1 ; int totalDisconnected = 0 ; final ORID rid = dataProvider . getIdentity ( ) ; boolean disconnectedFromParent = false ; if ( parent != null ) { // DISCONNECT RECURSIVELY THE PARENT NODE if ( canDisconnectFrom ( parent ) || iForceDirty ) { if ( parent . left == this ) { parent . left = null ; } else if ( parent . right == this ) { parent . right = null ; } else OLogManager . instance ( ) . warn ( this , "Node " + rid + " has the parent (" + parent + ") unlinked to itself. It links to " + parent ) ; totalDisconnected += parent . disconnect ( iForceDirty , iLevel + 1 ) ; parent = null ; disconnectedFromParent = true ; } } else { disconnectedFromParent = true ; } boolean disconnectedFromLeft = false ; if ( left != null ) { // DISCONNECT RECURSIVELY THE LEFT NODE if ( canDisconnectFrom ( left ) || iForceDirty ) { if ( left . parent == this ) left . parent = null ; else OLogManager . instance ( ) . warn ( this , "Node " + rid + " has the left (" + left + ") unlinked to itself. It links to " + left . parent ) ; totalDisconnected += left . disconnect ( iForceDirty , iLevel + 1 ) ; left = null ; disconnectedFromLeft = true ; } } else { disconnectedFromLeft = true ; } boolean disconnectedFromRight = false ; if ( right != null ) { // DISCONNECT RECURSIVELY THE RIGHT NODE if ( canDisconnectFrom ( right ) || iForceDirty ) { if ( right . parent == this ) right . parent = null ; else OLogManager . instance ( ) . warn ( this , "Node " + rid + " has the right (" + right + ") unlinked to itself. It links to " + right . parent ) ; totalDisconnected += right . disconnect ( iForceDirty , iLevel + 1 ) ; right = null ; disconnectedFromRight = true ; } } else { disconnectedFromLeft = true ; } if ( disconnectedFromParent && disconnectedFromLeft && disconnectedFromRight ) if ( ( ! dataProvider . isEntryDirty ( ) && ! dataProvider . getIdentity ( ) . 
isTemporary ( ) || iForceDirty ) && ! pTree . isNodeEntryPoint ( this ) ) { totalDisconnected ++ ; pTree . removeNodeFromMemory ( this ) ; clear ( ) ; } return totalDisconnected ; }
Disconnect the current node from others .
592
8
140,972
public V setValue ( final V iValue ) { V oldValue = getValue ( ) ; int index = tree . getPageIndex ( ) ; if ( dataProvider . setValueAt ( index , iValue ) ) markDirty ( ) ; return oldValue ; }
Invalidates the associated serialized value so that it is re - marshalled on the next node store .
57
20
140,973
public static OIdentifiable readIdentifiable ( final OChannelBinaryClient network ) throws IOException { final int classId = network . readShort ( ) ; if ( classId == RECORD_NULL ) return null ; if ( classId == RECORD_RID ) { return network . readRID ( ) ; } else { final ORecordInternal < ? > record = Orient . instance ( ) . getRecordFactoryManager ( ) . newInstance ( network . readByte ( ) ) ; if ( record instanceof ORecordSchemaAware < ? > ) ( ( ORecordSchemaAware < ? > ) record ) . fill ( network . readRID ( ) , network . readInt ( ) , network . readBytes ( ) , false ) ; else // DISCARD CLASS ID record . fill ( network . readRID ( ) , network . readInt ( ) , network . readBytes ( ) , false ) ; return record ; } }
SENT AS SHORT AS FIRST PACKET AFTER SOCKET CONNECTION
204
16
140,974
public static Map < Identity , JsonNode > getDocumentIdMap ( List < JsonNode > list , List < String > identityFields ) { Map < Identity , JsonNode > map = new HashMap <> ( ) ; if ( list != null ) { LOGGER . debug ( "Getting doc IDs for {} docs, fields={}" , list . size ( ) , identityFields ) ; for ( JsonNode node : list ) { Identity id = new Identity ( node , identityFields ) ; LOGGER . debug ( "ID={}" , id ) ; map . put ( id , node ) ; } } return map ; }
Build an id - doc map from a list of docs
136
11
140,975
public static boolean fastCompareDocs ( JsonNode sourceDocument , JsonNode destinationDocument , List < String > exclusionPaths , boolean ignoreTimestampMSDiffs ) { try { JsonDiff diff = new JsonDiff ( ) ; diff . setOption ( JsonDiff . Option . ARRAY_ORDER_INSIGNIFICANT ) ; diff . setOption ( JsonDiff . Option . RETURN_LEAVES_ONLY ) ; diff . setFilter ( new AbstractFieldFilter ( ) { public boolean includeField ( List < String > fieldName ) { return ! fieldName . get ( fieldName . size ( ) - 1 ) . endsWith ( "#" ) ; } } ) ; List < JsonDelta > list = diff . computeDiff ( sourceDocument , destinationDocument ) ; for ( JsonDelta x : list ) { String field = x . getField ( ) ; if ( ! isExcluded ( exclusionPaths , field ) ) { if ( reallyDifferent ( x . getNode1 ( ) , x . getNode2 ( ) , ignoreTimestampMSDiffs ) ) { return true ; } } } } catch ( Exception e ) { LOGGER . error ( "Cannot compare docs:{}" , e , e ) ; } return false ; }
Fast comparison of two docs to check whether they are the same , excluding the given exclusion paths .
270
12
140,976
public synchronized void createHole ( final long iRecordOffset , final int iRecordSize ) throws IOException { final long timer = OProfiler . getInstance ( ) . startChrono ( ) ; // IN MEMORY final int recycledPosition ; final ODataHoleInfo hole ; if ( ! freeHoles . isEmpty ( ) ) { // RECYCLE THE FIRST FREE HOLE recycledPosition = freeHoles . remove ( 0 ) ; hole = availableHolesList . get ( recycledPosition ) ; hole . dataOffset = iRecordOffset ; hole . size = iRecordSize ; } else { // APPEND A NEW ONE recycledPosition = getHoles ( ) ; hole = new ODataHoleInfo ( iRecordSize , iRecordOffset , recycledPosition ) ; availableHolesList . add ( hole ) ; file . allocateSpace ( RECORD_SIZE ) ; } availableHolesBySize . put ( hole , hole ) ; availableHolesByPosition . put ( hole , hole ) ; if ( maxHoleSize < iRecordSize ) maxHoleSize = iRecordSize ; // TO FILE final long p = recycledPosition * RECORD_SIZE ; file . writeLong ( p , iRecordOffset ) ; file . writeInt ( p + OBinaryProtocol . SIZE_LONG , iRecordSize ) ; OProfiler . getInstance ( ) . stopChrono ( PROFILER_DATA_HOLE_CREATE , timer ) ; }
Appends the hole to the end of the segment .
312
11
140,977
public synchronized ODataHoleInfo getHole ( final int iPosition ) { final ODataHoleInfo hole = availableHolesList . get ( iPosition ) ; if ( hole . dataOffset == - 1 ) return null ; return hole ; }
Fills the holes information into OPhysicalPosition object given as parameter .
53
14
140,978
public synchronized void updateHole ( final ODataHoleInfo iHole , final long iNewDataOffset , final int iNewRecordSize ) throws IOException { final long timer = OProfiler . getInstance ( ) . startChrono ( ) ; final boolean offsetChanged = iNewDataOffset != iHole . dataOffset ; final boolean sizeChanged = iNewRecordSize != iHole . size ; if ( maxHoleSize < iNewRecordSize ) maxHoleSize = iNewRecordSize ; // IN MEMORY if ( offsetChanged ) availableHolesByPosition . remove ( iHole ) ; if ( sizeChanged ) availableHolesBySize . remove ( iHole ) ; if ( offsetChanged ) iHole . dataOffset = iNewDataOffset ; if ( sizeChanged ) iHole . size = iNewRecordSize ; if ( offsetChanged ) availableHolesByPosition . put ( iHole , iHole ) ; if ( sizeChanged ) availableHolesBySize . put ( iHole , iHole ) ; // TO FILE final long holePosition = iHole . holeOffset * RECORD_SIZE ; if ( offsetChanged ) file . writeLong ( holePosition , iNewDataOffset ) ; if ( sizeChanged ) file . writeInt ( holePosition + OBinaryProtocol . SIZE_LONG , iNewRecordSize ) ; OProfiler . getInstance ( ) . stopChrono ( PROFILER_DATA_HOLE_UPDATE , timer ) ; }
Update hole data
326
3
140,979
public synchronized void deleteHole ( int iHolePosition ) throws IOException { // IN MEMORY final ODataHoleInfo hole = availableHolesList . get ( iHolePosition ) ; availableHolesBySize . remove ( hole ) ; availableHolesByPosition . remove ( hole ) ; hole . dataOffset = - 1 ; freeHoles . add ( iHolePosition ) ; // TO FILE iHolePosition = iHolePosition * RECORD_SIZE ; file . writeLong ( iHolePosition , - 1 ) ; }
Delete the hole
117
3
140,980
protected T getObject ( ) { final T object ; if ( reusedObject != null ) { // REUSE THE SAME RECORD AFTER HAVING RESETTED IT object = reusedObject ; object . reset ( ) ; } else // CREATE A NEW ONE object = ( T ) database . newInstance ( className ) ; return object ; }
Returns the object to use for the operation .
72
9
140,981
@ SneakyThrows ( IOException . class ) public static String sha1 ( String input ) throws NoSuchAlgorithmException { MessageDigest mDigest = MessageDigest . getInstance ( "SHA1" ) ; byte [ ] result = mDigest . digest ( input . getBytes ( "UTF-8" ) ) ; String resultString = String . format ( "%040x" , new BigInteger ( 1 , result ) ) ; return resultString ; }
Calculates the SHA1 Digest of a given input .
100
12
140,982
public void saveRecord ( final ORecordInternal < ? > iRecord , final String iClusterName , final OPERATION_MODE iMode , final ORecordCallback < ? extends Number > iCallback ) { try { database . executeSaveRecord ( iRecord , iClusterName , iRecord . getVersion ( ) , iRecord . getRecordType ( ) , true , iMode , iCallback ) ; } catch ( Exception e ) { // REMOVE IT FROM THE CACHE TO AVOID DIRTY RECORDS final ORecordId rid = ( ORecordId ) iRecord . getIdentity ( ) ; if ( rid . isValid ( ) ) database . getLevel1Cache ( ) . freeRecord ( rid ) ; if ( e instanceof RuntimeException ) throw ( RuntimeException ) e ; throw new OException ( e ) ; } }
Update the record .
185
4
140,983
public void deleteRecord ( final ORecordInternal < ? > iRecord , final OPERATION_MODE iMode ) { if ( ! iRecord . getIdentity ( ) . isPersistent ( ) ) return ; try { database . executeDeleteRecord ( iRecord , iRecord . getVersion ( ) , true , true , iMode ) ; } catch ( Exception e ) { // REMOVE IT FROM THE CACHE TO AVOID DIRTY RECORDS final ORecordId rid = ( ORecordId ) iRecord . getIdentity ( ) ; if ( rid . isValid ( ) ) database . getLevel1Cache ( ) . freeRecord ( rid ) ; if ( e instanceof RuntimeException ) throw ( RuntimeException ) e ; throw new OException ( e ) ; } }
Deletes the record .
170
5
140,984
@ SuppressWarnings ( "unchecked" ) public < RET > RET execute ( final Object ... iArgs ) { setParameters ( iArgs ) ; return ( RET ) ODatabaseRecordThreadLocal . INSTANCE . get ( ) . getStorage ( ) . command ( this ) ; }
Delegates the execution to the configured command executor .
61
11
140,985
public Collection < ODocument > getEntriesBetween ( final Object iRangeFrom , final Object iRangeTo ) { return getEntriesBetween ( iRangeFrom , iRangeTo , true ) ; }
Returns a set of documents with key between the range passed as parameter . Range bounds are included .
42
19
140,986
public long rebuild ( final OProgressListener iProgressListener ) { long documentIndexed = 0 ; final boolean intentInstalled = getDatabase ( ) . declareIntent ( new OIntentMassiveInsert ( ) ) ; acquireExclusiveLock ( ) ; try { try { map . clear ( ) ; } catch ( Exception e ) { // IGNORE EXCEPTION: IF THE REBUILD WAS LAUNCHED IN CASE OF RID INVALID CLEAR ALWAYS GOES IN ERROR } int documentNum = 0 ; long documentTotal = 0 ; for ( final String cluster : clustersToIndex ) documentTotal += getDatabase ( ) . countClusterElements ( cluster ) ; if ( iProgressListener != null ) iProgressListener . onBegin ( this , documentTotal ) ; for ( final String clusterName : clustersToIndex ) try { for ( final ORecord < ? > record : getDatabase ( ) . browseCluster ( clusterName ) ) { if ( record instanceof ODocument ) { final ODocument doc = ( ODocument ) record ; if ( indexDefinition == null ) throw new OConfigurationException ( "Index '" + name + "' cannot be rebuilt because has no a valid definition (" + indexDefinition + ")" ) ; final Object fieldValue = indexDefinition . getDocumentValueToIndex ( doc ) ; if ( fieldValue != null ) { if ( fieldValue instanceof Collection ) { for ( final Object fieldValueItem : ( Collection < ? > ) fieldValue ) { put ( fieldValueItem , doc ) ; } } else put ( fieldValue , doc ) ; ++ documentIndexed ; } } documentNum ++ ; if ( iProgressListener != null ) iProgressListener . onProgress ( this , documentNum , documentNum * 100f / documentTotal ) ; } } catch ( NoSuchElementException e ) { // END OF CLUSTER REACHED, IGNORE IT } lazySave ( ) ; if ( iProgressListener != null ) iProgressListener . onCompletition ( this , true ) ; } catch ( final Exception e ) { if ( iProgressListener != null ) iProgressListener . onCompletition ( this , false ) ; try { map . 
clear ( ) ; } catch ( Exception e2 ) { // IGNORE EXCEPTION: IF THE REBUILD WAS LAUNCHED IN CASE OF RID INVALID CLEAR ALWAYS GOES IN ERROR } throw new OIndexException ( "Error on rebuilding the index for clusters: " + clustersToIndex , e ) ; } finally { if ( intentInstalled ) getDatabase ( ) . declareIntent ( null ) ; releaseExclusiveLock ( ) ; } return documentIndexed ; }
Populates the index with all the existing records . Uses the massive insert intent to speed up and keep the consumed memory low .
561
26
140,987
public boolean isConnected ( ) { if ( socket != null && socket . isConnected ( ) && ! socket . isInputShutdown ( ) && ! socket . isOutputShutdown ( ) ) return true ; return false ; }
Tells if the channel is connected .
49
8
140,988
public void detach ( Object self ) throws NoSuchMethodException , IllegalAccessException , InvocationTargetException { for ( String fieldName : doc . fieldNames ( ) ) { Object value = getValue ( self , fieldName , false , null ) ; if ( value instanceof OLazyObjectMultivalueElement ) ( ( OLazyObjectMultivalueElement ) value ) . detach ( ) ; OObjectEntitySerializer . setFieldValue ( getField ( fieldName , self . getClass ( ) ) , self , value ) ; } OObjectEntitySerializer . setIdField ( self . getClass ( ) , self , doc . getIdentity ( ) ) ; OObjectEntitySerializer . setVersionField ( self . getClass ( ) , self , doc . getVersion ( ) ) ; }
Method that detaches all fields contained in the document to the given object
168
14
140,989
public void attach ( Object self ) throws IllegalArgumentException , IllegalAccessException , NoSuchMethodException , InvocationTargetException { for ( Class < ? > currentClass = self . getClass ( ) ; currentClass != Object . class ; ) { if ( Proxy . class . isAssignableFrom ( currentClass ) ) { currentClass = currentClass . getSuperclass ( ) ; continue ; } for ( Field f : currentClass . getDeclaredFields ( ) ) { Object value = OObjectEntitySerializer . getFieldValue ( f , self ) ; value = setValue ( self , f . getName ( ) , value ) ; OObjectEntitySerializer . setFieldValue ( f , self , value ) ; } currentClass = currentClass . getSuperclass ( ) ; if ( currentClass == null || currentClass . equals ( ODocument . class ) ) // POJO EXTENDS ODOCUMENT: SPECIAL CASE: AVOID TO CONSIDER // ODOCUMENT FIELDS currentClass = Object . class ; } }
Method that attaches all data contained in the object to the associated document
221
13
140,990
private void ensureRespondJsScriptElement ( ) { if ( this . respondJsScript == null ) { this . respondJsScript = Document . get ( ) . createScriptElement ( ) ; this . respondJsScript . setSrc ( GWT . getModuleBaseForStaticFiles ( ) + DefaultIE8ThemeController . RESPOND_JS_LOCATION ) ; this . respondJsScript . setType ( "text/javascript" ) ; } }
Ensures that the respond . js script element has been created .
95
7
140,991
public static MessageDispatcherServlet createMessageDispatcherServlet ( Class ... contextConfigLocation ) { StringBuilder items = new StringBuilder ( ) ; for ( Class aClass : contextConfigLocation ) { items . append ( aClass . getName ( ) ) ; items . append ( "," ) ; } MessageDispatcherServlet messageDispatcherServlet = new MessageDispatcherServlet ( ) ; messageDispatcherServlet . setContextClass ( AnnotationConfigWebApplicationContext . class ) ; messageDispatcherServlet . setContextConfigLocation ( removeEnd ( items . toString ( ) , "," ) ) ; messageDispatcherServlet . setTransformWsdlLocations ( true ) ; return messageDispatcherServlet ; }
Creates a spring - ws message dispatcher servlet
164
11
140,992
@ VisibleForTesting static Integer getReceivedStations ( AisExtractor extractor , int slotTimeout , int startIndex ) { if ( slotTimeout == 3 || slotTimeout == 5 || slotTimeout == 7 ) return extractor . getValue ( startIndex + 5 , startIndex + 19 ) ; else return null ; }
Returns received stations as per 1371 - 4 . pdf .
69
12
140,993
private static Integer getHourUtc ( AisExtractor extractor , int slotTimeout , int startIndex ) { if ( slotTimeout == 1 ) { // skip the msb bit int hours = extractor . getValue ( startIndex + 5 , startIndex + 10 ) ; return hours ; } else return null ; }
Returns hour UTC as per 1371 - 4 . pdf .
67
12
140,994
private static Integer getMinuteUtc ( AisExtractor extractor , int slotTimeout , int startIndex ) { if ( slotTimeout == 1 ) { // skip the msb bit int minutes = extractor . getValue ( startIndex + 10 , startIndex + 17 ) ; return minutes ; } else return null ; }
Returns minute UTC as per 1371 - 4 . pdf .
68
12
140,995
private static Integer getSlotOffset ( AisExtractor extractor , int slotTimeout , int startIndex ) { if ( slotTimeout == 0 ) return extractor . getValue ( startIndex + 5 , startIndex + 19 ) ; else return null ; }
Returns slot offset as per 1371 - 4 . pdf .
53
12
140,996
public synchronized int getValue ( int from , int to ) { try { // is synchronized so that values of bitSet and calculated can be // lazily // calculated and safely published (thread safe). SixBit . convertSixBitToBits ( message , padBits , bitSet , calculated , from , to ) ; return ( int ) SixBit . getValue ( from , to , bitSet ) ; } catch ( SixBitException | ArrayIndexOutOfBoundsException e ) { throw new AisParseException ( e ) ; } }
Returns an unsigned integer value using the bits from character position start to position stop in the decoded message .
114
21
140,997
public synchronized int getSignedValue ( int from , int to ) { try { // is synchronized so that values of bitSet and calculated can be // lazily // calculated and safely published (thread safe). SixBit . convertSixBitToBits ( message , padBits , bitSet , calculated , from , to ) ; return ( int ) SixBit . getSignedValue ( from , to , bitSet ) ; } catch ( SixBitException e ) { throw new AisParseException ( e ) ; } }
Returns a signed integer value using the bits from character position start to position stop in the decoded message .
110
21
140,998
public static Object getField ( Object obj , String name ) { try { Class < ? extends Object > klass = obj . getClass ( ) ; do { try { Field field = klass . getDeclaredField ( name ) ; field . setAccessible ( true ) ; return field . get ( obj ) ; } catch ( NoSuchFieldException e ) { klass = klass . getSuperclass ( ) ; } } while ( klass != null ) ; throw new RuntimeException ( ) ; // true no such field exception } catch ( SecurityException e ) { throw new RuntimeException ( e ) ; } catch ( IllegalArgumentException e ) { throw new RuntimeException ( e ) ; } catch ( IllegalAccessException e ) { throw new RuntimeException ( e ) ; } }
Gets a field's value from an object by name, searching the class hierarchy.
164
8
140,999
/**
 * Returns the great-circle distance in kilometres between this WGS84 position
 * and {@code position}, using the spherical form of Vincenty's formula
 * (as given on Wikipedia).
 *
 * @param position the other position
 * @return the absolute distance in kilometres
 */
public final double getDistanceToKm ( Position position ) {
    double phi1 = toRadians ( lat ) ;
    double phi2 = toRadians ( position . lat ) ;
    double deltaLambda = toRadians ( position . lon ) - toRadians ( lon ) ;
    // Precompute the trigonometric terms that appear more than once.
    double sinPhi1 = sin ( phi1 ) ;
    double cosPhi1 = cos ( phi1 ) ;
    double sinPhi2 = sin ( phi2 ) ;
    double cosPhi2 = cos ( phi2 ) ;
    double cosDeltaLambda = cos ( deltaLambda ) ;
    // atan2(numerator, denominator) gives the central angle between the points.
    double numerator = sqrt ( sqr ( cosPhi2 * sin ( deltaLambda ) )
            + sqr ( cosPhi1 * sinPhi2 - sinPhi1 * cosPhi2 * cosDeltaLambda ) ) ;
    double denominator = sinPhi1 * sinPhi2 + cosPhi1 * cosPhi2 * cosDeltaLambda ;
    return abs ( radiusEarthKm * atan2 ( numerator , denominator ) ) ;
}
Returns the distance in kilometres between two WGS84 positions according to Vincenty's formula (from Wikipedia).
230
17