idx
int64
0
165k
question
stringlengths
73
4.15k
target
stringlengths
5
918
len_question
int64
21
890
len_target
int64
3
255
15,700
public synchronized void endLog ( ) { if ( ! finalized ) { finishedTime = Calendar . getInstance ( ) ; finalized = true ; logWriter . writeEndLog ( ) ; //copy backup chain log file in into Backupset files itself for portability (e.g. on another server) try { InputStream in = PrivilegedFileHelper . fileInputStream ( log ) ; File dest = new File ( config . getBackupDir ( ) + File . separator + log . getName ( ) ) ; if ( ! PrivilegedFileHelper . exists ( dest ) ) { OutputStream out = PrivilegedFileHelper . fileOutputStream ( dest ) ; byte [ ] buf = new byte [ ( int ) ( PrivilegedFileHelper . length ( log ) ) ] ; in . read ( buf ) ; String sConfig = new String ( buf , Constants . DEFAULT_ENCODING ) ; sConfig = sConfig . replaceAll ( "<backup-dir>.+</backup-dir>" , "<backup-dir>.</backup-dir>" ) ; out . write ( sConfig . getBytes ( Constants . DEFAULT_ENCODING ) ) ; in . close ( ) ; out . close ( ) ; } } catch ( PatternSyntaxException e ) { logger . error ( "Can't write log" , e ) ; } catch ( FileNotFoundException e ) { logger . error ( "Can't write log" , e ) ; } catch ( IOException e ) { logger . error ( "Can't write log" , e ) ; } } }
Finalize log .
334
4
15,701
/**
 * WebDAV DELETE implementation: removes the item at the given path unless it
 * is a node locked with a token different from the supplied one.
 *
 * @return 204 on success, 423 if locked, 404 if missing, 403 on other errors
 */
public Response delete(Session session, String path, String lockTokenHeader)
{
   try
   {
      String suppliedToken = (lockTokenHeader == null) ? "" : lockTokenHeader;
      Item item = session.getItem(path);
      if (item.isNode())
      {
         Node node = (Node)item;
         if (node.isLocked())
         {
            String nodeLockToken = node.getLock().getLockToken();
            boolean tokenMatches = nodeLockToken != null && nodeLockToken.equals(suppliedToken);
            if (!tokenMatches)
            {
               return Response.status(HTTPStatus.LOCKED)
                  .entity("The " + path + " item is locked. ")
                  .type(MediaType.TEXT_PLAIN)
                  .build();
            }
         }
      }
      item.remove();
      session.save();
      return Response.status(HTTPStatus.NO_CONTENT).build();
   }
   catch (PathNotFoundException exc)
   {
      return Response.status(HTTPStatus.NOT_FOUND).entity(exc.getMessage()).build();
   }
   catch (RepositoryException exc)
   {
      return Response.status(HTTPStatus.FORBIDDEN).entity(exc.getMessage()).build();
   }
}
Webdav Delete method implementation .
271
7
15,702
/**
 * Spools the ValueData InputStream either into an in-memory byte buffer or,
 * once spoolConfig.maxBufferSize is exceeded, into a temporary SpoolFile.
 * On success exactly one of {@code spoolFile} / {@code data} is set; the
 * source stream reference is always cleared in the finally block.
 */
protected void spoolInputStream()
{
   if (spoolFile != null || data != null) // already spooled
   {
      return;
   }
   byte[] buffer = new byte[0];
   byte[] tmpBuff = new byte[2048];
   int read = 0;
   int len = 0;
   SpoolFile sf = null;
   OutputStream sfout = null;
   try
   {
      while ((read = stream.read(tmpBuff)) >= 0)
      {
         if (sfout != null)
         {
            // already switched to file mode: spool to temp file
            sfout.write(tmpBuff, 0, read);
            len += read;
         }
         else if (len + read > spoolConfig.maxBufferSize)
         {
            // threshold for keeping data in memory exceeded,
            // if have a fileCleaner create temp file and spool buffer contents.
            sf = SpoolFile.createTempFile("jcrvd", null, spoolConfig.tempDirectory);
            sf.acquire(this);
            sfout = PrivilegedFileHelper.fileOutputStream(sf);
            sfout.write(buffer, 0, len);
            sfout.write(tmpBuff, 0, read);
            buffer = null;
            len += read;
         }
         else
         {
            // reallocate new buffer and spool old buffer contents
            byte[] newBuffer = new byte[len + read];
            System.arraycopy(buffer, 0, newBuffer, 0, len);
            System.arraycopy(tmpBuff, 0, newBuffer, len, read);
            buffer = newBuffer;
            len += read;
         }
      }
      if (sf != null)
      {
         // spooled to file
         this.spoolFile = sf;
         this.data = null;
      }
      else
      {
         // kept fully in memory
         this.spoolFile = null;
         this.data = buffer;
      }
   }
   catch (IOException e)
   {
      if (sf != null)
      {
         try
         {
            // release the half-written temp file and hand it to the cleaner
            sf.release(this);
            spoolConfig.fileCleaner.addFile(sf);
         }
         catch (FileNotFoundException ex)
         {
            if (LOG.isDebugEnabled())
            {
               LOG.debug("Could not remove temporary file : " + sf.getAbsolutePath());
            }
         }
      }
      throw new IllegalStateException("Error of spooling to temp file from " + stream, e);
   }
   finally
   {
      try
      {
         if (sfout != null)
         {
            sfout.close();
         }
      }
      catch (IOException e)
      {
         LOG.error("Error of spool output close.", e);
      }
      // the source stream is consumed either way
      this.stream = null;
   }
}
Spool ValueData temp InputStream to a temp File .
553
12
15,703
/**
 * Deletes the current spool file (if any). A SpoolFile is released from this
 * holder first; if the file cannot be deleted immediately it is handed to
 * the file cleaner for deferred removal.
 *
 * @throws IOException if releasing the spool file fails
 */
private void removeSpoolFile() throws IOException
{
   if (spoolFile != null)
   {
      if (spoolFile instanceof SpoolFile)
      {
         // cast required: the field is declared as a plain File
         // (the original called release() without the cast)
         ((SpoolFile)spoolFile).release(this);
      }
      if (PrivilegedFileHelper.exists(spoolFile))
      {
         if (!PrivilegedFileHelper.delete(spoolFile))
         {
            // deletion failed (file may still be mapped/open): defer to cleaner
            spoolConfig.fileCleaner.addFile(spoolFile);
            if (LOG.isDebugEnabled())
            {
               LOG.debug("Could not remove file. Add to fileCleaner "
                  + PrivilegedFileHelper.getAbsolutePath(spoolFile));
            }
         }
      }
   }
}
Delete current spool file .
141
6
15,704
/**
 * Validates and normalizes the node type value bean: empty primary item
 * names are nulled, null lists are replaced by empty ones and existing
 * lists are cleaned by the corresponding fix* helpers.
 *
 * @return true if anything was changed during validation
 */
public boolean validateNodeType()
{
   boolean hasValidated = false;
   if (primaryItemName != null && primaryItemName.length() <= 0)
   {
      primaryItemName = null;
      hasValidated = true;
   }
   if (declaredSupertypeNames == null)
   {
      declaredSupertypeNames = new ArrayList<String>();
      hasValidated = true;
   }
   else
   {
      int prevSize = declaredSupertypeNames.size();
      fixStringsList(declaredSupertypeNames);
      // BUG FIX: accumulate with |= — the original plain assignment could
      // erase a change already detected by an earlier step
      hasValidated |= prevSize != declaredSupertypeNames.size();
   }
   if (declaredPropertyDefinitionValues == null)
   {
      declaredPropertyDefinitionValues = new ArrayList<PropertyDefinitionValue>();
      hasValidated = true;
   }
   else
   {
      int prevSize = declaredPropertyDefinitionValues.size();
      fixPropertyDefinitionValuesList(declaredPropertyDefinitionValues);
      hasValidated |= prevSize != declaredPropertyDefinitionValues.size();
   }
   if (declaredChildNodeDefinitionValues == null)
   {
      declaredChildNodeDefinitionValues = new ArrayList<NodeDefinitionValue>();
      hasValidated = true;
   }
   else
   {
      int prevSize = declaredChildNodeDefinitionValues.size();
      fixNodeDefinitionValuesList(declaredChildNodeDefinitionValues);
      hasValidated |= prevSize != declaredChildNodeDefinitionValues.size();
   }
   return hasValidated;
}
validateNodeType method checks the value bean for each valid field
290
13
15,705
/**
 * Loads every stored value of this property into a new Value array.
 *
 * @return array of Value objects created from the persisted value data
 * @throws RepositoryException if a value cannot be loaded
 */
public Value[] getValueArray() throws RepositoryException
{
   int count = propertyData.getValues().size();
   Value[] result = new Value[count];
   for (int idx = 0; idx < count; idx++)
   {
      result[idx] = valueFactory.loadValue(propertyData.getValues().get(idx), propertyData.getType());
   }
   return result;
}
Copies property values into array .
90
7
15,706
/**
 * Builds a human readable dump of this property: its path followed by the
 * string form of each value, separated by ';'. Best effort — failures are
 * logged and a partial result is returned.
 *
 * @return info about the property values
 */
public String dump()
{
   StringBuilder vals = new StringBuilder("Property ");
   try
   {
      vals = new StringBuilder(getPath()).append(" values: ");
      // PERF FIX: fetch the value array once instead of reloading every
      // value on each loop iteration (the original called getValueArray()
      // twice per iteration)
      Value[] values = getValueArray();
      for (int i = 0; i < values.length; i++)
      {
         vals.append(ValueDataUtil.getString(((BaseValue)values[i]).getInternalData())).append(";");
      }
   }
   catch (Exception e)
   {
      LOG.error(e.getLocalizedMessage(), e);
   }
   return vals.toString();
}
Get info about property values .
141
6
15,707
/**
 * Sets a new parent id (null or empty string is normalized to null) and
 * refreshes the group id by re-applying the current group name.
 */
public void setParentId(String parentId)
{
   if (parentId == null || parentId.equals(""))
   {
      this.parentId = null;
   }
   else
   {
      this.parentId = parentId;
   }
   setGroupName(groupName);
}
Sets new parentId and refreshes groupId .
45
11
15,708
/**
 * Skips the given number of elements by advancing the iterator.
 *
 * @param n number of elements to skip; must be non-negative
 * @throws IllegalArgumentException if n is negative
 * @throws NoSuchElementException if fewer than n elements remain
 */
public void skip(long n) throws IllegalArgumentException, NoSuchElementException
{
   if (n < 0)
   {
      throw new IllegalArgumentException("skip(" + n + ")");
   }
   long remaining = n;
   while (remaining > 0)
   {
      next();
      remaining--;
   }
}
Skips the given number of elements .
64
8
15,709
/**
 * Returns the list of all entries of the given directory including its
 * subdirectories (directory entries themselves are included in the result).
 *
 * @param srcPath directory to list
 * @return recursive list of contained files and directories
 * @throws IOException if srcPath is not a directory or cannot be listed
 */
public static List<File> listFiles(File srcPath) throws IOException
{
   List<File> result = new ArrayList<File>();
   if (!srcPath.isDirectory())
   {
      // BUG FIX: the original message wrongly said "is a directory"
      // for the NOT-a-directory case
      throw new IOException(srcPath.getAbsolutePath() + " is not a directory");
   }
   File[] children = srcPath.listFiles();
   if (children == null)
   {
      // File.listFiles() returns null on an I/O error
      throw new IOException("Can't list directory " + srcPath.getAbsolutePath());
   }
   for (File subFile : children)
   {
      result.add(subFile);
      if (subFile.isDirectory())
      {
         result.addAll(listFiles(subFile));
      }
   }
   return result;
}
Returns the files list of whole directory including its sub directories .
120
12
15,710
/**
 * Recursively copies a directory (or a single file) from srcPath to
 * dstPath; missing destination directories are created.
 */
public static void copyDirectory(File srcPath, File dstPath) throws IOException
{
   if (!srcPath.isDirectory())
   {
      // plain file: stream the content across
      InputStream in = null;
      OutputStream out = null;
      try
      {
         in = new FileInputStream(srcPath);
         out = new FileOutputStream(dstPath);
         transfer(in, out);
      }
      finally
      {
         if (in != null)
         {
            in.close();
         }
         if (out != null)
         {
            out.flush();
            out.close();
         }
      }
   }
   else
   {
      if (!dstPath.exists())
      {
         dstPath.mkdirs();
      }
      String[] children = srcPath.list();
      for (int i = 0; i < children.length; i++)
      {
         copyDirectory(new File(srcPath, children[i]), new File(dstPath, children[i]));
      }
   }
}
Copy directory .
194
3
15,711
/**
 * Recursively removes a directory (or deletes a single file).
 *
 * @throws IOException if a file or folder cannot be deleted
 */
public static void removeDirectory(File dir) throws IOException
{
   if (!dir.isDirectory())
   {
      if (!dir.delete())
      {
         throw new IOException("Can't remove file : " + dir.getCanonicalPath());
      }
      return;
   }
   // delete all children first, then the (now empty) directory itself
   for (File subFile : dir.listFiles())
   {
      removeDirectory(subFile);
   }
   if (!dir.delete())
   {
      throw new IOException("Can't remove folder : " + dir.getCanonicalPath());
   }
}
Remove directory .
120
3
15,712
/**
 * Recursively adds the given file or directory to the zip stream under
 * relativePath; directories produce explicit directory entries.
 */
private static void compressDirectory(String relativePath, File srcPath, ZipOutputStream zip) throws IOException
{
   String entryPath = relativePath + "/" + srcPath.getName();
   if (srcPath.isDirectory())
   {
      zip.putNextEntry(new ZipEntry(entryPath + "/"));
      zip.closeEntry();
      String[] children = srcPath.list();
      for (int i = 0; i < children.length; i++)
      {
         compressDirectory(entryPath, new File(srcPath, children[i]), zip);
      }
   }
   else
   {
      InputStream in = new FileInputStream(srcPath);
      try
      {
         zip.putNextEntry(new ZipEntry(entryPath));
         transfer(in, zip);
         zip.closeEntry();
      }
      finally
      {
         in.close();
      }
   }
}
Compress files and directories .
210
6
15,713
/**
 * Renames srcFile to dstFile, removing an existing destination first.
 *
 * @throws IOException if the destination exists and cannot be deleted
 */
public static void deleteDstAndRename(File srcFile, File dstFile) throws IOException
{
   if (dstFile.exists() && !dstFile.delete())
   {
      throw new IOException("Cannot delete " + dstFile);
   }
   renameFile(srcFile, dstFile);
}
Rename file . Tries to remove the destination first . If the file can't be renamed in the standard way , copying the data will be used instead ( e.g. on another server ) .
71
27
15,714
public static void renameFile ( File srcFile , File dstFile ) throws IOException { // Rename the srcFile file to the new one. Unfortunately, the renameTo() // method does not work reliably under some JVMs. Therefore, if the // rename fails, we manually rename by copying the srcFile file to the new one if ( ! srcFile . renameTo ( dstFile ) ) { InputStream in = null ; OutputStream out = null ; try { in = new FileInputStream ( srcFile ) ; out = new FileOutputStream ( dstFile ) ; transfer ( in , out ) ; } catch ( IOException ioe ) { IOException newExc = new IOException ( "Cannot rename " + srcFile + " to " + dstFile ) ; newExc . initCause ( ioe ) ; throw newExc ; } finally { if ( in != null ) { in . close ( ) ; } if ( out != null ) { out . close ( ) ; } } // delete the srcFile file. srcFile . delete ( ) ; } }
Rename file . If the file can't be renamed in the standard way , copying the data will be used instead .
223
21
15,715
/**
 * Returns the cumulative size in bytes of all files under the given
 * directory, including all subfolders.
 *
 * @param dir directory to measure
 * @return total size in bytes; 0 for an unreadable directory
 */
public static long getSize(File dir)
{
   long size = 0;
   File[] children = dir.listFiles();
   if (children == null)
   {
      // BUG FIX: File.listFiles() returns null when dir is not a directory
      // or on an I/O error — the original would throw a NullPointerException
      return 0;
   }
   for (File file : children)
   {
      size += file.isFile() ? file.length() : getSize(file);
   }
   return size;
}
Returns the size of directory including all subfolders .
63
11
15,716
/**
 * Removes the named workspace from the repository if it exists; when the
 * workspace cannot be removed directly, its open sessions are closed first.
 */
protected void removeWorkspace(ManageableRepository mr, String workspaceName) throws RepositoryException
{
   boolean found = false;
   for (String wsName : mr.getWorkspaceNames())
   {
      if (workspaceName.equals(wsName))
      {
         found = true;
         break;
      }
   }
   if (!found)
   {
      return;
   }
   if (!mr.canRemoveWorkspace(workspaceName))
   {
      // close remaining sessions so the workspace becomes removable
      WorkspaceContainerFacade wc = mr.getWorkspaceContainer(workspaceName);
      SessionRegistry sessionRegistry = (SessionRegistry)wc.getComponent(SessionRegistry.class);
      sessionRegistry.closeSessions(workspaceName);
   }
   mr.removeWorkspace(workspaceName);
}
Remove workspace .
163
3
15,717
private void removeChildrenItems ( JDBCStorageConnection conn , ResultSet resultSet ) throws SQLException , IllegalNameException , IllegalStateException , UnsupportedOperationException , InvalidItemStateException , RepositoryException { String parentId = resultSet . getString ( DBConstants . COLUMN_ID ) ; String selectStatement = "select * from " + iTable + " where I_CLASS = 1 and PARENT_ID = '" + parentId + "'" ; String deleteStatement = "delete from " + iTable + " where I_CLASS = 1 and PARENT_ID = '" + parentId + "'" ; // traversing down to the bottom of the tree PreparedStatement statement = conn . getJdbcConnection ( ) . prepareStatement ( selectStatement ) ; ResultSet selResult = statement . executeQuery ( ) ; try { while ( selResult . next ( ) ) { removeChildrenItems ( conn , selResult ) ; } } finally { JDBCUtils . freeResources ( selResult , statement , null ) ; } // remove properties NodeData node = createNodeData ( resultSet ) ; for ( PropertyData prop : conn . getChildPropertiesData ( node ) ) { conn . delete ( prop , new SimpleChangedSizeHandler ( ) ) ; } // remove nodes statement = conn . getJdbcConnection ( ) . prepareStatement ( deleteStatement ) ; try { statement . execute ( ) ; } finally { JDBCUtils . freeResources ( null , statement , null ) ; } }
Removes all children items .
327
6
15,718
/**
 * (Re)loads the script stored in the supplied node: any previously published
 * resource for this key is unpublished and the script is published again
 * per-request, keeping the former resource properties when available.
 */
private void loadScript(Node node) throws Exception
{
   ResourceId key = new NodeScriptKey(repository.getConfiguration().getName(), workspaceName, node);
   ObjectFactory<AbstractResourceDescriptor> resource =
      groovyScript2RestLoader.groovyPublisher.unpublishResource(key);
   if (resource == null)
   {
      // no previous publication: publish without properties
      groovyScript2RestLoader.groovyPublisher.publishPerRequest(node.getProperty("jcr:data").getStream(), key,
         null);
   }
   else
   {
      // re-publish keeping the previous resource properties
      groovyScript2RestLoader.groovyPublisher.publishPerRequest(node.getProperty("jcr:data").getStream(), key,
         resource.getObjectModel().getProperties());
   }
}
Load script from supplied node .
168
6
15,719
/**
 * Unloads (unpublishes) the script identified by the given node path.
 */
private void unloadScript(String path) throws Exception
{
   String repositoryName = repository.getConfiguration().getName();
   ResourceId key = new NodeScriptKey(repositoryName, workspaceName, path);
   groovyScript2RestLoader.groovyPublisher.unpublishResource(key);
}
Unload script .
59
4
15,720
/**
 * Resolves the eXo container for the current context. When the current
 * container is the root container, the configured (or default) portal
 * container is returned instead.
 *
 * @throws ResourceException if no usable container can be resolved
 */
private ExoContainer getContainer() throws ResourceException
{
   ExoContainer container = ExoContainerContext.getCurrentContainer();
   if (container == null)
   {
      throw new ResourceException("The eXo container is null, because the current container is null.");
   }
   if (container instanceof RootContainer)
   {
      String portalContainerName =
         portalContainer == null ? PortalContainer.DEFAULT_PORTAL_CONTAINER_NAME : portalContainer;
      container = RootContainer.getInstance().getPortalContainer(portalContainerName);
      if (container == null)
      {
         throw new ResourceException("The eXo container is null, because the current container is a RootContainer "
            + "and there is no PortalContainer with the name '" + portalContainerName + "'.");
      }
   }
   return container;
}
Gets the container from the current context
175
8
15,721
/**
 * Reads value data from the given stream. Content up to
 * spoolConfig.maxBufferSize bytes is kept in an in-memory buffer; larger
 * content is spooled into a SwapFile keyed by cid/orderNumber/version.
 *
 * @return wrapper holding the created value data and its size in bytes
 */
public static ValueDataWrapper readValueData(String cid, int type, int orderNumber, int version,
   final InputStream content, SpoolConfig spoolConfig) throws IOException
{
   ValueDataWrapper vdDataWrapper = new ValueDataWrapper();
   byte[] buffer = new byte[0];
   byte[] spoolBuffer = new byte[ValueFileIOHelper.IOBUFFER_SIZE];
   int read;
   int len = 0;
   OutputStream out = null;
   SwapFile swapFile = null;
   try
   {
      // stream from database
      if (content != null)
      {
         while ((read = content.read(spoolBuffer)) >= 0)
         {
            if (out != null)
            {
               // already in file mode: spool to temp file
               out.write(spoolBuffer, 0, read);
            }
            else if (len + read > spoolConfig.maxBufferSize)
            {
               // threshold for keeping data in memory exceeded;
               // create temp file and spool buffer contents
               swapFile =
                  SwapFile.get(spoolConfig.tempDirectory, cid + orderNumber + "." + version,
                     spoolConfig.fileCleaner);
               if (swapFile.isSpooled())
               {
                  // break, value already spooled (by another reader)
                  buffer = null;
                  break;
               }
               out = PrivilegedFileHelper.fileOutputStream(swapFile);
               out.write(buffer, 0, len);
               out.write(spoolBuffer, 0, read);
               buffer = null;
            }
            else
            {
               // reallocate new buffer and spool old buffer contents
               byte[] newBuffer = new byte[len + read];
               System.arraycopy(buffer, 0, newBuffer, 0, len);
               System.arraycopy(spoolBuffer, 0, newBuffer, len, read);
               buffer = newBuffer;
            }
            len += read;
         }
      }
   }
   finally
   {
      if (out != null)
      {
         out.close();
         swapFile.spoolDone();
      }
   }
   vdDataWrapper.size = len;
   if (swapFile != null)
   {
      // large value: keep it on disk
      vdDataWrapper.value = new CleanableFilePersistedValueData(orderNumber, swapFile, spoolConfig);
   }
   else
   {
      // small value: build the typed in-memory representation
      vdDataWrapper.value = createValueData(type, orderNumber, buffer);
   }
   return vdDataWrapper;
}
Read value data from stream .
493
6
15,722
/**
 * Reads value data from a file. Files larger than spoolConfig.maxBufferSize
 * stay on disk as FilePersistedValueData; smaller ones are read fully into
 * memory and converted to the typed representation.
 *
 * @return wrapper holding the created value data and the file size
 */
public static ValueDataWrapper readValueData(int type, int orderNumber, File file, SpoolConfig spoolConfig)
   throws IOException
{
   ValueDataWrapper vdDataWrapper = new ValueDataWrapper();
   long fileSize = file.length();
   vdDataWrapper.size = fileSize;
   if (fileSize > spoolConfig.maxBufferSize)
   {
      vdDataWrapper.value = new FilePersistedValueData(orderNumber, file, spoolConfig);
   }
   else
   {
      // JCR-2463 In case the file was renamed to be removed/changed,
      // but the transaction wasn't rollbacked cleanly
      file = fixFileName(file);
      FileInputStream is = new FileInputStream(file);
      try
      {
         byte[] data = new byte[(int)fileSize];
         byte[] buff =
            new byte[ValueFileIOHelper.IOBUFFER_SIZE > fileSize ? ValueFileIOHelper.IOBUFFER_SIZE
               : (int)fileSize];
         int rpos = 0;
         int read;
         // copy the file content into data in IOBUFFER_SIZE chunks
         while ((read = is.read(buff)) >= 0)
         {
            System.arraycopy(buff, 0, data, rpos, read);
            rpos += read;
         }
         vdDataWrapper.value = createValueData(type, orderNumber, data);
      }
      finally
      {
         is.close();
      }
   }
   return vdDataWrapper;
}
Read value data from file .
308
6
15,723
/**
 * Creates value data of the concrete persisted type matching the given JCR
 * property type, parsing the raw bytes accordingly. This avoids keeping raw
 * bytes in memory for non-binary types.
 *
 * @throws IOException if the bytes cannot be parsed for the given type
 * @throws IllegalStateException for an unknown property type
 */
public static PersistedValueData createValueData(int type, int orderNumber, byte[] data) throws IOException
{
   switch (type)
   {
      case PropertyType.BINARY :
      case PropertyType.UNDEFINED :
         // keep raw bytes
         return new ByteArrayPersistedValueData(orderNumber, data);
      case PropertyType.BOOLEAN :
         return new BooleanPersistedValueData(orderNumber, Boolean.valueOf(getString(data)));
      case PropertyType.DATE :
         try
         {
            return new CalendarPersistedValueData(orderNumber, JCRDateFormat.parse(getString(data)));
         }
         catch (ValueFormatException e)
         {
            throw new IOException("Can't create Calendar value", e);
         }
      case PropertyType.DOUBLE :
         return new DoublePersistedValueData(orderNumber, Double.valueOf(getString(data)));
      case PropertyType.LONG :
         return new LongPersistedValueData(orderNumber, Long.valueOf(getString(data)));
      case PropertyType.NAME :
         try
         {
            return new NamePersistedValueData(orderNumber, InternalQName.parse(getString(data)));
         }
         catch (IllegalNameException e)
         {
            throw new IOException(e.getMessage(), e);
         }
      case PropertyType.PATH :
         try
         {
            return new PathPersistedValueData(orderNumber, QPath.parse(getString(data)));
         }
         catch (IllegalPathException e)
         {
            throw new IOException(e.getMessage(), e);
         }
      case PropertyType.REFERENCE :
         return new ReferencePersistedValueData(orderNumber, new Identifier(data));
      case PropertyType.STRING :
         return new StringPersistedValueData(orderNumber, getString(data));
      case ExtendedPropertyType.PERMISSION :
         return new PermissionPersistedValueData(orderNumber, AccessControlEntry.parse(getString(data)));
      default :
         throw new IllegalStateException("Unknown property type " + type);
   }
}
Creates value data depending on its type . It avoids storing unnecessary bytes in memory every time .
440
19
15,724
private boolean isRecordAlreadyExistsException ( SQLException e ) { // Search in UPPER case // MySQL 5.0.x - com.mysql.jdbc.exceptions.MySQLIntegrityConstraintViolationException: // Duplicate entry '4f684b34c0a800030018c34f99165791-0' for key 1 // HSQLDB 8.x - java.sql.SQLException: Violation of unique constraint $$: duplicate value(s) for // column(s) $$: // JCR_VCAS_PK in statement [INSERT INTO JCR_VCAS (PROPERTY_ID, ORDER_NUM, CAS_ID) // VALUES(?,?,?)] String H2_PK_CONSTRAINT_DETECT_PATTERN = "(.*JdbcSQLException.*violation.*PRIMARY_KEY_.*)"; // PostgreSQL 8.2.x - org.postgresql.util.PSQLException: ERROR: duplicate key violates unique // constraint "jcr_vcas_pk" // Oracle 9i x64 (on Fedora 7) - java.sql.SQLException: ORA-00001: unique constraint // (EXOADMIN.JCR_VCAS_PK) violated // H2 - org.h2.jdbc.JdbcSQLException: Unique index or primary key violation: // "PRIMARY_KEY_4 ON PUBLIC.JCR_VCAS_TEST(PROPERTY_ID, ORDER_NUM)"; // String err = e . toString ( ) ; if ( dialect . startsWith ( DBConstants . DB_DIALECT_MYSQL ) ) { // for MySQL will search return MYSQL_PK_CONSTRAINT_DETECT . matcher ( err ) . find ( ) ; } else if ( err . toLowerCase ( ) . toUpperCase ( ) . indexOf ( sqlConstraintPK . toLowerCase ( ) . toUpperCase ( ) ) >= 0 ) { // most of supported dbs prints PK name in exception return true ; } else if ( dialect . startsWith ( DBConstants . DB_DIALECT_DB2 ) ) { return DB2_PK_CONSTRAINT_DETECT . matcher ( err ) . find ( ) ; } else if ( dialect . startsWith ( DBConstants . DB_DIALECT_H2 ) ) { return H2_PK_CONSTRAINT_DETECT . matcher ( err ) . find ( ) ; } // NOTICE! As an additional check we may ask the database for property currently processed in // VCAS // and tell true if the property already exists only. return false ; }
Tell is it a RecordAlreadyExistsException .
616
10
15,725
/**
 * Tells whether the content of the node is text, i.e. its data property is
 * not of BINARY type. Repository errors are logged and reported as false.
 */
public boolean isTextContent()
{
   try
   {
      int propertyType = dataProperty().getType();
      return propertyType != PropertyType.BINARY;
   }
   catch (RepositoryException exc)
   {
      LOG.error(exc.getMessage(), exc);
      return false;
   }
}
Checks if the content of the node is text .
56
8
15,726
/**
 * Converts a possibly prefixed string ("prefix:local" or "local") into a
 * QName, resolving the prefix to its namespace URI.
 */
public QName createQName(String strName)
{
   String[] parts = strName.split(":");
   if (parts.length <= 1)
   {
      // no prefix: QName without namespace
      return new QName(parts[0]);
   }
   return new QName(getNamespaceURI(parts[0]), parts[1], parts[0]);
}
Converts String into QName .
74
7
15,727
/**
 * Returns the namespace URI for the given prefix: first from the namespace
 * registry, falling back to the locally known namespaces map; null (after
 * logging) on other repository errors.
 */
public String getNamespaceURI(String prefix)
{
   try
   {
      return namespaceRegistry.getURI(prefix);
   }
   catch (NamespaceException exc)
   {
      return namespaces.get(prefix);
   }
   catch (RepositoryException exc)
   {
      log.error(exc.getMessage(), exc);
      return null;
   }
}
Returns namespace URI .
81
4
15,728
/**
 * Returns the namespace prefix for the given URI: first from the namespace
 * registry, falling back to the locally known prefixes map; null (after
 * logging) on other repository errors.
 */
public String getPrefix(String namespaceURI)
{
   try
   {
      return namespaceRegistry.getPrefix(namespaceURI);
   }
   catch (NamespaceException exc)
   {
      return prefixes.get(namespaceURI);
   }
   catch (RepositoryException exc)
   {
      log.error(exc.getMessage(), exc);
      return null;
   }
}
Returns namespace prefix .
80
4
15,729
/**
 * Returns an iterator over the prefixes registered for this URI — at most
 * the single prefix known to getPrefix().
 */
public Iterator<String> getPrefixes(String namespaceURI)
{
   List<String> prefixList = new ArrayList<String>(1);
   prefixList.add(getPrefix(namespaceURI));
   return prefixList.iterator();
}
Returns the list of registered for this URI namespace prefixes .
52
12
15,730
/**
 * Reads the synonym properties file from the given stream and returns the
 * contents as a synonym map; each key=value pair is registered symmetrically
 * in both directions.
 *
 * @throws IOException if the stream cannot be read or parsed
 */
private static Map<String, String[]> getSynonyms(InputStream config) throws IOException
{
   try
   {
      Map<String, String[]> synonyms = new HashMap<String, String[]>();
      Properties props = new Properties();
      props.load(config);
      for (Map.Entry<Object, Object> e : props.entrySet())
      {
         String key = (String)e.getKey();
         String value = (String)e.getValue();
         // register the pair in both directions
         addSynonym(key, value, synonyms);
         addSynonym(value, key, synonyms);
      }
      return synonyms;
   }
   catch (Exception e)
   {
      throw Util.createIOException(e);
   }
}
Reads the synonym properties file and returns the contents as a synonym Map .
189
17
15,731
/**
 * Adds a synonym definition for the (lower-cased) term to the map, appending
 * to any existing synonym array.
 */
private static void addSynonym(String term, String synonym, Map<String, String[]> synonyms)
{
   String key = term.toLowerCase();
   String[] existing = synonyms.get(key);
   String[] updated;
   if (existing == null)
   {
      updated = new String[]{synonym};
   }
   else
   {
      // grow the array by one slot and append
      updated = new String[existing.length + 1];
      System.arraycopy(existing, 0, updated, 0, existing.length);
      updated[existing.length] = synonym;
   }
   synonyms.put(key, updated);
}
Adds a synonym definition to the map .
138
9
15,732
/**
 * WebDAV UNCHECKOUT implementation: restores the node at the given path to
 * its base version, discarding pending changes.
 *
 * @return 200 on success (with no-cache header), 409 if versioning is not
 *         supported, 423 if locked, 404 if missing, 500 on other errors
 */
public Response uncheckout(Session session, String path)
{
   try
   {
      Node node = session.getRootNode().getNode(TextUtil.relativizePath(path));
      Version restoreVersion = node.getBaseVersion();
      node.restore(restoreVersion, true);
      return Response.ok().header(HttpHeaders.CACHE_CONTROL, "no-cache").build();
   }
   catch (UnsupportedRepositoryOperationException exc)
   {
      return Response.status(HTTPStatus.CONFLICT).entity(exc.getMessage()).build();
   }
   catch (LockException exc)
   {
      return Response.status(HTTPStatus.LOCKED).entity(exc.getMessage()).build();
   }
   catch (PathNotFoundException exc)
   {
      return Response.status(HTTPStatus.NOT_FOUND).entity(exc.getMessage()).build();
   }
   catch (RepositoryException exc)
   {
      LOG.error(exc.getMessage(), exc);
      return Response.serverError().entity(exc.getMessage()).build();
   }
}
Webdav Uncheckout method implementation .
256
9
15,733
/**
 * Checks whether the configuration table already exists, performing the
 * lookup inside a privileged action.
 */
protected boolean isDbInitialized(final Connection con)
{
   PrivilegedAction<Boolean> tableCheck = new PrivilegedAction<Boolean>()
   {
      public Boolean run()
      {
         return JDBCUtils.tableExists(configTableName, con);
      }
   };
   return SecurityHelper.doPrivilegedAction(tableCheck);
}
Check if config table already exists
58
6
15,734
/**
 * Acquires the lock for the given ValueData resource on behalf of the
 * current thread.
 *
 * @return false if the resource was already locked by this same thread (the
 *         lock holder is just added); true if a fresh lock was taken,
 *         possibly after waiting for another thread to release it
 * @throws InterruptedException if interrupted while waiting for the lock
 */
public boolean aquire(final Object resource, final ValueLockSupport lockHolder) throws InterruptedException,
   IOException
{
   final Thread myThread = Thread.currentThread();
   final VDResource res = resources.get(resource);
   if (res != null)
   {
      if (res.addUserLock(myThread, lockHolder)) // resource locked in this thread (by me)
         return false;
      synchronized (res.lock)
      {
         // resource locked by another thread, wait for unlock
         res.lock.wait();
         // new record with existing lock (to respect Object.notify())
         resources.put(resource, new VDResource(myThread, res.lock, lockHolder));
      }
   }
   else
      // not locked yet: register a brand new lock record
      resources.put(resource, new VDResource(myThread, new Object(), lockHolder));
   return true;
}
Aquire ValueData resource .
178
6
15,735
/**
 * Releases the lock the current thread holds on the given resource.
 *
 * @return true if this thread held the lock and it was released (a single
 *         waiter is notified), false otherwise
 */
public boolean release(final Object resource) throws IOException
{
   final Thread myThread = Thread.currentThread();
   final VDResource res = resources.get(resource);
   if (res != null)
   {
      if (res.removeUserLock(myThread))
      {
         synchronized (res.lock)
         {
            // unlock holder
            res.lockSupport.unlock(); // locked by this thread (by me)
            // ...Wakes up a single thread that is waiting on this object's monitor
            res.lock.notify();
            // resources will be reputed with new VDResource in aquire() of another Thread
            resources.remove(resource);
         }
         return true;
      }
   }
   return false;
}
Release resource .
144
3
15,736
/**
 * Returns the cached property data for the given name, or null when no
 * properties are cached or the name is unknown.
 */
public PropertyData getProperty(String name)
{
   if (properties == null)
   {
      return null;
   }
   return properties.get(name);
}
Property data .
25
3
15,737
/**
 * Reads a serialized PersistedPropertyData from the stream in fixed order:
 * class-id marker, qpath, identifier, optional parent identifier, persisted
 * version, type, multi-valued flag, persisted size and an optional value
 * list.
 *
 * @throws UnknownClassIdException if the stream does not start with the
 *         expected class id
 * @throws IOException on read or path-parse errors
 */
public PersistedPropertyData read(ObjectReader in) throws UnknownClassIdException, IOException
{
   // read id
   int key;
   if ((key = in.readInt()) != SerializationConstants.PERSISTED_PROPERTY_DATA)
   {
      throw new UnknownClassIdException("There is unexpected class [" + key + "]");
   }
   QPath qpath;
   try
   {
      String sQPath = in.readString();
      qpath = QPath.parse(sQPath);
   }
   catch (final IllegalPathException e)
   {
      // wrap in IOException while preserving the original cause
      throw new IOException("Deserialization error. " + e)
      {
         /**
          * {@inheritDoc}
          */
         @Override
         public Throwable getCause()
         {
            return e;
         }
      };
   }
   String identifier = in.readString();
   String parentIdentifier = null;
   if (in.readByte() == SerializationConstants.NOT_NULL_DATA)
   {
      parentIdentifier = in.readString();
   }
   int persistedVersion = in.readInt();
   // --------------
   int type = in.readInt();
   boolean multiValued = in.readBoolean();
   PersistedSize persistedSizeHandler = new SimplePersistedSize(in.readLong());
   PersistedPropertyData prop;
   if (in.readByte() == SerializationConstants.NOT_NULL_DATA)
   {
      // values present: read them one by one
      int listSize = in.readInt();
      List<ValueData> values = new ArrayList<ValueData>();
      PersistedValueDataReader rdr = new PersistedValueDataReader(holder, spoolConfig);
      for (int i = 0; i < listSize; i++)
      {
         values.add(rdr.read(in, type));
      }
      prop =
         new PersistedPropertyData(identifier, qpath, parentIdentifier, persistedVersion, type, multiValued,
            values, persistedSizeHandler);
   }
   else
   {
      // no value list was serialized
      prop =
         new PersistedPropertyData(identifier, qpath, parentIdentifier, persistedVersion, type, multiValued,
            null, persistedSizeHandler);
   }
   return prop;
}
Read and set PersistedPropertyData object data .
454
10
15,738
/**
 * Adds the documents to this index and invalidates the shared reader. Each
 * document is added individually; the first failure is remembered and
 * rethrown after all documents have been attempted.
 */
void addDocuments(final Document[] docs) throws IOException
{
   final IndexWriter writer = getIndexWriter();
   IOException ioExc = null;
   try
   {
      for (Document doc : docs)
      {
         try
         {
            writer.addDocument(doc);
         }
         catch (Throwable e) //NOSONAR
         {
            // remember only the first failure, keep adding the rest
            if (ioExc == null)
            {
               if (e instanceof IOException)
               {
                  ioExc = (IOException)e;
               }
               else
               {
                  ioExc = Util.createIOException(e);
               }
            }
            log.warn("Exception while inverting document", e);
         }
      }
   }
   finally
   {
      // readers must be refreshed regardless of the outcome
      invalidateSharedReader();
   }
   if (ioExc != null)
   {
      throw ioExc;
   }
}
Adds documents to this index and invalidates the shared reader .
152
12
15,739
/**
 * Closes this index, releasing the writer, all readers and the underlying
 * directory. A close failure on the directory is swallowed by nulling the
 * reference, matching the original behavior.
 */
synchronized void close()
{
   releaseWriterAndReaders();
   if (directory == null)
   {
      return;
   }
   try
   {
      directory.close();
   }
   catch (IOException e)
   {
      directory = null;
   }
}
Closes this index releasing all held resources .
46
9
15,740
/**
 * Releases the index writer and every reader instance that may still be
 * held. Close/release failures are logged but never propagated, and each
 * reference is nulled regardless of the outcome.
 */
protected void releaseWriterAndReaders()
{
   if (indexWriter != null)
   {
      try
      {
         indexWriter.close();
      }
      catch (IOException e)
      {
         log.warn("Exception closing index writer: " + e.toString());
      }
      indexWriter = null;
   }
   if (indexReader != null)
   {
      try
      {
         indexReader.close();
      }
      catch (IOException e)
      {
         log.warn("Exception closing index reader: " + e.toString());
      }
      indexReader = null;
   }
   // read-only and shared readers are release()'d, not close()'d
   if (readOnlyReader != null)
   {
      try
      {
         readOnlyReader.release();
      }
      catch (IOException e)
      {
         log.warn("Exception closing index reader: " + e.toString());
      }
      readOnlyReader = null;
   }
   if (sharedReader != null)
   {
      try
      {
         sharedReader.release();
      }
      catch (IOException e)
      {
         log.warn("Exception closing index reader: " + e.toString());
      }
      sharedReader = null;
   }
}
Releases all potentially held index writer and readers .
222
10
15,741
/**
 * Closes the read-only reader and invalidates the shared reader so that the
 * next access re-opens them.
 *
 * @throws IOException if releasing a reader fails
 */
protected synchronized void invalidateSharedReader() throws IOException
{
   // also close the read-only reader
   if (readOnlyReader != null)
   {
      readOnlyReader.release();
      readOnlyReader = null;
   }
   // invalidate shared reader
   if (sharedReader != null)
   {
      sharedReader.release();
      sharedReader = null;
   }
}
Closes the shared reader .
73
6
15,742
/**
 * Returns the query language ("sql" or "xpath") declared by the first child
 * element of the request body.
 *
 * @throws UnsupportedQueryException if the element is neither DAV:sql nor
 *         DAV:xpath
 */
public String getQueryLanguage() throws UnsupportedQueryException
{
   String ns = body.getChild(0).getName().getNamespaceURI();
   String local = body.getChild(0).getName().getLocalPart();
   if ("DAV:".equals(ns))
   {
      if ("sql".equals(local))
      {
         return "sql";
      }
      if ("xpath".equals(local))
      {
         return "xpath";
      }
   }
   // BUG FIX: throw the declared UnsupportedQueryException (consistent with
   // getQuery()) instead of an undeclared UnsupportedOperationException
   throw new UnsupportedQueryException();
}
Get query language .
152
4
15,743
/**
 * Returns the query string from the request body, i.e. the text value
 * of the DAV:sql or DAV:xpath child element.
 *
 * @return the query text
 * @throws UnsupportedQueryException if the first body child is neither
 *         DAV:sql nor DAV:xpath
 */
public String getQuery() throws UnsupportedQueryException
{
   String namespace = body.getChild(0).getName().getNamespaceURI();
   String localName = body.getChild(0).getName().getLocalPart();
   // both branches of the original returned the identical expression,
   // so the duplicated if/else is collapsed into one condition
   if ("DAV:".equals(namespace) && ("sql".equals(localName) || "xpath".equals(localName)))
   {
      return body.getChild(0).getValue();
   }
   throw new UnsupportedQueryException();
}
Get query .
168
3
15,744
/**
 * Switches the indexer to the given IO mode and notifies every
 * registered listener of the change. Does nothing when the requested
 * mode is already active.
 */
public synchronized void setMode(IndexerIoMode mode)
{
   if (this.mode == mode)
   {
      return;
   }
   log.info("Indexer io mode=" + mode);
   this.mode = mode;
   for (IndexerIoModeListener listener : listeners)
   {
      listener.onChangeMode(mode);
   }
}
Changes the current mode of the indexer . If the value has changes all the listeners will be notified
68
20
15,745
/**
 * Reads the index infos from the directory inside a privileged action.
 * The directory is listed first to force a refresh of stale metadata
 * (known issue for NFS on ext3), the in-memory name lists are cleared,
 * and — only when the infos file exists — the counter and the index
 * names are read from it.
 *
 * @throws IOException if reading the infos file fails
 */
public void read ( ) throws IOException { SecurityHelper . doPrivilegedIOExceptionAction ( new PrivilegedExceptionAction < Object > ( ) { public Object run ( ) throws Exception { // Known issue for NFS bases on ext3. Need to refresh directory to read actual data. dir . listAll ( ) ; names . clear ( ) ; indexes . clear ( ) ; if ( dir . fileExists ( name ) ) { // clear current lists InputStream in = new IndexInputStream ( dir . openInput ( name ) ) ; DataInputStream di = null ; try { di = new DataInputStream ( in ) ; counter = di . readInt ( ) ; for ( int i = di . readInt ( ) ; i > 0 ; i -- ) { String indexName = di . readUTF ( ) ; indexes . add ( indexName ) ; names . add ( indexName ) ; } } finally { if ( di != null ) di . close ( ) ; in . close ( ) ; } } return null ; } } ) ; }
Reads the index infos . Before reading it checks if file exists
218
14
15,746
/**
 * Persists the index infos when they are dirty: writes the counter and
 * all names to {@code name + ".new"}, deletes the old infos file,
 * renames the new one into place and clears the dirty flag. No-op when
 * not dirty. Runs inside a privileged action.
 *
 * @throws IOException if writing or renaming fails
 */
public void write ( ) throws IOException { SecurityHelper . doPrivilegedIOExceptionAction ( new PrivilegedExceptionAction < Object > ( ) { public Object run ( ) throws Exception { // do not write if not dirty if ( ! dirty ) { return null ; } OutputStream out = new IndexOutputStream ( dir . createOutput ( name + ".new" ) ) ; DataOutputStream dataOut = null ; try { dataOut = new DataOutputStream ( out ) ; dataOut . writeInt ( counter ) ; dataOut . writeInt ( indexes . size ( ) ) ; for ( int i = 0 ; i < indexes . size ( ) ; i ++ ) { dataOut . writeUTF ( getName ( i ) ) ; } } finally { if ( dataOut != null ) dataOut . close ( ) ; out . close ( ) ; } // delete old if ( dir . fileExists ( name ) ) { dir . deleteFile ( name ) ; } rename ( name + ".new" , name ) ; dirty = false ; return null ; } } ) ; }
Writes the index infos to disk if they are dirty .
224
13
15,747
/**
 * "Renames" a file by copying its contents from {@code from} to
 * {@code to} and then deleting the source. A failure to delete the
 * source is only traced, not rethrown — it will be removed later.
 *
 * @param from source file name within the directory
 * @param to destination file name within the directory
 * @throws IOException if the copy itself fails
 */
private void rename ( String from , String to ) throws IOException { IndexOutputStream out = null ; IndexInputStream in = null ; try { out = new IndexOutputStream ( dir . createOutput ( to ) ) ; in = new IndexInputStream ( dir . openInput ( from ) ) ; DirectoryHelper . transfer ( in , out ) ; } finally { if ( in != null ) { in . close ( ) ; } if ( out != null ) { out . flush ( ) ; out . close ( ) ; } } try { // delete old one if ( dir . fileExists ( from ) ) { dir . deleteFile ( from ) ; } } catch ( IOException e ) { // do noting. Will be removed later if ( LOG . isTraceEnabled ( ) ) { LOG . trace ( "Can't deleted file: " + e . getMessage ( ) ) ; } } }
Renames file by copying .
188
6
15,748
/**
 * Registers a new index name in the infos and marks them dirty.
 *
 * @param name the index name to register
 * @throws IllegalArgumentException if the name is already registered
 */
public void addName(String name)
{
   boolean alreadyKnown = names.contains(name);
   if (alreadyKnown)
   {
      throw new IllegalArgumentException("already contains: " + name);
   }
   names.add(name);
   indexes.add(name);
   dirty = true;
}
Adds a name to the index infos .
56
9
15,749
protected void setNames ( Set < String > names ) { this . names . clear ( ) ; this . indexes . clear ( ) ; this . names . addAll ( names ) ; this . indexes . addAll ( names ) ; // new list of indexes if thought to be up to date dirty = false ; }
Sets new names clearing existing . It is thought to be used when list of indexes can be externally changed .
65
22
15,750
/**
 * Grants every permission type in {@code perm} to the given identity by
 * appending one access control entry per permission.
 *
 * @param identity the identity receiving the permissions
 * @param perm the permission types to grant
 */
public void addPermissions(String identity, String[] perm) throws RepositoryException
{
   for (int i = 0; i < perm.length; i++)
   {
      accessList.add(new AccessControlEntry(identity, perm[i]));
   }
}
Adds a set of permission types to a given identity
45
10
15,751
/**
 * Drops every access control entry belonging to the given identity,
 * leaving entries of other identities untouched.
 */
public void removePermissions(String identity)
{
   Iterator<AccessControlEntry> entries = accessList.iterator();
   while (entries.hasNext())
   {
      if (entries.next().getIdentity().equals(identity))
      {
         entries.remove();
      }
   }
}
Removes all the permissions of a given identity
71
9
15,752
/**
 * Returns a defensive copy of all access control entries: a new list
 * holding a new entry per element, so callers cannot mutate the
 * internal state.
 */
public List<AccessControlEntry> getPermissionEntries()
{
   List<AccessControlEntry> copy = new ArrayList<AccessControlEntry>(accessList.size());
   for (AccessControlEntry entry : accessList)
   {
      copy.add(new AccessControlEntry(entry.getIdentity(), entry.getPermission()));
   }
   return copy;
}
Gives all the permission entries
103
6
15,753
/**
 * Returns all currently set location step nodes as an array, or the
 * shared empty array when no operands have been added yet.
 */
public LocationStepQueryNode[] getPathSteps()
{
   return operands == null
      ? EMPTY
      : (LocationStepQueryNode[])operands.toArray(new LocationStepQueryNode[operands.size()]);
}
Returns an array of all currently set location step nodes .
61
11
15,754
/**
 * Returns true when recipientDefinition contains the residual
 * (Constants.JCR_ANY_NAME) definition and no definition named exactly
 * {@code itemName}; an exact name match always wins and yields false.
 */
protected boolean isResidualMatch(InternalQName itemName, T[] recipientDefinition)
{
   boolean residualSeen = false;
   for (T definition : recipientDefinition)
   {
      if (itemName.equals(definition.getName()))
      {
         // exact match takes precedence over the residual definition
         return false;
      }
      if (Constants.JCR_ANY_NAME.equals(definition.getName()))
      {
         residualSeen = true;
      }
   }
   return residualSeen;
}
Return true if recipientDefinition contains Constants . JCR_ANY_NAME and doesn t contain definition with name itemName .
111
25
15,755
/**
 * Creates (but does not start) a thread that asks the reindexable
 * component for the workspace node count and stores it in the
 * nodesCount field; a null component or null count is ignored, and
 * repository errors are logged rather than rethrown.
 */
private Thread createThreadFindNodesCount ( final Reindexable reindexableComponent ) { return new Thread ( "Nodes count(" + handler . getContext ( ) . getWorkspaceName ( ) + ")" ) { public void run ( ) { try { if ( reindexableComponent != null ) { Long value = reindexableComponent . getNodesCount ( ) ; if ( value != null ) { nodesCount = new AtomicLong ( value ) ; } } } catch ( RepositoryException e ) { LOG . error ( "Can't calculate nodes count : " + e . getMessage ( ) ) ; } } } ; }
Creates a thread that determines the total number of nodes .
134
7
15,756
/**
 * Returns the total number of documents in this index. When no
 * persistent indexes exist yet only the volatile index is consulted;
 * otherwise a multi reader over all indexes is opened and released
 * after use.
 */
int numDocs() throws IOException
{
   if (indexNames.size() > 0)
   {
      CachingMultiIndexReader reader = getIndexReader();
      try
      {
         return reader.numDocs();
      }
      finally
      {
         reader.release();
      }
   }
   return volatileIndex.getNumDocuments();
}
Returns the number of documents in this index .
71
9
15,757
/**
 * Recreates the index at runtime by traversing and indexing the whole
 * workspace inside a logged internal transaction. Requires the index to
 * be open and OFFLINE. Uses the paged RDBMS iterator when re-indexing
 * from the database is enabled and supported, otherwise walks the item
 * tree from the indexing root; finally commits and releases the multi
 * reader.
 *
 * @throws IllegalStateException when the index is closed or still online
 */
public void reindex ( ItemDataConsumer stateMgr ) throws IOException , RepositoryException { if ( stopped . get ( ) ) { throw new IllegalStateException ( "Can't invoke reindexing on closed index." ) ; } if ( online . get ( ) ) { throw new IllegalStateException ( "Can't invoke reindexing while index still online." ) ; } // traverse and index workspace executeAndLog ( new Start ( Action . INTERNAL_TRANSACTION ) ) ; long count ; // check if we have deal with RDBMS reindexing mechanism Reindexable rdbmsReindexableComponent = ( Reindexable ) handler . getContext ( ) . getContainer ( ) . getComponent ( Reindexable . class ) ; if ( handler . isRDBMSReindexing ( ) && rdbmsReindexableComponent != null && rdbmsReindexableComponent . isReindexingSupported ( ) ) { count = createIndex ( rdbmsReindexableComponent . getNodeDataIndexingIterator ( handler . getReindexingPageSize ( ) ) , indexingTree . getIndexingRoot ( ) ) ; } else { count = createIndex ( indexingTree . getIndexingRoot ( ) , stateMgr ) ; } executeAndLog ( new Commit ( getTransactionId ( ) ) ) ; LOG . info ( "Created initial index for {} nodes" , new Long ( count ) ) ; releaseMultiReader ( ) ; }
Recreates index by reindexing in runtime .
310
11
15,758
/**
 * Atomically updates the index by removing and adding documents,
 * dispatching on the current state: the offline path while the index is
 * not online, the read-write path (with redo logging) when this node is
 * the READ_WRITE one and a redo log exists, and the read-only path
 * otherwise.
 */
synchronized void update ( final Collection < String > remove , final Collection < Document > add ) throws IOException { if ( ! online . get ( ) ) { doUpdateOffline ( remove , add ) ; } else if ( modeHandler . getMode ( ) == IndexerIoMode . READ_WRITE && redoLog != null ) { doUpdateRW ( remove , add ) ; } else { doUpdateRO ( remove , add ) ; } }
Atomically updates the index by removing some documents and adding others .
96
14
15,759
/**
 * Read-write update path: within a redo-logged transaction, deletes the
 * given node ids and adds the given documents, committing the volatile
 * index — and then flushing the whole index — when it grows past the
 * configured size. For large batches the multi reader is pre-released
 * so queries can proceed during the update. The update-in-progress flag
 * is set before the transaction and always cleared (with waiters
 * notified and the reader released) in the finally block.
 */
private void doUpdateRW ( final Collection < String > remove , final Collection < Document > add ) throws IOException { // make sure a reader is available during long updates if ( add . size ( ) > handler . getBufferSize ( ) ) { try { releaseMultiReader ( ) ; } catch ( IOException e ) { // do not fail if an exception is thrown here LOG . warn ( "unable to prepare index reader " + "for queries during update" , e ) ; } } synchronized ( updateMonitor ) { //updateInProgress = true; indexUpdateMonitor . setUpdateInProgress ( true , false ) ; } boolean flush = false ; try { long transactionId = nextTransactionId ++ ; executeAndLog ( new Start ( transactionId ) ) ; for ( Iterator < String > it = remove . iterator ( ) ; it . hasNext ( ) ; ) { executeAndLog ( new DeleteNode ( transactionId , it . next ( ) ) ) ; } for ( Iterator < Document > it = add . iterator ( ) ; it . hasNext ( ) ; ) { Document doc = it . next ( ) ; if ( doc != null ) { executeAndLog ( new AddNode ( transactionId , doc ) ) ; // commit volatile index if needed flush |= checkVolatileCommit ( ) ; } } executeAndLog ( new Commit ( transactionId ) ) ; // flush whole index when volatile index has been commited. if ( flush ) { // if we are going to flush, need to set persistent update synchronized ( updateMonitor ) { indexUpdateMonitor . setUpdateInProgress ( true , true ) ; } flush ( ) ; } } finally { synchronized ( updateMonitor ) { //updateInProgress = false; indexUpdateMonitor . setUpdateInProgress ( false , flush ) ; updateMonitor . notifyAll ( ) ; releaseMultiReader ( ) ; } } }
Updates the index in read - write mode , logging the removals and additions to the redo log and flushing when the volatile index is committed .
391
4
15,760
/**
 * Offline update path used while re-indexing is in progress: applies
 * removals (by UUID term) and additions directly to the offline
 * persistent index, committing it whenever its RAM size reaches the
 * configured maximum volatile index size. Runs inside a privileged
 * action.
 */
private void doUpdateOffline ( final Collection < String > remove , final Collection < Document > add ) throws IOException { SecurityHelper . doPrivilegedIOExceptionAction ( new PrivilegedExceptionAction < Object > ( ) { public Object run ( ) throws Exception { for ( Iterator < String > it = remove . iterator ( ) ; it . hasNext ( ) ; ) { Term idTerm = new Term ( FieldNames . UUID , it . next ( ) ) ; offlineIndex . removeDocument ( idTerm ) ; } for ( Iterator < Document > it = add . iterator ( ) ; it . hasNext ( ) ; ) { Document doc = it . next ( ) ; if ( doc != null ) { offlineIndex . addDocuments ( new Document [ ] { doc } ) ; // reset volatile index if needed if ( offlineIndex . getRamSizeInBytes ( ) >= handler . getMaxVolatileIndexSize ( ) ) { offlineIndex . commit ( ) ; } } } return null ; } } ) ; }
Performs indexing while re - indexing is in progress
212
12
15,761
/**
 * Adds a single document to the index by delegating to update() with an
 * empty removal list and a one-element addition list.
 */
void addDocument(Document doc) throws IOException
{
   List<String> nothingToRemove = Collections.emptyList();
   update(nothingToRemove, Collections.singletonList(doc));
}
Adds a document to the index .
40
7
15,762
/**
 * Lazily creates, configures and starts the index merger, seeding it
 * with the name and document count of every existing persistent index.
 * Does nothing when a merger is already running.
 */
private void initMerger() throws IOException
{
   if (merger != null)
   {
      return;
   }
   merger = new IndexMerger(this);
   merger.setMaxMergeDocs(handler.getMaxMergeDocs());
   merger.setMergeFactor(handler.getMergeFactor());
   merger.setMinMergeDocs(handler.getMinMergeDocs());
   for (Object index : indexes)
   {
      PersistentIndex persistent = (PersistentIndex)index;
      merger.indexAdded(persistent.getName(), persistent.getNumDocuments());
   }
   merger.start();
}
Initialize IndexMerger .
139
6
15,763
/**
 * (Re)schedules the periodic flush check: cancels any previous task,
 * purges the timer of cancelled tasks, registers a fresh TimerTask
 * (a cancelled task cannot be re-scheduled) running checkFlush() every
 * second, and resets both flush timestamps to now.
 */
private void scheduleFlushTask ( ) { // cancel task if ( flushTask != null ) { flushTask . cancel ( ) ; } // clear canceled tasks FLUSH_TIMER . purge ( ) ; // new flush task, cause canceled can't be re-used flushTask = new TimerTask ( ) { @ Override public void run ( ) { // check if volatile index should be flushed checkFlush ( ) ; } } ; FLUSH_TIMER . schedule ( flushTask , 0 , 1000 ) ; lastFlushTime = System . currentTimeMillis ( ) ; lastFileSystemFlushTime = System . currentTimeMillis ( ) ; }
Cancel flush task and add new one
140
8
15,764
/**
 * Replaces the volatile index with a fresh, empty instance configured
 * from the handler: analyzer, similarity, compound-file usage, maximum
 * field length and buffer size.
 */
private void resetVolatileIndex ( ) throws IOException { volatileIndex = new VolatileIndex ( handler . getTextAnalyzer ( ) , handler . getSimilarity ( ) ) ; volatileIndex . setUseCompoundFile ( handler . getUseCompoundFile ( ) ) ; volatileIndex . setMaxFieldLength ( handler . getMaxFieldLength ( ) ) ; volatileIndex . setBufferSize ( handler . getBufferSize ( ) ) ; }
Resets the volatile index to a new instance .
93
10
15,765
/**
 * Commits the volatile index into a new persistent index. Through the
 * redo log it creates the index, commits the volatile contents into it
 * and adds it to the index list, then resets the volatile index to a
 * fresh instance. No-op when the volatile index holds no documents.
 * Timing is logged at debug level.
 */
private void commitVolatileIndex ( ) throws IOException { // check if volatile index contains documents at all if ( volatileIndex . getNumDocuments ( ) > 0 ) { long time = 0 ; if ( LOG . isDebugEnabled ( ) ) { time = System . currentTimeMillis ( ) ; } // create index CreateIndex create = new CreateIndex ( getTransactionId ( ) , null ) ; executeAndLog ( create ) ; // commit volatile index executeAndLog ( new VolatileCommit ( getTransactionId ( ) , create . getIndexName ( ) ) ) ; // add new index AddIndex add = new AddIndex ( getTransactionId ( ) , create . getIndexName ( ) ) ; executeAndLog ( add ) ; // create new volatile index resetVolatileIndex ( ) ; if ( LOG . isDebugEnabled ( ) ) { time = System . currentTimeMillis ( ) - time ; LOG . debug ( "Committed in-memory index in " + time + "ms." ) ; } } }
Commits the volatile index to a persistent index . The new persistent index is added to the list of indexes but not written to disk . When this method returns a new volatile index has been created .
214
39
15,766
/**
 * Deletes the legacy "deletable" file when present; the file is no
 * longer used by Jackrabbit versions >= 1.5. A failure is logged as a
 * warning, never rethrown.
 */
private void removeDeletable()
{
   final String fileName = "deletable";
   try
   {
      if (!indexDir.fileExists(fileName))
      {
         return;
      }
      indexDir.deleteFile(fileName);
   }
   catch (IOException e)
   {
      LOG.warn("Unable to remove file 'deletable'.", e);
   }
}
Removes the deletable file if it exists . The file is not used anymore in Jackrabbit versions > = 1 . 5 .
75
27
15,767
/**
 * Switches this index to READ_ONLY: disposes the merger (stopping it
 * safely), cancels the pending flush task, purges the flush timer and
 * discards the redo log.
 */
protected void setReadOnly ( ) { // try to stop merger in safe way if ( merger != null ) { merger . dispose ( ) ; merger = null ; } if ( flushTask != null ) { flushTask . cancel ( ) ; } FLUSH_TIMER . purge ( ) ; this . redoLog = null ; }
Sets mode to READ_ONLY discarding flush task
70
12
15,768
/**
 * Switches this index to READ_WRITE: releases current waiters and the
 * multi reader, re-opens the redo log and runs recovery from it,
 * enqueues unused segments for deletion, starts the merger and — when
 * recovery applied redo entries — waits for merging to go idle and
 * flushes. The periodic flush task is scheduled only when persistent
 * indexes exist.
 */
protected void setReadWrite ( ) throws IOException { // Release all the current threads synchronized ( updateMonitor ) { indexUpdateMonitor . setUpdateInProgress ( false , true ) ; updateMonitor . notifyAll ( ) ; releaseMultiReader ( ) ; } this . redoLog = new RedoLog ( indexDir ) ; redoLogApplied = redoLog . hasEntries ( ) ; // run recovery Recovery . run ( this , redoLog ) ; // enqueue unused segments for deletion enqueueUnusedSegments ( ) ; attemptDelete ( ) ; // now that we are ready, start index merger initMerger ( ) ; if ( redoLogApplied ) { // wait for the index merge to finish pending jobs try { merger . waitUntilIdle ( ) ; } catch ( InterruptedException e ) { // move on } flush ( ) ; } if ( indexNames . size ( ) > 0 ) { scheduleFlushTask ( ) ; } }
Sets mode to READ_WRITE initiating recovery process
203
11
15,769
/**
 * Synchronizes the in-memory index list with the names persisted in
 * IndexInfos after an external change: releases the multi reader,
 * closes and drops segments no longer listed, releases the writers and
 * readers of surviving segments so the latest on-disk changes become
 * visible, opens newly appeared segments (skipping names whose
 * directory no longer exists), and finally resets the volatile index.
 */
public void refreshIndexList ( ) throws IOException { synchronized ( updateMonitor ) { // release reader if any releaseMultiReader ( ) ; // prepare added/removed sets Set < String > newList = new HashSet < String > ( indexNames . getNames ( ) ) ; // remove removed indexes Iterator < PersistentIndex > iterator = indexes . iterator ( ) ; while ( iterator . hasNext ( ) ) { PersistentIndex index = iterator . next ( ) ; String name = index . getName ( ) ; // if current index not in new list, close it, cause it is deleted. if ( ! newList . contains ( name ) ) { index . close ( ) ; iterator . remove ( ) ; } else { // remove from list, cause this segment of index still present and indexes list contains // PersistentIndex instance related to this index.. newList . remove ( name ) ; // Release everything to make sure that we see the // latest changes index . releaseWriterAndReaders ( ) ; } } // now newList contains ONLY new, added indexes, deleted indexes, are removed from list. for ( String name : newList ) { // only open if it still exists // it is possible that indexNames still contains a name for // an index that has been deleted, but indexNames has not been // written to disk. if ( ! directoryManager . hasDirectory ( name ) ) { LOG . debug ( "index does not exist anymore: " + name ) ; // move on to next index continue ; } PersistentIndex index = new PersistentIndex ( name , handler . getTextAnalyzer ( ) , handler . getSimilarity ( ) , cache , directoryManager , modeHandler ) ; index . setMaxFieldLength ( handler . getMaxFieldLength ( ) ) ; index . setUseCompoundFile ( handler . getUseCompoundFile ( ) ) ; index . setTermInfosIndexDivisor ( handler . getTermInfosIndexDivisor ( ) ) ; indexes . add ( index ) ; } // Reset the volatile index to be exactly like the master resetVolatileIndex ( ) ; } }
Refresh list of indexes . Used to be called asynchronously when list changes . New actual list is read from IndexInfos .
442
27
15,770
/**
 * Switches the index between ONLINE and OFFLINE. Going online on the
 * READ_WRITE node commits the offline index, deletes stale segments,
 * merges the offline-collected changes back in and optionally restarts
 * the merger; other nodes just flip the flag. Going offline optionally
 * disposes the merger, creates a fresh offline persistent index,
 * flushes (READ_WRITE only), releases the multi reader and optionally
 * marks the current segments stale. Requesting OFFLINE while already
 * OFFLINE throws an IOException; requesting ONLINE while already ONLINE
 * is a no-op.
 */
public synchronized void setOnline ( boolean isOnline , boolean dropStaleIndexes , boolean initMerger ) throws IOException { // if mode really changed if ( online . get ( ) != isOnline ) { // switching to ONLINE if ( isOnline ) { LOG . info ( "Setting index ONLINE ({})" , handler . getContext ( ) . getWorkspacePath ( true ) ) ; if ( modeHandler . getMode ( ) == IndexerIoMode . READ_WRITE ) { offlineIndex . commit ( true ) ; online . set ( true ) ; // cleaning stale indexes for ( PersistentIndex staleIndex : staleIndexes ) { deleteIndex ( staleIndex ) ; } //invoking offline index invokeOfflineIndex ( ) ; staleIndexes . clear ( ) ; if ( initMerger ) { initMerger ( ) ; } } else { online . set ( true ) ; staleIndexes . clear ( ) ; } } // switching to OFFLINE else { LOG . info ( "Setting index OFFLINE ({})" , handler . getContext ( ) . getWorkspacePath ( true ) ) ; if ( initMerger && merger != null ) { merger . dispose ( ) ; merger = null ; } offlineIndex = new OfflinePersistentIndex ( handler . getTextAnalyzer ( ) , handler . getSimilarity ( ) , cache , directoryManager , modeHandler ) ; if ( modeHandler . getMode ( ) == IndexerIoMode . READ_WRITE ) { flush ( ) ; } releaseMultiReader ( ) ; if ( dropStaleIndexes ) { staleIndexes . addAll ( indexes ) ; } online . set ( false ) ; } } else if ( ! online . get ( ) ) { throw new IOException ( "Index is already in OFFLINE mode." ) ; } }
Switches index mode
385
4
15,771
/**
 * Retrieves a full index copy from the coordinator node: when the
 * coordinator index is ready it is switched offline, every listed index
 * file is streamed into the local index directory (creating parent
 * directories as needed), and the coordinator index is switched back
 * online. On failure the local index directory is removed and false is
 * returned so the index will be rebuilt by re-indexing.
 *
 * @return true when the transfer completed, false when the index must
 *         be created from indexing instead
 */
private boolean recoveryIndexFromCoordinator ( ) throws IOException { File indexDirectory = new File ( handler . getContext ( ) . getIndexDirectory ( ) ) ; try { IndexRecovery indexRecovery = handler . getContext ( ) . getIndexRecovery ( ) ; // check if index not ready if ( ! indexRecovery . checkIndexReady ( ) ) { return false ; } //Switch index offline indexRecovery . setIndexOffline ( ) ; for ( String filePath : indexRecovery . getIndexList ( ) ) { File indexFile = new File ( indexDirectory , filePath ) ; if ( ! PrivilegedFileHelper . exists ( indexFile . getParentFile ( ) ) ) { PrivilegedFileHelper . mkdirs ( indexFile . getParentFile ( ) ) ; } // transfer file InputStream in = indexRecovery . getIndexFile ( filePath ) ; OutputStream out = PrivilegedFileHelper . fileOutputStream ( indexFile ) ; try { DirectoryHelper . transfer ( in , out ) ; } finally { DirectoryHelper . safeClose ( in ) ; DirectoryHelper . safeClose ( out ) ; } } //Switch index online indexRecovery . setIndexOnline ( ) ; return true ; } catch ( RepositoryException e ) { LOG . error ( "Cannot retrieve the indexes from the coordinator, the indexes will then be created from indexing" , e ) ; } catch ( IOException e ) { LOG . error ( "Cannot retrieve the indexes from the coordinator, the indexes will then be created from indexing" , e ) ; } LOG . info ( "Clean up index directory " + indexDirectory . getAbsolutePath ( ) ) ; DirectoryHelper . removeDirectory ( indexDirectory ) ; return false ; }
Retrieves index from other node .
366
8
15,772
/**
 * Retrieves the index from the coordinator node via an external rsync
 * job, optionally switching the coordinator index offline around the
 * transfer when the RSync configuration demands it (restored in a
 * finally block). On failure the local index directory is cleaned up
 * and false is returned so a full re-index can proceed.
 *
 * @return true when the rsync transfer completed, false otherwise
 */
private boolean rsyncRecoveryIndexFromCoordinator ( ) throws IOException { File indexDirectory = new File ( handler . getContext ( ) . getIndexDirectory ( ) ) ; RSyncConfiguration rSyncConfiguration = handler . getRsyncConfiguration ( ) ; try { IndexRecovery indexRecovery = handler . getContext ( ) . getIndexRecovery ( ) ; // check if index not ready if ( ! indexRecovery . checkIndexReady ( ) ) { return false ; } try { if ( rSyncConfiguration . isRsyncOffline ( ) ) { //Switch index offline indexRecovery . setIndexOffline ( ) ; } String indexPath = handler . getContext ( ) . getIndexDirectory ( ) ; String urlFormatString = rSyncConfiguration . generateRsyncSource ( indexPath ) ; RSyncJob rSyncJob = new RSyncJob ( String . format ( urlFormatString , indexRecovery . getCoordinatorAddress ( ) ) , indexPath , rSyncConfiguration . getRsyncUserName ( ) , rSyncConfiguration . getRsyncPassword ( ) , OfflinePersistentIndex . NAME ) ; rSyncJob . execute ( ) ; } finally { if ( rSyncConfiguration . isRsyncOffline ( ) ) { //Switch index online indexRecovery . setIndexOnline ( ) ; } } //recovery finish correctly return true ; } catch ( RepositoryException e ) { LOG . error ( "Cannot retrieve the indexes from the coordinator, the indexes will then be created from indexing" , e ) ; } catch ( RepositoryConfigurationException e ) { LOG . error ( "Cannot retrieve the indexes from the coordinator, the indexes will then be created from indexing" , e ) ; } LOG . info ( "Clean up index directory " + indexDirectory . getAbsolutePath ( ) ) ; DirectoryHelper . removeDirectory ( indexDirectory ) ; return false ; }
Retrieves index from other node using rsync server .
396
12
15,773
/**
 * Reports whether any persistent index has pending deletions. Every
 * index writer is obtained even after a deletion has been found (no
 * short circuit), preserving the original behavior of touching each
 * index's writer.
 */
public boolean hasDeletions() throws CorruptIndexException, IOException
{
   boolean anyDeleted = false;
   for (PersistentIndex index : indexes)
   {
      if (index.getIndexWriter().hasDeletions())
      {
         anyDeleted = true;
      }
   }
   return anyDeleted;
}
Checks if index has deletions .
61
8
15,774
/**
 * Builds a default excerpt: wraps up to {@code maxLength} characters of
 * the text in the given excerpt/fragment start and end markers. When
 * the text was cut, it is truncated back to the last whitespace and
 * " ..." is appended; illegal XML characters are encoded.
 * NOTE(review): relies on a single {@code StringReader.read(char[])}
 * call filling the buffer, which StringReader guarantees when enough
 * characters remain — confirm if the reader type ever changes.
 */
protected String createDefaultExcerpt ( String text , String excerptStart , String excerptEnd , String fragmentStart , String fragmentEnd , int maxLength ) throws IOException { StringReader reader = new StringReader ( text ) ; StringBuilder excerpt = new StringBuilder ( excerptStart ) ; excerpt . append ( fragmentStart ) ; if ( ! text . isEmpty ( ) ) { int min = excerpt . length ( ) ; char [ ] buf = new char [ maxLength ] ; int len = reader . read ( buf ) ; StringBuilder tmp = new StringBuilder ( ) ; tmp . append ( buf , 0 , len ) ; if ( len == buf . length ) { for ( int i = tmp . length ( ) - 1 ; i > min ; i -- ) { if ( Character . isWhitespace ( tmp . charAt ( i ) ) ) { tmp . delete ( i , tmp . length ( ) ) ; tmp . append ( " ..." ) ; break ; } } } excerpt . append ( Text . encodeIllegalXMLCharacters ( tmp . toString ( ) ) ) ; } excerpt . append ( fragmentEnd ) . append ( excerptEnd ) ; return excerpt . toString ( ) ; }
Creates a default excerpt with the given text .
249
10
15,775
/**
 * Puts the item into cache C under two keys — its identifier and its
 * (parentId, path, item type) triple — each entry expiring at
 * now + liveTime.
 */
protected void putItem ( final ItemData data ) { cache . put ( new CacheId ( data . getIdentifier ( ) ) , new CacheValue ( data , System . currentTimeMillis ( ) + liveTime ) ) ; cache . put ( new CacheQPath ( data . getParentIdentifier ( ) , data . getQPath ( ) , ItemType . getItemType ( data ) ) , new CacheValue ( data , System . currentTimeMillis ( ) + liveTime ) ) ; }
Put item in cache C .
106
6
15,776
/**
 * Looks up an item in cache C by identifier, maintaining hit/miss
 * counters and total lookup time. A non-expired hit is returned
 * directly. An expired entry is removed under the write lock — both the
 * id key and the (parentId, path, type) key, plus any cached child node
 * and property lists when the item is a node — and the call counts as a
 * miss.
 */
protected ItemData getItem ( final String identifier ) { long start = System . currentTimeMillis ( ) ; try { final CacheId k = new CacheId ( identifier ) ; final CacheValue v = cache . get ( k ) ; if ( v != null ) { final ItemData c = v . getItem ( ) ; if ( v . getExpiredTime ( ) > System . currentTimeMillis ( ) ) { // check if wasn't removed if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( name + ", getItem() " + identifier + " --> " + ( c != null ? c . getQPath ( ) . getAsString ( ) + " parent:" + c . getParentIdentifier ( ) : "[null]" ) ) ; } hits . incrementAndGet ( ) ; return c ; } // remove expired writeLock . lock ( ) ; try { cache . remove ( k ) ; // remove by parentId + path cache . remove ( new CacheQPath ( c . getParentIdentifier ( ) , c . getQPath ( ) , ItemType . getItemType ( c ) ) ) ; // remove cached child lists if ( c . isNode ( ) ) { nodesCache . remove ( c . getIdentifier ( ) ) ; propertiesCache . remove ( c . getIdentifier ( ) ) ; } } finally { writeLock . unlock ( ) ; } } miss . incrementAndGet ( ) ; return null ; } finally { totalGetTime += System . currentTimeMillis ( ) - start ; } }
Get item from cache C by item id . Checks is it expired calcs statistics .
326
17
15,777
/**
 * Updates the lifetime applied to newly cached items, under the write
 * lock. Items already in the cache keep their original expiration.
 */
public void setLiveTime(long liveTime)
{
   writeLock.lock();
   try
   {
      this.liveTime = liveTime;
   }
   finally
   {
      writeLock.unlock();
   }
   String message =
      name + " : set liveTime=" + liveTime + "ms. New value will be applied to items cached from this moment.";
   LOG.info(message);
}
Set liveTime of newly cached items .
73
8
15,778
/**
 * Removes an item from cache C under both keys. If the (parentId, path,
 * type) key maps to a different identifier — a phantom left behind by a
 * same-path item with another id — that phantom item is removed
 * recursively as well.
 */
protected void removeItem ( final ItemData item ) { final String itemId = item . getIdentifier ( ) ; cache . remove ( new CacheId ( itemId ) ) ; final CacheValue v2 = cache . remove ( new CacheQPath ( item . getParentIdentifier ( ) , item . getQPath ( ) , ItemType . getItemType ( item ) ) ) ; if ( v2 != null && ! v2 . getItem ( ) . getIdentifier ( ) . equals ( itemId ) ) { // same path but diff identifier node... phantom removeItem ( v2 . getItem ( ) ) ; } }
Remove item from cache C .
133
6
15,779
/**
 * Removes the property with {@code childIdentifier} from the parent's
 * cached property list (CP), evicting the whole list entry when it
 * becomes empty. Iteration is synchronized on the list itself.
 *
 * @return the removed property, or null when the parent's properties
 *         are not cached or contain no match
 */
protected PropertyData removeChildProperty ( final String parentIdentifier , final String childIdentifier ) { final List < PropertyData > childProperties = propertiesCache . get ( parentIdentifier ) ; if ( childProperties != null ) { synchronized ( childProperties ) { // [PN] 17.01.07 for ( Iterator < PropertyData > i = childProperties . iterator ( ) ; i . hasNext ( ) ; ) { PropertyData cn = i . next ( ) ; if ( cn . getIdentifier ( ) . equals ( childIdentifier ) ) { i . remove ( ) ; if ( childProperties . size ( ) <= 0 ) { propertiesCache . remove ( parentIdentifier ) ; } return cn ; } } } } return null ; }
Remove property by id if parent properties are cached in CP .
164
12
15,780
/**
 * Removes the node with {@code childIdentifier} from the parent's
 * cached child node list (CN). Iteration is synchronized on the list.
 * NOTE(review): unlike removeChildProperty, an emptied list is NOT
 * evicted from nodesCache — confirm whether that asymmetry is
 * intentional.
 *
 * @return the removed node, or null when the parent's child nodes are
 *         not cached or contain no match
 */
protected NodeData removeChildNode ( final String parentIdentifier , final String childIdentifier ) { final List < NodeData > childNodes = nodesCache . get ( parentIdentifier ) ; if ( childNodes != null ) { synchronized ( childNodes ) { // [PN] 17.01.07 for ( Iterator < NodeData > i = childNodes . iterator ( ) ; i . hasNext ( ) ; ) { NodeData cn = i . next ( ) ; if ( cn . getIdentifier ( ) . equals ( childIdentifier ) ) { i . remove ( ) ; return cn ; } } } } return null ; }
Remove child node by id if parent child nodes are cached in CN .
140
14
15,781
/**
 * Builds a textual dump of the raw cache contents for debugging — one
 * line per entry: key hash, item id, item path, expiration time and key
 * class name, separated by tabs/commas and terminated by CRLF.
 */
String dump()
{
   StringBuilder out = new StringBuilder();
   for (Map.Entry<CacheKey, CacheValue> entry : cache.entrySet())
   {
      ItemData item = entry.getValue().getItem();
      out.append(entry.getKey().hashCode());
      out.append("\t\t");
      out.append(item.getIdentifier());
      out.append(", ");
      out.append(item.getQPath().getAsString());
      out.append(", ");
      out.append(entry.getValue().getExpiredTime());
      out.append(", ");
      out.append(entry.getKey().getClass().getSimpleName());
      out.append("\r\n");
   }
   return out.toString();
}
For debug .
204
3
15,782
/**
 * Runs the repository restore, recording the state transitions
 * (STARTED, then SUCCESSFUL or FAIL) and the start/end timestamps. Any
 * throwable is captured in restoreException and rethrown wrapped in
 * RepositoryRestoreExeption; when removeJobOnceOver is set, the job is
 * removed from the backup manager's list in the finally block.
 */
final protected void restore ( ) throws RepositoryRestoreExeption { try { stateRestore = REPOSITORY_RESTORE_STARTED ; startTime = Calendar . getInstance ( ) ; restoreRepository ( ) ; stateRestore = REPOSITORY_RESTORE_SUCCESSFUL ; endTime = Calendar . getInstance ( ) ; } catch ( Throwable t ) //NOSONAR { stateRestore = REPOSITORY_RESTORE_FAIL ; restoreException = t ; throw new RepositoryRestoreExeption ( t . getMessage ( ) , t ) ; } finally { if ( removeJobOnceOver ) { backupManager . restoreRepositoryJobs . remove ( this ) ; } } }
Restore repository . Provide information about start and finish process .
160
12
15,783
/**
 * Removes the named repository when it exists: closes all its open
 * sessions first, removes the repository and persists the remaining
 * configuration. A repository that cannot be looked up is treated as
 * already absent and silently skipped (traced only).
 */
protected void removeRepository ( RepositoryService repositoryService , String repositoryName ) throws RepositoryException , RepositoryConfigurationException { ManageableRepository mr = null ; try { mr = repositoryService . getRepository ( repositoryName ) ; } catch ( RepositoryException e ) { // The repository not exist. if ( LOG . isTraceEnabled ( ) ) { LOG . trace ( "An exception occurred: " + e . getMessage ( ) ) ; } } if ( mr != null ) { closeAllSession ( mr ) ; repositoryService . removeRepository ( repositoryName ) ; repositoryService . getConfig ( ) . retain ( ) ; // save configuration to persistence (file or persister) } }
Remove repository .
152
3
15,784
/**
 * Closes every open session on each workspace of the repository that
 * cannot currently be removed (i.e. still has live sessions).
 */
private void closeAllSession(ManageableRepository mr) throws NoSuchWorkspaceException
{
   for (String workspaceName : mr.getWorkspaceNames())
   {
      if (mr.canRemoveWorkspace(workspaceName))
      {
         // no live sessions on this workspace - nothing to close
         continue;
      }
      WorkspaceContainerFacade container = mr.getWorkspaceContainer(workspaceName);
      SessionRegistry registry = (SessionRegistry)container.getComponent(SessionRegistry.class);
      registry.closeSessions(workspaceName);
   }
}
Close all open session in repository
118
6
15,785
/**
 * Collects every recorded item state whose data path equals
 * {@code itemPath}, preserving insertion order.
 */
protected List<ItemState> findItemStates(QPath itemPath)
{
   List<ItemState> matches = new ArrayList<ItemState>();
   for (int i = 0; i < itemAddStates.size(); i++)
   {
      ItemState state = itemAddStates.get(i);
      if (state.getData().getQPath().equals(itemPath))
      {
         matches.add(state);
      }
   }
   return matches;
}
Find item states .
89
4
15,786
/**
 * Returns the most recently added item state whose data path equals
 * {@code itemPath}, or null when none matches.
 */
protected ItemState findLastItemState(QPath itemPath)
{
   // walk backwards so the latest matching state wins
   for (int i = itemAddStates.size(); i-- > 0;)
   {
      ItemState state = itemAddStates.get(i);
      if (state.getData().getQPath().equals(itemPath))
      {
         return state;
      }
   }
   return null;
}
Find last ItemState .
85
5
15,787
/**
 * Parses a compact node type definition (CND) stream: lexes and parses
 * it with ANTLR, raising RepositoryException when the lexer or parser
 * reports errors, then runs the tree walker — which registers any
 * namespaces mentioned in the stream via the namespace registry — and
 * returns the resulting node types. A null stream yields an empty list.
 *
 * @throws RepositoryException on lex/parse errors or I/O failure
 */
public List < NodeTypeData > read ( InputStream is ) throws RepositoryException { try { if ( is != null ) { /** Lexing input stream */ CNDLexer lex = new CNDLexer ( new ANTLRInputStream ( is ) ) ; CommonTokenStream tokens = new CommonTokenStream ( lex ) ; /** Parsing input stream */ CNDParser parser = new CNDParser ( tokens ) ; CNDParser . cnd_return r ; /** Throw exception if any lex errors found */ if ( lex . hasError ( ) ) { throw new RepositoryException ( "Lexer errors found " + lex . getErrors ( ) . toString ( ) ) ; } r = parser . cnd ( ) ; /** Throw exception if any parse errors found */ if ( parser . hasError ( ) ) { throw new RepositoryException ( "Parser errors found " + parser . getErrors ( ) . toString ( ) ) ; } CommonTreeNodeStream nodes = new CommonTreeNodeStream ( r . getTree ( ) ) ; CNDWalker walker = new CNDWalker ( nodes ) ; /** * Running tree walker to build nodetypes. Namespace registry is * provided to register namespaced mentioned in stream and * locationFactory is used to parse JCR names */ walker . cnd ( namespaceRegistry ) ; return walker . getNodeTypes ( ) ; } else { return new ArrayList < NodeTypeData > ( ) ; } } catch ( IOException e ) { throw new RepositoryException ( e . getMessage ( ) , e ) ; } catch ( RecognitionException e ) { throw new RepositoryException ( e . getMessage ( ) , e ) ; } }
Method which reads input stream as compact node type definition string . If any namespaces are placed in stream they are registered through namespace registry .
359
27
15,788
protected void execute ( List < String > scripts ) throws SQLException { SecurityHelper . validateSecurityPermission ( JCRRuntimePermissions . MANAGE_REPOSITORY_PERMISSION ) ; // set needed auto commit mode boolean autoCommit = connection . getAutoCommit ( ) ; if ( autoCommit != this . autoCommit ) { connection . setAutoCommit ( this . autoCommit ) ; } Statement st = connection . createStatement ( ) ; try { for ( String scr : scripts ) { String sql = JDBCUtils . cleanWhitespaces ( scr . trim ( ) ) ; if ( ! sql . isEmpty ( ) ) { if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( "Execute script: \n[" + sql + "]" ) ; } executeQuery ( st , sql ) ; } } } finally { try { st . close ( ) ; } catch ( SQLException e ) { LOG . error ( "Can't close the Statement." + e . getMessage ( ) ) ; } // restore previous auto commit mode if ( autoCommit != this . autoCommit ) { connection . setAutoCommit ( autoCommit ) ; } } }
Execute script on database . Set auto commit mode if needed .
259
13
15,789
/**
 * Initializes the NodeIterator in document order. Lazily builds
 * {@code orderedNodes} from {@code scoreNodes}: the score-node rows are
 * sorted with {@code ScoreNodeComparator}, and any rows whose node id the
 * comparator reported as invalid are dropped before retrying, until a sort
 * pass completes with no invalid ids. No-op if already initialized.
 */
private void initOrderedIterator()
{
   if (orderedNodes != null)
   {
      return;
   }
   long time = 0;
   if (LOG.isDebugEnabled())
   {
      time = System.currentTimeMillis();
   }
   ScoreNode[][] nodes = (ScoreNode[][])scoreNodes.toArray(new ScoreNode[scoreNodes.size()][]);
   // Node ids the comparator failed on during the last sort pass.
   final Set<String> invalidIDs = new HashSet<String>(2);
   /** Cache for Nodes obtainer during the order (comparator work) */
   final Map<String, NodeData> lcache = new HashMap<String, NodeData>();
   do
   {
      if (invalidIDs.size() > 0)
      {
         // previous sort run was not successful -> remove failed uuids
         List<ScoreNode[]> tmp = new ArrayList<ScoreNode[]>();
         for (int i = 0; i < nodes.length; i++)
         {
            if (!invalidIDs.contains(nodes[i][selectorIndex].getNodeId()))
            {
               tmp.add(nodes[i]);
            }
         }
         nodes = (ScoreNode[][])tmp.toArray(new ScoreNode[tmp.size()][]);
         invalidIDs.clear();
      }
      try
      {
         // sort the uuids
         Arrays.sort(nodes, new ScoreNodeComparator(lcache, invalidIDs));
      }
      catch (SortFailedException e)
      {
         // Sort aborted; the offending ids are now in invalidIDs and the
         // loop will strip them and retry.
         if (LOG.isTraceEnabled())
         {
            LOG.trace("An exception occurred: " + e.getMessage());
         }
      }
   }
   while (invalidIDs.size() > 0);
   if (LOG.isDebugEnabled())
   {
      LOG.debug("" + nodes.length + " node(s) ordered in " + (System.currentTimeMillis() - time) + " ms");
   }
   orderedNodes = new ScoreNodeIteratorImpl(nodes);
}
Initializes the NodeIterator in document order
433
8
15,790
/**
 * Returns the file with the full backup from the given restore directory.
 * In case of RDBMS backup it may be a directory. Backup artifacts are named
 * {@code <name>.<sequence>}; the full backup always carries sequence 0.
 *
 * <p>Fix: the original recompiled a {@code Pattern} inside {@code accept()}
 * for every directory entry and then filtered the already-filtered list a
 * second time. The pattern is now compiled once and the listing filters
 * directly for the {@code .0} suffix — same result, one pass.
 *
 * @param restoreDir directory containing the backup set
 * @return the first entry whose name ends with ".0", or {@code null} if none
 */
public static File getFullBackupFile(File restoreDir)
{
   // ".0" suffix identifies the full backup; must be final for the
   // anonymous FileFilter below.
   final Pattern fullBackupPattern = Pattern.compile(".+\\.0");
   File[] candidates = PrivilegedFileHelper.listFiles(restoreDir, new FileFilter()
   {
      public boolean accept(File pathname)
      {
         return fullBackupPattern.matcher(pathname.getName()).matches();
      }
   });
   return candidates.length > 0 ? candidates[0] : null;
}
Returns file with full backup . In case of RDBMS backup it may be a directory .
143
19
15,791
/**
 * Returns the list of incremental backup files from the given restore
 * directory. Backup artifacts are named {@code <name>.<sequence>}; sequence
 * 0 is the full backup, every other sequence number is an incremental one.
 *
 * <p>Fix: the original recompiled the numbered-suffix {@code Pattern}
 * inside {@code accept()} for every directory entry; both patterns are now
 * compiled once. {@code == false} replaced by logical negation.
 *
 * @param restoreDir directory containing the backup set
 * @return all numbered backup files except the ".0" full backup; may be empty
 */
public static List<File> getIncrementalFiles(File restoreDir)
{
   // Compile once; backupFilePattern must be final for the anonymous filter.
   final Pattern backupFilePattern = Pattern.compile(".+\\.[0-9]+");
   Pattern fullBackupPattern = Pattern.compile(".+\\.0");

   List<File> incrementalFiles = new ArrayList<File>();
   for (File f : PrivilegedFileHelper.listFiles(restoreDir, new FileFilter()
   {
      public boolean accept(File pathname)
      {
         return backupFilePattern.matcher(pathname.getName()).matches();
      }
   }))
   {
      if (!fullBackupPattern.matcher(f.getName()).matches())
      {
         incrementalFiles.add(f);
      }
   }
   return incrementalFiles;
}
Get list of incremental backup files .
167
7
15,792
public void incrementalRestore ( File incrementalBackupFile ) throws FileNotFoundException , IOException , ClassNotFoundException , RepositoryException { ObjectInputStream ois = null ; try { ois = new ObjectInputStream ( PrivilegedFileHelper . fileInputStream ( incrementalBackupFile ) ) ; while ( true ) { TransactionChangesLog changesLog = readExternal ( ois ) ; changesLog . setSystemId ( Constants . JCR_CORE_RESTORE_WORKSPACE_INITIALIZER_SYSTEM_ID ) ; // mark changes ChangesLogIterator cli = changesLog . getLogIterator ( ) ; while ( cli . hasNextLog ( ) ) { if ( cli . nextLog ( ) . getEventType ( ) == ExtendedEvent . LOCK ) { cli . removeLog ( ) ; } } saveChangesLog ( changesLog ) ; } } catch ( EOFException ioe ) { if ( LOG . isTraceEnabled ( ) ) { LOG . trace ( "An exception occurred: " + ioe . getMessage ( ) ) ; } } }
Perform incremental restore operation .
235
6
15,793
/**
 * Returns the calculated changed-size delta for the given node path, or
 * zero when no delta has been recorded for it.
 *
 * @param nodePath node path used as the lookup key
 * @return recorded size delta, or 0 if absent
 */
public long getNodeChangedSize(String nodePath)
{
   Long recorded = calculatedChangedNodesSize.get(nodePath);
   if (recorded == null)
   {
      return 0;
   }
   return recorded;
}
Returns the node data changed size if it exists or zero otherwise .
37
11
15,794
/**
 * Merges another ChangesItem into this one: adds its workspace size delta,
 * sums per-node size deltas path by path, and copies over the paths with
 * unknown changed size and the async-update paths.
 *
 * @param changesItem the changes to fold into this instance
 */
public void merge(ChangesItem changesItem)
{
   workspaceChangedSize += changesItem.getWorkspaceChangedSize();

   for (Entry<String, Long> incoming : changesItem.calculatedChangedNodesSize.entrySet())
   {
      String path = incoming.getKey();
      Long incomingDelta = incoming.getValue();
      Long existingDelta = calculatedChangedNodesSize.get(path);
      // Accumulate: missing entries count as zero.
      calculatedChangedNodesSize.put(path,
         existingDelta == null ? incomingDelta : incomingDelta + existingDelta);
   }

   unknownChangedNodesSize.addAll(changesItem.unknownChangedNodesSize);
   asyncUpdate.addAll(changesItem.asyncUpdate);
}
Merges current changes with new one .
175
8
15,795
/**
 * Tells whether the node is an nt:file with an nt:resource jcr:content
 * child. Repository errors are logged and reported as "not a file".
 *
 * @param node the node to inspect
 * @return {@code true} only when both node type checks pass
 */
public static boolean isFile(Node node)
{
   try
   {
      // Short-circuit keeps the original order: jcr:content is only read
      // when the node is an nt:file.
      return node.isNodeType("nt:file")
         && node.getNode("jcr:content").isNodeType("nt:resource");
   }
   catch (RepositoryException exc)
   {
      LOG.error(exc.getMessage(), exc);
      return false;
   }
}
If the node is a file .
92
6
15,796
/**
 * Tells whether the node is of type nt:version. Repository errors are
 * logged and reported as "not a version".
 *
 * @param node the node to inspect
 * @return {@code true} when the node type is nt:version
 */
public static boolean isVersion(Node node)
{
   try
   {
      return node.isNodeType("nt:version");
   }
   catch (RepositoryException exc)
   {
      LOG.error(exc.getMessage(), exc);
      return false;
   }
}
If the node is a version .
61
6
15,797
/**
 * Validates a node type definition according to JSR-170: non-null type and
 * name, no self-referencing supertypes (except nt:base), and for every
 * declared property/child-node definition a matching declaring-type name
 * plus default values and constraints compatible with the required type.
 *
 * @param nodeType the definition to validate
 * @throws RepositoryException if any structural check fails
 * @throws ValueFormatException if a default value or constraint is
 *            incompatible with the declared property type
 */
private void validateNodeType(NodeTypeData nodeType) throws RepositoryException
{
   if (nodeType == null)
   {
      throw new RepositoryException("NodeType object " + nodeType + " is null");
   }
   if (nodeType.getName() == null)
   {
      throw new RepositoryException("NodeType implementation class " + nodeType.getClass().getName()
         + " is not supported in this method");
   }
   // A type (other than nt:base) must not list itself as its own supertype.
   for (InternalQName sname : nodeType.getDeclaredSupertypeNames())
   {
      if (!nodeType.getName().equals(Constants.NT_BASE) && nodeType.getName().equals(sname))
      {
         throw new RepositoryException("Invalid super type name" + sname.getAsString());
      }
   }
   // Each property definition must declare this type and carry defaults and
   // constraints compatible with its required type.
   for (PropertyDefinitionData pdef : nodeType.getDeclaredPropertyDefinitions())
   {
      if (!pdef.getDeclaringNodeType().equals(nodeType.getName()))
      {
         throw new RepositoryException("Invalid declared node type in property definitions with name "
            + pdef.getName().getAsString() + " not registred");
      }
      // validate default values
      try
      {
         validateValueDefaults(pdef.getRequiredType(), pdef.getDefaultValues());
      }
      catch (ValueFormatException e)
      {
         throw new ValueFormatException("Default value is incompatible with Property type "
            + PropertyType.nameFromValue(pdef.getRequiredType()) + " of " + pdef.getName().getAsString()
            + " in nodetype " + nodeType.getName().getAsString(), e);
      }
      // validate value constraints
      try
      {
         validateValueConstraints(pdef.getRequiredType(), pdef.getValueConstraints());
      }
      catch (ValueFormatException e)
      {
         throw new ValueFormatException("Constraints is incompatible with Property type "
            + PropertyType.nameFromValue(pdef.getRequiredType()) + " of " + pdef.getName().getAsString()
            + " in nodetype " + nodeType.getName().getAsString(), e);
      }
   }
   // Each child-node definition must also declare this type.
   for (NodeDefinitionData cndef : nodeType.getDeclaredChildNodeDefinitions())
   {
      if (!cndef.getDeclaringNodeType().equals(nodeType.getName()))
      {
         throw new RepositoryException("Invalid declared node type in child node definitions with name "
            + cndef.getName().getAsString() + " not registred");
      }
   }
}
Check according to the JSR - 170
592
7
15,798
/**
 * For internal use. Doesn't check InvalidItemStateException. May return an
 * unpooled Version object.
 *
 * @param versionName JCR name of the version to look up in this history
 * @param pool whether the item should be taken from/placed into the pool
 * @return the version with the given name
 * @throws VersionException if no such version exists in this history
 * @throws RepositoryException on name parsing or data access errors
 */
public Version version(String versionName, boolean pool) throws VersionException, RepositoryException
{
   JCRName jcrVersionName = locationFactory.parseJCRName(versionName);
   QPathEntry versionEntry = new QPathEntry(jcrVersionName.getInternalName(), 1);
   VersionImpl found =
      (VersionImpl)dataManager.getItem(nodeData(), versionEntry, pool, ItemType.NODE, false);
   if (found != null)
   {
      return found;
   }
   throw new VersionException("There are no version with name '" + versionName + "' in the version history "
      + getPath());
}
For internal use . Doesn't check InvalidItemStateException . May return unpooled Version object .
131
20
15,799
/**
 * Runs all organization-data migration steps in the required order: move
 * the old structure aside, create the new structure, then migrate groups,
 * membership types, users, profiles and memberships, and finally remove the
 * old structure. Any failure is wrapped in a RepositoryException.
 *
 * @throws RepositoryException if any migration step fails
 */
void migrate() throws RepositoryException
{
   try
   {
      LOG.info("Migration started.");
      moveOldStructure();
      service.createStructure();
      //Migration order is important due to removal of nodes.
      migrateGroups();
      migrateMembershipTypes();
      migrateUsers();
      migrateProfiles();
      migrateMemberships();
      removeOldStructure();
      LOG.info("Migration completed.");
   }
   catch (Exception e) //NOSONAR - deliberate broad catch: any failure must surface as RepositoryException
   {
      throw new RepositoryException("Migration failed", e);
   }
}
Method that aggregates all needed migration operations in the required order .
126
12