idx int64 0 165k | question stringlengths 73 4.15k | target stringlengths 5 918 | len_question int64 21 890 | len_target int64 3 255 |
|---|---|---|---|---|
33,200 | public void show ( String repository , final boolean changeHistory ) { this . repository = repository ; refreshWorkspacesAndReloadNode ( null , ROOT_PATH , changeHistory ) ; } | Shows content of the root node of the first reachable workspace of the given repository . | 39 | 18 |
33,201 | public void show ( final String repository , final String workspace , final String path , final boolean changeHistory ) { this . repository = repository ; this . refreshWorkspacesAndReloadNode ( null , path , changeHistory ) ; } | Shows nodes identified by repository workspace and path to node . | 47 | 12 |
33,202 | private void refreshWorkspacesAndReloadNode ( final String name , final String path , final boolean changeHistory ) { showLoadIcon ( ) ; console . jcrService ( ) . getWorkspaces ( repository , new AsyncCallback < String [ ] > ( ) { @ Override public void onFailure ( Throwable caught ) { hideLoadIcon ( ) ; RemoteException e = ( RemoteException ) caught ; SC . say ( caught . getMessage ( ) ) ; if ( e . code ( ) == RemoteException . SECURITY_ERROR ) { console . loadRepositoriesList ( ) ; } } @ Override public void onSuccess ( String [ ] workspaces ) { wsp . setWorkspaceNames ( workspaces ) ; getAndDisplayNode ( path , changeHistory ) ; hideLoadIcon ( ) ; } } ) ; } | Reloads values of the combo box with workspace names . | 175 | 12 |
33,203 | public void getAndDisplayNode ( final String path , final boolean changeHistory ) { showLoadIcon ( ) ; console . jcrService ( ) . node ( repository ( ) , workspace ( ) , path , new AsyncCallback < JcrNode > ( ) { @ Override public void onFailure ( Throwable caught ) { hideLoadIcon ( ) ; SC . say ( caught . getMessage ( ) ) ; } @ Override public void onSuccess ( JcrNode node ) { displayNode ( node ) ; console . changeWorkspaceInURL ( workspace ( ) , changeHistory ) ; console . changePathInURL ( path , changeHistory ) ; hideLoadIcon ( ) ; } } ) ; } | Reads node with given path and selected repository and workspace . | 147 | 12 |
33,204 | private void displayNode ( JcrNode node ) { this . node = node ; this . path = node . getPath ( ) ; pathLabel . display ( node . getPath ( ) ) ; //display childs, properties and ACLs childrenEditor . show ( node ) ; propertiesEditor . show ( node ) ; permissionsEditor . show ( node ) ; displayBinaryContent ( node ) ; //bring this page on top // console.display(Contents.this); } | Displays specified node . | 98 | 5 |
33,205 | public void save ( ) { SC . ask ( "Do you want to save changes" , new BooleanCallback ( ) { @ Override public void execute ( Boolean yesSelected ) { if ( yesSelected ) { jcrService ( ) . save ( repository ( ) , workspace ( ) , new BaseCallback < Object > ( ) { @ Override public void onSuccess ( Object result ) { session ( ) . setHasChanges ( false ) ; updateControls ( ) ; } } ) ; } } } ) ; } | Save session s changes . | 109 | 5 |
33,206 | public void showAddNodeDialog ( ) { jcrService ( ) . getPrimaryTypes ( node . getRepository ( ) , node . getWorkspace ( ) , null , false , new AsyncCallback < String [ ] > ( ) { @ Override public void onFailure ( Throwable caught ) { SC . say ( caught . getMessage ( ) ) ; } @ Override public void onSuccess ( String [ ] result ) { addNodeDialog . setPrimaryTypes ( result ) ; addNodeDialog . showModal ( ) ; } } ) ; } | Prepares dialog for creating new node . | 117 | 8 |
33,207 | public void exportXML ( String name , boolean skipBinary , boolean noRecurse ) { console . jcrService ( ) . export ( repository , workspace ( ) , path ( ) , name , true , true , new AsyncCallback < Object > ( ) { @ Override public void onFailure ( Throwable caught ) { SC . say ( caught . getMessage ( ) ) ; } @ Override public void onSuccess ( Object result ) { SC . say ( "Complete" ) ; } } ) ; } | Exports contents to the given file . | 108 | 8 |
33,208 | public void importXML ( String name , int option ) { console . jcrService ( ) . importXML ( repository , workspace ( ) , path ( ) , name , option , new AsyncCallback < Object > ( ) { @ Override public void onFailure ( Throwable caught ) { SC . say ( caught . getMessage ( ) ) ; } @ Override public void onSuccess ( Object result ) { SC . say ( "Complete" ) ; } } ) ; } | Imports contents from the given file . | 101 | 8 |
33,209 | protected String branchRefForName ( String branchName ) { String remoteName = connector . remoteName ( ) ; return remoteName != null ? remoteBranchPrefix ( remoteName ) + branchName : LOCAL_BRANCH_PREFIX + branchName ; } | Obtain the name of the branch reference | 57 | 8 |
33,210 | protected void addBranchesAsChildren ( Git git , CallSpecification spec , DocumentWriter writer ) throws GitAPIException { Set < String > remoteBranchPrefixes = remoteBranchPrefixes ( ) ; if ( remoteBranchPrefixes . isEmpty ( ) ) { // Generate the child references to the LOCAL branches, which will be sorted by name ... ListBranchCommand command = git . branchList ( ) ; List < Ref > branches = command . call ( ) ; // Reverse the sort of the branch names, since they might be version numbers ... Collections . sort ( branches , REVERSE_REF_COMPARATOR ) ; for ( Ref ref : branches ) { String name = ref . getName ( ) ; name = name . replace ( GitFunction . LOCAL_BRANCH_PREFIX , "" ) ; writer . addChild ( spec . childId ( name ) , name ) ; } return ; } // There is at least one REMOTE branch, so generate the child references to the REMOTE branches, // which will be sorted by name (by the command)... ListBranchCommand command = git . branchList ( ) ; command . setListMode ( ListMode . REMOTE ) ; List < Ref > branches = command . call ( ) ; // Reverse the sort of the branch names, since they might be version numbers ... Collections . sort ( branches , REVERSE_REF_COMPARATOR ) ; Set < String > uniqueNames = new HashSet < String > ( ) ; for ( Ref ref : branches ) { String name = ref . getName ( ) ; if ( uniqueNames . contains ( name ) ) continue ; // We only want the branch if it matches one of the listed remotes ... boolean skip = false ; for ( String remoteBranchPrefix : remoteBranchPrefixes ) { if ( name . startsWith ( remoteBranchPrefix ) ) { // Remove the prefix ... name = name . replaceFirst ( remoteBranchPrefix , "" ) ; break ; } // Otherwise, it's a remote branch from a different remote that we don't want ... skip = true ; } if ( skip ) continue ; if ( uniqueNames . add ( name ) ) writer . addChild ( spec . childId ( name ) , name ) ; } } | Add the names of the branches as children of the current node . | 482 | 13 |
33,211 | protected void addTagsAsChildren ( Git git , CallSpecification spec , DocumentWriter writer ) throws GitAPIException { // Generate the child references to the branches, which will be sorted by name (by the command). ListTagCommand command = git . tagList ( ) ; List < Ref > tags = command . call ( ) ; // Reverse the sort of the branch names, since they might be version numbers ... Collections . sort ( tags , REVERSE_REF_COMPARATOR ) ; for ( Ref ref : tags ) { String fullName = ref . getName ( ) ; String name = fullName . replaceFirst ( TAG_PREFIX , "" ) ; writer . addChild ( spec . childId ( name ) , name ) ; } } | Add the names of the tags as children of the current node . | 157 | 13 |
33,212 | protected void addCommitsAsChildren ( Git git , CallSpecification spec , DocumentWriter writer , int pageSize ) throws GitAPIException { // Add commits in the log ... LogCommand command = git . log ( ) ; command . setSkip ( 0 ) ; command . setMaxCount ( pageSize ) ; // Add the first set of commits ... int actual = 0 ; String commitId = null ; for ( RevCommit commit : command . call ( ) ) { commitId = commit . getName ( ) ; writer . addChild ( spec . childId ( commitId ) , commitId ) ; ++ actual ; } if ( actual == pageSize ) { // We wrote the maximum number of commits, so there's (probably) another page ... writer . addPage ( spec . getId ( ) , commitId , pageSize , PageWriter . UNKNOWN_TOTAL_SIZE ) ; } } | Add the first page of commits in the history names of the tags as children of the current node . | 187 | 20 |
33,213 | protected void addCommitsAsPageOfChildren ( Git git , Repository repository , CallSpecification spec , PageWriter writer , PageKey pageKey ) throws GitAPIException , IOException { RevWalk walker = new RevWalk ( repository ) ; try { // The offset is the ID of the last commit we read, so we'll need to skip the first commit String lastCommitIdName = pageKey . getOffsetString ( ) ; ObjectId lastCommitId = repository . resolve ( lastCommitIdName ) ; int pageSize = ( int ) pageKey . getBlockSize ( ) ; LogCommand command = git . log ( ) ; command . add ( lastCommitId ) ; command . setMaxCount ( pageSize + 1 ) ; // Add the first set of commits ... int actual = 0 ; String commitId = null ; for ( RevCommit commit : command . call ( ) ) { commitId = commit . getName ( ) ; if ( commitId . equals ( lastCommitIdName ) ) continue ; writer . addChild ( spec . childId ( commitId ) , commitId ) ; ++ actual ; } if ( actual == pageSize ) { assert commitId != null ; // We wrote the maximum number of commits, so there's (probably) another page ... writer . addPage ( pageKey . getParentId ( ) , commitId , pageSize , PageWriter . UNKNOWN_TOTAL_SIZE ) ; } } finally { walker . dispose ( ) ; } } | Add an additional page of commits in the history names of the tags as children of the current node . | 315 | 20 |
33,214 | boolean isAsOrMoreConstrainedThan ( PropertyDefinition other , ExecutionContext context ) { String [ ] otherConstraints = other . getValueConstraints ( ) ; if ( otherConstraints == null || otherConstraints . length == 0 ) { // The ancestor's definition is less constrained, so it's okay even if this definition has no constraints ... return true ; } String [ ] constraints = this . getValueConstraints ( ) ; if ( constraints == null || constraints . length == 0 ) { // This definition has no constraints, while the ancestor does have them ... return false ; } // There are constraints on both, so make sure they have the same types ... int type = this . getRequiredType ( ) ; int otherType = other . getRequiredType ( ) ; if ( type == otherType && type != PropertyType . UNDEFINED ) { ConstraintChecker thisChecker = createChecker ( context , type , constraints ) ; ConstraintChecker thatChecker = createChecker ( context , otherType , otherConstraints ) ; return thisChecker . isAsOrMoreConstrainedThan ( thatChecker ) ; } // We can only compare constraint literals, and we can only expect that every constraint literal in this // definition can be found in the other defintion (which can have more than this one) ... Set < String > thatLiterals = new HashSet < String > ( ) ; for ( String literal : otherConstraints ) { thatLiterals . add ( literal ) ; } for ( String literal : constraints ) { if ( ! thatLiterals . contains ( literal ) ) return false ; } return true ; } | Determine if the constraints on this definition are as - constrained or more - constrained than those on the supplied definition . | 358 | 24 |
33,215 | @ Override protected void initializeStorage ( File directory ) throws BinaryStoreException { // make sure the directory doesn't exist FileUtil . delete ( directory ) ; if ( ! directory . exists ( ) ) { logger . debug ( "Creating temporary directory for transient binary store: {0}" , directory . getAbsolutePath ( ) ) ; directory . mkdirs ( ) ; } if ( ! directory . canRead ( ) ) { throw new BinaryStoreException ( JcrI18n . unableToReadTemporaryDirectory . text ( directory . getAbsolutePath ( ) , JAVA_IO_TMPDIR ) ) ; } if ( ! directory . canWrite ( ) ) { throw new BinaryStoreException ( JcrI18n . unableToWriteTemporaryDirectory . text ( directory . getAbsolutePath ( ) , JAVA_IO_TMPDIR ) ) ; } } | Ensures that the directory used by this binary store exists and can be both read and written to . | 188 | 21 |
33,216 | public Workspace addWorkspace ( String name , String repositoryUrl ) { Workspace workspace = new Workspace ( name , repositoryUrl ) ; workspaces . add ( workspace ) ; return workspace ; } | Adds a new workspace to the list of workspaces . | 41 | 11 |
33,217 | protected void modifyProperties ( NodeKey key , Name primaryType , Set < Name > mixinTypes , Map < Name , AbstractPropertyChange > propChanges ) { } | Handle the addition change and removal of one or more properties of a single node . This method is called once for each existing node whose properties are modified . | 35 | 30 |
33,218 | protected void addNode ( String workspaceName , NodeKey key , Path path , Name primaryType , Set < Name > mixinTypes , Properties properties ) { } | Handle the addition of a node . | 33 | 7 |
33,219 | protected void removeNode ( String workspaceName , NodeKey key , NodeKey parentKey , Path path , Name primaryType , Set < Name > mixinTypes ) { } | Handle the removal of a node . | 35 | 7 |
33,220 | protected void changeNode ( String workspaceName , NodeKey key , Path path , Name primaryType , Set < Name > mixinTypes ) { } | Handle the change of a node . | 30 | 7 |
33,221 | protected void moveNode ( String workspaceName , NodeKey key , Name primaryType , Set < Name > mixinTypes , NodeKey oldParent , NodeKey newParent , Path newPath , Path oldPath ) { } | Handle the move of a node . | 45 | 7 |
33,222 | protected void renameNode ( String workspaceName , NodeKey key , Path newPath , Segment oldSegment , Name primaryType , Set < Name > mixinTypes ) { } | Handle the renaming of a node . | 37 | 8 |
33,223 | protected void reorderNode ( String workspaceName , NodeKey key , Name primaryType , Set < Name > mixinTypes , NodeKey parent , Path newPath , Path oldPath , Path reorderedBeforePath , Map < NodeKey , Map < Path , Path > > snsPathChangesByNodeKey ) { } | Handle the reordering of a node . | 66 | 8 |
33,224 | public static < T1 , T2 > TypeFactory < Tuple2 < T1 , T2 > > typeFactory ( TypeFactory < T1 > type1 , TypeFactory < T2 > type2 ) { return new Tuple2TypeFactory <> ( type1 , type2 ) ; } | Create a type factory for tuples of size 2 . | 63 | 11 |
33,225 | public static < T1 , T2 , T3 > TypeFactory < Tuple3 < T1 , T2 , T3 > > typeFactory ( TypeFactory < T1 > type1 , TypeFactory < T2 > type2 , TypeFactory < T3 > type3 ) { return new Tuple3TypeFactory <> ( type1 , type2 , type3 ) ; } | Create a type factory for tuples of size 3 . | 81 | 11 |
33,226 | public static < T1 , T2 , T3 , T4 > TypeFactory < Tuple4 < T1 , T2 , T3 , T4 > > typeFactory ( TypeFactory < T1 > type1 , TypeFactory < T2 > type2 , TypeFactory < T3 > type3 , TypeFactory < T4 > type4 ) { return new Tuple4TypeFactory <> ( type1 , type2 , type3 , type4 ) ; } | Create a type factory for tuples of size 4 . | 99 | 11 |
33,227 | public static TypeFactory < ? > typeFactory ( TypeFactory < ? > type , int tupleSize ) { if ( tupleSize <= 1 ) return type ; if ( tupleSize == 2 ) return typeFactory ( type , type ) ; if ( tupleSize == 3 ) return typeFactory ( type , type , type ) ; if ( tupleSize == 4 ) return typeFactory ( type , type , type , type ) ; Collection < TypeFactory < ? > > types = new ArrayList <> ( tupleSize ) ; for ( int i = 0 ; i != tupleSize ; ++ i ) { types . add ( type ) ; } return new TupleNTypeFactory ( types ) ; } | Create a type factory for uniform tuples . | 142 | 9 |
33,228 | private Privilege [ ] privileges ( Set < String > names ) throws ValueFormatException , AccessControlException , RepositoryException { Privilege [ ] privileges = new Privilege [ names . size ( ) ] ; int i = 0 ; for ( String name : names ) { privileges [ i ++ ] = privilegeFromName ( name ) ; } return privileges ; } | Constructs list of Privilege objects using privilege s name . | 74 | 12 |
33,229 | protected static String determineMethodsAllowed ( StoredObject so ) { try { if ( so != null ) { if ( so . isNullResource ( ) ) { return NULL_RESOURCE_METHODS_ALLOWED ; } else if ( so . isFolder ( ) ) { return RESOURCE_METHODS_ALLOWED + FOLDER_METHOD_ALLOWED ; } // else resource return RESOURCE_METHODS_ALLOWED ; } } catch ( Exception e ) { // we do nothing, just return less allowed methods } return LESS_ALLOWED_METHODS ; } | Determines the methods normally allowed for the resource . | 126 | 11 |
33,230 | protected AstNode parseCreateIndex ( DdlTokenStream tokens , AstNode parentNode ) throws ParsingException { assert tokens != null ; assert parentNode != null ; markStartOfStatement ( tokens ) ; // CREATE [UNIQUE] INDEX index-Name // ON table-Name ( Simple-column-Name [ ASC | DESC ] [ , Simple-column-Name [ ASC | DESC ]] * ) tokens . consume ( CREATE ) ; // CREATE boolean isUnique = tokens . canConsume ( "UNIQUE" ) ; tokens . consume ( "INDEX" ) ; String indexName = parseName ( tokens ) ; tokens . consume ( "ON" ) ; String tableName = parseName ( tokens ) ; AstNode indexNode = nodeFactory ( ) . node ( indexName , parentNode , TYPE_CREATE_INDEX_STATEMENT ) ; indexNode . setProperty ( UNIQUE_INDEX , isUnique ) ; indexNode . setProperty ( TABLE_NAME , tableName ) ; parseIndexTableColumns ( tokens , indexNode ) ; parseUntilTerminator ( tokens ) ; markEndOfStatement ( tokens , indexNode ) ; return indexNode ; } | Parses DDL CREATE INDEX | 252 | 9 |
33,231 | protected AstNode parseCreateRole ( DdlTokenStream tokens , AstNode parentNode ) throws ParsingException { assert tokens != null ; assert parentNode != null ; markStartOfStatement ( tokens ) ; tokens . consume ( CREATE , "ROLE" ) ; String functionName = parseName ( tokens ) ; AstNode functionNode = nodeFactory ( ) . node ( functionName , parentNode , TYPE_CREATE_ROLE_STATEMENT ) ; markEndOfStatement ( tokens , functionNode ) ; return functionNode ; } | Parses DDL CREATE ROLE statement | 111 | 10 |
33,232 | protected void parseColumns ( DdlTokenStream tokens , AstNode tableNode , boolean isAlterTable ) throws ParsingException { String tableElementString = getTableElementsString ( tokens , false ) ; DdlTokenStream localTokens = new DdlTokenStream ( tableElementString , DdlTokenStream . ddlTokenizer ( false ) , false ) ; localTokens . start ( ) ; StringBuilder unusedTokensSB = new StringBuilder ( ) ; do { if ( isColumnDefinitionStart ( localTokens ) ) { parseColumnDefinition ( localTokens , tableNode , isAlterTable ) ; } else { // THIS IS AN ERROR. NOTHING FOUND. // NEED TO absorb tokens unusedTokensSB . append ( SPACE ) . append ( localTokens . consume ( ) ) ; } } while ( localTokens . canConsume ( COMMA ) ) ; if ( unusedTokensSB . length ( ) > 0 ) { String msg = DdlSequencerI18n . unusedTokensParsingColumnDefinition . text ( tableNode . getName ( ) ) ; DdlParserProblem problem = new DdlParserProblem ( Problems . WARNING , getCurrentMarkedPosition ( ) , msg ) ; problem . setUnusedSource ( unusedTokensSB . toString ( ) ) ; addProblem ( problem , tableNode ) ; } } | Utility method designed to parse columns within an ALTER TABLE ADD statement . | 279 | 15 |
33,233 | public static Set < Column > getColumnsReferencedBy ( Visitable visitable ) { if ( visitable == null ) return Collections . emptySet ( ) ; final Set < Column > symbols = new HashSet < Column > ( ) ; // Walk the entire structure, so only supply a StrategyVisitor (that does no navigation) ... Visitors . visitAll ( visitable , new AbstractVisitor ( ) { protected void addColumnFor ( SelectorName selectorName , String property ) { symbols . add ( new Column ( selectorName , property , property ) ) ; } @ Override public void visit ( Column column ) { symbols . add ( column ) ; } @ Override public void visit ( EquiJoinCondition joinCondition ) { addColumnFor ( joinCondition . selector1Name ( ) , joinCondition . getProperty1Name ( ) ) ; addColumnFor ( joinCondition . selector2Name ( ) , joinCondition . getProperty2Name ( ) ) ; } @ Override public void visit ( PropertyExistence prop ) { addColumnFor ( prop . selectorName ( ) , prop . getPropertyName ( ) ) ; } @ Override public void visit ( PropertyValue prop ) { addColumnFor ( prop . selectorName ( ) , prop . getPropertyName ( ) ) ; } @ Override public void visit ( ReferenceValue ref ) { String propertyName = ref . getPropertyName ( ) ; if ( propertyName != null ) { addColumnFor ( ref . selectorName ( ) , propertyName ) ; } } } ) ; return symbols ; } | Get the set of Column objects that represent those columns referenced by the visitable object . | 325 | 17 |
33,234 | protected static void removeTralingZeros ( StringBuilder sb ) { int endIndex = sb . length ( ) ; if ( endIndex > 0 ) { -- endIndex ; int index = endIndex ; while ( sb . charAt ( index ) == ' ' ) { -- index ; } if ( index < endIndex ) sb . delete ( index + 1 , endIndex + 1 ) ; } } | Utility to remove the trailing 0 s . | 88 | 9 |
33,235 | public static void write ( String content , File file ) throws IOException { CheckArg . isNotNull ( file , "destination file" ) ; if ( content != null ) { write ( content , new FileOutputStream ( file ) ) ; } } | Write the entire contents of the supplied string to the given file . | 53 | 13 |
33,236 | public static void closeQuietly ( Closeable closeable ) { if ( closeable == null ) { return ; } try { closeable . close ( ) ; } catch ( Throwable t ) { LOGGER . debug ( t , "Ignored error at closing stream" ) ; } } | Closes the closable silently . Any exceptions are ignored . | 61 | 12 |
33,237 | public void setNullResource ( boolean f ) { this . isNullRessource = f ; this . isFolder = false ; this . creationDate = null ; this . lastModified = null ; // this.content = null; this . contentLength = 0 ; this . mimeType = null ; } | Sets a StoredObject as a lock - null resource | 65 | 12 |
33,238 | protected void endContent ( ) throws RepositoryException { // Process the content of the element ... String content = StringUtil . normalize ( contentBuilder . toString ( ) ) ; // Null-out builder to setup for subsequent content. // Must be done before call to startElement below to prevent infinite loop. contentBuilder = null ; // Skip if nothing in content but whitespace if ( content . length ( ) > 0 ) { // Create separate node for each content entry since entries can be interspersed amongst child elements startNode ( XmlLexicon . ELEMENT_CONTENT , XmlLexicon . ELEMENT_CONTENT ) ; currentNode . setProperty ( XmlLexicon . ELEMENT_CONTENT , content ) ; endNode ( ) ; } } | See if there is any element content that needs to be completed . | 160 | 13 |
33,239 | public void show ( int x , int y ) { disabledHLayout . setSize ( "100%" , "100%" ) ; disabledHLayout . setStyleName ( "disabledBackgroundStyle" ) ; disabledHLayout . show ( ) ; loadingImg . setSize ( "100px" , "100px" ) ; loadingImg . setTop ( y ) ; //loading image height is 50px loadingImg . setLeft ( x ) ; //loading image width is 50px loadingImg . show ( ) ; loadingImg . bringToFront ( ) ; } | Shows loading indicator at the given place of screen . | 124 | 11 |
33,240 | void checkout ( AbstractJcrNode node ) throws LockException , RepositoryException { checkVersionable ( node ) ; // Check this separately since it throws a different type of exception if ( node . isLocked ( ) && ! node . holdsLock ( ) ) { throw new LockException ( JcrI18n . lockTokenNotHeld . text ( node . getPath ( ) ) ) ; } if ( ! node . hasProperty ( JcrLexicon . BASE_VERSION ) ) { // This happens when we've added mix:versionable, but not saved it to create the base // version (and the rest of the version storage graph). See MODE-704. return ; } // Checking out an already checked-out node is supposed to return silently if ( node . getProperty ( JcrLexicon . IS_CHECKED_OUT ) . getBoolean ( ) ) { return ; } // Create a session that we'll used to change the node ... SessionCache versionSession = session . spawnSessionCache ( false ) ; MutableCachedNode versionable = versionSession . mutable ( node . key ( ) ) ; NodeKey baseVersionKey = node . getBaseVersion ( ) . key ( ) ; PropertyFactory props = propertyFactory ( ) ; Reference baseVersionRef = session . referenceFactory ( ) . create ( baseVersionKey , true ) ; versionable . setProperty ( versionSession , props . create ( JcrLexicon . PREDECESSORS , new Object [ ] { baseVersionRef } ) ) ; versionable . setProperty ( versionSession , props . create ( JcrLexicon . IS_CHECKED_OUT , Boolean . TRUE ) ) ; versionSession . save ( ) ; } | Checks out the given node updating version - related properties on the node as needed . | 359 | 17 |
33,241 | public ExecutionContext with ( Map < String , String > data ) { Map < String , String > newData = data ; if ( newData == null ) { if ( this . data . isEmpty ( ) ) return this ; } else { // Copy the data in the map ... newData = Collections . unmodifiableMap ( new HashMap < String , String > ( data ) ) ; } return new ExecutionContext ( securityContext , namespaceRegistry , propertyFactory , threadPools , binaryStore , newData , processId , decoder , encoder , stringFactory , binaryFactory , booleanFactory , dateFactory , decimalFactory , doubleFactory , longFactory , nameFactory , pathFactory , referenceFactory , weakReferenceFactory , simpleReferenceFactory , uriFactory , objectFactory , locale ) ; } | Create a new execution context that mirrors this context but that contains the supplied data . Note that the supplied map is always copied to ensure that it is immutable . | 165 | 31 |
33,242 | public ExecutionContext with ( String key , String value ) { Map < String , String > newData = data ; if ( value == null ) { // Remove the value with the key ... if ( this . data . isEmpty ( ) || ! this . data . containsKey ( key ) ) { // nothing to remove return this ; } newData = new HashMap < String , String > ( data ) ; newData . remove ( key ) ; newData = Collections . unmodifiableMap ( newData ) ; } else { // We are to add the value ... newData = new HashMap < String , String > ( data ) ; newData . put ( key , value ) ; newData = Collections . unmodifiableMap ( newData ) ; } return new ExecutionContext ( securityContext , namespaceRegistry , propertyFactory , threadPools , binaryStore , newData , processId , decoder , encoder , stringFactory , binaryFactory , booleanFactory , dateFactory , decimalFactory , doubleFactory , longFactory , nameFactory , pathFactory , referenceFactory , weakReferenceFactory , simpleReferenceFactory , uriFactory , objectFactory , locale ) ; } | Create a new execution context that mirrors this context but that contains the supplied key - value pair in the new context s data . | 240 | 25 |
33,243 | public ExecutionContext with ( Locale locale ) { return new ExecutionContext ( securityContext , namespaceRegistry , propertyFactory , threadPools , binaryStore , data , processId , decoder , encoder , stringFactory , binaryFactory , booleanFactory , dateFactory , decimalFactory , doubleFactory , longFactory , nameFactory , pathFactory , referenceFactory , weakReferenceFactory , simpleReferenceFactory , uriFactory , objectFactory , locale ) ; } | Create a new execution context that mirrors this context but that contains the supplied locale . | 92 | 16 |
33,244 | protected void initializeDefaultNamespaces ( NamespaceRegistry namespaceRegistry ) { if ( namespaceRegistry == null ) return ; namespaceRegistry . register ( JcrLexicon . Namespace . PREFIX , JcrLexicon . Namespace . URI ) ; namespaceRegistry . register ( JcrMixLexicon . Namespace . PREFIX , JcrMixLexicon . Namespace . URI ) ; namespaceRegistry . register ( JcrNtLexicon . Namespace . PREFIX , JcrNtLexicon . Namespace . URI ) ; namespaceRegistry . register ( ModeShapeLexicon . Namespace . PREFIX , ModeShapeLexicon . Namespace . URI ) ; } | Method that initializes the default namespaces for namespace registries . | 145 | 13 |
33,245 | void setFeature ( XMLReader reader , String featureName , boolean value ) { try { if ( reader . getFeature ( featureName ) != value ) { reader . setFeature ( featureName , value ) ; } } catch ( SAXException e ) { getLogger ( ) . warn ( e , "Cannot set feature " + featureName ) ; } } | Sets the reader s named feature to the supplied value only if the feature is not already set to that value . This method does nothing if the feature is not known to the reader . | 76 | 37 |
33,246 | public long consumeLong ( ) throws ParsingException , IllegalStateException { if ( completed ) throwNoMoreContent ( ) ; // Get the value from the current token ... String value = currentToken ( ) . value ( ) ; try { long result = Long . parseLong ( value ) ; moveToNextToken ( ) ; return result ; } catch ( NumberFormatException e ) { Position position = currentToken ( ) . position ( ) ; String msg = CommonI18n . expectingValidLongAtLineAndColumn . text ( value , position . getLine ( ) , position . getColumn ( ) ) ; throw new ParsingException ( position , msg ) ; } } | Convert the value of this token to a long return it and move to the next token . | 139 | 19 |
33,247 | public String consume ( ) throws ParsingException , IllegalStateException { if ( completed ) throwNoMoreContent ( ) ; // Get the value from the current token ... String result = currentToken ( ) . value ( ) ; moveToNextToken ( ) ; return result ; } | Return the value of this token and move to the next token . | 57 | 13 |
33,248 | final Token currentToken ( ) throws IllegalStateException , NoSuchElementException { if ( currentToken == null ) { if ( completed ) { throw new NoSuchElementException ( CommonI18n . noMoreContent . text ( ) ) ; } throw new IllegalStateException ( CommonI18n . startMethodMustBeCalledBeforeConsumingOrMatching . text ( ) ) ; } assert currentToken != null ; return currentToken ; } | Get the current token . | 92 | 5 |
33,249 | final Token previousToken ( ) throws IllegalStateException , NoSuchElementException { if ( currentToken == null ) { if ( completed ) { if ( tokens . isEmpty ( ) ) { throw new NoSuchElementException ( CommonI18n . noMoreContent . text ( ) ) ; } return tokens . get ( tokens . size ( ) - 1 ) ; } throw new IllegalStateException ( CommonI18n . startMethodMustBeCalledBeforeConsumingOrMatching . text ( ) ) ; } if ( tokenIterator . previousIndex ( ) == 0 ) { throw new NoSuchElementException ( CommonI18n . noMoreContent . text ( ) ) ; } return tokens . get ( tokenIterator . previousIndex ( ) - 1 ) ; } | Get the previous token . This does not modify the state . | 158 | 12 |
33,250 | static String generateFragment ( String content , int indexOfProblem , int charactersToIncludeBeforeAndAfter , String highlightText ) { assert content != null ; assert indexOfProblem < content . length ( ) ; // Find the substring that immediately precedes the current position ... int beforeStart = Math . max ( 0 , indexOfProblem - charactersToIncludeBeforeAndAfter ) ; String before = content . substring ( beforeStart , indexOfProblem ) ; // Find the substring that immediately follows the current position ... int afterEnd = Math . min ( indexOfProblem + charactersToIncludeBeforeAndAfter , content . length ( ) ) ; String after = content . substring ( indexOfProblem , afterEnd ) ; return before + ( highlightText != null ? highlightText : "" ) + after ; } | Utility method to generate a highlighted fragment of a particular point in the stream . | 169 | 16 |
33,251 | protected PlanNode createCanonicalPlan ( QueryContext context , Query query ) { PlanNode plan = null ; // Process the source of the query ... Map < SelectorName , Table > usedSources = new HashMap < SelectorName , Table > ( ) ; plan = createPlanNode ( context , query . source ( ) , usedSources ) ; // Attach criteria (on top) ... Map < String , Subquery > subqueriesByVariableName = new HashMap < String , Subquery > ( ) ; plan = attachCriteria ( context , plan , query . constraint ( ) , query . columns ( ) , subqueriesByVariableName ) ; // Attach groupbys (on top) ... // plan = attachGrouping(context,plan,query.getGroupBy()); // Attach the project ... plan = attachProject ( context , plan , query . columns ( ) , usedSources ) ; // Attach duplicate removal ... if ( query . isDistinct ( ) ) { plan = attachDuplicateRemoval ( context , plan ) ; } // Process the orderings and limits ... plan = attachSorting ( context , plan , query . orderings ( ) ) ; plan = attachLimits ( context , plan , query . getLimits ( ) ) ; // Capture if we're limiting the results to 1 row and no offset and no sorting ... if ( query . getLimits ( ) . isLimitedToSingleRowWithNoOffset ( ) && query . orderings ( ) . isEmpty ( ) ) { context . getHints ( ) . isExistsQuery = true ; } // Now add in the subqueries as dependent joins, in reverse order ... plan = attachSubqueries ( context , plan , subqueriesByVariableName ) ; // Validate that all the parts of the query are resolvable ... validate ( context , query , usedSources ) ; // Now we need to validate all of the subqueries ... for ( Subquery subquery : Visitors . subqueries ( query , false ) ) { // Just do it by creating a plan, even though we aren't doing anything with these plans ... createPlan ( context , subquery . getQuery ( ) ) ; } return plan ; } | Create a canonical query plan for the given query . | 465 | 10 |
33,252 | protected void validate ( QueryContext context , QueryCommand query , Map < SelectorName , Table > usedSelectors ) { // // Resolve everything ... // Visitors.visitAll(query, new Validator(context, usedSelectors)); // Resolve everything (except subqueries) ... Validator validator = new Validator ( context , usedSelectors ) ; query . accept ( new WalkAllVisitor ( validator ) { @ Override protected void enqueue ( Visitable objectToBeVisited ) { if ( objectToBeVisited instanceof Subquery ) return ; super . enqueue ( objectToBeVisited ) ; } } ) ; } | Validate the supplied query . | 139 | 6 |
33,253 | protected PlanNode createCanonicalPlan ( QueryContext context , SetQuery query ) { // Process the left and right parts of the query ... PlanNode left = createPlan ( context , query . getLeft ( ) ) ; PlanNode right = createPlan ( context , query . getRight ( ) ) ; // Wrap in a set operation node ... PlanNode plan = new PlanNode ( Type . SET_OPERATION ) ; plan . addChildren ( left , right ) ; plan . setProperty ( Property . SET_OPERATION , query . operation ( ) ) ; plan . setProperty ( Property . SET_USE_ALL , query . isAll ( ) ) ; // Process the orderings and limits ... plan = attachSorting ( context , plan , query . orderings ( ) ) ; plan = attachLimits ( context , plan , query . getLimits ( ) ) ; // Capture if we're limiting the results to 1 row and no offset and no sorting ... if ( query . getLimits ( ) . isLimitedToSingleRowWithNoOffset ( ) && query . orderings ( ) . isEmpty ( ) ) { context . getHints ( ) . isExistsQuery = true ; } return plan ; } | Create a canonical query plan for the given set query . | 253 | 11 |
33,254 | protected PlanNode createPlanNode ( QueryContext context , Source source , Map < SelectorName , Table > usedSelectors ) { if ( source instanceof Selector ) { // No join required ... assert source instanceof AllNodes || source instanceof NamedSelector ; Selector selector = ( Selector ) source ; PlanNode node = new PlanNode ( Type . SOURCE ) ; if ( selector . hasAlias ( ) ) { node . addSelector ( selector . alias ( ) ) ; node . setProperty ( Property . SOURCE_ALIAS , selector . alias ( ) ) ; node . setProperty ( Property . SOURCE_NAME , selector . name ( ) ) ; } else { node . addSelector ( selector . name ( ) ) ; node . setProperty ( Property . SOURCE_NAME , selector . name ( ) ) ; } // Validate the source name and set the available columns ... NameFactory nameFactory = context . getExecutionContext ( ) . getValueFactories ( ) . getNameFactory ( ) ; // Always use the qualified form when searching for tables Table table = context . getSchemata ( ) . getTable ( selector . name ( ) . qualifiedForm ( nameFactory ) ) ; if ( table != null ) { if ( table instanceof View ) context . getHints ( ) . hasView = true ; if ( usedSelectors . put ( selector . aliasOrName ( ) , table ) != null ) { // There was already a table with this alias or name ... I18n msg = GraphI18n . selectorNamesMayNotBeUsedMoreThanOnce ; context . getProblems ( ) . addError ( msg , selector . aliasOrName ( ) . getString ( ) ) ; } node . setProperty ( Property . SOURCE_COLUMNS , table . getColumns ( ) ) ; } else { context . getProblems ( ) . addError ( GraphI18n . tableDoesNotExist , selector . name ( ) ) ; } return node ; } if ( source instanceof Join ) { Join join = ( Join ) source ; JoinCondition joinCondition = join . getJoinCondition ( ) ; // Set up new join node corresponding to this join predicate PlanNode node = new PlanNode ( Type . JOIN ) ; node . setProperty ( Property . JOIN_TYPE , join . type ( ) ) ; node . setProperty ( Property . JOIN_ALGORITHM , JoinAlgorithm . 
NESTED_LOOP ) ; node . setProperty ( Property . JOIN_CONDITION , joinCondition ) ; context . getHints ( ) . hasJoin = true ; if ( join . type ( ) == JoinType . LEFT_OUTER ) { context . getHints ( ) . hasOptionalJoin = true ; } // Handle each child Source [ ] clauses = new Source [ ] { join . getLeft ( ) , join . getRight ( ) } ; for ( int i = 0 ; i < 2 ; i ++ ) { PlanNode sourceNode = createPlanNode ( context , clauses [ i ] , usedSelectors ) ; node . addLastChild ( sourceNode ) ; } // Add selectors to the joinNode for ( PlanNode child : node . getChildren ( ) ) { node . addSelectors ( child . getSelectors ( ) ) ; } return node ; } // should not get here; if we do, somebody added a new type of source assert false ; return null ; } | Create a JOIN or SOURCE node that contain the source information . | 733 | 14 |
33,255 | protected PlanNode attachCriteria ( final QueryContext context , PlanNode plan , Constraint constraint , List < ? extends Column > columns , Map < String , Subquery > subqueriesByVariableName ) { if ( constraint == null ) return plan ; context . getHints ( ) . hasCriteria = true ; // Extract the list of Constraint objects that all must be satisfied ... LinkedList < Constraint > andableConstraints = new LinkedList < Constraint > ( ) ; separateAndConstraints ( constraint , andableConstraints ) ; assert ! andableConstraints . isEmpty ( ) ; // Build up the map of aliases for the properties used in the criteria ... Map < String , String > propertyNameByAlias = new HashMap < String , String > ( ) ; for ( Column column : columns ) { if ( column . getColumnName ( ) != null && ! column . getColumnName ( ) . equals ( column . getPropertyName ( ) ) ) { propertyNameByAlias . put ( column . getColumnName ( ) , column . getPropertyName ( ) ) ; } } // For each of these constraints, create a criteria (SELECT) node above the supplied (JOIN or SOURCE) node. // Do this in reverse order so that the top-most SELECT node corresponds to the first constraint. while ( ! andableConstraints . isEmpty ( ) ) { Constraint criteria = andableConstraints . removeLast ( ) ; // Replace any subqueries with bind variables ... criteria = PlanUtil . replaceSubqueriesWithBindVariables ( context , criteria , subqueriesByVariableName ) ; // Replace any use of aliases with the actual properties ... criteria = PlanUtil . replaceAliasesWithProperties ( context , criteria , propertyNameByAlias ) ; // Create the select node ... PlanNode criteriaNode = new PlanNode ( Type . SELECT ) ; criteriaNode . setProperty ( Property . SELECT_CRITERIA , criteria ) ; // Add selectors to the criteria node ... criteriaNode . addSelectors ( Visitors . getSelectorsReferencedBy ( criteria ) ) ; // Is there at least one full-text search or subquery ... Visitors . visitAll ( criteria , new Visitors . 
AbstractVisitor ( ) { @ Override public void visit ( FullTextSearch obj ) { context . getHints ( ) . hasFullTextSearch = true ; } } ) ; criteriaNode . addFirstChild ( plan ) ; plan = criteriaNode ; } if ( ! subqueriesByVariableName . isEmpty ( ) ) { context . getHints ( ) . hasSubqueries = true ; } return plan ; } | Attach all criteria above the join nodes . The optimizer will push these criteria down to the appropriate source . | 570 | 21 |
33,256 | protected PlanNode attachLimits ( QueryContext context , PlanNode plan , Limit limit ) { if ( limit . isUnlimited ( ) ) return plan ; context . getHints ( ) . hasLimit = true ; PlanNode limitNode = new PlanNode ( Type . LIMIT ) ; boolean attach = false ; if ( limit . getOffset ( ) != 0 ) { limitNode . setProperty ( Property . LIMIT_OFFSET , limit . getOffset ( ) ) ; attach = true ; } if ( ! limit . isUnlimited ( ) ) { limitNode . setProperty ( Property . LIMIT_COUNT , limit . getRowLimit ( ) ) ; attach = true ; } if ( attach ) { limitNode . addLastChild ( plan ) ; plan = limitNode ; } return plan ; } | Attach a LIMIT node at the top of the plan tree . | 169 | 13 |
33,257 | protected PlanNode attachProject ( QueryContext context , PlanNode plan , List < ? extends Column > columns , Map < SelectorName , Table > selectors ) { PlanNode projectNode = new PlanNode ( Type . PROJECT ) ; List < Column > newColumns = new LinkedList < Column > ( ) ; List < String > newTypes = new ArrayList < String > ( ) ; final boolean multipleSelectors = selectors . size ( ) > 1 ; final boolean qualifyExpandedColumns = context . getHints ( ) . qualifyExpandedColumnNames ; if ( columns == null || columns . isEmpty ( ) ) { // SELECT *, so find all of the columns that are available from all the sources ... for ( Map . Entry < SelectorName , Table > entry : selectors . entrySet ( ) ) { SelectorName tableName = entry . getKey ( ) ; Table table = entry . getValue ( ) ; // Add the selector that is being used ... projectNode . addSelector ( tableName ) ; // Compute the columns from this selector ... allColumnsFor ( table , tableName , newColumns , newTypes , qualifyExpandedColumns ) ; } } else { // Add the selector used by each column ... for ( Column column : columns ) { SelectorName tableName = column . selectorName ( ) ; // Add the selector that is being used ... projectNode . addSelector ( tableName ) ; // Verify that each column is available in the appropriate source ... Table table = selectors . get ( tableName ) ; if ( table == null ) { context . getProblems ( ) . addError ( GraphI18n . tableDoesNotExist , tableName ) ; } else { // Make sure that the column is in the table ... String columnName = column . getPropertyName ( ) ; if ( "*" . equals ( columnName ) || columnName == null ) { // This is a 'SELECT *' on this source, but this source is one of multiple sources ... // See https://issues.apache.org/jira/browse/JCR-3313; TCK test expects 'true' for last param allColumnsFor ( table , tableName , newColumns , newTypes , qualifyExpandedColumns ) ; } else { // This is a particular column, so add it ... if ( ! newColumns . 
contains ( column ) ) { if ( multipleSelectors && column . getPropertyName ( ) . equals ( column . getColumnName ( ) ) ) { column = column . withColumnName ( column . getSelectorName ( ) + "." + column . getColumnName ( ) ) ; } newColumns . add ( column ) ; org . modeshape . jcr . query . validate . Schemata . Column schemaColumn = table . getColumn ( columnName ) ; if ( schemaColumn != null ) { newTypes . add ( schemaColumn . getPropertyTypeName ( ) ) ; } else { newTypes . add ( context . getTypeSystem ( ) . getDefaultType ( ) ) ; } } } boolean validateColumnExistance = context . getHints ( ) . validateColumnExistance && ! table . hasExtraColumns ( ) ; boolean columnNameIsWildcard = columnName == null || "*" . equals ( columnName ) ; if ( ! columnNameIsWildcard && table . getColumn ( columnName ) == null && validateColumnExistance ) { context . getProblems ( ) . addError ( GraphI18n . columnDoesNotExistOnTable , columnName , tableName ) ; } } } } projectNode . setProperty ( Property . PROJECT_COLUMNS , newColumns ) ; projectNode . setProperty ( Property . PROJECT_COLUMN_TYPES , newTypes ) ; projectNode . addLastChild ( plan ) ; return projectNode ; } | Attach a PROJECT node at the top of the plan tree . | 825 | 13 |
33,258 | protected PlanNode attachSubqueries ( QueryContext context , PlanNode plan , Map < String , Subquery > subqueriesByVariableName ) { // Order the variable names in reverse order ... List < String > varNames = new ArrayList < String > ( subqueriesByVariableName . keySet ( ) ) ; Collections . sort ( varNames ) ; Collections . reverse ( varNames ) ; for ( String varName : varNames ) { Subquery subquery = subqueriesByVariableName . get ( varName ) ; // Plan out the subquery ... PlanNode subqueryNode = createPlan ( context , subquery . getQuery ( ) ) ; setSubqueryVariableName ( subqueryNode , varName ) ; // Create a DEPENDENT_QUERY node, with the subquery on the LHS (so it is executed first) ... PlanNode depQuery = new PlanNode ( Type . DEPENDENT_QUERY ) ; depQuery . addChildren ( subqueryNode , plan ) ; depQuery . addSelectors ( subqueryNode . getSelectors ( ) ) ; depQuery . addSelectors ( plan . getSelectors ( ) ) ; plan = depQuery ; } return plan ; } | Attach plan nodes for each subquery resulting in the first subquery at the top of the plan tree . | 257 | 21 |
33,259 | public synchronized Repository getRepository ( ) throws ResourceException { if ( this . repository == null ) { LOGGER . debug ( "Deploying repository URL [{0}]" , repositoryURL ) ; this . repository = deployRepository ( repositoryURL ) ; } return this . repository ; } | Provides access to the configured repository . | 61 | 8 |
33,260 | @ Override public Object createConnectionFactory ( ConnectionManager cxManager ) throws ResourceException { JcrRepositoryHandle handle = new JcrRepositoryHandle ( this , cxManager ) ; return handle ; } | Creates a Connection Factory instance . | 42 | 7 |
33,261 | protected static void validate ( IndexDefinition defn , Problems problems ) { if ( ! defn . hasSingleColumn ( ) ) { problems . addError ( JcrI18n . localIndexProviderDoesNotSupportMultiColumnIndexes , defn . getName ( ) , defn . getProviderName ( ) ) ; } switch ( defn . getKind ( ) ) { case TEXT : // This is not valid ... problems . addError ( JcrI18n . localIndexProviderDoesNotSupportTextIndexes , defn . getName ( ) , defn . getProviderName ( ) ) ; } } | Validate whether the index definition is acceptable for this provider . | 129 | 12 |
33,262 | public boolean add ( T entry ) { assert entry != null ; if ( ! addEntries . get ( ) ) return false ; try { producerLock . lock ( ) ; long position = cursor . claim ( ) ; // blocks; if this fails, we will not have successfully claimed and nothing to do ... int index = ( int ) ( position & mask ) ; buffer [ index ] = entry ; return cursor . publish ( position ) ; } finally { producerLock . unlock ( ) ; } } | Add to this buffer a single entry . This method blocks if there is no room in the ring buffer providing back pressure on the caller in such cases . Note that if this method blocks for any length of time that means at least one consumer has yet to process all of the entries that are currently in the ring buffer . In such cases consider whether a larger ring buffer is warranted . | 102 | 75 |
33,263 | public boolean add ( T [ ] entries ) { assert entries != null ; if ( entries . length == 0 || ! addEntries . get ( ) ) return false ; try { producerLock . lock ( ) ; long position = cursor . claim ( entries . length ) ; // blocks for ( int i = 0 ; i != entries . length ; ++ i ) { int index = ( int ) ( position & mask ) ; buffer [ index ] = entries [ i ] ; } return cursor . publish ( position ) ; } finally { producerLock . unlock ( ) ; } } | Add to this buffer multiple entries . This method blocks until it is added . | 118 | 15 |
33,264 | public boolean remove ( C consumer ) { if ( consumer != null ) { // Iterate through the map to find the runner that owns this consumer ... ConsumerRunner match = null ; for ( ConsumerRunner runner : consumers ) { if ( runner . getConsumer ( ) . equals ( consumer ) ) { match = runner ; break ; } } // Try to remove the matching runner (if we found one) from our list ... if ( match != null ) { // Tell the thread to stop and wait for it, after which it will have been removed from our map ... match . close ( ) ; return true ; } } // We either didn't find it, or we found it but something else remove it while we searched ... return false ; } | Remove the supplied consumer and block until it stops running and is closed and removed from this buffer . The consumer is removed at the earliest convenient point and will stop seeing entries as soon as it is removed . | 150 | 42 |
33,265 | public void shutdown ( ) { // Prevent new entries from being added ... this . addEntries . set ( false ) ; // Mark the cursor as being finished; this will stop all consumers from waiting for a batch ... this . cursor . complete ( ) ; // Each of the consumer threads will complete the batch they're working on, but will then terminate ... // Stop the garbage collection thread (if running) ... if ( this . gcConsumer != null ) this . gcConsumer . close ( ) ; // Now, block until all the runners have completed ... for ( ConsumerRunner runner : new HashSet <> ( consumers ) ) { // use a copy of the runners; they're removed when they close runner . waitForCompletion ( ) ; } assert consumers . isEmpty ( ) ; } | Shutdown this ring buffer by preventing any further entries but allowing all existing entries to be processed by all consumers . | 163 | 22 |
33,266 | public void put ( String name , Object value ) { if ( value instanceof EsRequest ) { document . setDocument ( name , ( ( EsRequest ) value ) . document ) ; } else { document . set ( name , value ) ; } } | Adds single property value . | 52 | 5 |
33,267 | public void put ( String name , Object [ ] values ) { if ( values instanceof EsRequest [ ] ) { Object [ ] docs = new Object [ values . length ] ; for ( int i = 0 ; i < docs . length ; i ++ ) { docs [ i ] = ( ( EsRequest ) values [ i ] ) . document ; } document . setArray ( name , docs ) ; } else { document . setArray ( name , values ) ; } } | Adds multivalued property value . | 98 | 7 |
33,268 | public static boolean isValidName ( String name ) { if ( name == null || name . length ( ) == 0 ) return false ; CharacterIterator iter = new StringCharacterIterator ( name ) ; char c = iter . first ( ) ; if ( ! isValidNameStart ( c ) ) return false ; while ( c != CharacterIterator . DONE ) { if ( ! isValidName ( c ) ) return false ; c = iter . next ( ) ; } return true ; } | Determine if the supplied name is a valid XML Name . | 100 | 13 |
33,269 | public static boolean isValidNcName ( String name ) { if ( name == null || name . length ( ) == 0 ) return false ; CharacterIterator iter = new StringCharacterIterator ( name ) ; char c = iter . first ( ) ; if ( ! isValidNcNameStart ( c ) ) return false ; while ( c != CharacterIterator . DONE ) { if ( ! isValidNcName ( c ) ) return false ; c = iter . next ( ) ; } return true ; } | Determine if the supplied name is a valid XML NCName . | 106 | 14 |
33,270 | protected final QueryEngine queryEngine ( ) { if ( queryEngine == null ) { try { engineInitLock . lock ( ) ; if ( queryEngine == null ) { QueryEngineBuilder builder = null ; if ( ! repoConfig . getIndexProviders ( ) . isEmpty ( ) ) { // There is at least one index provider ... builder = IndexQueryEngine . builder ( ) ; logger . debug ( "Queries with indexes are enabled for the '{0}' repository. Executing queries may require scanning the repository contents when the query cannot use the defined indexes." , repoConfig . getName ( ) ) ; } else { // There are no indexes ... builder = ScanningQueryEngine . builder ( ) ; logger . debug ( "Queries with no indexes are enabled for the '{0}' repository. Executing queries will always scan the repository contents." , repoConfig . getName ( ) ) ; } queryEngine = builder . using ( repoConfig , indexManager , runningState . context ( ) ) . build ( ) ; } } finally { engineInitLock . unlock ( ) ; } } return queryEngine ; } | Obtain the query engine which is created lazily and in a thread - safe manner . | 234 | 18 |
33,271 | protected void reindexIfNeeded ( boolean async , final boolean includeSystemContent ) { final ScanningRequest request = toBeScanned . drain ( ) ; if ( ! request . isEmpty ( ) ) { final RepositoryCache repoCache = runningState . repositoryCache ( ) ; scan ( async , ( ) -> { // Scan each of the workspace-path pairs ... ScanOperation op = ( workspaceName , path , writer ) -> { NodeCache workspaceCache = repoCache . getWorkspaceCache ( workspaceName ) ; if ( workspaceCache != null ) { // The workspace is still valid ... CachedNode node = workspaceCache . getNode ( workspaceCache . getRootKey ( ) ) ; if ( ! path . isRoot ( ) ) { for ( Segment segment : path ) { ChildReference child = node . getChildReferences ( workspaceCache ) . getChild ( segment ) ; if ( child == null ) { // The child no longer exists, so ignore this pair ... node = null ; break ; } node = workspaceCache . getNode ( child ) ; if ( node == null ) break ; } } if ( node != null ) { if ( logger . isDebugEnabled ( ) ) { logger . debug ( "Performing full reindexing for repository '{0}' and workspace '{1}'" , repoCache . getName ( ) , workspaceName ) ; } // If we find a node to start at, then scan the content ... // in certain cases (e.g. at startup) we have to index the system content (if it applies to // any of the indexes) boolean scanSystemContent = includeSystemContent || repoCache . getSystemWorkspaceName ( ) . equals ( workspaceName ) ; updateIndexesStatus ( workspaceName , IndexManager . IndexStatus . ENABLED , IndexManager . IndexStatus . REINDEXING ) ; if ( reindexContent ( workspaceName , workspaceCache , node , Integer . MAX_VALUE , scanSystemContent , writer ) ) { commitChanges ( workspaceName ) ; } updateIndexesStatus ( workspaceName , IndexManager . IndexStatus . REINDEXING , IndexManager . IndexStatus . ENABLED ) ; } } } ; request . 
onEachPathInWorkspace ( op ) ; return null ; } ) ; } } | Reindex the repository only if there is at least one provider that required scanning and reindexing . | 481 | 20 |
33,272 | protected void cleanAndReindex ( boolean async ) { final IndexWriter writer = getIndexWriter ( ) ; scan ( async , getIndexWriter ( ) , new Callable < Void > ( ) { @ SuppressWarnings ( "synthetic-access" ) @ Override public Void call ( ) throws Exception { writer . clearAllIndexes ( ) ; reindexContent ( true , writer ) ; return null ; } } ) ; } | Clean all indexes and reindex all content . | 93 | 9 |
33,273 | private void reindexContent ( boolean includeSystemContent , IndexWriter indexes ) { if ( indexes . canBeSkipped ( ) ) return ; // The node type schemata changes every time a node type is (un)registered, so get the snapshot that we'll use throughout RepositoryCache repoCache = runningState . repositoryCache ( ) ; logger . debug ( JcrI18n . reindexAll . text ( runningState . name ( ) ) ) ; if ( includeSystemContent ) { String systemWorkspaceName = repoCache . getSystemWorkspaceName ( ) ; updateIndexesStatus ( systemWorkspaceName , IndexManager . IndexStatus . ENABLED , IndexManager . IndexStatus . REINDEXING ) ; NodeCache systemWorkspaceCache = repoCache . getWorkspaceCache ( systemWorkspaceName ) ; CachedNode rootNode = systemWorkspaceCache . getNode ( repoCache . getSystemKey ( ) ) ; // Index the system content ... logger . debug ( "Starting reindex of system content in '{0}' repository." , runningState . name ( ) ) ; if ( reindexSystemContent ( rootNode , Integer . MAX_VALUE , indexes ) ) { commitChanges ( systemWorkspaceName ) ; } logger . debug ( "Completed reindex of system content in '{0}' repository." , runningState . name ( ) ) ; updateIndexesStatus ( systemWorkspaceName , IndexManager . IndexStatus . REINDEXING , IndexManager . IndexStatus . ENABLED ) ; } // Index the non-system workspaces ... for ( String workspaceName : repoCache . getWorkspaceNames ( ) ) { // change the status of the indexes to reindexing updateIndexesStatus ( workspaceName , IndexManager . IndexStatus . ENABLED , IndexManager . IndexStatus . REINDEXING ) ; NodeCache workspaceCache = repoCache . getWorkspaceCache ( workspaceName ) ; CachedNode rootNode = workspaceCache . getNode ( workspaceCache . getRootKey ( ) ) ; logger . debug ( "Starting reindex of workspace '{0}' content in '{1}' repository." , runningState . name ( ) , workspaceName ) ; if ( reindexContent ( workspaceName , workspaceCache , rootNode , Integer . 
MAX_VALUE , false , indexes ) ) { commitChanges ( workspaceName ) ; } logger . debug ( "Completed reindex of workspace '{0}' content in '{1}' repository." , runningState . name ( ) , workspaceName ) ; updateIndexesStatus ( workspaceName , IndexManager . IndexStatus . REINDEXING , IndexManager . IndexStatus . ENABLED ) ; } } | Crawl and index all of the repository content . | 571 | 10 |
33,274 | public void reindexContent ( JcrWorkspace workspace , Path path , int depth ) { if ( getIndexWriter ( ) . canBeSkipped ( ) ) { // There's no indexes that require updating ... return ; } CheckArg . isPositive ( depth , "depth" ) ; JcrSession session = workspace . getSession ( ) ; NodeCache cache = session . cache ( ) . getWorkspace ( ) ; String workspaceName = workspace . getName ( ) ; // Look for the node ... CachedNode node = cache . getNode ( cache . getRootKey ( ) ) ; for ( Segment segment : path ) { // Look for the child by name ... ChildReference ref = node . getChildReferences ( cache ) . getChild ( segment ) ; if ( ref == null ) return ; node = cache . getNode ( ref ) ; } updateIndexesStatus ( workspaceName , IndexManager . IndexStatus . ENABLED , IndexManager . IndexStatus . REINDEXING ) ; // If the node is in the system workspace ... RepositoryCache repoCache = runningState . repositoryCache ( ) ; String systemWorkspaceName = repoCache . getSystemWorkspaceName ( ) ; String systemWorkspaceKey = repoCache . getSystemWorkspaceKey ( ) ; if ( node . getKey ( ) . getWorkspaceKey ( ) . equals ( systemWorkspaceKey ) ) { if ( reindexSystemContent ( node , depth , getIndexWriter ( ) ) ) { commitChanges ( systemWorkspaceName ) ; } } else { // It's just a regular node in the workspace ... if ( reindexContent ( workspaceName , cache , node , depth , path . isRoot ( ) , getIndexWriter ( ) ) ) { commitChanges ( workspaceName ) ; } } updateIndexesStatus ( workspaceName , IndexManager . IndexStatus . REINDEXING , IndexManager . IndexStatus . ENABLED ) ; } | Crawl and index the content starting at the supplied path in the named workspace to the designated depth . | 408 | 20 |
33,275 | public Future < Boolean > reindexContentAsync ( final JcrWorkspace workspace ) { return indexingExecutorService . submit ( ( ) -> { reindexContent ( workspace ) ; return Boolean . TRUE ; } ) ; } | Asynchronously crawl and index the content in the named workspace . | 47 | 13 |
33,276 | public Future < Boolean > reindexContentAsync ( final JcrWorkspace workspace , final Path path , final int depth ) { return indexingExecutorService . submit ( ( ) -> { reindexContent ( workspace , path , depth ) ; return Boolean . TRUE ; } ) ; } | Asynchronously crawl and index the content starting at the supplied path in the named workspace to the designated depth . | 59 | 22 |
33,277 | public void register ( Map < String , String > namespaceUrisByPrefix ) { if ( namespaceUrisByPrefix == null || namespaceUrisByPrefix . isEmpty ( ) ) return ; final Lock lock = this . namespacesLock . writeLock ( ) ; try { lock . lock ( ) ; SystemContent systemContent = systemContent ( false ) ; systemContent . registerNamespaces ( namespaceUrisByPrefix ) ; systemContent . save ( ) ; for ( Map . Entry < String , String > entry : namespaceUrisByPrefix . entrySet ( ) ) { String prefix = entry . getKey ( ) . trim ( ) ; String uri = entry . getValue ( ) . trim ( ) ; if ( prefix . length ( ) == 0 ) continue ; this . cache . register ( prefix , uri ) ; } } finally { lock . unlock ( ) ; } } | Register a set of namespaces . | 189 | 7 |
33,278 | public static ReferrerCounts create ( Map < NodeKey , Integer > strongCountsByReferrerKey , Map < NodeKey , Integer > weakCountsByReferrerKey ) { if ( strongCountsByReferrerKey == null ) strongCountsByReferrerKey = EMPTY_COUNTS ; if ( weakCountsByReferrerKey == null ) weakCountsByReferrerKey = EMPTY_COUNTS ; if ( strongCountsByReferrerKey . isEmpty ( ) && weakCountsByReferrerKey . isEmpty ( ) ) return null ; return new ReferrerCounts ( strongCountsByReferrerKey , weakCountsByReferrerKey ) ; } | Create a new instance of the snapshot . | 149 | 8 |
33,279 | protected void process ( XSDSchema schema , String encoding , long contentSize , Node rootNode ) throws Exception { assert schema != null ; logger . debug ( "Target namespace: '{0}'" , schema . getTargetNamespace ( ) ) ; rootNode . setProperty ( SrampLexicon . CONTENT_TYPE , MimeTypeConstants . APPLICATION_XML ) ; if ( encoding != null ) { rootNode . setProperty ( SrampLexicon . CONTENT_ENCODING , encoding ) ; } rootNode . setProperty ( SrampLexicon . CONTENT_SIZE , contentSize ) ; // Parse the annotations first to aggregate them all into a single 'sramp:description' property ... @ SuppressWarnings ( "unchecked" ) List < XSDAnnotation > annotations = schema . getAnnotations ( ) ; processAnnotations ( annotations , rootNode ) ; processNonSchemaAttributes ( schema , rootNode , Collections . < String > emptySet ( ) ) ; // Parse the objects ... for ( EObject obj : schema . eContents ( ) ) { if ( obj instanceof XSDSimpleTypeDefinition ) { processSimpleTypeDefinition ( ( XSDSimpleTypeDefinition ) obj , rootNode ) ; } else if ( obj instanceof XSDComplexTypeDefinition ) { processComplexTypeDefinition ( ( XSDComplexTypeDefinition ) obj , rootNode ) ; } else if ( obj instanceof XSDElementDeclaration ) { processElementDeclaration ( ( XSDElementDeclaration ) obj , rootNode ) ; } else if ( obj instanceof XSDAttributeDeclaration ) { processAttributeDeclaration ( ( XSDAttributeDeclaration ) obj , rootNode , false ) ; } else if ( obj instanceof XSDImport ) { processImport ( ( XSDImport ) obj , rootNode ) ; } else if ( obj instanceof XSDInclude ) { processInclude ( ( XSDInclude ) obj , rootNode ) ; } else if ( obj instanceof XSDRedefine ) { processRedefine ( ( XSDRedefine ) obj , rootNode ) ; } else if ( obj instanceof XSDAttributeGroupDefinition ) { processAttributeGroupDefinition ( ( XSDAttributeGroupDefinition ) obj , rootNode ) ; } else if ( obj instanceof XSDAnnotation ) { // already processed above ... } } // Resolve any outstanding, unresolved references ... 
resolveReferences ( ) ; } | Read an XSDSchema instance and create the node hierarchy under the given root node . | 535 | 19 |
33,280 | public void externalNodeRemoved ( String externalNodeKey ) { if ( this . snapshot . get ( ) . containsProjectionForExternalNode ( externalNodeKey ) ) { // the external node was the root of a projection, so we need to remove that projection synchronized ( this ) { Snapshot current = this . snapshot . get ( ) ; Snapshot updated = current . withoutProjection ( externalNodeKey ) ; if ( current != updated ) { this . snapshot . compareAndSet ( current , updated ) ; } } } } | Signals that an external node with the given key has been removed . | 109 | 14 |
33,281 | public void internalNodeRemoved ( String internalNodeKey ) { if ( this . snapshot . get ( ) . containsProjectionForInternalNode ( internalNodeKey ) ) { // identify all the projections which from this internal (aka. federated node) and remove them synchronized ( this ) { Snapshot current = this . snapshot . get ( ) ; Snapshot updated = current ; for ( Projection projection : current . getProjections ( ) ) { if ( internalNodeKey . equalsIgnoreCase ( projection . getProjectedNodeKey ( ) ) ) { String externalNodeKey = projection . getExternalNodeKey ( ) ; removeStoredProjection ( externalNodeKey ) ; updated = updated . withoutProjection ( externalNodeKey ) ; } } if ( current != updated ) { this . snapshot . compareAndSet ( current , updated ) ; } } } } | Signals that an internal node with the given key has been removed . | 179 | 14 |
33,282 | public Connector getConnectorForSourceName ( String sourceName ) { assert sourceName != null ; return this . snapshot . get ( ) . getConnectorWithSourceKey ( NodeKey . keyForSourceName ( sourceName ) ) ; } | Returns a connector which was registered for the given source name . | 49 | 12 |
33,283 | public DocumentTranslator getDocumentTranslator ( ) { if ( translator == null ) { // We don't want the connectors to use a translator that converts large strings to binary values that are // managed within ModeShape's binary store. Instead, all of the connector-created string property values // should be kept as strings ... translator = repository . repositoryCache ( ) . getDocumentTranslator ( ) . withLargeStringSize ( Long . MAX_VALUE ) ; } return translator ; } | Returns the repository s document translator . | 98 | 7 |
33,284 | void start ( ScheduledExecutorService service ) { if ( rollupFuture . get ( ) != null ) { // already started ... return ; } // Pre-populate the metrics (overwriting any existing history object) ... durations . put ( DurationMetric . QUERY_EXECUTION_TIME , new DurationHistory ( TimeUnit . MILLISECONDS , MAXIMUM_LONG_RUNNING_QUERY_COUNT ) ) ; durations . put ( DurationMetric . SEQUENCER_EXECUTION_TIME , new DurationHistory ( TimeUnit . MILLISECONDS , MAXIMUM_LONG_RUNNING_SEQUENCING_COUNT ) ) ; durations . put ( DurationMetric . SESSION_LIFETIME , new DurationHistory ( TimeUnit . MILLISECONDS , MAXIMUM_LONG_RUNNING_SESSION_COUNT ) ) ; for ( ValueMetric metric : EnumSet . allOf ( ValueMetric . class ) ) { boolean resetUponRollup = ! metric . isContinuous ( ) ; values . put ( metric , new ValueHistory ( resetUponRollup ) ) ; } // Initialize the start times in a threadsafe manner ... DateTime now = timeFactory . create ( ) ; this . weeksStartTime . compareAndSet ( null , now ) ; this . daysStartTime . compareAndSet ( null , now ) ; this . hoursStartTime . compareAndSet ( null , now ) ; this . minutesStartTime . compareAndSet ( null , now ) ; this . secondsStartTime . compareAndSet ( null , now ) ; rollup ( ) ; // Then schedule the rollup to be done at a fixed rate ... this . rollupFuture . set ( service . scheduleAtFixedRate ( new Runnable ( ) { @ SuppressWarnings ( "synthetic-access" ) @ Override public void run ( ) { rollup ( ) ; } } , CAPTURE_DELAY_IN_SECONDS , CAPTURE_INTERVAL_IN_SECONDS , TimeUnit . SECONDS ) ) ; } | Start recording statistics . | 463 | 4 |
33,285 | void stop ( ) { ScheduledFuture < ? > future = this . rollupFuture . getAndSet ( null ) ; if ( future != null && ! future . isDone ( ) && ! future . isCancelled ( ) ) { // Stop running the scheduled job, letting any currently running rollup finish ... future . cancel ( false ) ; } } | Stop recording statistics . | 75 | 4 |
/**
 * Method called once every second by the scheduled job. Rolls every history forward and,
 * for each time window that changed, resets that window's start time to "now".
 *
 * NOTE(review): 'largest' keeps only the LAST history's rollup result — this is correct
 * only if all histories roll up in lockstep (they are created and rolled together, so each
 * rollup() call presumably returns the same window for all of them); confirm that invariant.
 */
@SuppressWarnings( "fallthrough" )
private void rollup() {
    DateTime now = timeFactory.create();
    Window largest = null;
    for (DurationHistory history : durations.values()) {
        largest = history.rollup();
    }
    for (ValueHistory history : values.values()) {
        largest = history.rollup();
    }
    // No histories registered at all ...
    if (largest == null) return;
    // Note that we do expect to fall through, as the 'largest' represents the largest window that was changed,
    // while all smaller windows were also changed ...
    switch (largest) {
        case PREVIOUS_52_WEEKS:
            this.weeksStartTime.set(now);
            // fall through!!
        case PREVIOUS_7_DAYS:
            this.daysStartTime.set(now);
            // fall through!!
        case PREVIOUS_24_HOURS:
            this.hoursStartTime.set(now);
            // fall through!!
        case PREVIOUS_60_MINUTES:
            this.minutesStartTime.set(now);
            // fall through!!
        case PREVIOUS_60_SECONDS:
            this.secondsStartTime.set(now);
    }
}
33,287 | public void increment ( ValueMetric metric , long incrementalValue ) { assert metric != null ; ValueHistory history = values . get ( metric ) ; if ( history != null ) history . recordIncrement ( incrementalValue ) ; } | Record an incremental change to a value called by the code that knows when and how the metric changes . | 47 | 20 |
33,288 | public void set ( ValueMetric metric , long value ) { assert metric != null ; ValueHistory history = values . get ( metric ) ; if ( history != null ) history . recordNewValue ( value ) ; } | Record a specific value for a metric called by the code that knows when and how the metric changes . | 45 | 20 |
33,289 | void recordDuration ( DurationMetric metric , long duration , TimeUnit timeUnit , Map < String , String > payload ) { assert metric != null ; DurationHistory history = durations . get ( metric ) ; if ( history != null ) history . recordDuration ( duration , timeUnit , payload ) ; } | Record a new duration for the given metric called by the code that knows about the duration . | 63 | 18 |
/**
 * Utility method to construct the statistics for a series of values: count, min, max,
 * mean, and a dispersion term, in two passes over the samples.
 *
 * NOTE(review): despite its name, 'varianceSquared' accumulates the UN-normalized sum of
 * squared deviations; the value passed to the constructor is sqrt of that sum WITHOUT
 * dividing by length, while the composite overload passes an un-rooted value in the same
 * position — confirm which form StatisticsImpl expects; the two overloads cannot both be right.
 *
 * @param values the raw samples; may be empty but not null
 * @return the computed statistics; never null
 */
public static Statistics statisticsFor( long[] values ) {
    int length = values.length;
    if (length == 0) return EMPTY_STATISTICS;
    if (length == 1) return statisticsFor(values[0]);
    long total = 0L;
    long max = Long.MIN_VALUE;
    long min = Long.MAX_VALUE;
    // First pass: sum, min and max ...
    for (long value : values) {
        total += value;
        max = Math.max(max, value);
        min = Math.min(min, value);
    }
    double mean = ((double)total) / length;
    double varianceSquared = 0.0d; // running sum of squared deviations from the mean
    double distance = 0.0d;
    // Second pass: squared deviations from the mean ...
    for (long value : values) {
        distance = mean - value;
        varianceSquared = varianceSquared + (distance * distance);
    }
    return new StatisticsImpl(length, min, max, mean, Math.sqrt(varianceSquared));
}
/**
 * Utility method to construct the composite statistics for a series of sampled statistics,
 * combining per-sample counts, extrema, means and variances into a single result. Null
 * entries in the array are skipped.
 *
 * NOTE(review): if every entry is null (with length &gt; 1), 'count' stays 0 and the mean
 * becomes 0/0 = NaN — confirm callers never pass an all-null array.
 * NOTE(review): the accumulated 'variance' is the count-weighted sum of per-sample
 * (variance + meanDelta^2) terms and is never divided by the total count before being
 * passed to the constructor — verify against StatisticsImpl's expected contract (compare
 * with the long[] overload, which passes a square-rooted value in the same position).
 *
 * @param statistics the per-window statistics to combine; may contain nulls
 * @return the combined statistics; never null
 */
public static Statistics statisticsFor( Statistics[] statistics ) {
    int length = statistics.length;
    if (length == 0) return EMPTY_STATISTICS;
    if (length == 1) return statistics[0] != null ? statistics[0] : EMPTY_STATISTICS;
    int count = 0;
    long max = Long.MIN_VALUE;
    long min = Long.MAX_VALUE;
    double mean = 0.0d;
    double variance = 0.0d;
    // Compute the min, max, and mean ...
    // (mean is accumulated as a count-weighted sum, then normalized below)
    for (Statistics stat : statistics) {
        if (stat == null) continue;
        count += stat.getCount();
        max = Math.max(max, stat.getMaximum());
        min = Math.min(min, stat.getMinimum());
        mean = mean + (stat.getMean() * stat.getCount());
    }
    mean = mean / count;
    // Compute the new variance using the new mean ...
    // (standard pooled-variance combination: each sample contributes n_i * (var_i + delta_i^2))
    double meanDelta = 0.0d;
    for (Statistics stat : statistics) {
        if (stat == null) continue;
        meanDelta = stat.getMean() - mean;
        variance = variance + (stat.getCount() * (stat.getVariance() + (meanDelta * meanDelta)));
    }
    return new StatisticsImpl(count, min, max, mean, variance);
}
33,292 | public void setRecordsAmount ( int amount ) { int ipp = Integer . parseInt ( itemsPerPageEditor . getValueAsString ( ) ) ; pageTotal = amount % ipp == 0 ? amount / ipp : amount / ipp + 1 ; draw ( 0 ) ; } | Assigns total number of records . | 61 | 8 |
33,293 | public List < ParsingResult > parseUsing ( final String ddl , final String firstParserId , final String secondParserId , final String ... additionalParserIds ) throws ParsingException { CheckArg . isNotEmpty ( firstParserId , "firstParserId" ) ; CheckArg . isNotEmpty ( secondParserId , "secondParserId" ) ; if ( additionalParserIds != null ) { CheckArg . containsNoNulls ( additionalParserIds , "additionalParserIds" ) ; } final int numParsers = ( ( additionalParserIds == null ) ? 2 : ( additionalParserIds . length + 2 ) ) ; final List < DdlParser > selectedParsers = new ArrayList < DdlParser > ( numParsers ) ; { // add first parser final DdlParser parser = getParser ( firstParserId ) ; if ( parser == null ) { throw new ParsingException ( Position . EMPTY_CONTENT_POSITION , DdlSequencerI18n . unknownParser . text ( firstParserId ) ) ; } selectedParsers . add ( parser ) ; } { // add second parser final DdlParser parser = getParser ( secondParserId ) ; if ( parser == null ) { throw new ParsingException ( Position . EMPTY_CONTENT_POSITION , DdlSequencerI18n . unknownParser . text ( secondParserId ) ) ; } selectedParsers . add ( parser ) ; } // add remaining parsers if ( ( additionalParserIds != null ) && ( additionalParserIds . length != 0 ) ) { for ( final String id : additionalParserIds ) { final DdlParser parser = getParser ( id ) ; if ( parser == null ) { throw new ParsingException ( Position . EMPTY_CONTENT_POSITION , DdlSequencerI18n . unknownParser . text ( id ) ) ; } selectedParsers . add ( parser ) ; } } return parseUsing ( ddl , selectedParsers ) ; } | Parse the supplied DDL using multiple parsers returning the result of each parser with its score in the order of highest scoring to lowest scoring . | 435 | 29 |
/**
 * Create a new request that is similar to this request except with the supplied path.
 * This can only be done once the repository name and workspace name are resolved (non-null).
 *
 * @param path the path for the new request
 * @return a new resolved request with the same repository/workspace and the given path
 */
public ResolvedRequest withPath( String path ) {
    assert repositoryName != null;
    assert workspaceName != null;
    return new ResolvedRequest(request, repositoryName, workspaceName, path);
}
33,295 | public String findJcrName ( String cmisName ) { for ( int i = 0 ; i < list . size ( ) ; i ++ ) { if ( list . get ( i ) . cmisName != null && list . get ( i ) . cmisName . equals ( cmisName ) ) { return list . get ( i ) . jcrName ; } } return cmisName ; } | Determines the name of the given property from cmis domain in jcr domain . | 86 | 18 |
33,296 | public String findCmisName ( String jcrName ) { for ( int i = 0 ; i < list . size ( ) ; i ++ ) { if ( list . get ( i ) . jcrName != null && list . get ( i ) . jcrName . equals ( jcrName ) ) { return list . get ( i ) . cmisName ; } } return jcrName ; } | Determines the name of the given property from jcr domain in cmis domain . | 86 | 18 |
33,297 | public int getJcrType ( PropertyType propertyType ) { switch ( propertyType ) { case BOOLEAN : return javax . jcr . PropertyType . BOOLEAN ; case DATETIME : return javax . jcr . PropertyType . DATE ; case DECIMAL : return javax . jcr . PropertyType . DECIMAL ; case HTML : return javax . jcr . PropertyType . STRING ; case INTEGER : return javax . jcr . PropertyType . LONG ; case URI : return javax . jcr . PropertyType . URI ; case ID : return javax . jcr . PropertyType . STRING ; default : return javax . jcr . PropertyType . UNDEFINED ; } } | Converts type of property . | 169 | 6 |
33,298 | public Object [ ] jcrValues ( Property < ? > property ) { @ SuppressWarnings ( "unchecked" ) List < Object > values = ( List < Object > ) property . getValues ( ) ; // convert CMIS values to JCR values switch ( property . getType ( ) ) { case STRING : return asStrings ( values ) ; case BOOLEAN : return asBooleans ( values ) ; case DECIMAL : return asDecimals ( values ) ; case INTEGER : return asIntegers ( values ) ; case DATETIME : return asDateTime ( values ) ; case URI : return asURI ( values ) ; case ID : return asIDs ( values ) ; case HTML : return asHTMLs ( values ) ; default : return null ; } } | Converts value of the property for the jcr domain . | 170 | 12 |
33,299 | private Boolean [ ] asBooleans ( List < Object > values ) { ValueFactory < Boolean > factory = valueFactories . getBooleanFactory ( ) ; Boolean [ ] res = new Boolean [ values . size ( ) ] ; for ( int i = 0 ; i < res . length ; i ++ ) { res [ i ] = factory . create ( values . get ( i ) ) ; } return res ; } | Converts CMIS value of boolean type into JCR value of boolean type . | 87 | 16 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.