idx int64 0 41.2k | question stringlengths 83 4.15k | target stringlengths 5 715 |
|---|---|---|
17,100 | protected ServiceTracker < IExecutors , ? > getExecutorsServiceTracker ( BundleContext bundleContext ) throws InvalidSyntaxException { ServiceTracker < IExecutors , ? > tracker = new ServiceTracker < IExecutors , Object > ( bundleContext , bundleContext . createFilter ( "(&(" + Constants . OBJECTCLASS + "=" + IExecutors . class . getName ( ) + ")(name=" + getServletBundleName ( ) + "))" ) , null ) ; tracker . open ( ) ; return tracker ; } | Returns an opened ServiceTracker for the Aggregator executors provider . The executors provider is created by the bundle activator and is shared by all Aggregator instances created from the same bundle . |
17,101 | public String createCacheBundle ( String bundleSymbolicName , String bundleFileName ) throws IOException { final String sourceMethod = "createCacheBundle" ; final boolean isTraceLogging = log . isLoggable ( Level . FINER ) ; if ( isTraceLogging ) { log . entering ( sourceClass , sourceMethod , new Object [ ] { bundleSymbolicName , bundleFileName } ) ; } getCacheManager ( ) . serializeCache ( ) ; File controlFile = new File ( getWorkingDirectory ( ) , CacheControl . CONTROL_SERIALIZATION_FILENAME ) ; CacheControl control = null ; ObjectInputStream ois = new ObjectInputStream ( new FileInputStream ( controlFile ) ) ; ; try { control = ( CacheControl ) ois . readObject ( ) ; } catch ( Exception ex ) { throw new IOException ( ex ) ; } finally { IOUtils . closeQuietly ( ois ) ; } if ( control . getInitStamp ( ) != 0 ) { return Messages . AggregatorImpl_3 ; } InputStream is = AggregatorImpl . class . getClassLoader ( ) . getResourceAsStream ( MANIFEST_TEMPLATE ) ; StringWriter writer = new StringWriter ( ) ; CopyUtil . copy ( is , writer ) ; String template = writer . toString ( ) ; String manifest = MessageFormat . format ( template , new Object [ ] { Long . toString ( new Date ( ) . getTime ( ) ) , getContributingBundle ( ) . getHeaders ( ) . get ( "Bundle-Version" ) , bundleSymbolicName , AggregatorUtil . getCacheBust ( this ) } ) ; File bundleFile = new File ( bundleFileName ) ; ZipUtil . Packer packer = new ZipUtil . Packer ( ) ; packer . open ( bundleFile ) ; try { packer . packEntryFromStream ( "META-INF/MANIFEST.MF" , new ByteArrayInputStream ( manifest . getBytes ( "UTF-8" ) ) , new Date ( ) . getTime ( ) ) ; packer . packDirectory ( getWorkingDirectory ( ) , JAGGR_CACHE_DIRECTORY ) ; } finally { packer . close ( ) ; } String result = bundleFile . getCanonicalPath ( ) ; if ( isTraceLogging ) { log . 
exiting ( sourceClass , sourceMethod , result ) ; } return result ; } | Command handler to create a cache primer bundle containing the contents of the cache directory . |
17,102 | public String processRequestUrl ( String requestUrl ) throws IOException , ServletException { ConsoleHttpServletRequest req = new ConsoleHttpServletRequest ( getServletConfig ( ) . getServletContext ( ) , requestUrl ) ; OutputStream nulOutputStream = new OutputStream ( ) { public void write ( int b ) throws IOException { } } ; ConsoleHttpServletResponse resp = new ConsoleHttpServletResponse ( nulOutputStream ) ; doGet ( req , resp ) ; return Integer . toString ( resp . getStatus ( ) ) ; } | Implementation of eponymous console command . Provided to allow cache priming requests to be issued via the server console by automation scripts . |
17,103 | public void add ( VMTransitionBuilder b ) { Map < VMState , VMTransitionBuilder > m = vmAMB2 . get ( b . getDestinationState ( ) ) ; for ( VMState src : b . getSourceStates ( ) ) { m . put ( src , b ) ; } } | Add a builder for a VM . Every builder that supports the same transition will be replaced . |
17,104 | public boolean remove ( VMTransitionBuilder b ) { Map < VMState , VMTransitionBuilder > m = vmAMB2 . get ( b . getDestinationState ( ) ) ; for ( VMState src : b . getSourceStates ( ) ) { m . remove ( src ) ; } return true ; } | Remove a builder for an action on a VM . |
17,105 | public VMTransitionBuilder getBuilder ( VMState srcState , VMState dstState ) { Map < VMState , VMTransitionBuilder > dstCompliant = vmAMB2 . get ( dstState ) ; if ( dstCompliant == null ) { return null ; } return dstCompliant . get ( srcState ) ; } | Get the model builder for a given transition |
17,106 | public static TransitionFactory newBundle ( ) { TransitionFactory f = new TransitionFactory ( ) ; f . add ( new BootVM . Builder ( ) ) ; f . add ( new ShutdownVM . Builder ( ) ) ; f . add ( new SuspendVM . Builder ( ) ) ; f . add ( new ResumeVM . Builder ( ) ) ; f . add ( new KillVM . Builder ( ) ) ; f . add ( new RelocatableVM . Builder ( ) ) ; f . add ( new ForgeVM . Builder ( ) ) ; f . add ( new StayAwayVM . BuilderReady ( ) ) ; f . add ( new StayAwayVM . BuilderSleeping ( ) ) ; f . add ( new StayAwayVM . BuilderInit ( ) ) ; f . add ( new BootableNode . Builder ( ) ) ; f . add ( new ShutdownableNode . Builder ( ) ) ; return f ; } | a new factory that embeds the default builders . |
17,107 | public int getMaxBW ( Node n1 , Node n2 ) { int max = Integer . MAX_VALUE ; for ( Link inf : getPath ( n1 , n2 ) ) { if ( inf . getCapacity ( ) < max ) { max = inf . getCapacity ( ) ; } Switch sw = inf . getSwitch ( ) ; if ( sw . getCapacity ( ) >= 0 && sw . getCapacity ( ) < max ) { max = sw . getCapacity ( ) ; } } return max ; } | Get the maximal bandwidth available between two nodes . |
17,108 | protected List < Link > getFirstPhysicalPath ( List < Link > currentPath , Switch sw , Node dst ) { for ( Link l : net . getConnectedLinks ( sw ) ) { if ( currentPath . contains ( l ) ) { continue ; } currentPath . add ( l ) ; if ( l . getElement ( ) instanceof Node ) { if ( l . getElement ( ) . equals ( dst ) ) { return currentPath ; } } else { List < Link > recall = getFirstPhysicalPath ( currentPath , l . getSwitch ( ) . equals ( sw ) ? ( Switch ) l . getElement ( ) : l . getSwitch ( ) , dst ) ; if ( ! recall . isEmpty ( ) ) { return recall ; } } currentPath . remove ( currentPath . size ( ) - 1 ) ; } return Collections . emptyList ( ) ; } | Recursive method to get the first physical path found from a switch to a destination node . |
17,109 | private int loadSlack ( int dim , int bin ) { return p . loads [ dim ] [ bin ] . getUB ( ) - p . loads [ dim ] [ bin ] . getLB ( ) ; } | compute the load slack of a bin |
17,110 | private void validate ( ) { if ( ( this . segments . length < 0 ) && ( ! this . snapshot ) ) { throw new IllegalArgumentException ( "segments.length:" + Integer . valueOf ( this . segments . length ) ) ; } for ( int i = 0 ; i < this . segments . length ; i ++ ) { if ( this . segments [ i ] < 0 ) { throw new IllegalArgumentException ( "segments[" + i + "]:" + Integer . valueOf ( this . segments [ i ] ) ) ; } } if ( this . phaseNumber != null ) { if ( this . phaseNumber . intValue ( ) < 0 ) { throw new IllegalArgumentException ( "phaseNumber:" + this . phaseNumber ) ; } if ( this . phase == null ) { throw new IllegalArgumentException ( "phaseNumber (phase==null):" + this . phaseNumber ) ; } if ( ( this . phase == DevelopmentPhase . RELEASE ) && ( this . phaseNumber . intValue ( ) != 0 ) ) { throw new IllegalArgumentException ( "phaseNumber (phase==RELEASE):" + this . phaseNumber ) ; } } } | This method validates the consistency of this version identifier . |
17,111 | public static Integer computeGrammarSize ( GrammarRules rules , Integer paaSize ) { int res = 0 ; for ( GrammarRuleRecord r : rules ) { String ruleStr = r . getRuleString ( ) ; String [ ] tokens = ruleStr . split ( "\\s+" ) ; int ruleSize = computeRuleSize ( paaSize , tokens ) ; res += ruleSize ; } return res ; } | Computes the size of a normal i . e . unpruned grammar . |
17,112 | public static GrammarRules performPruning ( double [ ] ts , GrammarRules grammarRules ) { RulePruningAlgorithm pruner = new RulePruningAlgorithm ( grammarRules , ts . length ) ; pruner . pruneRules ( ) ; return pruner . regularizePrunedRules ( ) ; } | Performs pruning . |
17,113 | public static double computeCover ( boolean [ ] cover ) { int covered = 0 ; for ( boolean i : cover ) { if ( i ) { covered ++ ; } } return ( double ) covered / ( double ) cover . length ; } | Compute the covered percentage . |
17,114 | private boolean compatible ( int i , int j , Set < Integer > T [ ] [ ] ) { if ( s2comp [ i ] != s2comp [ j ] ) return false ; for ( int k = 0 ; k < nbLabels ; k ++ ) { if ( ! T [ i ] [ k ] . equals ( T [ j ] [ k ] ) ) return false ; } return true ; } | transition in the same components |
17,115 | public boolean register ( E e , String name ) { if ( resolve . containsKey ( name ) ) { return false ; } resolve . put ( name , e ) ; rev . put ( e , name ) ; return true ; } | Register the name of an element . |
17,116 | public boolean substituteVM ( VM curId , VM nextId ) { if ( VM . TYPE . equals ( elemId ) ) { if ( rev . containsKey ( nextId ) ) { return false ; } String fqn = rev . remove ( curId ) ; if ( fqn != null ) { rev . put ( ( E ) nextId , fqn ) ; resolve . put ( fqn , ( E ) nextId ) ; } } return true ; } | Re - associate the name of a registered VM to a new VM . |
17,117 | @ SuppressWarnings ( "unchecked" ) public static NamingService < VM > getVMNames ( Model mo ) { return ( NamingService < VM > ) mo . getView ( ID + VM . TYPE ) ; } | Get the naming service for the VMs associated to a model . |
17,118 | @ SuppressWarnings ( "unchecked" ) public static NamingService < Node > getNodeNames ( Model mo ) { return ( NamingService < Node > ) mo . getView ( ID + Node . TYPE ) ; } | Get the naming service for the nodes associated to a model . |
17,119 | public boolean applyEvents ( Hook k , Model i ) { for ( Event n : getEvents ( k ) ) { if ( ! n . apply ( i ) ) { return false ; } } return true ; } | Apply the events attached to a given hook . |
17,120 | public boolean addEvent ( Hook k , Event n ) { Set < Event > l = events . get ( k ) ; if ( l == null ) { l = new HashSet < > ( ) ; events . put ( k , l ) ; } return l . add ( n ) ; } | Add an event to the action . The moment the event will be executed depends on its hook . |
17,121 | public Set < Event > getEvents ( Hook k ) { Set < Event > l = events . get ( k ) ; return l == null ? Collections . emptySet ( ) : l ; } | Get the events attached to a specific hook . |
17,122 | public void addDim ( int c , int [ ] cUse , IntVar [ ] dUse , int [ ] alias ) { capacities . add ( c ) ; cUsages . add ( cUse ) ; dUsages . add ( dUse ) ; aliases . add ( alias ) ; } | Add a constraint |
17,123 | public boolean beforeSolve ( ReconfigurationProblem r ) { super . beforeSolve ( r ) ; for ( int i = 0 ; i < aliases . size ( ) ; i ++ ) { int capa = capacities . get ( i ) ; int [ ] alias = aliases . get ( i ) ; int [ ] cUse = cUsages . get ( i ) ; int [ ] dUses = new int [ dUsages . get ( i ) . length ] ; for ( IntVar dUseDim : dUsages . get ( i ) ) { dUses [ i ++ ] = dUseDim . getLB ( ) ; } r . getModel ( ) . post ( new AliasedCumulatives ( alias , new int [ ] { capa } , cHosts , new int [ ] [ ] { cUse } , cEnds , dHosts , new int [ ] [ ] { dUses } , dStarts , associations ) ) ; } return true ; } | Get the generated constraints . |
17,124 | public static List < ChocoView > resolveDependencies ( Model mo , List < ChocoView > views , Collection < String > base ) throws SchedulerException { Set < String > done = new HashSet < > ( base ) ; List < ChocoView > remaining = new ArrayList < > ( views ) ; List < ChocoView > solved = new ArrayList < > ( ) ; while ( ! remaining . isEmpty ( ) ) { ListIterator < ChocoView > ite = remaining . listIterator ( ) ; boolean blocked = true ; while ( ite . hasNext ( ) ) { ChocoView s = ite . next ( ) ; if ( done . containsAll ( s . getDependencies ( ) ) ) { ite . remove ( ) ; done . add ( s . getIdentifier ( ) ) ; solved . add ( s ) ; blocked = false ; } } if ( blocked ) { throw new SchedulerModelingException ( mo , "Missing dependencies or cyclic dependencies prevent from using: " + remaining ) ; } } return solved ; } | Flatten the views while considering their dependencies . Operations over the views that respect the iteration order satisfies the dependencies . |
17,125 | public static List < RuleInterval > getZeroIntervals ( int [ ] coverageArray ) { ArrayList < RuleInterval > res = new ArrayList < RuleInterval > ( ) ; int start = - 1 ; boolean inInterval = false ; int intervalsCounter = - 1 ; for ( int i = 0 ; i < coverageArray . length ; i ++ ) { if ( 0 == coverageArray [ i ] && ! inInterval ) { start = i ; inInterval = true ; } if ( coverageArray [ i ] > 0 && inInterval ) { res . add ( new RuleInterval ( intervalsCounter , start , i , 0 ) ) ; inInterval = false ; intervalsCounter -- ; } } if ( inInterval ) { res . add ( new RuleInterval ( intervalsCounter , start , coverageArray . length , 0 ) ) ; } return res ; } | Run a quick scan along the time series coverage to find zeroed intervals . |
17,126 | public static double getCoverAsFraction ( int seriesLength , GrammarRules rules ) { boolean [ ] coverageArray = new boolean [ seriesLength ] ; for ( GrammarRuleRecord rule : rules ) { if ( 0 == rule . ruleNumber ( ) ) { continue ; } ArrayList < RuleInterval > arrPos = rule . getRuleIntervals ( ) ; for ( RuleInterval saxPos : arrPos ) { int startPos = saxPos . getStart ( ) ; int endPos = saxPos . getEnd ( ) ; for ( int j = startPos ; j < endPos ; j ++ ) { coverageArray [ j ] = true ; } } } int coverSum = 0 ; for ( int i = 0 ; i < seriesLength ; i ++ ) { if ( coverageArray [ i ] ) { coverSum ++ ; } } return ( double ) coverSum / ( double ) seriesLength ; } | Computes which fraction of the time series is covered by the rules set . |
17,127 | public static double getMeanRuleCoverage ( int length , GrammarRules rules ) { int [ ] coverageArray = new int [ length ] ; for ( GrammarRuleRecord rule : rules ) { if ( 0 == rule . ruleNumber ( ) ) { continue ; } ArrayList < RuleInterval > arrPos = rule . getRuleIntervals ( ) ; for ( RuleInterval saxPos : arrPos ) { int startPos = saxPos . getStart ( ) ; int endPos = saxPos . getEnd ( ) ; for ( int j = startPos ; j < endPos ; j ++ ) { coverageArray [ j ] = coverageArray [ j ] + 1 ; } } } int coverageSum = 0 ; for ( int i : coverageArray ) { coverageSum += i ; } return ( double ) coverageSum / ( double ) length ; } | Gets the mean rule coverage . |
17,128 | public void removeLayerFromCache ( LayerImpl layer ) { if ( layer . getId ( ) != layerId ) { throw new IllegalStateException ( ) ; } LayerCacheImpl layerCache = layerCacheRef . get ( ) ; if ( layerCache != null ) { layerCache . remove ( layer . getKey ( ) , layer ) ; } } | Convenience method to remove the layer that this class is associated with from the layer cache . |
17,129 | public final V put ( K key , V value ) { BinTreeNode < K , V > prev = null ; BinTreeNode < K , V > node = root ; int key_hash_code = key . hashCode ( ) ; while ( node != null ) { prev = node ; if ( key_hash_code < node . keyHashCode ) { node = node . left ; } else { if ( ( key_hash_code > node . keyHashCode ) || ! node . key . equals ( key ) ) { node = node . right ; } else { cachedHashCode -= node . hashCode ( ) ; V temp = node . value ; node . value = value ; cachedHashCode += node . hashCode ( ) ; return temp ; } } } size ++ ; BinTreeNode < K , V > new_node = new BinTreeNode < K , V > ( key , value ) ; cachedHashCode += new_node . hashCode ( ) ; if ( prev == null ) { root = new_node ; return null ; } if ( key_hash_code < prev . keyHashCode ) { prev . left = new_node ; } else { prev . right = new_node ; } return null ; } | Associates the specified value with the specified key in this map . |
17,130 | private final V remove_semi_leaf ( BinTreeNode < K , V > node , BinTreeNode < K , V > prev , int son , BinTreeNode < K , V > m ) { if ( prev == null ) { root = m ; } else { if ( son == 0 ) prev . left = m ; else prev . right = m ; } return node . value ; } | to the only predecessor of node that could exist . |
17,131 | private final V finish_removal ( BinTreeNode < K , V > node , BinTreeNode < K , V > prev , int son , BinTreeNode < K , V > m ) { if ( m != null ) { m . left = node . left ; m . right = node . right ; } if ( prev == null ) root = m ; else { if ( son == 0 ) prev . left = m ; else prev . right = m ; } return node . value ; } | BinTreeNode that should replace node in the tree . |
17,132 | private final BinTreeNode < K , V > extract_next ( BinTreeNode < K , V > node ) { BinTreeNode < K , V > prev = node . right ; BinTreeNode < K , V > curr = prev . left ; if ( curr == null ) { node . right = node . right . right ; return prev ; } while ( curr . left != null ) { prev = curr ; curr = curr . left ; } prev . left = curr . right ; return curr ; } | removes it from that subtree and returns it . |
17,133 | public final Collection < V > values ( ) { return new AbstractCollection < V > ( ) { public Iterator < V > iterator ( ) { final Iterator < Map . Entry < K , V > > ite = entryIterator ( ) ; return new Iterator < V > ( ) { public boolean hasNext ( ) { return ite . hasNext ( ) ; } public void remove ( ) { ite . remove ( ) ; } public V next ( ) { return ite . next ( ) . getValue ( ) ; } } ; } public int size ( ) { return size ; } } ; } | Returns an unmodifiable collection view of the values from this map . |
17,134 | public final Set < Map . Entry < K , V > > entrySet ( ) { return new AbstractSet < Map . Entry < K , V > > ( ) { public Iterator < Map . Entry < K , V > > iterator ( ) { return entryIterator ( ) ; } public int size ( ) { return size ; } } ; } | Returns an unmodifiable set view of the map entries . |
17,135 | public final Set < K > keySet ( ) { return new AbstractSet < K > ( ) { public Iterator < K > iterator ( ) { final Iterator < Map . Entry < K , V > > ite = entryIterator ( ) ; return new Iterator < K > ( ) { public boolean hasNext ( ) { return ite . hasNext ( ) ; } public void remove ( ) { ite . remove ( ) ; } public K next ( ) { return ite . next ( ) . getKey ( ) ; } } ; } public int size ( ) { return size ; } } ; } | Returns an unmodifiable set view of the keys contained in this map . |
17,136 | public void processChildren ( Node node ) { for ( Node cursor = node . getFirstChild ( ) ; cursor != null ; cursor = cursor . getNext ( ) ) { if ( cursor . getType ( ) == Token . CALL ) { Node name = cursor . getFirstChild ( ) ; if ( name != null && name . getType ( ) == Token . NAME && name . getString ( ) . equals ( "define" ) ) { Node param = name . getNext ( ) ; if ( param != null && param . getType ( ) != Token . STRING ) { String expname = name . getProp ( Node . SOURCENAME_PROP ) . toString ( ) ; if ( source != null ) { PositionLocator locator = source . locate ( name . getLineno ( ) , name . getCharno ( ) + 6 ) ; char tok = locator . findNextJSToken ( ) ; if ( tok == '(' ) { source . insert ( "\"" + expname + "\"," , locator . getLineno ( ) , locator . getCharno ( ) + 1 ) ; } else { source . insert ( "\"" + expname + "\"," , param . getLineno ( ) , param . getCharno ( ) ) ; } } param . getParent ( ) . addChildBefore ( Node . newString ( expname ) , param ) ; } } } if ( cursor . hasChildren ( ) ) processChildren ( cursor ) ; } } | Recursively called to process AST nodes looking for anonymous define calls . If an anonymous define call is found then change it be a named define call specifying the module name for the file being processed . |
17,137 | public List < IntVar > getIncoming ( Node n ) { List < IntVar > l = incoming . get ( n ) ; if ( l == null ) { l = Collections . emptyList ( ) ; } return l ; } | Get the start moment of the movements that terminate on a given node |
17,138 | public List < IntVar > getOutgoing ( Node n ) { List < IntVar > l = outgoings . get ( n ) ; if ( l == null ) { l = Collections . emptyList ( ) ; } return l ; } | Get the start moment of the movements that leave from a given node |
17,139 | private void checkModel ( Model mo , boolean start ) throws SatConstraintViolationException { for ( SatConstraintChecker < ? > c : checkers ) { if ( start && ! c . startsWith ( mo ) ) { SatConstraint cs = c . getConstraint ( ) ; if ( cs != null ) { throw new DiscreteViolationException ( c . getConstraint ( ) , mo ) ; } } else if ( ! start && ! c . endsWith ( mo ) ) { SatConstraint cs = c . getConstraint ( ) ; if ( cs != null ) { throw new DiscreteViolationException ( c . getConstraint ( ) , mo ) ; } } } } | Check for the validity of a model . |
17,140 | private static JSONObject toJSON ( Mapping c ) { JSONObject o = new JSONObject ( ) ; o . put ( "offlineNodes" , nodesToJSON ( c . getOfflineNodes ( ) ) ) ; o . put ( "readyVMs" , vmsToJSON ( c . getReadyVMs ( ) ) ) ; JSONObject ons = new JSONObject ( ) ; for ( Node n : c . getOnlineNodes ( ) ) { JSONObject w = new JSONObject ( ) ; w . put ( "runningVMs" , vmsToJSON ( c . getRunningVMs ( n ) ) ) ; w . put ( "sleepingVMs" , vmsToJSON ( c . getSleepingVMs ( n ) ) ) ; ons . put ( Integer . toString ( n . id ( ) ) , w ) ; } o . put ( "onlineNodes" , ons ) ; return o ; } | Serialise the mapping . |
17,141 | public void fillMapping ( Model mo , JSONObject o ) throws JSONConverterException { Mapping c = mo . getMapping ( ) ; for ( Node u : newNodes ( mo , o , "offlineNodes" ) ) { c . addOfflineNode ( u ) ; } for ( VM u : newVMs ( mo , o , "readyVMs" ) ) { c . addReadyVM ( u ) ; } JSONObject ons = ( JSONObject ) o . get ( "onlineNodes" ) ; for ( Map . Entry < String , Object > e : ons . entrySet ( ) ) { int id = Integer . parseInt ( e . getKey ( ) ) ; Node u = mo . newNode ( id ) ; if ( u == null ) { throw JSONConverterException . nodeAlreadyDeclared ( id ) ; } JSONObject on = ( JSONObject ) e . getValue ( ) ; c . addOnlineNode ( u ) ; for ( VM vm : newVMs ( mo , on , "runningVMs" ) ) { c . addRunningVM ( vm , u ) ; } for ( VM vm : newVMs ( mo , on , "sleepingVMs" ) ) { c . addSleepingVM ( vm , u ) ; } } } | Create the elements inside the model and fill the mapping . |
17,142 | private static Set < Node > newNodes ( Model mo , JSONObject o , String key ) throws JSONConverterException { checkKeys ( o , key ) ; Object x = o . get ( key ) ; if ( ! ( x instanceof JSONArray ) ) { throw new JSONConverterException ( "array expected at key '" + key + "'" ) ; } Set < Node > s = new HashSet < > ( ( ( JSONArray ) x ) . size ( ) ) ; for ( Object i : ( JSONArray ) x ) { int id = ( Integer ) i ; Node n = mo . newNode ( id ) ; if ( n == null ) { throw JSONConverterException . nodeAlreadyDeclared ( id ) ; } s . add ( n ) ; } return s ; } | Build nodes from a key . |
17,143 | private static Set < VM > newVMs ( Model mo , JSONObject o , String key ) throws JSONConverterException { checkKeys ( o , key ) ; Object x = o . get ( key ) ; if ( ! ( x instanceof JSONArray ) ) { throw new JSONConverterException ( "array expected at key '" + key + "'" ) ; } Set < VM > s = new HashSet < > ( ( ( JSONArray ) x ) . size ( ) ) ; for ( Object i : ( JSONArray ) x ) { int id = ( Integer ) i ; VM vm = mo . newVM ( id ) ; if ( vm == null ) { throw JSONConverterException . vmAlreadyDeclared ( id ) ; } s . add ( vm ) ; } return s ; } | Build VMs from a key . |
17,144 | public boolean add ( String key , ModuleDepInfo info ) { if ( info == null ) { throw new NullPointerException ( ) ; } boolean modified = false ; ModuleDepInfo existing = get ( key ) ; if ( ! containsKey ( key ) || existing != info ) { if ( existing != null ) { modified = existing . add ( info ) ; } else { super . put ( key , info ) ; modified = true ; } } return modified ; } | Adds the specified pair to the map . If an entry for the key exists then the specified module dep info is added to the existing module dep info . |
17,145 | public boolean addAll ( ModuleDeps other ) { boolean modified = false ; for ( Map . Entry < String , ModuleDepInfo > entry : other . entrySet ( ) ) { modified |= add ( entry . getKey ( ) , new ModuleDepInfo ( entry . getValue ( ) ) ) ; } return modified ; } | Adds all of the map entries from other to this map |
17,146 | public int minVMAllocation ( int vmIdx , int v ) { int vv = Math . max ( v , vmAllocation . get ( vmIdx ) ) ; vmAllocation . set ( vmIdx , vv ) ; return vv ; } | Change the VM resource allocation . |
17,147 | public double capOverbookRatio ( int nIdx , double d ) { if ( d < 1 ) { return ratios . get ( nIdx ) ; } double v = Math . min ( ratios . get ( nIdx ) , d ) ; ratios . set ( nIdx , v ) ; return v ; } | Cap the overbooking ratio for a given node . |
17,148 | public boolean beforeSolve ( ReconfigurationProblem p ) throws SchedulerException { for ( VM vm : source . getMapping ( ) . getAllVMs ( ) ) { int vmId = p . getVM ( vm ) ; int v = getVMAllocation ( vmId ) ; if ( v < 0 ) { int prevUsage = rc . getConsumption ( vm ) ; minVMAllocation ( vmId , prevUsage ) ; } } ChocoView v = rp . getView ( Packing . VIEW_ID ) ; if ( v == null ) { throw SchedulerModelingException . missingView ( rp . getSourceModel ( ) , Packing . VIEW_ID ) ; } IntVar [ ] host = new IntVar [ p . getFutureRunningVMs ( ) . size ( ) ] ; int [ ] demand = new int [ host . length ] ; int i = 0 ; for ( VM vm : p . getFutureRunningVMs ( ) ) { host [ i ] = rp . getVMAction ( vm ) . getDSlice ( ) . getHoster ( ) ; demand [ i ] = getVMAllocation ( p . getVM ( vm ) ) ; i ++ ; } ( ( Packing ) v ) . addDim ( rc . getResourceIdentifier ( ) , virtRcUsage , demand , host ) ; return linkVirtualToPhysicalUsage ( ) ; } | Set the resource usage for each of the VM . If the LB is < 0 the previous consumption is used to maintain the resource usage . Otherwise the usage is set to the variable lower bound . |
17,149 | private boolean capHosting ( int nIdx , int min , int nbZeroes ) { Node n = rp . getNode ( nIdx ) ; double capa = getSourceResource ( ) . getCapacity ( n ) * getOverbookRatio ( nIdx ) ; int card = ( int ) ( capa / min ) + nbZeroes + 1 ; if ( card < source . getMapping ( ) . getRunningVMs ( n ) . size ( ) ) { return true ; } try { rp . getNbRunningVMs ( ) . get ( nIdx ) . updateUpperBound ( card , Cause . Null ) ; } catch ( ContradictionException ex ) { rp . getLogger ( ) . error ( "Unable to cap the hosting capacity of '" + n + " ' to " + card , ex ) ; return false ; } return true ; } | Reduce the cardinality wrt . the worst case scenario . |
17,150 | private void checkInitialSatisfaction ( ) { for ( Node n : rp . getSourceModel ( ) . getMapping ( ) . getOnlineNodes ( ) ) { int nIdx = rp . getNode ( n ) ; double ratio = getOverbookRatio ( nIdx ) ; double capa = getSourceResource ( ) . getCapacity ( n ) * ratio ; int usage = 0 ; for ( VM vm : rp . getSourceModel ( ) . getMapping ( ) . getRunningVMs ( n ) ) { usage += getSourceResource ( ) . getConsumption ( vm ) ; if ( usage > capa ) { throw new SchedulerModelingException ( rp . getSourceModel ( ) , "Usage of virtual resource " + getResourceIdentifier ( ) + " on node " + n + " (" + usage + ") exceeds its capacity (" + capa + ")" ) ; } } } } | Check if the initial capacity > sum current consumption The ratio is instantiated now so the computation is correct |
17,151 | public static TObjectIntMap < VM > getWeights ( ReconfigurationProblem rp , List < CShareableResource > rcs ) { Model mo = rp . getSourceModel ( ) ; int [ ] capa = new int [ rcs . size ( ) ] ; int [ ] cons = new int [ rcs . size ( ) ] ; TObjectIntMap < VM > cost = new TObjectIntHashMap < > ( ) ; for ( Node n : mo . getMapping ( ) . getAllNodes ( ) ) { for ( int i = 0 ; i < rcs . size ( ) ; i ++ ) { capa [ i ] += rcs . get ( i ) . virtRcUsage . get ( rp . getNode ( n ) ) . getUB ( ) * rcs . get ( i ) . ratios . get ( rp . getNode ( n ) ) ; } } for ( VM v : mo . getMapping ( ) . getAllVMs ( ) ) { for ( int i = 0 ; i < rcs . size ( ) ; i ++ ) { cons [ i ] += rcs . get ( i ) . getVMAllocation ( rp . getVM ( v ) ) ; } } for ( VM v : mo . getMapping ( ) . getAllVMs ( ) ) { double sum = 0 ; for ( int i = 0 ; i < rcs . size ( ) ; i ++ ) { double ratio = 0 ; if ( cons [ i ] > 0 ) { ratio = 1.0 * rcs . get ( i ) . getVMAllocation ( rp . getVM ( v ) ) / capa [ i ] ; } sum += ratio ; } cost . put ( v , ( int ) ( sum * 10000 ) ) ; } return cost ; } | Estimate the weight of each VMs with regards to multiple dimensions . In practice it sums the normalised size of each VM against the total capacity |
17,152 | private boolean distinctVMStates ( ) { boolean ok = vms . size ( ) == running . size ( ) + sleeping . size ( ) + ready . size ( ) + killed . size ( ) ; Map < VM , VMState > states = new HashMap < > ( ) ; for ( VM v : running ) { states . put ( v , VMState . RUNNING ) ; } for ( VM v : ready ) { VMState prev = states . put ( v , VMState . READY ) ; if ( prev != null ) { getLogger ( ) . debug ( "multiple destination state for {}: {} and {}" , v , prev , VMState . READY ) ; } } for ( VM v : sleeping ) { VMState prev = states . put ( v , VMState . SLEEPING ) ; if ( prev != null ) { getLogger ( ) . debug ( "multiple destination state for {}: {} and {}" , v , prev , VMState . SLEEPING ) ; } } for ( VM v : killed ) { VMState prev = states . put ( v , VMState . KILLED ) ; if ( prev != null ) { getLogger ( ) . debug ( "multiple destination state for {}: {} and {}" , v , prev , VMState . KILLED ) ; } } return ok ; } | Check if every VM has a single destination state |
17,153 | @ SuppressWarnings ( "squid:S3346" ) public ReconfigurationPlan buildReconfigurationPlan ( Solution s , Model src ) throws SchedulerException { ReconfigurationPlan plan = new DefaultReconfigurationPlan ( src ) ; for ( NodeTransition action : nodeActions ) { action . insertActions ( s , plan ) ; } for ( VMTransition action : vmActions ) { action . insertActions ( s , plan ) ; } assert plan . isApplyable ( ) : "The following plan cannot be applied:\n" + plan ; assert checkConsistency ( s , plan ) ; return plan ; } | Build a plan for a solution . |
17,154 | private void defaultHeuristic ( ) { IntStrategy intStrat = Search . intVarSearch ( new FirstFail ( csp ) , new IntDomainMin ( ) , csp . retrieveIntVars ( true ) ) ; SetStrategy setStrat = new SetStrategy ( csp . retrieveSetVars ( ) , new InputOrder < > ( csp ) , new SetDomainMin ( ) , true ) ; RealStrategy realStrat = new RealStrategy ( csp . retrieveRealVars ( ) , new Occurrence < > ( ) , new RealDomainMiddle ( ) ) ; solver . setSearch ( new StrategiesSequencer ( intStrat , realStrat , setStrat ) ) ; } | A naive heuristic to be sure every variable will be instantiated . |
17,155 | private void makeCardinalityVariables ( ) { vmsCountOnNodes = new ArrayList < > ( nodes . size ( ) ) ; int nbVMs = vms . size ( ) ; for ( Node n : nodes ) { vmsCountOnNodes . add ( csp . intVar ( makeVarLabel ( "nbVMsOn('" , n , "')" ) , 0 , nbVMs , true ) ) ; } vmsCountOnNodes = Collections . unmodifiableList ( vmsCountOnNodes ) ; } | Create the cardinality variables . |
17,156 | public Iterator < Action > iterator ( ) { Set < Action > sorted = new TreeSet < > ( startFirstComparator ) ; sorted . addAll ( actions ) ; return sorted . iterator ( ) ; } | Iterate over the actions . The actions are automatically sorted increasingly by their starting moment . |
17,157 | private void addDirectionalEdge ( Edge e ) { if ( edgeAccess . getCapacity ( ) < getEdgeIndex ( edgePos ) + EDGE_SIZE ) edgeAccess . ensureCapacity ( edgeAccess . getCapacity ( ) + INITIAL_EDGE_FILE_SIZE ) ; long fromId = nodIdMapping . get ( e . getFromNodeId ( ) ) ; long toId = nodIdMapping . get ( e . getToNodeId ( ) ) ; long fromIndex = getNodeIndex ( fromId ) ; long toIndex = getNodeIndex ( toId ) ; long edgeIndex = getEdgeIndex ( edgePos ) ; edgeAccess . setLong ( edgeIndex , fromId ) ; edgeAccess . setLong ( edgeIndex + 8 , toId ) ; edgeAccess . setDouble ( edgeIndex + 16 , e . getWeight ( ) ) ; edgeAccess . setLong ( edgeIndex + 24 , nodeAccess . getLong ( fromIndex + 8 ) ) ; edgeAccess . setLong ( edgeIndex + 32 , nodeAccess . getLong ( toIndex + 16 ) ) ; nodeAccess . setLong ( fromIndex + 8 , edgePos ) ; nodeAccess . setLong ( toIndex + 16 , edgePos ) ; edgeAccess . setLong ( 0 , ++ edgePos ) ; } | Add a new directional edge into the graph . |
17,158 | public static DefaultConstraintsCatalog newBundle ( ) { DefaultConstraintsCatalog c = new DefaultConstraintsCatalog ( ) ; c . add ( new AmongBuilder ( ) ) ; c . add ( new BanBuilder ( ) ) ; c . add ( new ResourceCapacityBuilder ( ) ) ; c . add ( new RunningCapacityBuilder ( ) ) ; c . add ( new FenceBuilder ( ) ) ; c . add ( new GatherBuilder ( ) ) ; c . add ( new KilledBuilder ( ) ) ; c . add ( new LonelyBuilder ( ) ) ; c . add ( new OfflineBuilder ( ) ) ; c . add ( new OnlineBuilder ( ) ) ; c . add ( new OverbookBuilder ( ) ) ; c . add ( new PreserveBuilder ( ) ) ; c . add ( new QuarantineBuilder ( ) ) ; c . add ( new ReadyBuilder ( ) ) ; c . add ( new RootBuilder ( ) ) ; c . add ( new RunningBuilder ( ) ) ; c . add ( new SleepingBuilder ( ) ) ; c . add ( new SplitBuilder ( ) ) ; c . add ( new SplitAmongBuilder ( ) ) ; c . add ( new SpreadBuilder ( ) ) ; c . add ( new SeqBuilder ( ) ) ; c . add ( new MaxOnlineBuilder ( ) ) ; c . add ( new NoDelayBuilder ( ) ) ; c . add ( new BeforeBuilder ( ) ) ; c . add ( new SerializeBuilder ( ) ) ; c . add ( new SyncBuilder ( ) ) ; return c ; } | Build a catalog with a builder for every constraint in the current BtrPlace bundle . |
17,159 | public boolean add ( SatConstraintBuilder c ) { if ( this . builders . containsKey ( c . getIdentifier ( ) ) ) { return false ; } this . builders . put ( c . getIdentifier ( ) , c ) ; return true ; } | Add a constraint builder to the catalog . There must not be another builder with the same identifier in the catalog |
17,160 | public int compare ( T o1 , T o2 ) { if ( o1 == o2 ) return 0 ; String str1 = ( o1 == null ) ? "null" : o1 . toString ( ) ; String str2 = ( o2 == null ) ? "null" : o2 . toString ( ) ; return str1 . compareTo ( str2 ) ; } | Compares its two arguments for order . Returns a negative integer , zero , or a positive integer as the string representation of the first argument is less than , equal to , or greater than the string representation of the second .
17,161 | public static CaseSyntax of ( Character separator , CaseConversion allCharCase ) { return of ( separator , allCharCase , allCharCase , allCharCase ) ; } | Static factory that creates a CaseSyntax applying the same case conversion to all character positions .
17,162 | protected void registerDefaultDatatypes ( ) { registerStandardDatatype ( String . class ) ; registerStandardDatatype ( Boolean . class ) ; registerStandardDatatype ( Character . class ) ; registerStandardDatatype ( Currency . class ) ; registerCustomDatatype ( Datatype . class ) ; registerNumberDatatypes ( ) ; registerJavaTimeDatatypes ( ) ; registerJavaUtilDateCalendarDatatypes ( ) ; } | Registers the default datatypes . |
17,163 | public void setExtraDatatypes ( List < String > datatypeList ) { getInitializationState ( ) . requireNotInitilized ( ) ; for ( String fqn : datatypeList ) { registerCustomDatatype ( fqn ) ; } } | Adds a list of additional datatypes to register . E . g . for easy spring configuration and custom extension . |
17,164 | public static CompressionCodec getGzipCodec ( Configuration conf ) { try { return ( CompressionCodec ) ReflectionUtils . newInstance ( conf . getClassByName ( "org.apache.hadoop.io.compress.GzipCodec" ) . asSubclass ( CompressionCodec . class ) , conf ) ; } catch ( ClassNotFoundException e ) { logger . warn ( "GzipCodec could not be instantiated" , e ) ; return null ; } } | Instantiates a Hadoop codec for compressing and decompressing Gzip files . This is the most common compression applied to WARC files . |
17,165 | public static boolean isNearMultipleBorders ( final Point point , final Territory territory ) { checkDefined ( "point" , point ) ; if ( territory != Territory . AAA ) { final int territoryNumber = territory . getNumber ( ) ; if ( territory . getParentTerritory ( ) != null ) { if ( isNearMultipleBorders ( point , territory . getParentTerritory ( ) ) ) { return true ; } } int nrFound = 0 ; final int fromTerritoryRecord = DATA_MODEL . getDataFirstRecord ( territoryNumber ) ; final int uptoTerritoryRecord = DATA_MODEL . getDataLastRecord ( territoryNumber ) ; for ( int territoryRecord = uptoTerritoryRecord ; territoryRecord >= fromTerritoryRecord ; territoryRecord -- ) { if ( ! Data . isRestricted ( territoryRecord ) ) { final Boundary boundary = Boundary . createBoundaryForTerritoryRecord ( territoryRecord ) ; final int xdiv8 = Common . xDivider ( boundary . getLatMicroDegMin ( ) , boundary . getLatMicroDegMax ( ) ) / 4 ; if ( boundary . extendBoundary ( 60 , xdiv8 ) . containsPoint ( point ) ) { if ( ! boundary . extendBoundary ( - 60 , - xdiv8 ) . containsPoint ( point ) ) { nrFound ++ ; if ( nrFound > 1 ) { return true ; } } } } } } return false ; } | Is coordinate near multiple territory borders? |
17,166 | public void enqueue ( RepairDigramRecord digramRecord ) { if ( elements . containsKey ( digramRecord . str ) ) { throw new IllegalArgumentException ( "Element with payload " + digramRecord . str + " already exists in the queue..." ) ; } else { RepairQueueNode nn = new RepairQueueNode ( digramRecord ) ; if ( this . elements . isEmpty ( ) ) { this . head = nn ; } else if ( nn . getFrequency ( ) >= this . head . getFrequency ( ) ) { this . head . prev = nn ; nn . next = this . head ; this . head = nn ; } else { RepairQueueNode currentNode = head ; while ( null != currentNode . next ) { if ( nn . getFrequency ( ) >= currentNode . getFrequency ( ) ) { RepairQueueNode prevN = currentNode . prev ; prevN . next = nn ; nn . prev = prevN ; currentNode . prev = nn ; nn . next = currentNode ; break ; } currentNode = currentNode . next ; } if ( null == currentNode . next ) { if ( nn . getFrequency ( ) >= currentNode . getFrequency ( ) ) { RepairQueueNode prevN = currentNode . prev ; prevN . next = nn ; nn . prev = prevN ; currentNode . prev = nn ; nn . next = currentNode ; } else { nn . prev = currentNode ; currentNode . next = nn ; } } } this . elements . put ( nn . payload . str , nn ) ; } } | Places an element in the queue at the place based on its frequency . |
17,167 | public RepairDigramRecord get ( String key ) { RepairQueueNode el = this . elements . get ( key ) ; if ( null != el ) { return el . payload ; } return null ; } | Gets an element in the queue given its key . |
17,168 | private void removeNodeFromList ( RepairQueueNode el ) { if ( null == el . prev ) { if ( null != el . next ) { this . head = el . next ; this . head . prev = null ; el = null ; } else { this . head = null ; } } else if ( null == el . next ) { if ( null != el . prev ) { el . prev . next = null ; } else { throw new RuntimeException ( "Unrecognized situation here..." ) ; } } else { el . prev . next = el . next ; el . next . prev = el . prev ; } } | Removes a node from the doubly linked list which backs the queue . |
17,169 | public int evaluate ( Model mo , Class < ? extends Action > a , Element e ) throws SchedulerException { ActionDurationEvaluator < Element > ev = durations . get ( a ) ; if ( ev == null ) { throw new SchedulerModelingException ( null , "Unable to estimate the duration of action '" + a . getSimpleName ( ) + "' related to '" + e + "'" ) ; } int d = ev . evaluate ( mo , e ) ; if ( d <= 0 ) { throw new SchedulerModelingException ( null , "The duration for action " + a . getSimpleName ( ) + " over '" + e + "' has been evaluated to a negative value (" + d + "). Unsupported" ) ; } return d ; } | Evaluate the duration of given action on a given element . |
17,170 | protected void parseParameter ( String parameter , CliParserState parserState , CliParameterConsumer parameterConsumer ) { if ( parserState . isOptionsComplete ( ) ) { List < CliArgumentContainer > argumentList = this . cliState . getArguments ( parserState . requireCurrentMode ( this . cliState ) ) ; int argumentIndex = parserState . getArgumentIndex ( ) ; if ( argumentIndex >= argumentList . size ( ) ) { throw new NlsIllegalArgumentException ( parameter ) ; } else { parseArgument ( parserState , parameter , argumentList . get ( argumentIndex ) , parameterConsumer ) ; } } else { CliOptionContainer optionContainer = this . cliState . getOption ( parameter ) ; if ( optionContainer == null ) { parseParameterUndefinedOption ( parameter , parserState , parameterConsumer ) ; } else { String modeId = optionContainer . getOption ( ) . mode ( ) ; CliModeObject newMode = this . cliState . getMode ( modeId ) ; if ( newMode == null ) { newMode = new CliModeContainer ( modeId ) ; } if ( parserState . currentMode == null ) { parserState . setCurrentMode ( parameter , newMode ) ; } else if ( ! modeId . equals ( parserState . currentMode . getId ( ) ) ) { if ( newMode . isDescendantOf ( parserState . currentMode ) ) { parserState . setCurrentMode ( parameter , newMode ) ; } else if ( ! newMode . isAncestorOf ( parserState . currentMode ) ) { throw new CliOptionIncompatibleModesException ( parserState . modeOption , parameter ) ; } } parseOption ( parserState , parameter , optionContainer , parameterConsumer ) ; } } } | This method parses a single command - line argument . |
17,171 | private int printHelpOptions ( CliOutputSettings settings , Map < CliOption , CliOptionHelpInfo > option2HelpMap , StringBuilder parameters , Collection < CliOptionContainer > modeOptions ) { int maxOptionColumnWidth = 0 ; for ( CliOptionContainer option : modeOptions ) { CliOption cliOption = option . getOption ( ) ; if ( parameters . length ( ) > 0 ) { parameters . append ( " " ) ; } if ( ! cliOption . required ( ) ) { parameters . append ( "[" ) ; } parameters . append ( cliOption . name ( ) ) ; if ( ! option . getSetter ( ) . getPropertyClass ( ) . equals ( boolean . class ) ) { parameters . append ( " " ) ; parameters . append ( cliOption . operand ( ) ) ; if ( option . isArrayMapOrCollection ( ) ) { CliContainerStyle containerStyle = option . getContainerStyle ( this . cliState . getCliStyle ( ) ) ; switch ( containerStyle ) { case COMMA_SEPARATED : parameters . append ( ",..." ) ; break ; case MULTIPLE_OCCURRENCE : parameters . append ( "*" ) ; break ; default : throw new IllegalCaseException ( CliContainerStyle . class , containerStyle ) ; } } } if ( ! cliOption . required ( ) ) { parameters . append ( "]" ) ; } CliOptionHelpInfo helpInfo = option2HelpMap . get ( cliOption ) ; if ( helpInfo == null ) { helpInfo = new CliOptionHelpInfo ( option , this . dependencies , settings ) ; option2HelpMap . put ( cliOption , helpInfo ) ; } if ( helpInfo . length > maxOptionColumnWidth ) { maxOptionColumnWidth = helpInfo . length ; } } return maxOptionColumnWidth ; } | Prints the options for the help usage output . |
17,172 | public boolean contains ( Object o ) { for ( T e : elemArray ) { if ( ( o == e ) || ( ( o != null ) && o . equals ( e ) ) ) { return true ; } } return false ; } | Re - implement the contains method from AbstractSet for speed reasons |
17,173 | public Set < Action > getDependencies ( Action a ) { if ( ! demandingNodes . containsKey ( a ) ) { return Collections . emptySet ( ) ; } Node n = demandingNodes . get ( a ) ; Set < Action > allActions = getFreeings ( n ) ; Set < Action > pre = new HashSet < > ( ) ; for ( Action action : allActions ) { if ( ! action . equals ( a ) && a . getStart ( ) >= action . getEnd ( ) ) { pre . add ( action ) ; } } return pre ; } | Get the dependencies for an action . |
17,174 | public static boolean isExplodeRequires ( HttpServletRequest request ) { if ( isIncludeRequireDeps ( request ) ) { return false ; } boolean result = false ; IAggregator aggr = ( IAggregator ) request . getAttribute ( IAggregator . AGGREGATOR_REQATTRNAME ) ; IOptions options = aggr . getOptions ( ) ; IConfig config = aggr . getConfig ( ) ; Boolean reqattr = TypeUtil . asBoolean ( request . getAttribute ( IHttpTransport . EXPANDREQUIRELISTS_REQATTRNAME ) ) ; result = ( options == null || ! options . isDisableRequireListExpansion ( ) ) && ( config == null || ! isServerExpandedLayers ( request ) ) && reqattr != null && reqattr ; return result ; } | Static class method for determining if require list explosion should be performed . |
17,175 | public static boolean isHasFiltering ( HttpServletRequest request ) { IAggregator aggr = ( IAggregator ) request . getAttribute ( IAggregator . AGGREGATOR_REQATTRNAME ) ; IOptions options = aggr . getOptions ( ) ; return ( options != null ) ? ! options . isDisableHasFiltering ( ) : true ; } | Static method for determining if has filtering should be performed . |
17,176 | static Boundary createBoundaryForTerritoryRecord ( final int territoryRecord ) { return new Boundary ( DATA_MODEL . getLatMicroDegMin ( territoryRecord ) , DATA_MODEL . getLonMicroDegMin ( territoryRecord ) , DATA_MODEL . getLatMicroDegMax ( territoryRecord ) , DATA_MODEL . getLonMicroDegMax ( territoryRecord ) ) ; } | You have to use this factory method instead of a ctor . |
17,177 | boolean containsPoint ( final Point p ) { if ( ! p . isDefined ( ) ) { return false ; } final int latMicroDeg = p . getLatMicroDeg ( ) ; if ( ( latMicroDegMin > latMicroDeg ) || ( latMicroDeg >= latMicroDegMax ) ) { return false ; } final int lonMicroDeg = p . getLonMicroDeg ( ) ; if ( lonMicroDeg < lonMicroDegMin ) { return ( lonMicroDegMin <= ( lonMicroDeg + Point . MICRO_DEG_360 ) ) && ( ( lonMicroDeg + Point . MICRO_DEG_360 ) < lonMicroDegMax ) ; } else if ( lonMicroDeg >= lonMicroDegMax ) { return ( lonMicroDegMin <= ( lonMicroDeg - Point . MICRO_DEG_360 ) ) && ( ( lonMicroDeg - Point . MICRO_DEG_360 ) < lonMicroDegMax ) ; } else { return true ; } } | Check if a point falls within a boundary . Note that the min values are inclusive for a boundary and the max values are exclusive .
17,178 | public static List < Precedence > newPrecedence ( VM vmBefore , Collection < VM > vmsAfter ) { return newPrecedence ( Collections . singleton ( vmBefore ) , vmsAfter ) ; } | Instantiate discrete constraints to force a set of VMs to migrate after a single one . |
17,179 | public static List < Precedence > newPrecedence ( Collection < VM > vmsBefore , VM vmAfter ) { return newPrecedence ( vmsBefore , Collections . singleton ( vmAfter ) ) ; } | Instantiate discrete constraints to force a single VM to migrate after a set of VMs . |
17,180 | public static List < Precedence > newPrecedence ( Collection < VM > vmsBefore , Collection < VM > vmsAfter ) { List < Precedence > l = new ArrayList < > ( vmsBefore . size ( ) * vmsAfter . size ( ) ) ; for ( VM vmb : vmsBefore ) { for ( VM vma : vmsAfter ) { l . add ( new Precedence ( vmb , vma ) ) ; } } return l ; } | Instantiate discrete constraints to force a set of VMs to migrate after an other set of VMs . |
17,181 | private ESat isConsistent ( ) { int [ ] [ ] l = new int [ nbDims ] [ nbBins ] ; for ( int i = 0 ; i < bins . length ; i ++ ) { if ( bins [ i ] . isInstantiated ( ) ) { for ( int d = 0 ; d < nbDims ; d ++ ) { int v = bins [ i ] . getValue ( ) ; l [ d ] [ v ] += iSizes [ d ] [ i ] ; if ( l [ d ] [ v ] > loads [ d ] [ v ] . getUB ( ) ) { return ESat . FALSE ; } } } } return ESat . TRUE ; } | check the consistency of the constraint . |
17,182 | public int getPropagationConditions ( int idx ) { return idx < bins . length ? IntEventType . all ( ) : IntEventType . BOUND . getMask ( ) + IntEventType . INSTANTIATE . getMask ( ) ; } | react on removal events on bins variables react on bound events on loads variables |
17,183 | private void computeSumItemSizes ( ) { for ( int d = 0 ; d < nbDims ; d ++ ) { long sum = 0 ; for ( int i = 0 ; i < iSizes [ d ] . length ; i ++ ) { sum += iSizes [ d ] [ i ] ; } this . sumISizes [ d ] = sum ; } } | Compute the sum of the item sizes for each dimension . |
17,184 | public void clearCached ( ICacheManager mgr ) { Map < String , CacheEntry > moduleBuilds ; synchronized ( this ) { moduleBuilds = _moduleBuilds ; _moduleBuilds = null ; } if ( moduleBuilds != null ) { for ( Map . Entry < String , CacheEntry > entry : moduleBuilds . entrySet ( ) ) { entry . getValue ( ) . delete ( mgr ) ; } moduleBuilds . clear ( ) ; } } | Asynchronously delete the set of cached files for this module . |
17,185 | public static SplittableElementSet < VM > newVMIndex ( Collection < VM > c , TIntIntHashMap idx ) { return new SplittableElementSet < > ( c , idx ) ; } | Make a new splittable set from a collection of VM . We consider the collection does not have duplicated elements . |
17,186 | public static SplittableElementSet < Node > newNodeIndex ( Collection < Node > c , TIntIntHashMap idx ) { return new SplittableElementSet < > ( c , idx ) ; } | Make a new splittable set from a collection of nodes . We consider the collection does not have duplicated elements . |
17,187 | public boolean forEachPartition ( IterateProcedure < E > p ) { int curIdx = index . get ( values . get ( 0 ) . id ( ) ) ; int from ; int to ; for ( from = 0 , to = 0 ; to < values . size ( ) ; to ++ ) { int cIdx = index . get ( values . get ( to ) . id ( ) ) ; if ( curIdx != cIdx ) { if ( ! p . extract ( this , curIdx , from , to ) ) { return false ; } from = to ; curIdx = cIdx ; } } return p . extract ( this , curIdx , from , to ) ; } | Execute a procedure on each partition . The partition is indicated by its bounds on the backend array . |
17,188 | public Set < E > getSubSet ( int k ) { int from = - 1 ; for ( int x = 0 ; x < values . size ( ) ; x ++ ) { int cIdx = index . get ( values . get ( x ) . id ( ) ) ; if ( cIdx == k && from == - 1 ) { from = x ; } if ( from >= 0 && cIdx > k ) { return new ElementSubSet < > ( this , k , from , x ) ; } } if ( from >= 0 ) { return new ElementSubSet < > ( this , k , from , values . size ( ) ) ; } return Collections . emptySet ( ) ; } | Get a subset for the given partition . |
17,189 | public List < ElementSubSet < E > > getPartitions ( ) { final List < ElementSubSet < E > > partitions = new ArrayList < > ( ) ; forEachPartition ( ( idx , key , from , to ) -> { partitions . add ( new ElementSubSet < > ( SplittableElementSet . this , key , from , to ) ) ; return true ; } ) ; return partitions ; } | Get all the partitions as subsets . |
17,190 | public ICacheKeyGenerator combine ( ICacheKeyGenerator otherKeyGen ) { if ( this . equals ( otherKeyGen ) ) { return this ; } @ SuppressWarnings ( "unchecked" ) AbstractCollectionCacheKeyGenerator < T > other = ( AbstractCollectionCacheKeyGenerator < T > ) otherKeyGen ; if ( isProvisional ( ) && other . isProvisional ( ) ) { throw new IllegalStateException ( ) ; } if ( isProvisional ( ) ) { return other ; } else if ( other . isProvisional ( ) ) { return this ; } if ( getCollection ( ) == null ) { return this ; } if ( other . getCollection ( ) == null ) { return other ; } int size = getCollection ( ) . size ( ) , otherSize = other . getCollection ( ) . size ( ) ; if ( size > otherSize && getCollection ( ) . containsAll ( other . getCollection ( ) ) ) { return this ; } if ( otherSize > size && other . getCollection ( ) . containsAll ( getCollection ( ) ) ) { return other ; } Set < T > combined = new HashSet < T > ( ) ; combined . addAll ( getCollection ( ) ) ; combined . addAll ( other . getCollection ( ) ) ; return newKeyGen ( combined , false ) ; } | Returns a cache key generator that is the combination of this cache key generator and the specified cache key generator ( i . e . the cache keys generated by the returned object vary according to the conditions honored by this generator and the specified generator . |
17,191 | public static final void setReader ( String format , TreeReader reader ) { String key = format . toLowerCase ( ) ; readers . put ( key , reader ) ; if ( JSON . equals ( key ) ) { cachedJsonReader = reader ; } } | Binds the given TreeReader instance to the specified data format . |
17,192 | protected int resolveInclude ( ) throws XMLStreamException { this . fallback = false ; this . depth ++ ; int eventType = - 1 ; String href = getAttributeValue ( null , "href" ) ; LOGGER . trace ( "Resolving xi:include to href {}" , href ) ; String xpointer = getAttributeValue ( null , "xpointer" ) ; DataResource includeResource = this . resource . navigate ( href ) ; boolean success = false ; if ( includeResource . isAvailable ( ) ) { String parse = getAttributeValue ( null , "parse" ) ; if ( ( parse == null ) || ( "xml" . equals ( parse ) ) ) { this . includeReader = new XIncludeStreamReader ( this . factory , includeResource , this ) ; if ( xpointer != null ) { this . includeReader = new XPointerStreamReader ( this . includeReader , xpointer ) ; } eventType = this . includeReader . nextTag ( ) ; setParent ( this . includeReader ) ; closeInitialInclude ( ) ; success = true ; } else if ( "text" . equals ( parse ) ) { String encoding = getAttributeValue ( null , "encoding" ) ; Charset charset ; if ( encoding == null ) { charset = Charset . defaultCharset ( ) ; } else { charset = Charset . forName ( encoding ) ; } InputStream textInputStream = includeResource . openStream ( ) ; Reader reader = new InputStreamReader ( textInputStream , charset ) ; this . includeText = read ( reader ) ; closeInitialInclude ( ) ; return XMLStreamConstants . CHARACTERS ; } else { throw new XMLStreamException ( "Unsupported XInclude parse type:" + parse ) ; } } if ( ! success ) { do { eventType = super . next ( ) ; } while ( ( eventType != XMLStreamConstants . START_ELEMENT ) && ( eventType != XMLStreamConstants . END_ELEMENT ) ) ; if ( eventType == XMLStreamConstants . START_ELEMENT ) { if ( ( XmlUtil . NAMESPACE_URI_XINCLUDE . equals ( getNamespaceURI ( ) ) ) && ( "fallback" . equals ( getLocalName ( ) ) ) ) { this . 
fallback = true ; return next ( ) ; } } closeInitialInclude ( ) ; return next ( ) ; } return eventType ; } | This method is called when an include tag of the XInclude namespace was started . It resolves the include and finds a fallback on failure . |
17,193 | protected void closeInitialInclude ( ) throws XMLStreamException { LOGGER . trace ( "Closing xi:include" ) ; int eventType = - 1 ; while ( this . depth > 0 ) { eventType = this . mainReader . next ( ) ; if ( eventType == XMLStreamConstants . START_ELEMENT ) { LOGGER . trace ( "Closing loop: Start {}" , this . mainReader . getLocalName ( ) ) ; this . depth ++ ; } else if ( eventType == XMLStreamConstants . END_ELEMENT ) { LOGGER . trace ( "Closing loop: End {}" , this . mainReader . getLocalName ( ) ) ; this . depth -- ; } } LOGGER . trace ( "Closing xi:include complete" ) ; } | This method ascends the XML until the initial include is closed . |
17,194 | public boolean read ( ) { if ( state == State . READING_FRAME_SIZE ) { if ( ! internalRead ( frameSizeBuffer ) ) return false ; if ( frameSizeBuffer . remaining ( ) == 0 ) { int frameSize = frameSizeBuffer . getInt ( 0 ) ; if ( frameSize <= 0 ) { logger . error ( "Read an invalid frame size of " + frameSize + ". Are you using TFramedTransport on the client side?" ) ; return false ; } if ( frameSize > thriftFactories . maxFrameSizeInBytes ) { logger . error ( "Invalid frame size got (" + frameSize + "), maximum expected " + thriftFactories . maxFrameSizeInBytes ) ; return false ; } reallocateDataBuffer ( frameSize ) ; frameSizeBuffer . clear ( ) ; state = State . READING_FRAME ; } else { state = State . READY_TO_READ_FRAME_SIZE ; return true ; } } if ( state == State . READING_FRAME ) { if ( ! internalRead ( dataBuffer ) ) return false ; state = ( dataBuffer . remaining ( ) == 0 ) ? State . READ_FRAME_COMPLETE : State . READY_TO_READ_FRAME ; if ( state == State . READ_FRAME_COMPLETE ) { switchMode ( State . READ_FRAME_COMPLETE ) ; } return true ; } logger . error ( "Read was called but state is invalid (" + state + ")" ) ; return false ; } | Give this Message a chance to read . The selector loop should have received a read event for this Message . |
17,195 | public boolean write ( ) { assert state == State . WRITING ; boolean writeFailed = false ; try { if ( response . streamTo ( transport ) < 0 ) { writeFailed = true ; return false ; } else if ( ! response . isFullyStreamed ( ) ) { switchToWrite ( ) ; return true ; } } catch ( IOException e ) { logger . error ( "Got an IOException during write!" , e ) ; writeFailed = true ; return false ; } finally { if ( writeFailed || response . isFullyStreamed ( ) ) response . close ( ) ; } switchToRead ( ) ; return true ; } | Give this Message a chance to write its output to the final client . |
17,196 | public void changeSelectInterests ( ) { switch ( state ) { case READY_TO_WRITE : state = State . WRITING ; break ; case READY_TO_READ_FRAME_SIZE : state = State . READING_FRAME_SIZE ; break ; case READY_TO_READ_FRAME : state = State . READING_FRAME ; break ; case AWAITING_CLOSE : close ( ) ; selectionKey . cancel ( ) ; break ; default : logger . error ( "changeSelectInterest was called, but state is invalid (" + state + ")" ) ; } } | Give this Message a chance to change its interests . |
17,197 | public void invoke ( ) { assert state == State . READ_FRAME_COMPLETE : "Invoke called in invalid state: " + state ; TTransport inTrans = getInputTransport ( ) ; TProtocol inProt = thriftFactories . inputProtocolFactory . getProtocol ( inTrans ) ; TProtocol outProt = thriftFactories . outputProtocolFactory . getProtocol ( getOutputTransport ( ) ) ; try { thriftFactories . processorFactory . getProcessor ( inTrans ) . process ( inProt , outProt ) ; responseReady ( ) ; return ; } catch ( TException te ) { logger . warn ( "Exception while invoking!" , te ) ; } catch ( Throwable t ) { logger . error ( "Unexpected throwable while invoking!" , t ) ; } state = State . AWAITING_CLOSE ; changeSelectInterests ( ) ; } | Actually invoke the method signified by this Message . |
17,198 | private boolean internalRead ( Buffer buffer ) { try { return ! ( buffer . readFrom ( transport ) < 0 ) ; } catch ( IOException e ) { logger . warn ( "Got an IOException in internalRead!" , e ) ; return false ; } } | Perform a read into dataBuffer . |
17,199 | public void close ( ) { freeDataBuffer ( ) ; frameSizeBuffer . free ( ) ; transport . close ( ) ; if ( response != null ) response . close ( ) ; } | Shut the connection down . |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.