idx int64 0 41.2k | question stringlengths 74 4.21k | target stringlengths 5 888 |
|---|---|---|
20,400 | public String getCanonicalForm ( ) { StringBuilder buf = new StringBuilder ( ) ; for ( PathElement pe : elements ) { buf . append ( "." ) . append ( pe . getCanonicalForm ( ) ) ; } return buf . substring ( 1 ) ; } | Testing method . |
20,401 | public static < T > T navigateSafe ( final T defaultValue , final Object source , final Object ... paths ) { return navigateOrDefault ( defaultValue , source , paths ) ; } | Use navigateOrDefault which is a much better name . |
20,402 | public static boolean isVacantJson ( final Object obj ) { Collection values = null ; if ( obj instanceof Collection ) { if ( ( ( Collection ) obj ) . size ( ) == 0 ) { return true ; } values = ( Collection ) obj ; } if ( obj instanceof Map ) { if ( ( ( Map ) obj ) . size ( ) == 0 ) { return true ; } values = ( ( Map ) obj ) . values ( ) ; } int processedEmpty = 0 ; if ( values != null ) { for ( Object value : values ) { if ( ! isVacantJson ( value ) ) { return false ; } processedEmpty ++ ; } if ( processedEmpty == values . size ( ) ) { return true ; } } return false ; } | Vacant implies there are empty placeholders i . e . a vacant hotel Given a json document checks if it has any leaf values can handle deep nesting of lists and maps |
20,403 | public static List < Object [ ] > listKeyChains ( final Object source ) { List < Object [ ] > keyChainList = new LinkedList < > ( ) ; if ( source instanceof Map ) { Map sourceMap = ( Map ) source ; for ( Object key : sourceMap . keySet ( ) ) { keyChainList . addAll ( listKeyChains ( key , sourceMap . get ( key ) ) ) ; } } else if ( source instanceof List ) { List sourceList = ( List ) source ; for ( int i = 0 ; i < sourceList . size ( ) ; i ++ ) { keyChainList . addAll ( listKeyChains ( i , sourceList . get ( i ) ) ) ; } } else { return Collections . emptyList ( ) ; } return keyChainList ; } | Given a json document finds out absolute path to every leaf element |
20,404 | public static String toSimpleTraversrPath ( Object [ ] paths ) { StringBuilder pathBuilder = new StringBuilder ( ) ; for ( int i = 0 ; i < paths . length ; i ++ ) { Object path = paths [ i ] ; if ( path instanceof Integer ) { pathBuilder . append ( "[" ) . append ( ( ( Integer ) path ) . intValue ( ) ) . append ( "]" ) ; } else if ( path instanceof String ) { pathBuilder . append ( path . toString ( ) ) ; } else { throw new UnsupportedOperationException ( "Only Strings and Integers are supported as path element" ) ; } if ( ! ( i + 1 == paths . length ) ) { pathBuilder . append ( "." ) ; } } return pathBuilder . toString ( ) ; } | Converts a standard json path to human readable SimpleTraversr compatible path |
20,405 | @ SuppressWarnings ( "unchecked" ) public static Object compactJson ( Object source ) { if ( source == null ) return null ; if ( source instanceof List ) { for ( Object item : ( List ) source ) { if ( item instanceof List ) { compactJson ( item ) ; } else if ( item instanceof Map ) { compactJson ( item ) ; } } ( ( List ) source ) . removeAll ( Collections . singleton ( null ) ) ; } else if ( source instanceof Map ) { List keysToRemove = new LinkedList ( ) ; for ( Object key : ( ( Map ) source ) . keySet ( ) ) { Object value = ( ( Map ) source ) . get ( key ) ; if ( value instanceof List ) { if ( ( ( List ) value ) . size ( ) == 0 ) keysToRemove . add ( key ) ; else { compactJson ( value ) ; } } else if ( value instanceof Map ) { if ( ( ( Map ) value ) . size ( ) == 0 ) { keysToRemove . add ( key ) ; } else { compactJson ( value ) ; } } else if ( value == null ) { keysToRemove . add ( key ) ; } } for ( Object key : keysToRemove ) { ( ( Map ) source ) . remove ( key ) ; } } else { throw new UnsupportedOperationException ( "Only Map/String and List/Integer types are supported" ) ; } return source ; } | Given a fluffy json document it recursively removes all null elements to compact the json document |
20,406 | private void processChildren ( List < RemovrSpec > children , Object subInput ) { if ( subInput != null ) { if ( subInput instanceof List ) { List < Object > subList = ( List < Object > ) subInput ; Set < Integer > indiciesToRemove = new HashSet < > ( ) ; for ( RemovrSpec childSpec : children ) { indiciesToRemove . addAll ( childSpec . applyToList ( subList ) ) ; } List < Integer > uniqueIndiciesToRemove = new ArrayList < > ( indiciesToRemove ) ; Collections . sort ( uniqueIndiciesToRemove , new Comparator < Integer > ( ) { public int compare ( Integer o1 , Integer o2 ) { return o2 . compareTo ( o1 ) ; } } ) ; for ( int index : uniqueIndiciesToRemove ) { subList . remove ( index ) ; } } else if ( subInput instanceof Map ) { Map < String , Object > subInputMap = ( Map < String , Object > ) subInput ; List < String > keysToRemove = new LinkedList < > ( ) ; for ( RemovrSpec childSpec : children ) { keysToRemove . addAll ( childSpec . applyToMap ( subInputMap ) ) ; } subInputMap . keySet ( ) . removeAll ( keysToRemove ) ; } } } | Call our child nodes build up the set of keys or indices to actually remove and then remove them . |
20,407 | public static < T > T jsonTo ( String json , TypeReference < T > typeRef ) { return util . stringToType ( json , typeRef ) ; } | Use the stringToType method instead . |
20,408 | @ SuppressWarnings ( "unchecked" ) public static < T > T navigate ( Object source , Object ... paths ) throws NullPointerException , UnsupportedOperationException { Object destination = source ; for ( Object path : paths ) { if ( destination == null ) throw new NullPointerException ( "Navigation not possible on null object" ) ; if ( destination instanceof Map ) destination = ( ( Map ) destination ) . get ( path ) ; else if ( path instanceof Integer && destination instanceof List ) destination = ( ( List ) destination ) . get ( ( Integer ) path ) ; else throw new UnsupportedOperationException ( "Navigation supports only Map and List source types and non-null String and Integer path types" ) ; } return ( T ) destination ; } | Navigate inside a json object in quick and dirty way . |
20,409 | public static boolean isBlank ( CharSequence sourceSequence ) { int sequenceLength ; if ( sourceSequence == null || ( sequenceLength = sourceSequence . length ( ) ) == 0 ) { return true ; } for ( int i = 0 ; i < sequenceLength ; i ++ ) { if ( ( Character . isWhitespace ( sourceSequence . charAt ( i ) ) == false ) ) { return false ; } } return true ; } | Check if a sequence is blank |
20,410 | public void intializeSubCommand ( Subparsers subparsers ) { Subparser sortParser = subparsers . addParser ( "sort" ) . description ( "Jolt CLI Sort Tool. This tool will ingest one JSON input (from a file or standard input) and " + "perform the Jolt sort operation on it. The sort order is standard alphabetical ascending, with a " + "special case for \"~\" prefixed keys to be bumped to the top. The program will return an exit code " + "of 0 if the sort operation is performed successfully or a 1 if an error is encountered." ) . defaultHelp ( true ) ; sortParser . addArgument ( "input" ) . help ( "File path to the input JSON that the sort operation should be performed on. " + "This file should contain valid JSON. " + "If this argument is not specified then standard input will be used." ) . type ( Arguments . fileType ( ) . verifyExists ( ) . verifyIsFile ( ) . verifyCanRead ( ) ) . nargs ( "?" ) . setDefault ( ( File ) null ) . required ( false ) ; sortParser . addArgument ( "-u" ) . help ( "Turns off pretty print for the output. Output will be raw json with no formatting." ) . action ( Arguments . storeTrue ( ) ) ; } | Initialize the arg parser for the Sort sub command |
20,411 | protected Integer getNonNegativeIntegerFromLiteralPathElement ( ) { Integer pathElementInt = null ; try { pathElementInt = Integer . parseInt ( pathElement . getRawKey ( ) ) ; if ( pathElementInt < 0 ) { return null ; } } catch ( NumberFormatException nfe ) { } return pathElementInt ; } | Try to interpret the spec String value as a non - negative integer . |
20,412 | public void intializeSubCommand ( Subparsers subparsers ) { Subparser transformParser = subparsers . addParser ( "transform" ) . description ( "Jolt CLI Transform Tool. This tool will ingest a JSON spec file and an JSON input (from a file or " + "standard input) and run the transforms specified in the spec file on the input. The program will return an " + "exit code of 0 if the input is transformed successfully or a 1 if an error is encountered" ) . defaultHelp ( true ) ; File nullFile = null ; transformParser . addArgument ( "spec" ) . help ( "File path to Jolt Transform Spec to execute on the input. " + "This file should contain valid JSON." ) . type ( Arguments . fileType ( ) . verifyExists ( ) . verifyIsFile ( ) . verifyCanRead ( ) ) ; transformParser . addArgument ( "input" ) . help ( "File path to the input JSON for the Jolt Transform operation. " + "This file should contain valid JSON. " + "If this argument is not specified then standard input will be used." ) . type ( Arguments . fileType ( ) . verifyExists ( ) . verifyIsFile ( ) . verifyCanRead ( ) ) . nargs ( "?" ) . setDefault ( nullFile ) ; transformParser . addArgument ( "-u" ) . help ( "Turns off pretty print for the output. Output will be raw json with no formatting." ) . action ( Arguments . storeTrue ( ) ) ; } | Initialize the arg parser for the Transform sub command |
20,413 | public boolean process ( Namespace ns ) { Chainr chainr ; try { chainr = ChainrFactory . fromFile ( ( File ) ns . get ( "spec" ) ) ; } catch ( Exception e ) { JoltCliUtilities . printToStandardOut ( "Chainr failed to load spec file." , SUPPRESS_OUTPUT ) ; e . printStackTrace ( System . out ) ; return false ; } File file = ns . get ( "input" ) ; Object input = JoltCliUtilities . readJsonInput ( file , SUPPRESS_OUTPUT ) ; Object output ; try { output = chainr . transform ( input ) ; } catch ( Exception e ) { JoltCliUtilities . printToStandardOut ( "Chainr failed to run spec file." , SUPPRESS_OUTPUT ) ; return false ; } Boolean uglyPrint = ns . getBoolean ( "u" ) ; return JoltCliUtilities . printJsonObject ( output , uglyPrint , SUPPRESS_OUTPUT ) ; } | Process the transform sub command |
20,414 | public static Chainr fromClassPath ( String chainrSpecClassPath , ChainrInstantiator chainrInstantiator ) { Object chainrSpec = JsonUtils . classpathToObject ( chainrSpecClassPath ) ; return getChainr ( chainrInstantiator , chainrSpec ) ; } | Builds a Chainr instance using the spec described in the data via the class path that is passed in . |
20,415 | public static Chainr fromFileSystem ( String chainrSpecFilePath , ChainrInstantiator chainrInstantiator ) { Object chainrSpec = JsonUtils . filepathToObject ( chainrSpecFilePath ) ; return getChainr ( chainrInstantiator , chainrSpec ) ; } | Builds a Chainr instance using the spec described in the data via the file path that is passed in . |
20,416 | public static Chainr fromFile ( File chainrSpecFile , ChainrInstantiator chainrInstantiator ) { Object chainrSpec ; try { FileInputStream fileInputStream = new FileInputStream ( chainrSpecFile ) ; chainrSpec = JsonUtils . jsonToObject ( fileInputStream ) ; } catch ( Exception e ) { throw new RuntimeException ( "Unable to load chainr spec file " + chainrSpecFile . getAbsolutePath ( ) ) ; } return getChainr ( chainrInstantiator , chainrSpec ) ; } | Builds a Chainr instance using the spec described in the File that is passed in . |
20,417 | private static Chainr getChainr ( ChainrInstantiator chainrInstantiator , Object chainrSpec ) { Chainr chainr ; if ( chainrInstantiator == null ) { chainr = Chainr . fromSpec ( chainrSpec ) ; } else { chainr = Chainr . fromSpec ( chainrSpec , chainrInstantiator ) ; } return chainr ; } | The main engine in ChainrFactory for building a Chainr Instance . |
20,418 | public Object transform ( Object input ) { Map < String , Object > wrappedMap = new HashMap < > ( ) ; wrappedMap . put ( ROOT_KEY , input ) ; rootSpec . applyToMap ( wrappedMap ) ; return input ; } | Recursively removes data from the input JSON . |
20,419 | public Object transform ( Object input ) { Map < String , Object > output = new HashMap < > ( ) ; MatchedElement rootLpe = new MatchedElement ( ROOT_KEY ) ; WalkedPath walkedPath = new WalkedPath ( ) ; walkedPath . add ( input , rootLpe ) ; rootSpec . apply ( ROOT_KEY , Optional . of ( input ) , walkedPath , output , null ) ; return output . get ( ROOT_KEY ) ; } | Applies the Shiftr transform . |
20,420 | public static MatchablePathElement buildMatchablePathElement ( String rawJsonKey ) { PathElement pe = PathElementBuilder . parseSingleKeyLHS ( rawJsonKey ) ; if ( ! ( pe instanceof MatchablePathElement ) ) { throw new SpecException ( "Spec LHS key=" + rawJsonKey + " is not a valid LHS key." ) ; } return ( MatchablePathElement ) pe ; } | Create a path element and ensures it is a Matchable Path Element |
20,421 | public static PathElement parseSingleKeyLHS ( String origKey ) { String elementKey ; String keyToInspect ; if ( origKey . contains ( "\\" ) ) { keyToInspect = removeEscapedValues ( origKey ) ; elementKey = removeEscapeChars ( origKey ) ; } else { keyToInspect = origKey ; elementKey = origKey ; } if ( "@" . equals ( keyToInspect ) ) { return new AtPathElement ( elementKey ) ; } else if ( "*" . equals ( keyToInspect ) ) { return new StarAllPathElement ( elementKey ) ; } else if ( keyToInspect . startsWith ( "[" ) ) { if ( StringTools . countMatches ( keyToInspect , "[" ) != 1 || StringTools . countMatches ( keyToInspect , "]" ) != 1 ) { throw new SpecException ( "Invalid key:" + origKey + " has too many [] references." ) ; } return new ArrayPathElement ( elementKey ) ; } else if ( keyToInspect . startsWith ( "@" ) || keyToInspect . contains ( "@(" ) ) { return TransposePathElement . parse ( origKey ) ; } else if ( keyToInspect . contains ( "@" ) ) { throw new SpecException ( "Invalid key:" + origKey + " can not have an @ other than at the front." ) ; } else if ( keyToInspect . contains ( "$" ) ) { return new DollarPathElement ( elementKey ) ; } else if ( keyToInspect . contains ( "[" ) ) { if ( StringTools . countMatches ( keyToInspect , "[" ) != 1 || StringTools . countMatches ( keyToInspect , "]" ) != 1 ) { throw new SpecException ( "Invalid key:" + origKey + " has too many [] references." ) ; } return new ArrayPathElement ( elementKey ) ; } else if ( keyToInspect . contains ( "&" ) ) { if ( keyToInspect . contains ( "*" ) ) { throw new SpecException ( "Invalid key:" + origKey + ", Can't mix * with & ) " ) ; } return new AmpPathElement ( elementKey ) ; } else if ( keyToInspect . contains ( "*" ) ) { int numOfStars = StringTools . 
countMatches ( keyToInspect , "*" ) ; if ( numOfStars == 1 ) { return new StarSinglePathElement ( elementKey ) ; } else if ( numOfStars == 2 ) { return new StarDoublePathElement ( elementKey ) ; } else { return new StarRegexPathElement ( elementKey ) ; } } else if ( keyToInspect . contains ( "#" ) ) { return new HashPathElement ( elementKey ) ; } else { return new LiteralPathElement ( elementKey ) ; } } | Visible for Testing . |
20,422 | public static List < PathElement > parseDotNotationRHS ( String dotNotation ) { String fixedNotation = fixLeadingBracketSugar ( dotNotation ) ; List < String > pathStrs = parseDotNotation ( new LinkedList < String > ( ) , stringIterator ( fixedNotation ) , dotNotation ) ; return parseList ( pathStrs , dotNotation ) ; } | Parse the dotNotation of the RHS . |
20,423 | public void intializeSubCommand ( Subparsers subparsers ) { Subparser diffyParser = subparsers . addParser ( "diffy" ) . description ( "Jolt CLI Diffy Tool. This tool will ingest two JSON inputs (from files or standard input) and " + "perform the Jolt Diffy operation to detect any differences. The program will return an exit code of " + "0 if no differences are found or a 1 if a difference is found or an error is encountered." ) . defaultHelp ( true ) ; diffyParser . addArgument ( "filePath1" ) . help ( "File path to feed to Input #1 for the Diffy operation. " + "This file should contain valid JSON." ) . type ( Arguments . fileType ( ) . verifyExists ( ) . verifyIsFile ( ) . verifyCanRead ( ) ) ; diffyParser . addArgument ( "filePath2" ) . help ( "File path to feed to Input #2 for the Diffy operation. " + "This file should contain valid JSON. " + "If this argument is not specified then standard input will be used." ) . type ( Arguments . fileType ( ) . verifyExists ( ) . verifyIsFile ( ) . verifyCanRead ( ) ) . nargs ( "?" ) . setDefault ( ( File ) null ) ; diffyParser . addArgument ( "-s" ) . help ( "Diffy will suppress output and run silently." ) . action ( Arguments . storeTrue ( ) ) ; diffyParser . addArgument ( "-a" ) . help ( "Diffy will not consider array order when detecting differences" ) . action ( Arguments . storeTrue ( ) ) ; } | Initialize the arg parser for the Diffy sub command |
20,424 | public boolean process ( Namespace ns ) { boolean suppressOutput = ns . getBoolean ( "s" ) ; Object jsonObject1 = JoltCliUtilities . createJsonObjectFromFile ( ( File ) ns . get ( "filePath1" ) , suppressOutput ) ; File file = ns . get ( "filePath2" ) ; Object jsonObject2 = JoltCliUtilities . readJsonInput ( file , suppressOutput ) ; Diffy diffy ; if ( ns . getBoolean ( "a" ) ) { diffy = new ArrayOrderObliviousDiffy ( ) ; } else { diffy = new Diffy ( ) ; } Diffy . Result result = diffy . diff ( jsonObject1 , jsonObject2 ) ; if ( result . isEmpty ( ) ) { JoltCliUtilities . printToStandardOut ( "Diffy found no differences" , suppressOutput ) ; return true ; } else { try { JoltCliUtilities . printToStandardOut ( "Differences found. Input #1 contained this:\n" + JsonUtils . toPrettyJsonString ( result . expected ) + "\n" + "Input #2 contained this:\n" + JsonUtils . toPrettyJsonString ( result . actual ) , suppressOutput ) ; } catch ( Exception e ) { JoltCliUtilities . printToStandardOut ( "Differences found, but diffy encountered an error while writing the result." , suppressOutput ) ; } return false ; } } | Process the Diffy Subcommand |
20,425 | public static List < String > parseDotNotation ( List < String > pathStrings , Iterator < Character > iter , String dotNotationRef ) { if ( ! iter . hasNext ( ) ) { return pathStrings ; } boolean prevIsEscape = false ; boolean currIsEscape = false ; StringBuilder sb = new StringBuilder ( ) ; char c ; while ( iter . hasNext ( ) ) { c = iter . next ( ) ; currIsEscape = false ; if ( c == '\\' && ! prevIsEscape ) { currIsEscape = true ; } if ( prevIsEscape && c != '.' && c != '\\' ) { sb . append ( '\\' ) ; sb . append ( c ) ; } else if ( c == '@' ) { sb . append ( '@' ) ; sb . append ( parseAtPathElement ( iter , dotNotationRef ) ) ; boolean isPartOfArray = sb . indexOf ( "[" ) != - 1 && sb . indexOf ( "]" ) == - 1 ; if ( ! isPartOfArray ) { pathStrings . add ( sb . toString ( ) ) ; sb = new StringBuilder ( ) ; } } else if ( c == '.' ) { if ( prevIsEscape ) { sb . append ( '.' ) ; } else { if ( sb . length ( ) != 0 ) { pathStrings . add ( sb . toString ( ) ) ; } return parseDotNotation ( pathStrings , iter , dotNotationRef ) ; } } else if ( ! currIsEscape ) { sb . append ( c ) ; } prevIsEscape = currIsEscape ; } if ( sb . length ( ) != 0 ) { pathStrings . add ( sb . toString ( ) ) ; } return pathStrings ; } | Method that recursively parses a dotNotation String based on an iterator . |
20,426 | public static String removeEscapeChars ( String origKey ) { StringBuilder sb = new StringBuilder ( ) ; boolean prevWasEscape = false ; for ( char c : origKey . toCharArray ( ) ) { if ( '\\' == c ) { if ( prevWasEscape ) { sb . append ( c ) ; prevWasEscape = false ; } else { prevWasEscape = true ; } } else { sb . append ( c ) ; prevWasEscape = false ; } } return sb . toString ( ) ; } | given rating \\ pants - > rating \ pants escape the escape char |
20,427 | public void sendOutState ( State < Serializable , Serializable > state , String checkpointId , boolean spillState , String location ) { outputter . sendOutState ( state , checkpointId , spillState , location ) ; } | Flush the states |
20,428 | @ SuppressWarnings ( "deprecation" ) public StormTopology getRawTopology ( ) { StormTopology stormTopology = new StormTopology ( ) ; Map < String , SpoutSpec > spouts = new HashMap < > ( ) ; for ( TopologyAPI . Spout spout : this . delegate . getRawTopology ( ) . getSpoutsList ( ) ) { spouts . put ( spout . getComp ( ) . getName ( ) , new SpoutSpec ( spout ) ) ; } Map < String , Bolt > bolts = new HashMap < > ( ) ; for ( TopologyAPI . Bolt bolt : this . delegate . getRawTopology ( ) . getBoltsList ( ) ) { bolts . put ( bolt . getComp ( ) . getName ( ) , new Bolt ( bolt ) ) ; } stormTopology . set_spouts ( spouts ) ; stormTopology . set_bolts ( bolts ) ; return stormTopology ; } | Gets the Thrift object representing the topology . |
20,429 | public void handleInstanceExecutor ( ) { for ( InstanceExecutor executor : taskIdToInstanceExecutor . values ( ) ) { boolean isLocalSpout = spoutSets . contains ( executor . getComponentName ( ) ) ; int taskId = executor . getTaskId ( ) ; int items = executor . getStreamOutQueue ( ) . size ( ) ; for ( int i = 0 ; i < items ; i ++ ) { Message msg = executor . getStreamOutQueue ( ) . poll ( ) ; if ( msg instanceof HeronTuples . HeronTupleSet ) { HeronTuples . HeronTupleSet tupleSet = ( HeronTuples . HeronTupleSet ) msg ; if ( tupleSet . hasData ( ) ) { HeronTuples . HeronDataTupleSet d = tupleSet . getData ( ) ; TopologyAPI . StreamId streamId = d . getStream ( ) ; for ( HeronTuples . HeronDataTuple tuple : d . getTuplesList ( ) ) { List < Integer > outTasks = this . topologyManager . getListToSend ( streamId , tuple ) ; outTasks . addAll ( tuple . getDestTaskIdsList ( ) ) ; if ( outTasks . isEmpty ( ) ) { LOG . severe ( "Nobody to send the tuple to" ) ; } copyDataOutBound ( taskId , isLocalSpout , streamId , tuple , outTasks ) ; } } if ( tupleSet . hasControl ( ) ) { HeronTuples . HeronControlTupleSet c = tupleSet . getControl ( ) ; for ( HeronTuples . AckTuple ack : c . getAcksList ( ) ) { copyControlOutBound ( tupleSet . getSrcTaskId ( ) , ack , true ) ; } for ( HeronTuples . AckTuple fail : c . getFailsList ( ) ) { copyControlOutBound ( tupleSet . getSrcTaskId ( ) , fail , false ) ; } } } } } } | Handle the execution of the instance |
20,430 | protected boolean isSendTuplesToInstance ( List < Integer > taskIds ) { for ( Integer taskId : taskIds ) { if ( taskIdToInstanceExecutor . get ( taskId ) . getStreamInQueue ( ) . remainingCapacity ( ) <= 0 ) { return false ; } } return true ; } | Check whether target destination task has free room to receive more tuples |
20,431 | protected void copyDataOutBound ( int sourceTaskId , boolean isLocalSpout , TopologyAPI . StreamId streamId , HeronTuples . HeronDataTuple tuple , List < Integer > outTasks ) { boolean firstIteration = true ; boolean isAnchored = tuple . getRootsCount ( ) > 0 ; for ( Integer outTask : outTasks ) { long tupleKey = tupleCache . addDataTuple ( sourceTaskId , outTask , streamId , tuple , isAnchored ) ; if ( isAnchored ) { if ( isLocalSpout ) { if ( firstIteration ) { xorManager . create ( sourceTaskId , tuple . getRoots ( 0 ) . getKey ( ) , tupleKey ) ; } else { xorManager . anchor ( sourceTaskId , tuple . getRoots ( 0 ) . getKey ( ) , tupleKey ) ; } } else { for ( HeronTuples . RootId rootId : tuple . getRootsList ( ) ) { HeronTuples . AckTuple t = HeronTuples . AckTuple . newBuilder ( ) . addRoots ( rootId ) . setAckedtuple ( tupleKey ) . build ( ) ; tupleCache . addEmitTuple ( sourceTaskId , rootId . getTaskid ( ) , t ) ; } } } firstIteration = false ; } } | Process HeronDataTuple and insert it into cache |
20,432 | protected void copyControlOutBound ( int srcTaskId , HeronTuples . AckTuple control , boolean isSuccess ) { for ( HeronTuples . RootId rootId : control . getRootsList ( ) ) { HeronTuples . AckTuple t = HeronTuples . AckTuple . newBuilder ( ) . addRoots ( rootId ) . setAckedtuple ( control . getAckedtuple ( ) ) . build ( ) ; if ( isSuccess ) { tupleCache . addAckTuple ( srcTaskId , rootId . getTaskid ( ) , t ) ; } else { tupleCache . addFailTuple ( srcTaskId , rootId . getTaskid ( ) , t ) ; } } } | Process HeronAckTuple and insert it into cache |
20,433 | protected void processAcksAndFails ( int srcTaskId , int taskId , HeronTuples . HeronControlTupleSet controlTupleSet ) { HeronTuples . HeronTupleSet . Builder out = HeronTuples . HeronTupleSet . newBuilder ( ) ; out . setSrcTaskId ( srcTaskId ) ; for ( HeronTuples . AckTuple emitTuple : controlTupleSet . getEmitsList ( ) ) { for ( HeronTuples . RootId rootId : emitTuple . getRootsList ( ) ) { xorManager . anchor ( taskId , rootId . getKey ( ) , emitTuple . getAckedtuple ( ) ) ; } } for ( HeronTuples . AckTuple ackTuple : controlTupleSet . getAcksList ( ) ) { for ( HeronTuples . RootId rootId : ackTuple . getRootsList ( ) ) { if ( xorManager . anchor ( taskId , rootId . getKey ( ) , ackTuple . getAckedtuple ( ) ) ) { HeronTuples . AckTuple . Builder a = out . getControlBuilder ( ) . addAcksBuilder ( ) ; HeronTuples . RootId . Builder r = a . addRootsBuilder ( ) ; r . setKey ( rootId . getKey ( ) ) ; r . setTaskid ( taskId ) ; a . setAckedtuple ( 0 ) ; xorManager . remove ( taskId , rootId . getKey ( ) ) ; } } } for ( HeronTuples . AckTuple failTuple : controlTupleSet . getFailsList ( ) ) { for ( HeronTuples . RootId rootId : failTuple . getRootsList ( ) ) { if ( xorManager . remove ( taskId , rootId . getKey ( ) ) ) { HeronTuples . AckTuple . Builder f = out . getControlBuilder ( ) . addFailsBuilder ( ) ; HeronTuples . RootId . Builder r = f . addRootsBuilder ( ) ; r . setKey ( rootId . getKey ( ) ) ; r . setTaskid ( taskId ) ; f . setAckedtuple ( 0 ) ; } } } if ( out . hasControl ( ) ) { sendMessageToInstance ( taskId , out . build ( ) ) ; } } | Do the XOR control and send the ack tuples to instance if necessary |
20,434 | protected void drainCache ( ) { Map < Integer , List < HeronTuples . HeronTupleSet > > cache = tupleCache . getCache ( ) ; if ( ! isSendTuplesToInstance ( new LinkedList < Integer > ( cache . keySet ( ) ) ) ) { return ; } for ( Map . Entry < Integer , List < HeronTuples . HeronTupleSet > > entry : cache . entrySet ( ) ) { int taskId = entry . getKey ( ) ; for ( HeronTuples . HeronTupleSet message : entry . getValue ( ) ) { sendInBound ( taskId , message ) ; } } tupleCache . clear ( ) ; } | Drain the TupleCache if there are room in destination tasks |
20,435 | protected void sendInBound ( int taskId , HeronTuples . HeronTupleSet message ) { if ( message . hasData ( ) ) { sendMessageToInstance ( taskId , message ) ; } if ( message . hasControl ( ) ) { processAcksAndFails ( message . getSrcTaskId ( ) , taskId , message . getControl ( ) ) ; } } | Send Stream to instance |
20,436 | protected void sendMessageToInstance ( int taskId , HeronTuples . HeronTupleSet message ) { taskIdToInstanceExecutor . get ( taskId ) . getStreamInQueue ( ) . offer ( message ) ; } | Send one message to target task |
20,437 | public static Map < String , Resource > getComponentResourceMap ( Set < String > components , Map < String , ByteAmount > componentRamMap , Map < String , Double > componentCpuMap , Map < String , ByteAmount > componentDiskMap , Resource defaultInstanceResource ) { Map < String , Resource > componentResourceMap = new HashMap < > ( ) ; for ( String component : components ) { ByteAmount instanceRam = componentRamMap . getOrDefault ( component , defaultInstanceResource . getRam ( ) ) ; double instanceCpu = componentCpuMap . getOrDefault ( component , defaultInstanceResource . getCpu ( ) ) ; ByteAmount instanceDisk = componentDiskMap . getOrDefault ( component , defaultInstanceResource . getDisk ( ) ) ; componentResourceMap . put ( component , new Resource ( instanceCpu , instanceRam , instanceDisk ) ) ; } return componentResourceMap ; } | Compose the component resource map by reading from user configs or default |
20,438 | public static Map < String , Integer > getComponentsToScale ( Map < String , Integer > componentChanges , ScalingDirection scalingDirection ) { Map < String , Integer > componentsToScale = new HashMap < String , Integer > ( ) ; for ( String component : componentChanges . keySet ( ) ) { int parallelismChange = componentChanges . get ( component ) ; if ( scalingDirection . includes ( parallelismChange ) ) { componentsToScale . put ( component , parallelismChange ) ; } } return componentsToScale ; } | Identifies which components need to be scaled given specific scaling direction |
20,439 | public static Resource computeTotalResourceChange ( TopologyAPI . Topology topology , Map < String , Integer > componentChanges , Resource defaultInstanceResources , ScalingDirection scalingDirection ) { double cpu = 0 ; ByteAmount ram = ByteAmount . ZERO ; ByteAmount disk = ByteAmount . ZERO ; Map < String , ByteAmount > ramMap = TopologyUtils . getComponentRamMapConfig ( topology ) ; Map < String , Integer > componentsToScale = PackingUtils . getComponentsToScale ( componentChanges , scalingDirection ) ; for ( String component : componentsToScale . keySet ( ) ) { int parallelismChange = Math . abs ( componentChanges . get ( component ) ) ; cpu += parallelismChange * defaultInstanceResources . getCpu ( ) ; disk = disk . plus ( defaultInstanceResources . getDisk ( ) . multiply ( parallelismChange ) ) ; if ( ramMap . containsKey ( component ) ) { ram = ram . plus ( ramMap . get ( component ) . multiply ( parallelismChange ) ) ; } else { ram = ram . plus ( defaultInstanceResources . getRam ( ) . multiply ( parallelismChange ) ) ; } } return new Resource ( cpu , ram , disk ) ; } | Identifies the resources reclaimed by the components that will be scaled down |
20,440 | private static Options constructHelpOptions ( ) { Options options = new Options ( ) ; Option help = Option . builder ( "h" ) . desc ( "List all options and their description" ) . longOpt ( "help" ) . build ( ) ; options . addOption ( help ) ; return options ; } | construct command line help options |
20,441 | public void manageTopology ( ) throws TopologyRuntimeManagementException , TMasterException , PackingException { String topologyName = Context . topologyName ( config ) ; String statemgrClass = Context . stateManagerClass ( config ) ; IStateManager statemgr ; try { statemgr = ReflectionUtils . newInstance ( statemgrClass ) ; } catch ( IllegalAccessException | InstantiationException | ClassNotFoundException e ) { throw new TopologyRuntimeManagementException ( String . format ( "Failed to instantiate state manager class '%s'" , statemgrClass ) , e ) ; } try { statemgr . initialize ( config ) ; SchedulerStateManagerAdaptor adaptor = new SchedulerStateManagerAdaptor ( statemgr , 5000 ) ; boolean hasExecutionData = validateRuntimeManage ( adaptor , topologyName ) ; LOG . log ( Level . FINE , "Topology: {0} to be {1}ed" , new Object [ ] { topologyName , command } ) ; Config runtime = Config . newBuilder ( ) . put ( Key . TOPOLOGY_NAME , Context . topologyName ( config ) ) . put ( Key . SCHEDULER_STATE_MANAGER_ADAPTOR , adaptor ) . build ( ) ; ISchedulerClient schedulerClient = getSchedulerClient ( runtime ) ; callRuntimeManagerRunner ( runtime , schedulerClient , ! hasExecutionData ) ; } finally { SysUtils . closeIgnoringExceptions ( statemgr ) ; } } | Manager a topology 1 . Instantiate necessary resources 2 . Valid whether the runtime management is legal 3 . Complete the runtime management for a specific command |
20,442 | protected void validateExecutionState ( String topologyName , ExecutionEnvironment . ExecutionState executionState ) throws TopologyRuntimeManagementException { String stateCluster = executionState . getCluster ( ) ; String stateRole = executionState . getRole ( ) ; String stateEnv = executionState . getEnviron ( ) ; String configCluster = Context . cluster ( config ) ; String configRole = Context . role ( config ) ; String configEnv = Context . environ ( config ) ; if ( ! stateCluster . equals ( configCluster ) || ! stateRole . equals ( configRole ) || ! stateEnv . equals ( configEnv ) ) { String currentState = String . format ( "%s/%s/%s" , stateCluster , stateRole , stateEnv ) ; String configState = String . format ( "%s/%s/%s" , configCluster , configRole , configEnv ) ; throw new TopologyRuntimeManagementException ( String . format ( "cluster/role/environ does not match. Topology '%s' is running at %s, not %s" , topologyName , currentState , configState ) ) ; } } | Verify that the environment information in execution state matches the request |
20,443 | public int fieldIndex ( String field ) { Integer ret = mIndex . get ( field ) ; if ( ret == null ) { throw new IllegalArgumentException ( field + " does not exist" ) ; } return ret ; } | Returns the position of the specified field . |
20,444 | private Configuration getClientConf ( ) { return HeronClientConfiguration . CONF . set ( ClientConfiguration . ON_JOB_RUNNING , ReefClientSideHandlers . RunningJobHandler . class ) . set ( ClientConfiguration . ON_JOB_FAILED , ReefClientSideHandlers . FailedJobHandler . class ) . set ( ClientConfiguration . ON_RUNTIME_ERROR , ReefClientSideHandlers . RuntimeErrorHandler . class ) . set ( HeronClientConfiguration . TOPOLOGY_NAME , topologyName ) . build ( ) ; } | Builds and returns configuration needed by REEF client to launch topology as a REEF job and track it . |
20,445 | private void handleSelectedKeys ( ) { Set < SelectionKey > selectedKeys = selector . selectedKeys ( ) ; Iterator < SelectionKey > keyIterator = selectedKeys . iterator ( ) ; while ( keyIterator . hasNext ( ) ) { SelectionKey key = keyIterator . next ( ) ; keyIterator . remove ( ) ; ISelectHandler callback = ( ISelectHandler ) key . attachment ( ) ; if ( ! key . isValid ( ) ) { callback . handleError ( key . channel ( ) ) ; continue ; } if ( key . isValid ( ) && key . isWritable ( ) ) { callback . handleWrite ( key . channel ( ) ) ; } if ( key . isValid ( ) && key . isReadable ( ) ) { callback . handleRead ( key . channel ( ) ) ; } if ( key . isValid ( ) && key . isConnectable ( ) ) { callback . handleConnect ( key . channel ( ) ) ; } if ( key . isValid ( ) && key . isAcceptable ( ) ) { callback . handleAccept ( key . channel ( ) ) ; } } } | Handle the selected keys |
20,446 | public void registerRead ( SelectableChannel channel , ISelectHandler callback ) throws ClosedChannelException { assert channel . keyFor ( selector ) == null || ( channel . keyFor ( selector ) . interestOps ( ) & SelectionKey . OP_CONNECT ) == 0 ; addInterest ( channel , SelectionKey . OP_READ , callback ) ; } | The following are the register / unregister / isRegistered methods for different operations on the selector and channel
20,447 | private void removeInterest ( SelectableChannel channel , int operation ) { SelectionKey key = channel . keyFor ( selector ) ; key . interestOps ( key . interestOps ( ) & ( ~ operation ) ) ; } | Remove one operation interest on a SelectableChannel . The SelectableChannel has to be registered with the Selector beforehand ; otherwise a NullPointerException would be thrown . The key for the SelectableChannel has to be valid ; otherwise a CancelledKeyException would be thrown .
20,448 | private boolean isInterestRegistered ( SelectableChannel channel , int operation ) { SelectionKey key = channel . keyFor ( selector ) ; return key != null && ( key . interestOps ( ) & operation ) != 0 ; } | Check whether an operation interest was registered on a SelectableChannel . There are two cases where the interest is not registered : 1 . The whole key does not exist ; no interests were ever registered for this channel 2 . The key exists due to other registered interests , but not the one we are checking . If the key exists , it has to be valid ; otherwise a CancelledKeyException would be thrown .
20,449 | public URI uploadPackage ( ) throws UploaderException { boolean fileExists = new File ( topologyPackageLocation ) . isFile ( ) ; if ( ! fileExists ) { throw new UploaderException ( String . format ( "Topology package does not exist at '%s'" , topologyPackageLocation ) ) ; } Path filePath = Paths . get ( destTopologyFile ) ; File parentDirectory = filePath . getParent ( ) . toFile ( ) ; assert parentDirectory != null ; if ( ! parentDirectory . exists ( ) ) { LOG . fine ( String . format ( "Working directory does not exist. Creating it now at %s" , parentDirectory . getPath ( ) ) ) ; if ( ! parentDirectory . mkdirs ( ) ) { throw new UploaderException ( String . format ( "Failed to create directory for topology package at %s" , parentDirectory . getPath ( ) ) ) ; } } fileExists = new File ( filePath . toString ( ) ) . isFile ( ) ; if ( fileExists ) { LOG . fine ( String . format ( "Target topology package already exists at '%s'. Overwriting it now" , filePath . toString ( ) ) ) ; } LOG . fine ( String . format ( "Copying topology package at '%s' to target working directory '%s'" , topologyPackageLocation , filePath . toString ( ) ) ) ; Path source = Paths . get ( topologyPackageLocation ) ; try { CopyOption [ ] options = new CopyOption [ ] { StandardCopyOption . REPLACE_EXISTING } ; Files . copy ( source , filePath , options ) ; } catch ( IOException e ) { throw new UploaderException ( String . format ( "Unable to copy topology file from '%s' to '%s'" , source , filePath ) , e ) ; } return getUri ( destTopologyFile ) ; } | Upload the topology package to the destined location in local file system |
20,450 | public boolean undo ( ) { if ( destTopologyFile != null ) { LOG . info ( "Clean uploaded jar" ) ; File file = new File ( destTopologyFile ) ; return file . delete ( ) ; } return true ; } | Remove the uploaded topology package for cleaning up |
20,451 | private void setPackingConfigs ( Config config ) { List < TopologyAPI . Config . KeyValue > topologyConfig = topology . getTopologyConfig ( ) . getKvsList ( ) ; this . defaultInstanceResources = new Resource ( Context . instanceCpu ( config ) , Context . instanceRam ( config ) , Context . instanceDisk ( config ) ) ; int paddingPercentage = TopologyUtils . getConfigWithDefault ( topologyConfig , TOPOLOGY_CONTAINER_PADDING_PERCENTAGE , PackingUtils . DEFAULT_CONTAINER_PADDING_PERCENTAGE ) ; ByteAmount ramPadding = TopologyUtils . getConfigWithDefault ( topologyConfig , TOPOLOGY_CONTAINER_RAM_PADDING , PackingUtils . DEFAULT_CONTAINER_RAM_PADDING ) ; double cpuPadding = TopologyUtils . getConfigWithDefault ( topologyConfig , TOPOLOGY_CONTAINER_CPU_PADDING , PackingUtils . DEFAULT_CONTAINER_CPU_PADDING ) ; Resource preliminaryPadding = new Resource ( cpuPadding , ramPadding , PackingUtils . DEFAULT_CONTAINER_DISK_PADDING ) ; this . maxNumInstancesPerContainer = TopologyUtils . getConfigWithDefault ( topologyConfig , TOPOLOGY_CONTAINER_MAX_NUM_INSTANCES , PackingUtils . DEFAULT_MAX_NUM_INSTANCES_PER_CONTAINER ) ; double containerDefaultCpu = this . defaultInstanceResources . getCpu ( ) * maxNumInstancesPerContainer ; ByteAmount containerDefaultRam = this . defaultInstanceResources . getRam ( ) . multiply ( maxNumInstancesPerContainer ) ; ByteAmount containerDefaultDisk = this . defaultInstanceResources . getDisk ( ) . multiply ( maxNumInstancesPerContainer ) ; double containerCpu = TopologyUtils . getConfigWithDefault ( topologyConfig , TOPOLOGY_CONTAINER_CPU_REQUESTED , containerDefaultCpu ) ; ByteAmount containerRam = TopologyUtils . getConfigWithDefault ( topologyConfig , TOPOLOGY_CONTAINER_RAM_REQUESTED , containerDefaultRam ) ; ByteAmount containerDisk = TopologyUtils . 
getConfigWithDefault ( topologyConfig , TOPOLOGY_CONTAINER_DISK_REQUESTED , containerDefaultDisk ) ; Resource containerResource = new Resource ( containerCpu , containerRam , containerDisk ) ; this . padding = PackingUtils . finalizePadding ( containerResource , preliminaryPadding , paddingPercentage ) ; this . maxContainerResources = containerResource ; this . componentResourceMap = PackingUtils . getComponentResourceMap ( TopologyUtils . getComponentParallelism ( topology ) . keySet ( ) , TopologyUtils . getComponentRamMapConfig ( topology ) , TopologyUtils . getComponentCpuMapConfig ( topology ) , TopologyUtils . getComponentDiskMapConfig ( topology ) , defaultInstanceResources ) ; } | Instantiate the packing algorithm parameters related to this topology .
20,452 | public Set < String > getComponentStreams ( String componentId ) { if ( outputs . containsKey ( componentId ) ) { Set < String > streams = new HashSet < > ( ) ; List < TopologyAPI . OutputStream > olist = outputs . get ( componentId ) ; for ( TopologyAPI . OutputStream ostream : olist ) { streams . add ( ostream . getStream ( ) . getId ( ) ) ; } return streams ; } else { return null ; } } | Gets the set of streams declared for the specified component . |
20,453 | public Map < TopologyAPI . StreamId , TopologyAPI . Grouping > getSources ( String componentId ) { if ( inputs . containsKey ( componentId ) ) { Map < TopologyAPI . StreamId , TopologyAPI . Grouping > retVal = new HashMap < > ( ) ; for ( TopologyAPI . InputStream istream : inputs . get ( componentId ) ) { retVal . put ( istream . getStream ( ) , istream . getGtype ( ) ) ; } return retVal ; } else { return null ; } } | Gets the declared inputs to the specified component . |
20,454 | public void submitTopology ( ) throws TopologySubmissionException { Config primaryRuntime = Config . newBuilder ( ) . putAll ( LauncherUtils . getInstance ( ) . createPrimaryRuntime ( topology ) ) . build ( ) ; if ( Context . dryRun ( config ) ) { callLauncherRunner ( primaryRuntime ) ; return ; } String statemgrClass = Context . stateManagerClass ( config ) ; IStateManager statemgr ; String launcherClass = Context . launcherClass ( config ) ; ILauncher launcher ; String uploaderClass = Context . uploaderClass ( config ) ; IUploader uploader ; try { statemgr = ReflectionUtils . newInstance ( statemgrClass ) ; } catch ( IllegalAccessException | InstantiationException | ClassNotFoundException e ) { throw new TopologySubmissionException ( String . format ( "Failed to instantiate state manager class '%s'" , statemgrClass ) , e ) ; } try { launcher = ReflectionUtils . newInstance ( launcherClass ) ; } catch ( IllegalAccessException | InstantiationException | ClassNotFoundException e ) { throw new LauncherException ( String . format ( "Failed to instantiate launcher class '%s'" , launcherClass ) , e ) ; } try { uploader = ReflectionUtils . newInstance ( uploaderClass ) ; } catch ( IllegalAccessException | InstantiationException | ClassNotFoundException e ) { throw new UploaderException ( String . format ( "Failed to instantiate uploader class '%s'" , uploaderClass ) , e ) ; } try { statemgr . initialize ( config ) ; uploader . initialize ( config ) ; SchedulerStateManagerAdaptor adaptor = new SchedulerStateManagerAdaptor ( statemgr , 5000 ) ; validateSubmit ( adaptor , topology . getName ( ) ) ; LOG . log ( Level . FINE , "Topology {0} to be submitted" , topology . getName ( ) ) ; Config runtimeWithoutPackageURI = Config . newBuilder ( ) . putAll ( primaryRuntime ) . putAll ( LauncherUtils . getInstance ( ) . createAdaptorRuntime ( adaptor ) ) . put ( Key . LAUNCHER_CLASS_INSTANCE , launcher ) . build ( ) ; PackingPlan packingPlan = LauncherUtils . 
getInstance ( ) . createPackingPlan ( config , runtimeWithoutPackageURI ) ; runtimeWithoutPackageURI = updateNumContainersIfNeeded ( runtimeWithoutPackageURI , topology , packingPlan ) ; URI packageURI = uploadPackage ( uploader ) ; Config runtimeAll = Config . newBuilder ( ) . putAll ( runtimeWithoutPackageURI ) . put ( Key . TOPOLOGY_PACKAGE_URI , packageURI ) . build ( ) ; callLauncherRunner ( runtimeAll ) ; } catch ( LauncherException | PackingException e ) { uploader . undo ( ) ; throw e ; } finally { SysUtils . closeIgnoringExceptions ( uploader ) ; SysUtils . closeIgnoringExceptions ( launcher ) ; SysUtils . closeIgnoringExceptions ( statemgr ) ; } } | Submit a topology : 1 . Instantiate necessary resources 2 . Validate whether it is legal to submit a topology 3 . Call the LauncherRunner
20,455 | void scheduleHeronWorkers ( PackingPlan topologyPacking ) throws ContainerAllocationException { this . componentRamMap = topologyPacking . getComponentRamDistribution ( ) ; scheduleHeronWorkers ( topologyPacking . getContainers ( ) ) ; } | Container allocation is asynchronous . Requests all containers in the input packing plan serially to ensure allocated resources match the required resources . |
20,456 | void scheduleHeronWorkers ( Set < ContainerPlan > containers ) throws ContainerAllocationException { for ( ContainerPlan containerPlan : containers ) { int id = containerPlan . getId ( ) ; if ( containerPlans . containsKey ( containerPlan . getId ( ) ) ) { throw new ContainerAllocationException ( "Received duplicate allocation request for " + id ) ; } Resource reqResource = containerPlan . getRequiredResource ( ) ; containerPlans . put ( id , containerPlan ) ; requestContainerForWorker ( id , new HeronWorker ( id , reqResource ) ) ; } } | Container allocation is asynchronous . Requests all containers in the input set serially to ensure allocated resources match the required resources . |
20,457 | public void killWorkers ( Set < ContainerPlan > containers ) { for ( ContainerPlan container : containers ) { LOG . log ( Level . INFO , "Find and kill container for worker {0}" , container . getId ( ) ) ; Optional < HeronWorker > worker = multiKeyWorkerMap . lookupByWorkerId ( container . getId ( ) ) ; if ( worker . isPresent ( ) ) { LOG . log ( Level . INFO , "Killing container {0} for worker {1}" , new Object [ ] { worker . get ( ) . evaluator . getId ( ) , worker . get ( ) . workerId } ) ; AllocatedEvaluator evaluator = multiKeyWorkerMap . detachEvaluatorAndRemove ( worker . get ( ) ) ; evaluator . close ( ) ; } else { LOG . log ( Level . WARNING , "Did not find worker for {0}" , container . getId ( ) ) ; } containerPlans . remove ( container . getId ( ) ) ; } } | Terminates any yarn containers associated with the given containers . |
20,458 | @ SuppressWarnings ( "unchecked" ) public void restoreState ( Map < String , Serializable > state ) { LOG . info ( "Restoring window manager state" ) ; if ( state . get ( EVICTION_STATE_KEY ) != null ) { ( ( EvictionPolicy ) evictionPolicy ) . restoreState ( state . get ( EVICTION_STATE_KEY ) ) ; } if ( state . get ( TRIGGER_STATE_KEY ) != null ) { ( ( TriggerPolicy ) triggerPolicy ) . restoreState ( state . get ( TRIGGER_STATE_KEY ) ) ; } this . queue . addAll ( ( Collection < Event < T > > ) state . get ( QUEUE ) ) ; this . expiredEvents . addAll ( ( List < T > ) state . get ( EXPIRED_EVENTS ) ) ; this . prevWindowEvents . addAll ( ( Set < Event < T > > ) state . get ( PRE_WINDOW_EVENTS ) ) ; this . eventsSinceLastExpiry . set ( ( int ) state . get ( EVENTS_SINCE_LAST_EXPIRY ) ) ; } | Restore state associated with the window manager |
20,459 | public Map < String , Serializable > getState ( ) { Map < String , Serializable > ret = new HashMap < > ( ) ; if ( evictionPolicy . getState ( ) != null ) { ret . put ( EVICTION_STATE_KEY , ( Serializable ) evictionPolicy . getState ( ) ) ; } if ( triggerPolicy . getState ( ) != null ) { ret . put ( TRIGGER_STATE_KEY , ( Serializable ) triggerPolicy . getState ( ) ) ; } ret . put ( QUEUE , ( Serializable ) this . queue ) ; ret . put ( EXPIRED_EVENTS , ( Serializable ) this . expiredEvents ) ; ret . put ( PRE_WINDOW_EVENTS , ( Serializable ) this . prevWindowEvents ) ; ret . put ( EVENTS_SINCE_LAST_EXPIRY , this . eventsSinceLastExpiry . get ( ) ) ; return ret ; } | Get the state of the window manager |
20,460 | public static String [ ] schedulerCommand ( Config config , Config runtime , List < Integer > freePorts ) { List < String > commands = new ArrayList < > ( ) ; String javaExecutable = String . format ( "%s/%s" , Context . clusterJavaHome ( config ) , "bin/java" ) ; commands . add ( javaExecutable ) ; commands . add ( "-cp" ) ; String completeSchedulerProcessClassPath = String . format ( "%s:%s:%s" , Context . schedulerClassPath ( config ) , Context . packingClassPath ( config ) , Context . stateManagerClassPath ( config ) ) ; commands . add ( completeSchedulerProcessClassPath ) ; commands . add ( "org.apache.heron.scheduler.SchedulerMain" ) ; String [ ] commandArgs = schedulerCommandArgs ( config , runtime , freePorts ) ; commands . addAll ( Arrays . asList ( commandArgs ) ) ; return commands . toArray ( new String [ 0 ] ) ; } | Utils method to construct the command to start heron - scheduler |
20,461 | public static String [ ] schedulerCommandArgs ( Config config , Config runtime , List < Integer > freePorts ) { if ( freePorts . size ( ) < PORTS_REQUIRED_FOR_SCHEDULER ) { throw new RuntimeException ( "Failed to find enough ports for executor" ) ; } for ( int port : freePorts ) { if ( port == - 1 ) { throw new RuntimeException ( "Failed to find available ports for executor" ) ; } } int httpPort = freePorts . get ( 0 ) ; List < String > commands = new ArrayList < > ( ) ; commands . add ( "--cluster" ) ; commands . add ( Context . cluster ( config ) ) ; commands . add ( "--role" ) ; commands . add ( Context . role ( config ) ) ; commands . add ( "--environment" ) ; commands . add ( Context . environ ( config ) ) ; commands . add ( "--topology_name" ) ; commands . add ( Context . topologyName ( config ) ) ; commands . add ( "--topology_bin" ) ; commands . add ( Context . topologyBinaryFile ( config ) ) ; commands . add ( "--http_port" ) ; commands . add ( Integer . toString ( httpPort ) ) ; return commands . toArray ( new String [ 0 ] ) ; } | Util method to get the arguments to the heron scheduler . |
20,462 | public static String [ ] executorCommandArgs ( Config config , Config runtime , Map < ExecutorPort , String > ports , String containerIndex ) { List < String > args = new ArrayList < > ( ) ; addExecutorTopologyArgs ( args , config , runtime ) ; addExecutorContainerArgs ( args , ports , containerIndex ) ; return args . toArray ( new String [ args . size ( ) ] ) ; } | Util method to get the arguments to the heron executor . This method creates the arguments without the container index which is the first argument to the executor |
20,463 | public static void addExecutorContainerArgs ( List < String > args , Map < ExecutorPort , String > ports , String containerIndex ) { String masterPort = ExecutorPort . getPort ( ExecutorPort . MASTER_PORT , ports ) ; String tmasterControllerPort = ExecutorPort . getPort ( ExecutorPort . TMASTER_CONTROLLER_PORT , ports ) ; String tmasterStatsPort = ExecutorPort . getPort ( ExecutorPort . TMASTER_STATS_PORT , ports ) ; String shellPort = ExecutorPort . getPort ( ExecutorPort . SHELL_PORT , ports ) ; String metricsmgrPort = ExecutorPort . getPort ( ExecutorPort . METRICS_MANAGER_PORT , ports ) ; String schedulerPort = ExecutorPort . getPort ( ExecutorPort . SCHEDULER_PORT , ports ) ; String metricsCacheMasterPort = ExecutorPort . getPort ( ExecutorPort . METRICS_CACHE_MASTER_PORT , ports ) ; String metricsCacheStatsPort = ExecutorPort . getPort ( ExecutorPort . METRICS_CACHE_STATS_PORT , ports ) ; String ckptmgrPort = ExecutorPort . getPort ( ExecutorPort . CHECKPOINT_MANAGER_PORT , ports ) ; String remoteDebuggerPorts = ExecutorPort . getPort ( ExecutorPort . JVM_REMOTE_DEBUGGER_PORTS , ports ) ; if ( containerIndex != null ) { args . add ( createCommandArg ( ExecutorFlag . Shard , containerIndex ) ) ; } args . add ( createCommandArg ( ExecutorFlag . MasterPort , masterPort ) ) ; args . add ( createCommandArg ( ExecutorFlag . TMasterControllerPort , tmasterControllerPort ) ) ; args . add ( createCommandArg ( ExecutorFlag . TMasterStatsPort , tmasterStatsPort ) ) ; args . add ( createCommandArg ( ExecutorFlag . ShellPort , shellPort ) ) ; args . add ( createCommandArg ( ExecutorFlag . MetricsManagerPort , metricsmgrPort ) ) ; args . add ( createCommandArg ( ExecutorFlag . SchedulerPort , schedulerPort ) ) ; args . add ( createCommandArg ( ExecutorFlag . MetricsCacheManagerMasterPort , metricsCacheMasterPort ) ) ; args . add ( createCommandArg ( ExecutorFlag . MetricsCacheManagerStatsPort , metricsCacheStatsPort ) ) ; args . 
add ( createCommandArg ( ExecutorFlag . CheckpointManagerPort , ckptmgrPort ) ) ; if ( remoteDebuggerPorts != null ) { args . add ( createCommandArg ( ExecutorFlag . JvmRemoteDebuggerPorts , remoteDebuggerPorts ) ) ; } } | Util method to parse port map and container id and translate them into arguments to be used by executor |
20,464 | public static boolean setLibSchedulerLocation ( Config runtime , IScheduler scheduler , boolean isService ) { final String endpoint = "scheduler_as_lib_no_endpoint" ; return setSchedulerLocation ( runtime , endpoint , scheduler ) ; } | Set the location of scheduler for other processes to discover when invoke IScheduler as a library on client side |
20,465 | public static boolean setSchedulerLocation ( Config runtime , String schedulerEndpoint , IScheduler scheduler ) { Scheduler . SchedulerLocation . Builder builder = Scheduler . SchedulerLocation . newBuilder ( ) . setTopologyName ( Runtime . topologyName ( runtime ) ) . setHttpEndpoint ( schedulerEndpoint ) ; List < String > jobLinks = scheduler . getJobLinks ( ) ; if ( jobLinks != null ) { builder . addAllJobPageLink ( jobLinks ) ; } Scheduler . SchedulerLocation location = builder . build ( ) ; LOG . log ( Level . INFO , "Setting Scheduler locations: {0}" , location ) ; SchedulerStateManagerAdaptor statemgr = Runtime . schedulerStateManagerAdaptor ( runtime ) ; Boolean result = statemgr . setSchedulerLocation ( location , Runtime . topologyName ( runtime ) ) ; if ( result == null || ! result ) { LOG . severe ( "Failed to set Scheduler location" ) ; return false ; } return true ; } | Set the location of scheduler for other processes to discover |
20,466 | public static Scheduler . SchedulerResponse constructSchedulerResponse ( boolean isOK ) { Common . Status . Builder status = Common . Status . newBuilder ( ) ; if ( isOK ) { status . setStatus ( Common . StatusCode . OK ) ; } else { status . setStatus ( Common . StatusCode . NOTOK ) ; } return Scheduler . SchedulerResponse . newBuilder ( ) . setStatus ( status ) . build ( ) ; } | Construct a Heron scheduler response based on the given result
20,467 | public static boolean createOrCleanDirectory ( String directory ) { if ( ! FileUtils . isDirectoryExists ( directory ) ) { LOG . fine ( "The directory does not exist; creating it." ) ; if ( ! FileUtils . createDirectory ( directory ) ) { LOG . severe ( "Failed to create directory: " + directory ) ; return false ; } } if ( ! FileUtils . cleanDir ( directory ) ) { LOG . severe ( "Failed to clean directory: " + directory ) ; return false ; } return true ; } | Create the directory if it does not exist otherwise clean the directory . |
20,468 | public static boolean curlAndExtractPackage ( String workingDirectory , String packageURI , String packageDestination , boolean isDeletePackage , boolean isVerbose ) { LOG . log ( Level . FINE , "Fetching package {0}" , packageURI ) ; LOG . fine ( "Fetched package can overwrite old one." ) ; if ( ! ShellUtils . curlPackage ( packageURI , packageDestination , isVerbose , false ) ) { LOG . severe ( "Failed to fetch package." ) ; return false ; } LOG . log ( Level . FINE , "Extracting the package {0}" , packageURI ) ; if ( ! ShellUtils . extractPackage ( packageDestination , workingDirectory , isVerbose , false ) ) { LOG . severe ( "Failed to extract package." ) ; return false ; } if ( isDeletePackage && ! FileUtils . deleteFile ( packageDestination ) ) { LOG . warning ( "Failed to delete the package: " + packageDestination ) ; } return true ; } | Curl a package extract it to working directory |
20,469 | public static void persistUpdatedPackingPlan ( String topologyName , PackingPlan updatedPackingPlan , SchedulerStateManagerAdaptor stateManager ) { LOG . log ( Level . INFO , "Updating scheduled-resource in packing plan: {0}" , topologyName ) ; PackingPlanProtoSerializer serializer = new PackingPlanProtoSerializer ( ) ; if ( ! stateManager . updatePackingPlan ( serializer . toProto ( updatedPackingPlan ) , topologyName ) ) { throw new RuntimeException ( String . format ( "Failed to update packing plan for topology %s" , topologyName ) ) ; } } | Replaces persisted packing plan in state manager . |
20,470 | public PackingPlan build ( ) { assertResourceSettings ( ) ; Set < PackingPlan . ContainerPlan > containerPlans = buildContainerPlans ( this . containers ) ; return new PackingPlan ( topologyId , containerPlans ) ; } | build container plan sets by summing up instance resources |
20,471 | private static Set < PackingPlan . ContainerPlan > buildContainerPlans ( Map < Integer , Container > containerInstances ) { Set < PackingPlan . ContainerPlan > containerPlans = new LinkedHashSet < > ( ) ; for ( Integer containerId : containerInstances . keySet ( ) ) { Container container = containerInstances . get ( containerId ) ; if ( container . getInstances ( ) . size ( ) == 0 ) { continue ; } Resource totalUsedResources = container . getTotalUsedResources ( ) ; Resource resource = new Resource ( Math . round ( totalUsedResources . getCpu ( ) ) , totalUsedResources . getRam ( ) , totalUsedResources . getDisk ( ) ) ; PackingPlan . ContainerPlan containerPlan = new PackingPlan . ContainerPlan ( containerId , container . getInstances ( ) , resource ) ; containerPlans . add ( containerPlan ) ; } return containerPlans ; } | Estimate the per instance and topology resources for the packing plan based on the ramMap instance defaults and paddingPercentage . |
20,472 | static Map < Integer , Container > getContainers ( PackingPlan currentPackingPlan , Resource maxContainerResource , Resource padding , Map < String , TreeSet < Integer > > componentIndexes , TreeSet < Integer > taskIds ) { Map < Integer , Container > containers = new HashMap < > ( ) ; Resource capacity = maxContainerResource ; for ( PackingPlan . ContainerPlan currentContainerPlan : currentPackingPlan . getContainers ( ) ) { Container container = new Container ( currentContainerPlan . getId ( ) , capacity , padding ) ; for ( PackingPlan . InstancePlan instancePlan : currentContainerPlan . getInstances ( ) ) { addToContainer ( container , instancePlan , componentIndexes , taskIds ) ; } containers . put ( currentContainerPlan . getId ( ) , container ) ; } return containers ; } | Generates the containers that correspond to the current packing plan along with their associated instances . |
20,473 | private static void addToContainer ( Container container , PackingPlan . InstancePlan instancePlan , Map < String , TreeSet < Integer > > componentIndexes , Set < Integer > taskIds ) { container . add ( instancePlan ) ; String componentName = instancePlan . getComponentName ( ) ; componentIndexes . computeIfAbsent ( componentName , k -> new TreeSet < Integer > ( ) ) ; componentIndexes . get ( componentName ) . add ( instancePlan . getComponentIndex ( ) ) ; taskIds . add ( instancePlan . getTaskId ( ) ) ; } | Add instancePlan to container and update the componentIndexes and taskIds indexes |
20,474 | public static < T > T randomFromList ( List < T > ls ) { return ls . get ( new Random ( ) . nextInt ( ls . size ( ) ) ) ; } | Selects a random item from a list . Used in many example source streamlets . |
20,475 | public static int getParallelism ( String [ ] args , int defaultParallelism ) { return ( args . length > 1 ) ? Integer . parseInt ( args [ 1 ] ) : defaultParallelism ; } | Fetches the topology s parallelism from the second - command - line argument or defers to a supplied default . |
20,476 | public static String intListAsString ( List < Integer > ls ) { return String . join ( ", " , ls . stream ( ) . map ( i -> i . toString ( ) ) . collect ( Collectors . toList ( ) ) ) ; } | Converts a list of integers into a comma - separated string . |
20,477 | public void sendRequest ( Message request , Object context , Message . Builder responseBuilder , Duration timeout ) { final REQID rid = REQID . generate ( ) ; contextMap . put ( rid , context ) ; responseMessageMap . put ( rid , responseBuilder ) ; if ( timeout . getSeconds ( ) > 0 ) { registerTimerEvent ( timeout , new Runnable ( ) { public void run ( ) { handleTimeout ( rid ) ; } } ) ; } OutgoingPacket opk = new OutgoingPacket ( rid , request ) ; socketChannelHelper . sendPacket ( opk ) ; } | A non-positive value of the timeout means no timeout .
20,478 | public void sendRequest ( Message request , Message . Builder responseBuilder ) { sendRequest ( request , null , responseBuilder , Duration . ZERO ) ; } | Convenience method of the above method with no timeout or context |
20,479 | protected void handleTimeout ( REQID rid ) { if ( contextMap . containsKey ( rid ) ) { Object ctx = contextMap . get ( rid ) ; contextMap . remove ( rid ) ; responseMessageMap . remove ( rid ) ; onResponse ( StatusCode . TIMEOUT_ERROR , ctx , null ) ; } else { } } | Handle the timeout for a particular REQID |
20,480 | protected void startSchedulerDriver ( ) { driver . start ( ) ; LOG . info ( "Waiting for Mesos Framework get registered" ) ; long timeout = MesosContext . getHeronMesosFrameworkStagingTimeoutMs ( config ) ; if ( ! mesosFramework . waitForRegistered ( timeout , TimeUnit . MILLISECONDS ) ) { throw new RuntimeException ( "Failed to register with Mesos Master in time" ) ; } } | Start the scheduler driver and wait for it to get registered
20,481 | protected void joinSchedulerDriver ( long timeout , TimeUnit unit ) { ExecutorService service = Executors . newFixedThreadPool ( 1 ) ; final CountDownLatch closeLatch = new CountDownLatch ( 1 ) ; Runnable driverJoin = new Runnable ( ) { public void run ( ) { driver . join ( ) ; closeLatch . countDown ( ) ; } } ; service . submit ( driverJoin ) ; LOG . info ( "Waiting for Mesos Driver got stopped" ) ; try { if ( ! closeLatch . await ( timeout , unit ) ) { LOG . severe ( "Mesos Driver failed to stop in time." ) ; } else { LOG . info ( "Mesos Driver stopped." ) ; } } catch ( InterruptedException e ) { LOG . log ( Level . SEVERE , "Close latch thread is interrupted: " , e ) ; } service . shutdownNow ( ) ; } | Waits for the driver to be stopped or aborted |
20,482 | protected BaseContainer getBaseContainer ( Integer containerIndex , PackingPlan packing ) { BaseContainer container = new BaseContainer ( ) ; container . name = TaskUtils . getTaskNameForContainerIndex ( containerIndex ) ; container . runAsUser = Context . role ( config ) ; container . description = String . format ( "Container %d for topology %s" , containerIndex , Context . topologyName ( config ) ) ; fillResourcesRequirementForBaseContainer ( container , containerIndex , packing ) ; container . shell = true ; container . retries = Integer . MAX_VALUE ; container . dependencies = new ArrayList < > ( ) ; String topologyPath = Runtime . schedulerProperties ( runtime ) . getProperty ( Key . TOPOLOGY_PACKAGE_URI . value ( ) ) ; String heronCoreReleasePath = Context . corePackageUri ( config ) ; container . dependencies . add ( topologyPath ) ; container . dependencies . add ( heronCoreReleasePath ) ; return container ; } | Get BaseContainer info . |
20,483 | public Protos . TaskInfo constructMesosTaskInfo ( Config heronConfig , Config heronRuntime ) { String taskIdStr = this . taskId ; Protos . TaskID mesosTaskID = Protos . TaskID . newBuilder ( ) . setValue ( taskIdStr ) . build ( ) ; Protos . TaskInfo . Builder taskInfo = Protos . TaskInfo . newBuilder ( ) . setName ( baseContainer . name ) . setTaskId ( mesosTaskID ) ; Protos . Environment . Builder environment = Protos . Environment . newBuilder ( ) ; Set < String > builtinEnvNames = new HashSet < > ( ) ; for ( Protos . Environment . Variable variable : environment . getVariablesList ( ) ) { builtinEnvNames . add ( variable . getName ( ) ) ; } for ( BaseContainer . EnvironmentVariable ev : baseContainer . environmentVariables ) { environment . addVariables ( Protos . Environment . Variable . newBuilder ( ) . setName ( ev . name ) . setValue ( ev . value ) ) ; } taskInfo . addResources ( scalarResource ( TaskResources . CPUS_RESOURCE_NAME , baseContainer . cpu ) ) . addResources ( scalarResource ( TaskResources . MEM_RESOURCE_NAME , baseContainer . memInMB ) ) . addResources ( scalarResource ( TaskResources . DISK_RESOURCE_NAME , baseContainer . diskInMB ) ) . addResources ( rangeResource ( TaskResources . PORT_RESOURCE_NAME , this . freePorts . get ( 0 ) , this . freePorts . get ( this . freePorts . size ( ) - 1 ) ) ) . setSlaveId ( this . offer . getSlaveId ( ) ) ; int containerIndex = TaskUtils . getContainerIndexForTaskId ( taskIdStr ) ; String commandStr = executorCommand ( heronConfig , heronRuntime , containerIndex ) ; Protos . CommandInfo . Builder command = Protos . CommandInfo . newBuilder ( ) ; List < Protos . CommandInfo . URI > uriProtos = new ArrayList < > ( ) ; for ( String uri : baseContainer . dependencies ) { uriProtos . add ( Protos . CommandInfo . URI . newBuilder ( ) . setValue ( uri ) . setExtract ( true ) . build ( ) ) ; } command . setValue ( commandStr ) . setShell ( baseContainer . shell ) . setEnvironment ( environment ) . 
addAllUris ( uriProtos ) ; if ( ! baseContainer . runAsUser . isEmpty ( ) ) { command . setUser ( baseContainer . runAsUser ) ; } taskInfo . setCommand ( command ) ; return taskInfo . build ( ) ; } | Construct the Mesos TaskInfo in Protos to launch basing on the LaunchableTask |
20,484 | @ SuppressWarnings ( "unchecked" ) private void gatherOneMetric ( String metricName , Metrics . MetricPublisherPublishMessage . Builder builder ) { Object metricValue = metrics . get ( metricName ) . getValueAndReset ( ) ; if ( metricValue == null ) { return ; } if ( metricValue instanceof Map ) { for ( Map . Entry < Object , Object > entry : ( ( Map < Object , Object > ) metricValue ) . entrySet ( ) ) { if ( entry . getKey ( ) != null && entry . getValue ( ) != null ) { addDataToMetricPublisher ( builder , metricName + "/" + entry . getKey ( ) . toString ( ) , entry . getValue ( ) ) ; } } } else if ( metricValue instanceof Collection ) { int index = 0 ; for ( Object value : ( Collection ) metricValue ) { addDataToMetricPublisher ( builder , metricName + "/" + ( index ++ ) , value ) ; } } else { addDataToMetricPublisher ( builder , metricName , metricValue ) ; } } | and add it to MetricPublisherPublishMessage builder given . |
20,485 | public static MetricRequest fromProtobuf ( TopologyMaster . MetricRequest request ) { String componentName = request . getComponentName ( ) ; Map < String , Set < String > > componentNameInstanceId = new HashMap < > ( ) ; if ( request . getInstanceIdCount ( ) == 0 ) { componentNameInstanceId . put ( componentName , null ) ; } else { Set < String > instances = new HashSet < > ( ) ; componentNameInstanceId . put ( componentName , instances ) ; instances . addAll ( request . getInstanceIdList ( ) ) ; } Set < String > metricNames = new HashSet < > ( ) ; if ( request . getMetricCount ( ) > 0 ) { metricNames . addAll ( request . getMetricList ( ) ) ; } long startTime = 0 ; long endTime = Long . MAX_VALUE ; if ( request . hasInterval ( ) ) { endTime = System . currentTimeMillis ( ) ; long interval = request . getInterval ( ) ; if ( interval <= 0 ) { startTime = 0 ; } else { startTime = endTime - interval * 1000 ; } } else { startTime = request . getExplicitInterval ( ) . getStart ( ) * 1000 ; endTime = request . getExplicitInterval ( ) . getEnd ( ) * 1000 ; } MetricGranularity aggregationGranularity = AGGREGATE_ALL_METRICS ; if ( request . hasMinutely ( ) && request . getMinutely ( ) ) { aggregationGranularity = AGGREGATE_BY_BUCKET ; } return new MetricRequest ( componentNameInstanceId , metricNames , startTime , endTime , aggregationGranularity ) ; } | compatible with org . apache . heron . proto . tmaster . TopologyMaster . MetricRequest |
/**
 * Converts a MetricResponse to the protobuf form compatible with
 * org.apache.heron.proto.tmaster.TopologyMaster.MetricResponse.
 *
 * @param response the metric response to convert
 * @param request the originating request; its time window is echoed back as interval
 * @return the protobuf response, always carrying status OK
 */
public static TopologyMaster.MetricResponse toProtobuf(MetricResponse response,
    MetricRequest request) {
  TopologyMaster.MetricResponse.Builder builder = TopologyMaster.MetricResponse.newBuilder();
  // Report the request window length in seconds (start/end are in milliseconds).
  builder.setInterval((request.getEndTime() - request.getStartTime()) / 1000);
  builder.setStatus(Common.Status.newBuilder().setStatus(Common.StatusCode.OK));

  // Regroup the flat datum list: instanceId -> metricName -> list of time-range values.
  Map<String, Map<String, List<MetricTimeRangeValue>>> aggregation = new HashMap<>();
  for (MetricDatum datum : response.getMetricList()) {
    String instanceId = datum.getInstanceId();
    String metricName = datum.getMetricName();
    List<MetricTimeRangeValue> metricValue = datum.getMetricValue();
    if (!aggregation.containsKey(instanceId)) {
      aggregation.put(instanceId, new HashMap<String, List<MetricTimeRangeValue>>());
    }
    if (!aggregation.get(instanceId).containsKey(metricName)) {
      aggregation.get(instanceId).put(metricName, new ArrayList<MetricTimeRangeValue>());
    }
    aggregation.get(instanceId).get(metricName).addAll(metricValue);
  }

  // Emit one TaskMetric per instance, one IndividualMetric per metric name.
  for (String instanceId : aggregation.keySet()) {
    TopologyMaster.MetricResponse.TaskMetric.Builder taskMetricBuilder =
        TopologyMaster.MetricResponse.TaskMetric.newBuilder();
    taskMetricBuilder.setInstanceId(instanceId);
    for (String metricName : aggregation.get(instanceId).keySet()) {
      TopologyMaster.MetricResponse.IndividualMetric.Builder individualMetricBuilder =
          TopologyMaster.MetricResponse.IndividualMetric.newBuilder();
      individualMetricBuilder.setName(metricName);
      List<MetricTimeRangeValue> list = aggregation.get(instanceId).get(metricName);
      if (list.size() == 1) {
        // A single value is reported as a plain value with no interval buckets.
        individualMetricBuilder.setValue(list.get(0).getValue());
      } else {
        // Multiple values keep their individual [start, end] intervals.
        for (MetricTimeRangeValue v : list) {
          TopologyMaster.MetricResponse.IndividualMetric.IntervalValue.Builder
              intervalValueBuilder =
              TopologyMaster.MetricResponse.IndividualMetric.IntervalValue.newBuilder();
          intervalValueBuilder.setValue(v.getValue());
          intervalValueBuilder.setInterval(TopologyMaster.MetricInterval.newBuilder()
              .setStart(v.getStartTime()).setEnd(v.getEndTime()));
          individualMetricBuilder.addIntervalValues(intervalValueBuilder);
        }
      }
      taskMetricBuilder.addMetric(individualMetricBuilder);
    }
    builder.addMetric(taskMetricBuilder);
  }
  return builder.build();
}
20,487 | public static ExceptionRequest fromProtobuf ( TopologyMaster . ExceptionLogRequest request ) { String componentName = request . getComponentName ( ) ; Map < String , Set < String > > componentNameInstanceId = new HashMap < > ( ) ; Set < String > instances = null ; if ( request . getInstancesCount ( ) > 0 ) { instances = new HashSet < > ( ) ; instances . addAll ( request . getInstancesList ( ) ) ; } componentNameInstanceId . put ( componentName , instances ) ; return new ExceptionRequest ( componentNameInstanceId ) ; } | compatible with org . apache . heron . proto . tmaster . TopologyMaster . ExceptionLogRequest |
20,488 | public static TopologyMaster . ExceptionLogResponse toProtobuf ( ExceptionResponse response ) { TopologyMaster . ExceptionLogResponse . Builder builder = TopologyMaster . ExceptionLogResponse . newBuilder ( ) ; builder . setStatus ( Common . Status . newBuilder ( ) . setStatus ( Common . StatusCode . OK ) ) ; for ( ExceptionDatum e : response . getExceptionDatapointList ( ) ) { TopologyMaster . TmasterExceptionLog . Builder exceptionBuilder = TopologyMaster . TmasterExceptionLog . newBuilder ( ) ; exceptionBuilder . setComponentName ( e . getComponentName ( ) ) ; exceptionBuilder . setHostname ( e . getHostname ( ) ) ; exceptionBuilder . setInstanceId ( e . getInstanceId ( ) ) ; exceptionBuilder . setStacktrace ( e . getStackTrace ( ) ) ; exceptionBuilder . setLasttime ( e . getLastTime ( ) ) ; exceptionBuilder . setFirsttime ( e . getFirstTime ( ) ) ; exceptionBuilder . setCount ( e . getCount ( ) ) ; exceptionBuilder . setLogging ( e . getLogging ( ) ) ; builder . addExceptions ( exceptionBuilder ) ; } return builder . build ( ) ; } | compatible with org . apache . heron . proto . tmaster . TopologyMaster . ExceptionLogResponse |
20,489 | public Map < String , List < Integer > > getComponentToTaskIds ( ) { if ( this . componentToTaskIds == null ) { this . componentToTaskIds = new HashMap < > ( ) ; for ( PhysicalPlans . Instance instance : this . getPhysicalPlan ( ) . getInstancesList ( ) ) { int taskId = instance . getInfo ( ) . getTaskId ( ) ; String componentName = instance . getInfo ( ) . getComponentName ( ) ; if ( ! this . componentToTaskIds . containsKey ( componentName ) ) { this . componentToTaskIds . put ( componentName , new ArrayList < Integer > ( ) ) ; } this . componentToTaskIds . get ( componentName ) . add ( taskId ) ; } } return this . componentToTaskIds ; } | Get the map < ; componentId - > ; taskIds> ; from the Physical Plan |
20,490 | public Duration extractTopologyTimeout ( ) { for ( TopologyAPI . Config . KeyValue keyValue : this . getTopology ( ) . getTopologyConfig ( ) . getKvsList ( ) ) { if ( keyValue . getKey ( ) . equals ( "topology.message.timeout.secs" ) ) { return TypeUtils . getDuration ( keyValue . getValue ( ) , ChronoUnit . SECONDS ) ; } } throw new IllegalArgumentException ( "topology.message.timeout.secs does not exist" ) ; } | Extract the config value topology . message . timeout . secs for given topology protobuf |
20,491 | public HashMap < TopologyAPI . StreamId , List < Grouping > > getStreamConsumers ( ) { if ( this . streamConsumers == null ) { this . streamConsumers = new HashMap < > ( ) ; Map < TopologyAPI . StreamId , TopologyAPI . StreamSchema > streamToSchema = new HashMap < > ( ) ; for ( TopologyAPI . Spout spout : this . getTopology ( ) . getSpoutsList ( ) ) { for ( TopologyAPI . OutputStream outputStream : spout . getOutputsList ( ) ) { streamToSchema . put ( outputStream . getStream ( ) , outputStream . getSchema ( ) ) ; } } for ( TopologyAPI . Bolt bolt : this . getTopology ( ) . getBoltsList ( ) ) { for ( TopologyAPI . OutputStream outputStream : bolt . getOutputsList ( ) ) { streamToSchema . put ( outputStream . getStream ( ) , outputStream . getSchema ( ) ) ; } } for ( TopologyAPI . Bolt bolt : this . getTopology ( ) . getBoltsList ( ) ) { for ( TopologyAPI . InputStream inputStream : bolt . getInputsList ( ) ) { TopologyAPI . StreamSchema schema = streamToSchema . get ( inputStream . getStream ( ) ) ; String componentName = bolt . getComp ( ) . getName ( ) ; List < Integer > taskIds = this . getComponentToTaskIds ( ) . get ( componentName ) ; if ( ! this . streamConsumers . containsKey ( inputStream . getStream ( ) ) ) { this . streamConsumers . put ( inputStream . getStream ( ) , new ArrayList < > ( ) ) ; } this . streamConsumers . get ( inputStream . getStream ( ) ) . add ( Grouping . create ( inputStream . getGtype ( ) , inputStream , schema , taskIds ) ) ; } } } return this . streamConsumers ; } | Get the stream consumers map that was generated from the topology |
/**
 * Stops the HeronServer: closes every connected client channel, clears all
 * bookkeeping state, and finally closes the accepting server channel.
 * A no-op (beyond a log line) if the server was never opened or is already closed.
 */
public void stop() {
  if (acceptChannel == null || !acceptChannel.isOpen()) {
    LOG.info("Fail to stop server; not yet open.");
    return;
  }
  for (Map.Entry<SocketChannel, SocketChannelHelper> connections : activeConnections.entrySet()) {
    SocketChannel channel = connections.getKey();
    SocketAddress channelAddress = channel.socket().getRemoteSocketAddress();
    LOG.info("Closing connected channel from client: " + channelAddress);
    LOG.info("Removing all interest on channel: " + channelAddress);
    // Deregister the channel from the event loop before notifying subclasses.
    nioLooper.removeAllInterest(channel);
    // NOTE(review): onClose is invoked while iterating activeConnections; if an
    // implementation removes the channel from that map, this loop would throw
    // ConcurrentModificationException — confirm onClose implementations do not.
    onClose(channel);
    connections.getValue().clear();
  }
  // Drop all per-connection and per-protocol bookkeeping.
  activeConnections.clear();
  requestMap.clear();
  messageMap.clear();
  try {
    acceptChannel.close();
  } catch (IOException e) {
    LOG.log(Level.SEVERE, "Failed to close server", e);
  }
}
/**
 * Increments the named counter, delegating to the api-level GlobalMetrics.
 * Not thread safe; the counter does not exist until incremented at least once.
 *
 * @param counterName name of the counter to increment
 */
public static void incr(String counterName) {
  org.apache.heron.api.metric.GlobalMetrics.incr(counterName);
}
20,494 | private void activateTopologyHandler ( String topologyName ) throws TMasterException { assert ! potentialStaleExecutionData ; NetworkUtils . TunnelConfig tunnelConfig = NetworkUtils . TunnelConfig . build ( config , NetworkUtils . HeronSystem . SCHEDULER ) ; TMasterUtils . transitionTopologyState ( topologyName , TMasterUtils . TMasterCommand . ACTIVATE , Runtime . schedulerStateManagerAdaptor ( runtime ) , TopologyAPI . TopologyState . PAUSED , TopologyAPI . TopologyState . RUNNING , tunnelConfig ) ; } | Handler to activate a topology |
20,495 | protected void cleanState ( String topologyName , SchedulerStateManagerAdaptor statemgr ) throws TopologyRuntimeManagementException { LOG . fine ( "Cleaning up topology state" ) ; Boolean result ; result = statemgr . deleteTMasterLocation ( topologyName ) ; if ( result == null || ! result ) { throw new TopologyRuntimeManagementException ( "Failed to clear TMaster location. Check whether TMaster set it correctly." ) ; } result = statemgr . deleteMetricsCacheLocation ( topologyName ) ; if ( result == null || ! result ) { throw new TopologyRuntimeManagementException ( "Failed to clear MetricsCache location. Check whether MetricsCache set it correctly." ) ; } result = statemgr . deletePackingPlan ( topologyName ) ; if ( result == null || ! result ) { throw new TopologyRuntimeManagementException ( "Failed to clear packing plan. Check whether Launcher set it correctly." ) ; } result = statemgr . deletePhysicalPlan ( topologyName ) ; if ( result == null || ! result ) { throw new TopologyRuntimeManagementException ( "Failed to clear physical plan. Check whether TMaster set it correctly." ) ; } result = statemgr . deleteSchedulerLocation ( topologyName ) ; if ( result == null || ! result ) { throw new TopologyRuntimeManagementException ( "Failed to clear scheduler location. Check whether Scheduler set it correctly." ) ; } result = statemgr . deleteLocks ( topologyName ) ; if ( result == null || ! result ) { throw new TopologyRuntimeManagementException ( "Failed to delete locks. It's possible that the topology never created any." ) ; } result = statemgr . deleteExecutionState ( topologyName ) ; if ( result == null || ! result ) { throw new TopologyRuntimeManagementException ( "Failed to clear execution state" ) ; } result = statemgr . deleteTopology ( topologyName ) ; if ( result == null || ! result ) { throw new TopologyRuntimeManagementException ( "Failed to clear topology definition" ) ; } LOG . 
fine ( "Cleaned up topology state" ) ; } | Clean all states of a heron topology 1 . Topology def and ExecutionState are required to exist to delete 2 . TMasterLocation SchedulerLocation and PhysicalPlan may not exist to delete |
20,496 | public PackingPlan pack ( ) { PackingPlanBuilder planBuilder = newPackingPlanBuilder ( null ) ; try { planBuilder = getFFDAllocation ( planBuilder ) ; } catch ( ConstraintViolationException e ) { throw new PackingException ( "Could not allocate all instances to packing plan" , e ) ; } return planBuilder . build ( ) ; } | Get a packing plan using First Fit Decreasing |
20,497 | private void assignInstancesToContainers ( PackingPlanBuilder planBuilder , Map < String , Integer > parallelismMap ) throws ConstraintViolationException { List < ResourceRequirement > resourceRequirements = getSortedInstances ( parallelismMap . keySet ( ) ) ; for ( ResourceRequirement resourceRequirement : resourceRequirements ) { String componentName = resourceRequirement . getComponentName ( ) ; int numInstance = parallelismMap . get ( componentName ) ; for ( int j = 0 ; j < numInstance ; j ++ ) { placeFFDInstance ( planBuilder , componentName ) ; } } } | Assigns instances to containers |
/**
 * Places one instance of {@code componentName} into an existing container if any
 * has capacity, otherwise opens a new container for it.
 *
 * @throws ConstraintViolationException if the placement violates packing constraints
 */
private void placeFFDInstance(PackingPlanBuilder planBuilder, String componentName)
    throws ConstraintViolationException {
  if (this.numContainers == 0) {
    // Very first instance: open the first container (pre-increment updates the count).
    planBuilder.updateNumContainers(++numContainers);
  }
  try {
    // Try to fit into an existing container, preferring lower container ids.
    planBuilder.addInstance(new ContainerIdScorer(), componentName);
  } catch (ResourceExceededException e) {
    // No existing container has room: open a new container and place it there.
    planBuilder.updateNumContainers(++numContainers);
    planBuilder.addInstance(numContainers, componentName);
  }
}
20,499 | public Map < Integer , List < HeronTuples . HeronTupleSet > > getCache ( ) { Map < Integer , List < HeronTuples . HeronTupleSet > > res = new HashMap < > ( ) ; for ( Map . Entry < Integer , TupleList > entry : cache . entrySet ( ) ) { res . put ( entry . getKey ( ) , entry . getValue ( ) . getTuplesList ( ) ) ; } return res ; } | Modification on Map would not cahnge values in cache |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.