idx
int64
0
41.2k
question
stringlengths
74
4.21k
target
stringlengths
5
888
20,600
/**
 * Store the packing plan for the given topology, replacing any existing plan.
 *
 * @param packingPlan the plan to persist
 * @param topologyName the topology the plan belongs to
 * @return the result of the underlying set operation
 */
public Boolean updatePackingPlan(PackingPlans.PackingPlan packingPlan, String topologyName) {
  // The store has no overwrite primitive, so remove a stale plan before writing.
  if (getPackingPlan(topologyName) != null) {
    deletePackingPlan(topologyName);
  }
  return setPackingPlan(packingPlan, topologyName);
}
Update the packing plan for the given topology. If the packing plan doesn't exist, create it; if it does, replace it.
20,601
/**
 * Fetch the TMaster location for the given topology, blocking until the
 * delegate's asynchronous read completes.
 *
 * @param topologyName name of the topology
 * @return the TMaster location, as stored by the state manager
 */
public TopologyMaster.TMasterLocation getTMasterLocation(String topologyName) {
  return awaitResult(delegate.getTMasterLocation(null, topologyName));
}
Get the tmaster location for the given topology
20,602
/**
 * Fetch the scheduler location for the given topology, blocking until the
 * delegate's asynchronous read completes.
 *
 * @param topologyName name of the topology
 * @return the scheduler location, as stored by the state manager
 */
public Scheduler.SchedulerLocation getSchedulerLocation(String topologyName) {
  return awaitResult(delegate.getSchedulerLocation(null, topologyName));
}
Get the scheduler location for the given topology
20,603
/**
 * Fetch the MetricsCache location for the given topology, blocking until the
 * delegate's asynchronous read completes.
 *
 * @param topologyName name of the topology
 * @return the MetricsCache location, as stored by the state manager
 */
public TopologyMaster.MetricsCacheLocation getMetricsCacheLocation(String topologyName) {
  return awaitResult(delegate.getMetricsCacheLocation(null, topologyName));
}
Get the metricscache location for the given topology
20,604
/**
 * Fetch the topology definition for the given topology, blocking until the
 * delegate's asynchronous read completes.
 *
 * @param topologyName name of the topology
 * @return the topology definition, as stored by the state manager
 */
public TopologyAPI.Topology getTopology(String topologyName) {
  return awaitResult(delegate.getTopology(null, topologyName));
}
Get the topology definition for the given topology
20,605
/**
 * Fetch the execution state for the given topology, blocking until the
 * delegate's asynchronous read completes.
 *
 * @param topologyName name of the topology
 * @return the execution state, as stored by the state manager
 */
public ExecutionEnvironment.ExecutionState getExecutionState(String topologyName) {
  return awaitResult(delegate.getExecutionState(null, topologyName));
}
Get the execution state for the given topology
20,606
/**
 * Fetch the physical plan for the given topology, blocking until the
 * delegate's asynchronous read completes.
 *
 * @param topologyName name of the topology
 * @return the physical plan, as stored by the state manager
 */
public PhysicalPlans.PhysicalPlan getPhysicalPlan(String topologyName) {
  return awaitResult(delegate.getPhysicalPlan(null, topologyName));
}
Get the physical plan for the given topology
20,607
/**
 * Fetch the packing plan for the given topology, blocking until the
 * delegate's asynchronous read completes.
 *
 * @param topologyName name of the topology
 * @return the packing plan, as stored by the state manager
 */
public PackingPlans.PackingPlan getPackingPlan(String topologyName) {
  return awaitResult(delegate.getPackingPlan(null, topologyName));
}
Get the packing plan for the given topology
20,608
/**
 * Send out the instance's state for the given checkpoint id.
 *
 * <p>If {@code spillState} is true, the serialized state is written to disk under
 * {@code location} and only the file path is put into the checkpoint message;
 * otherwise the serialized bytes are embedded in the message directly. The built
 * request is enqueued on {@code outQueue} for the communication layer to ship.
 *
 * @param state the instance state to checkpoint
 * @param checkpointId id of the checkpoint this state belongs to
 * @param spillState whether to spill the state to disk instead of sending bytes inline
 * @param location directory to spill into when {@code spillState} is true
 * @throws RuntimeException if spilling to disk fails
 */
public void sendOutState(State<Serializable, Serializable> state,
                         String checkpointId,
                         boolean spillState,
                         String location) {
  lock.lock();
  try {
    // Push any buffered tuples out first so state reflects everything emitted so far.
    flushRemaining();
    byte[] serializedState = serializer.serialize(state);
    CheckpointManager.InstanceStateCheckpoint.Builder instanceStateBuilder =
        CheckpointManager.InstanceStateCheckpoint.newBuilder();
    instanceStateBuilder.setCheckpointId(checkpointId);
    if (spillState) {
      // Spill path: clear old spill files, persist the bytes, ship only the path.
      FileUtils.cleanDir(location);
      // NOTE(review): assumes 'location' ends with a path separator — confirm with callers.
      // The random UUID suffix keeps concurrent/retried spills from colliding.
      String stateLocation = location + checkpointId + "-" + UUID.randomUUID();
      if (!FileUtils.writeToFile(stateLocation, serializedState, true)) {
        throw new RuntimeException("failed to spill state. Bailing out...");
      }
      instanceStateBuilder.setStateLocation(stateLocation);
    } else {
      // Inline path: embed the serialized bytes in the checkpoint message itself.
      instanceStateBuilder.setState(ByteString.copyFrom(serializedState));
    }
    CheckpointManager.StoreInstanceStateCheckpoint storeRequest =
        CheckpointManager.StoreInstanceStateCheckpoint.newBuilder()
            .setState(instanceStateBuilder.build())
            .build();
    outQueue.offer(storeRequest);
  } finally {
    lock.unlock();
  }
}
Send out the instance's state with the corresponding checkpointId. If spillState is true, the actual state is spilled to disk and only the state location is sent out.
20,609
/**
 * Reset the internal state of this OutgoingTupleCollection: drop any
 * partially-built tuples and empty the outgoing queue.
 */
public void clear() {
  lock.lock();
  try {
    currentControlTuple = null;
    currentDataTuple = null;
    outQueue.clear();
  } finally {
    lock.unlock();
  }
}
Clean the internal state of OutgoingTupleCollection
20,610
/**
 * Request the controller to add the given containers (scale-up from an
 * update command).
 *
 * @param containersToAdd container plans to add
 * @return the same set that was passed in
 */
public Set<PackingPlan.ContainerPlan> addContainers(
    Set<PackingPlan.ContainerPlan> containersToAdd) {
  controller.addContainers(containersToAdd);
  return containersToAdd;
}
Add containers for a scale - up event from an update command
20,611
/**
 * Anchor the given key/value pair in the rotating map belonging to the spout task.
 *
 * @param taskId spout task owning the rotating map
 * @param key anchor key
 * @param value value to fold into the anchor
 * @return whatever the underlying RotatingMap#anchor reports
 */
public boolean anchor(int taskId, long key, long value) {
  RotatingMap taskMap = spoutTasksToRotatingMap.get(taskId);
  return taskMap.anchor(key, value);
}
Else return false
20,612
/**
 * Rotate every spout task's rotating map, then re-arm the timer so this
 * method runs again after {@code rotateInterval}. Protected for unit tests.
 */
protected void rotate() {
  for (RotatingMap taskMap : spoutTasksToRotatingMap.values()) {
    taskMap.rotate();
  }
  // Self-rescheduling timer: each rotation registers the next one.
  Runnable reschedule = new Runnable() {
    public void run() {
      rotate();
    }
  };
  looper.registerTimerEvent(rotateInterval, reschedule);
}
Protected method for unit test
20,613
/**
 * Detects components and instances whose accumulated back-pressure exceeds the
 * configured noise filter ({@code noiseFilterMillis}) and reports one Symptom
 * per offender. Normally only one component initiates back-pressure.
 *
 * @param measurements raw measurements to scan for back-pressure metrics
 * @return symptoms for every component and instance above the threshold
 */
public Collection<Symptom> detect(Collection<Measurement> measurements) {
  publishingMetrics.executeDetectorIncr(BACK_PRESSURE_DETECTOR);
  Collection<Symptom> result = new ArrayList<>();
  Instant now = context.checkpoint();
  // Restrict the table to back-pressure measurements only.
  MeasurementsTable bpMetrics =
      MeasurementsTable.of(measurements).type(METRIC_BACK_PRESSURE.text());
  // Component-level symptoms: sum back-pressure across all instances of a component.
  for (String component : bpMetrics.uniqueComponents()) {
    double compBackPressure = bpMetrics.component(component).sum();
    if (compBackPressure > noiseFilterMillis) {
      LOG.info(String.format(
          "Detected component back-pressure for %s, total back pressure is %f",
          component, compBackPressure));
      List<String> addresses = Collections.singletonList(component);
      result.add(new Symptom(SYMPTOM_COMP_BACK_PRESSURE.text(), now, addresses));
    }
  }
  // Instance-level symptoms: the same threshold applied to each instance separately.
  for (String instance : bpMetrics.uniqueInstances()) {
    double totalBP = bpMetrics.instance(instance).sum();
    if (totalBP > noiseFilterMillis) {
      LOG.info(String.format(
          "Detected instance back-pressure for %s, total back pressure is %f",
          instance, totalBP));
      List<String> addresses = Collections.singletonList(instance);
      result.add(new Symptom(SYMPTOM_INSTANCE_BACK_PRESSURE.text(), now, addresses));
    }
  }
  return result;
}
Detects all components initiating backpressure above the configured limit . Normally there will be only one component
20,614
/**
 * Build an AuroraController chosen by the AURORA_CONTROLLER_CLASS config flag:
 * the CLI-based controller when the flag is true, the Heron-shell-based one
 * otherwise. All context values are resolved against the local-mode config.
 *
 * @return a freshly constructed controller
 * @throws ClassNotFoundException declared for subclasses/overrides
 * @throws InstantiationException declared for subclasses/overrides
 * @throws IllegalAccessException declared for subclasses/overrides
 */
protected AuroraController getController()
    throws ClassNotFoundException, InstantiationException, IllegalAccessException {
  // NOTE(review): getBooleanValue may return null here, which would NPE on unboxing
  // below — confirm the key always has a default.
  Boolean cliController = config.getBooleanValue(Key.AURORA_CONTROLLER_CLASS);
  Config local = Config.toLocalMode(this.config);
  String topology = Runtime.topologyName(runtime);
  String cluster = Context.cluster(local);
  String role = Context.role(local);
  String env = Context.environ(local);
  String auroraPath = AuroraContext.getHeronAuroraPath(local);
  Boolean verbose = Context.verbose(local);
  if (cliController) {
    return new AuroraCLIController(topology, cluster, role, env, auroraPath, verbose);
  }
  return new AuroraHeronShellController(
      topology, cluster, role, env, auroraPath, verbose, local);
}
Get an AuroraController based on the config and runtime
20,615
/**
 * Create the given directory on the file system if it does not already exist.
 *
 * @param dir path of the directory to create
 * @throws StatefulStorageException if creation fails or the directory is
 *         still absent afterwards
 */
protected void createDir(String dir) throws StatefulStorageException {
  Path path = new Path(dir);
  try {
    fileSystem.mkdirs(path);
    // mkdirs may silently no-op; verify the directory actually exists.
    if (!fileSystem.exists(path)) {
      throw new StatefulStorageException("Failed to create dir: " + dir);
    }
  } catch (IOException e) {
    throw new StatefulStorageException("Failed to create dir: " + dir, e);
  }
}
Creates the directory if it does not exist .
20,616
/**
 * Keep only the metrics whose names pass {@link #contains}.
 *
 * @param metricsInfos metrics to filter
 * @return a new list holding the metrics that matched
 */
public Iterable<MetricsInfo> filter(Iterable<MetricsInfo> metricsInfos) {
  List<MetricsInfo> kept = new ArrayList<MetricsInfo>();
  for (MetricsInfo info : metricsInfos) {
    if (contains(info.getName())) {
      kept.add(info);
    }
  }
  return kept;
}
Return an immutable view of filtered metrics
20,617
/**
 * Build the Marathon container JSON node: a DOCKER-typed container wrapping
 * the docker sub-object.
 *
 * @param mapper Jackson mapper used to create JSON nodes
 * @return the container node
 */
protected ObjectNode getContainer(ObjectMapper mapper) {
  ObjectNode container = mapper.createObjectNode();
  container.put(MarathonConstants.CONTAINER_TYPE, "DOCKER");
  container.set("docker", getDockerContainer(mapper));
  return container;
}
build the container object
20,618
/**
 * POST the given payload to the scheduler's HTTP endpoint for the command and
 * verify that the scheduler replied with an OK status.
 *
 * @param command scheduler command that selects the endpoint path
 * @param data request payload bytes
 * @return true only if the request was sent and the scheduler responded OK
 */
protected boolean requestSchedulerService(Command command, byte[] data) {
  String endpoint = getCommandEndpoint(schedulerHttpEndpoint, command);
  final HttpURLConnection connection = NetworkUtils.getHttpConnection(endpoint);
  if (connection == null) {
    LOG.severe("Scheduler not found.");
    return false;
  }
  try {
    if (!NetworkUtils.sendHttpPostRequest(connection, NetworkUtils.URL_ENCODE_TYPE, data)) {
      LOG.log(Level.SEVERE, "Failed to send http request to scheduler");
      return false;
    }
    Common.StatusCode statusCode;
    LOG.fine("Receiving response from scheduler...");
    try {
      // Parse the protobuf SchedulerResponse out of the raw HTTP body.
      statusCode = Scheduler.SchedulerResponse.newBuilder()
          .mergeFrom(NetworkUtils.readHttpResponse(connection))
          .build().getStatus().getStatus();
    } catch (InvalidProtocolBufferException e) {
      LOG.log(Level.SEVERE, "Failed to parse response", e);
      return false;
    }
    if (!statusCode.equals(Common.StatusCode.OK)) {
      LOG.severe("Received not OK response from scheduler");
      return false;
    }
  } finally {
    // Always release the connection, on every return path above.
    connection.disconnect();
  }
  return true;
}
Send payload to target HTTP connection to request a service
20,619
/**
 * Build the scheduler HTTP endpoint URL for a command. The path component is
 * the lower-cased command name and must match the server-side routing.
 *
 * @param schedulerEndpoint host:port of the scheduler
 * @param command command to address
 * @return full http URL for the command
 */
protected String getCommandEndpoint(String schedulerEndpoint, Command command) {
  String path = command.name().toLowerCase();
  return String.format("http://%s/%s", schedulerEndpoint, path);
}
Construct the endpoint to send http request for a particular command Make sure the construction matches server sides .
20,620
/**
 * Generate a packing plan using the packing class named in the config.
 * The packing instance is reflectively created, initialized with the topology
 * from the runtime config, asked to pack, and always closed afterwards.
 *
 * @param config static config naming the packing class
 * @param runtime runtime config carrying the topology
 * @return the generated packing plan
 * @throws PackingException if the packing class cannot be instantiated
 */
public PackingPlan createPackingPlan(final Config config, final Config runtime)
    throws PackingException {
  String packingClass = Context.packingClass(config);
  IPacking packing;
  try {
    packing = ReflectionUtils.newInstance(packingClass);
  } catch (IllegalAccessException | InstantiationException | ClassNotFoundException e) {
    throw new PackingException(
        String.format("Failed to instantiate packing instance using packing class %s",
            packingClass), e);
  }
  try {
    TopologyAPI.Topology topology = Runtime.topology(runtime);
    packing.initialize(config, topology);
    return packing.pack();
  } finally {
    // Release packing resources even when pack() throws.
    SysUtils.closeIgnoringExceptions(packing);
  }
}
Returns a packing plan generated by configured packing class
20,621
/**
 * Reflectively create and initialize the scheduler named in the config.
 *
 * @param config static config naming the scheduler class
 * @param runtime runtime config passed to the scheduler's initialize
 * @return an initialized scheduler instance
 * @throws SchedulerException if the scheduler class cannot be instantiated
 */
public IScheduler getSchedulerInstance(Config config, Config runtime)
    throws SchedulerException {
  String schedulerClass = Context.schedulerClass(config);
  IScheduler scheduler;
  try {
    scheduler = ReflectionUtils.newInstance(schedulerClass);
  } catch (IllegalAccessException | InstantiationException | ClassNotFoundException e) {
    // Chain the reflective failure as the cause (it was previously dropped),
    // matching the style of createPackingPlan's PackingException.
    throw new SchedulerException(
        String.format("Failed to instantiate scheduler using class '%s'", schedulerClass), e);
  }
  scheduler.initialize(config, runtime);
  return scheduler;
}
Creates and initializes scheduler instance
20,622
/**
 * Build the initial runtime config from the topology definition: id, name,
 * the definition itself, and the container count (+1 for the master container).
 *
 * @param topology topology to derive the runtime from
 * @return the assembled runtime config
 */
public Config createPrimaryRuntime(TopologyAPI.Topology topology) {
  int totalContainers = 1 + TopologyUtils.getNumContainers(topology);
  Config.Builder builder = Config.newBuilder()
      .put(Key.TOPOLOGY_ID, topology.getId())
      .put(Key.TOPOLOGY_NAME, topology.getName())
      .put(Key.TOPOLOGY_DEFINITION, topology)
      .put(Key.NUM_CONTAINERS, totalContainers);
  return builder.build();
}
Creates initial runtime config instance using topology information .
20,623
/**
 * Build a runtime config carrying only the scheduler's state manager adaptor.
 *
 * @param adaptor the state manager adaptor to embed
 * @return a config holding the adaptor under its well-known key
 */
public Config createAdaptorRuntime(SchedulerStateManagerAdaptor adaptor) {
  Config.Builder builder = Config.newBuilder();
  builder.put(Key.SCHEDULER_STATE_MANAGER_ADAPTOR, adaptor);
  return builder.build();
}
Creates initial runtime config of scheduler state manager adaptor
20,624
/**
 * Extend a runtime config with packing-plan details: the per-component RAM
 * distribution and the container count (+1 for the master container).
 *
 * @param runtime existing runtime config to copy
 * @param packing packing plan providing the details
 * @return a new config combining both
 */
public Config createConfigWithPackingDetails(Config runtime, PackingPlan packing) {
  int totalContainers = 1 + packing.getContainers().size();
  return Config.newBuilder()
      .putAll(runtime)
      .put(Key.COMPONENT_RAMMAP, packing.getComponentRamDistribution())
      .put(Key.NUM_CONTAINERS, totalContainers)
      .build();
}
Creates a config instance with packing plan info added to runtime config
20,625
/**
 * Hook from the IStatefulComponent interface, invoked just before the spout's
 * state is saved; only logs the checkpoint id.
 *
 * @param checkpointId id of the checkpoint about to be saved
 */
public void preSave(String checkpointId) {
  String message = String.format("Saving spout state at checkpoint %s", checkpointId);
  System.out.println(message);
}
These two methods are required to implement the IStatefulComponent interface
20,626
/**
 * BaseRichSpout lifecycle hook: remember the collector so tuples can be
 * emitted later. The config map and topology context are not used.
 *
 * @param map topology configuration (unused)
 * @param ctx topology context (unused)
 * @param collector collector for emitting tuples
 */
public void open(Map<String, Object> map, TopologyContext ctx,
                 SpoutOutputCollector collector) {
  this.spoutOutputCollector = collector;
}
These three methods are required to extend the BaseRichSpout abstract class
20,627
/**
 * Run a process synchronously, capturing its combined stdout/stderr into the
 * given builder, and return its exit code (-1 on start failure or interrupt).
 *
 * @param isVerbose whether the output-draining thread echoes output
 * @param isInheritIO whether the child inherits this process's stdio
 * @param cmdline command and arguments
 * @param outputBuilder receives process output; a throwaway builder is used if null
 * @param workingDirectory working directory for the child, or null
 * @param envs extra environment variables for the child
 * @return the child's exit value, or -1 on failure/interruption
 */
private static int runSyncProcess(boolean isVerbose, boolean isInheritIO, String[] cmdline,
    StringBuilder outputBuilder, File workingDirectory, Map<String, String> envs) {
  // Never hand a null builder to the stream thread.
  final StringBuilder builder = outputBuilder == null ? new StringBuilder() : outputBuilder;
  LOG.log(Level.FINE, "Running synced process: ``{0}''''", joinString(cmdline));
  ProcessBuilder pb = getProcessBuilder(isInheritIO, cmdline, workingDirectory, envs);
  // Merge stderr into stdout so one drain thread sees everything.
  pb.redirectErrorStream(true);
  Process process;
  try {
    process = pb.start();
  } catch (IOException e) {
    LOG.log(Level.SEVERE, "Failed to run synced process", e);
    return -1;
  }
  // Drain the child's output on a separate thread to avoid pipe-buffer deadlock.
  Thread outputsThread = createAsyncStreamThread(process.getInputStream(), builder, isVerbose);
  try {
    outputsThread.start();
    int exitValue = process.waitFor();
    // Wait for the drain thread so 'builder' is complete before returning.
    outputsThread.join();
    return exitValue;
  } catch (InterruptedException e) {
    outputsThread.interrupt();
    process.destroy();
    LOG.log(Level.SEVERE, "Running synced process was interrupted", e);
    // Preserve the interrupt for callers up the stack.
    Thread.currentThread().interrupt();
    return -1;
  }
}
run sync process
20,628
/**
 * Split a command string into whitespace-separated tokens.
 *
 * @param command non-empty command line
 * @return the tokens in order
 * @throws IllegalArgumentException if the command is empty
 */
protected static String[] splitTokens(String command) {
  if (command.length() == 0) {
    throw new IllegalArgumentException("Empty command");
  }
  StringTokenizer tokenizer = new StringTokenizer(command);
  String[] tokens = new String[tokenizer.countTokens()];
  int index = 0;
  while (tokenizer.hasMoreTokens()) {
    tokens[index++] = tokenizer.nextToken();
  }
  return tokens;
}
Splits the command string into tokens; used because the command argument contains spaces.
20,629
/**
 * Download a package from a URL to the given destination path using curl,
 * running from the destination's parent directory.
 *
 * @param uri source URL
 * @param destination target file path
 * @param isVerbose whether to echo process output
 * @param isInheritIO whether the child inherits stdio
 * @return true if curl exited with status 0
 */
public static boolean curlPackage(String uri, String destination,
                                  boolean isVerbose, boolean isInheritIO) {
  File parentDirectory = Paths.get(destination).getParent().toFile();
  String cmd = String.format("curl %s -o %s", uri, destination);
  int exitCode = runSyncProcess(
      isVerbose, isInheritIO, splitTokens(cmd), new StringBuilder(), parentDirectory);
  return exitCode == 0;
}
Copy a URL package to a target folder
20,630
/**
 * Extract a tar package into the target folder by invoking {@code tar -xvf}
 * with the folder as the working directory.
 *
 * @param packageName tar file to extract
 * @param targetFolder directory to extract into
 * @param isVerbose whether to echo process output
 * @param isInheritIO whether the child inherits stdio
 * @return true if tar exited with status 0
 */
public static boolean extractPackage(String packageName, String targetFolder,
                                     boolean isVerbose, boolean isInheritIO) {
  String cmd = String.format("tar -xvf %s", packageName);
  int exitCode = runSyncProcess(
      isVerbose, isInheritIO, splitTokens(cmd), new StringBuilder(), new File(targetFolder));
  return exitCode == 0;
}
Extract a tar package to a target folder
20,631
/**
 * Start the periodic purge task on the given looper. The looper is captured
 * only once (first caller wins); subsequent calls just arm another timer.
 *
 * @param wakeableLooper looper to run the purge on
 */
public void startPurge(WakeableLooper wakeableLooper) {
  synchronized (CacheCore.class) {
    if (looper == null) {
      looper = wakeableLooper;
    }
    Runnable purgeTask = new Runnable() {
      public void run() {
        purge();
      }
    };
    looper.registerTimerEvent(interval, purgeTask);
  }
}
start purge looper task
20,632
/**
 * Build a RegisterInstanceRequest from this instance's identity and send it
 * to the stream manager, retrying at the configured reconnect interval.
 */
private void sendRegisterRequest() {
  StreamManager.RegisterInstanceRequest.Builder builder =
      StreamManager.RegisterInstanceRequest.newBuilder();
  builder.setInstance(instance);
  builder.setTopologyName(topologyName);
  builder.setTopologyId(topologyId);
  sendRequest(builder.build(), null,
      StreamManager.RegisterInstanceResponse.newBuilder(),
      systemConfig.getInstanceReconnectStreammgrInterval());
}
Build register request and send to stream mgr
20,633
/**
 * Register the containers of a new topology and schedule an initial task
 * (attempt 0) for each of them. Refuses to do anything once the job has
 * been killed.
 *
 * @param jobDefinition container definitions keyed by container index
 * @return true if the tasks were scheduled, false if the job is terminated
 */
public boolean createJob(Map<Integer, BaseContainer> jobDefinition) {
  synchronized (this) {
    if (isTerminated) {
      LOG.severe("Job has been killed");
      return false;
    }
    containersInfo.putAll(jobDefinition);
    // The map key was previously extracted into an unused local; iterate
    // values directly since only the container is needed.
    for (BaseContainer container : jobDefinition.values()) {
      // Every container starts at attempt 0.
      String taskId = TaskUtils.getTaskId(container.name, 0);
      scheduleNewTask(taskId);
    }
  }
  return true;
}
Create a topology
20,634
/**
 * Kill the topology: mark the job terminated, drop all pending schedules,
 * ask Mesos to kill every known task, and clear the bookkeeping maps.
 *
 * @return true on success, false if the job was already killed
 */
public boolean killJob() {
  synchronized (this) {
    if (isTerminated) {
      LOG.info("Job has been killed");
      return false;
    }
    isTerminated = true;
    LOG.info(String.format("Kill job: %s", Context.topologyName(heronConfig)));
    LOG.info("Remove all tasks to schedule");
    toScheduleTasks.clear();
    for (String taskId : tasksId.values()) {
      Protos.TaskID mesosTaskId = Protos.TaskID.newBuilder().setValue(taskId).build();
      driver.killTask(mesosTaskId);
    }
    containersInfo.clear();
    tasksId.clear();
    return true;
  }
}
Kill a topology
20,635
/**
 * Restart a topology by killing its Mesos tasks: all tasks when
 * {@code containerIndex} is -1, otherwise only that container's task.
 *
 * @param containerIndex container to restart, or -1 for the whole job
 * @return true on success, false if the job was already killed
 */
public boolean restartJob(int containerIndex) {
  synchronized (this) {
    if (isTerminated) {
      LOG.severe("Job has been killed");
      return false;
    }
    List<String> tasksToRestart = new ArrayList<>();
    if (containerIndex == -1) {
      // -1 is the "restart everything" sentinel.
      tasksToRestart.addAll(tasksId.values());
    } else {
      tasksToRestart.add(tasksId.get(containerIndex));
    }
    for (String taskId : tasksToRestart) {
      Protos.TaskID mesosTaskId = Protos.TaskID.newBuilder().setValue(taskId).build();
      driver.killTask(mesosTaskId);
    }
    return true;
  }
}
Restart a topology
20,636
/**
 * Block until the Mesos framework is registered, the timeout elapses, or the
 * thread is interrupted.
 *
 * @param timeout maximum time to wait
 * @param unit unit of {@code timeout}
 * @return true if registration happened within the timeout, false otherwise
 */
public boolean waitForRegistered(long timeout, TimeUnit unit) {
  try {
    if (this.registeredLatch.await(timeout, unit)) {
      return true;
    }
  } catch (InterruptedException e) {
    LOG.severe("Failed to wait for mesos framework got registered");
    // Preserve the interrupt status for callers (it was previously swallowed).
    Thread.currentThread().interrupt();
    return false;
  }
  return false;
}
Causes the current thread to wait until the MesosFramework is registered, unless the thread is interrupted or the specified waiting time elapses.
20,637
/**
 * React to a failed Mesos task: schedule a retry with an incremented attempt
 * number while the container's retry budget allows, otherwise give up.
 *
 * @param taskId id of the failed task (encodes container index and attempt)
 */
protected void handleMesosFailure(String taskId) {
  int attempt = TaskUtils.getAttemptForTaskId(taskId);
  int containerIndex = TaskUtils.getContainerIndexForTaskId(taskId);
  BaseContainer container = containersInfo.get(containerIndex);
  if (attempt < container.retries) {
    int nextAttempt = attempt + 1;
    LOG.warning(String.format("Retrying task: %s, attempt: %d", container.name, nextAttempt));
    scheduleNewTask(TaskUtils.getTaskId(container.name, nextAttempt));
  } else {
    LOG.severe("Would not restart the job since it is beyond retries: " + attempt);
  }
}
Restart a failed task unless it exceeds the retries limit.
20,638
/**
 * Record the task as the current one for its container and enqueue it for
 * scheduling.
 *
 * @param taskId id of the task to schedule (encodes the container index)
 * @return always true
 */
protected boolean scheduleNewTask(String taskId) {
  LOG.info(String.format("We are to schedule task: [%s]", taskId));
  int containerIndex = TaskUtils.getContainerIndexForTaskId(taskId);
  // Track the latest task id per container, then queue it for launch.
  tasksId.put(containerIndex, taskId);
  toScheduleTasks.add(taskId);
  LOG.info(String.format("Added task: %s into the to-schedule-tasks queue: ", taskId));
  return true;
}
Schedule a new task
20,639
/**
 * Match queued tasks against the given Mesos offers, first-fit: each pending
 * task is placed on the first offer whose remaining resources satisfy it.
 * Matching stops at the first task that cannot be placed (it is re-queued),
 * preserving FIFO order. Returns an empty list when the job is terminated.
 *
 * @param offerResources remaining resources per offer; consumed as tasks match
 * @return tasks ready to launch, paired with their offers and port ranges
 */
protected List<LaunchableTask> generateLaunchableTasks(
    Map<Protos.Offer, TaskResources> offerResources) {
  List<LaunchableTask> tasks = new LinkedList<>();
  if (isTerminated) {
    LOG.info("Job has been killed");
    return tasks;
  }
  while (!toScheduleTasks.isEmpty()) {
    String taskId = toScheduleTasks.poll();
    BaseContainer baseContainer =
        containersInfo.get(TaskUtils.getContainerIndexForTaskId(taskId));
    TaskResources neededResources = new TaskResources(
        baseContainer.cpu, baseContainer.memInMB, baseContainer.diskInMB, baseContainer.ports);
    boolean isMatched = false;
    Iterator<Map.Entry<Protos.Offer, TaskResources>> it =
        offerResources.entrySet().iterator();
    while (it.hasNext()) {
      Map.Entry<Protos.Offer, TaskResources> kv = it.next();
      Protos.Offer offer = kv.getKey();
      TaskResources resources = kv.getValue();
      if (resources.canSatisfy(neededResources)) {
        // Deduct this task's share so later tasks see the reduced offer.
        resources.consume(neededResources);
        // Expand the first held port range into the concrete free-port list.
        List<Integer> freePorts = new ArrayList<>();
        for (int port = (int) (neededResources.getPortsHold().get(0).rangeStart);
            port <= (int) (neededResources.getPortsHold().get(0).rangeEnd);
            port++) {
          freePorts.add(port);
        }
        tasks.add(new LaunchableTask(taskId, baseContainer, offer, freePorts));
        isMatched = true;
        break;
      }
    }
    if (!isMatched) {
      // No offer fits: put the task back and stop — later tasks would jump
      // the queue otherwise.
      LOG.info(String.format("Insufficient resources remaining for baseContainer: %s, "
          + "will append to queue. Need: [%s]", taskId, neededResources.toString()));
      toScheduleTasks.add(taskId);
      break;
    }
  }
  return tasks;
}
Generate launchable tasks basing on offer resources
20,640
/**
 * Configure java.util.logging for the scheduler process: pick the level from
 * the verbose flag, make sure the logging directory exists, initialize the
 * root logger, and attach a rotating file handler named after the topology.
 *
 * @param config config providing system-config path, verbosity and topology name
 * @throws IOException if the file handler cannot be created
 */
private static void setupLogging(Config config) throws IOException {
  String systemConfigFilename = Context.systemConfigFile(config);
  SystemConfig systemConfig = SystemConfig.newBuilder(true)
      .putAll(systemConfigFilename, true)
      .build();
  // Verbose mode lowers the threshold from INFO to FINE.
  Level loggingLevel = Level.INFO;
  if (Context.verbose(config).booleanValue()) {
    loggingLevel = Level.FINE;
  }
  String loggingDir = systemConfig.getHeronLoggingDirectory();
  if (!FileUtils.isDirectoryExists(loggingDir)) {
    FileUtils.createDirectory(loggingDir);
  }
  LoggingHelper.loggerInit(loggingLevel, true);
  // Log file name: heron-<topology>-scheduler.
  String processId = String.format("%s-%s-%s", "heron",
      Context.topologyName(config), "scheduler");
  LoggingHelper.addLoggingHandler(
      LoggingHelper.getFileHandler(processId, loggingDir, true,
          systemConfig.getHeronLoggingMaximumSize(),
          systemConfig.getHeronLoggingMaximumFiles()));
  LOG.info("Logging setup done.");
}
Set up logging based on the Config
20,641
/**
 * Construct the HTTP server that receives scheduler requests.
 *
 * @param runtime runtime config for the server
 * @param scheduler scheduler that handles the requests
 * @param port port to listen on
 * @return a new SchedulerServer
 * @throws IOException if the server cannot be created
 */
protected SchedulerServer getServer(Config runtime, IScheduler scheduler, int port)
    throws IOException {
  return new SchedulerServer(runtime, scheduler, port);
}
Get the http server for receiving scheduler requests
20,642
/**
 * Register the spout's per-wakeup task on the looper: produce tuples when
 * allowed, drain incoming control tuples, track metrics, and wake the looper
 * again while there is more work. Also arms timeout scanning (when message
 * timeouts are enabled) and the standard instance timer events.
 */
private void addSpoutsTasks() {
  Runnable spoutTasks = new Runnable() {
    public void run() {
      spoutMetrics.updateTaskRunCount();
      // Produce only when flow control / rate limits permit.
      if (isProduceTuple()) {
        spoutMetrics.updateProduceTupleCount();
        produceTuple();
        collector.sendOutTuples();
      }
      if (!collector.isOutQueuesAvailable()) {
        spoutMetrics.updateOutQueueFullCount();
      }
      // Handle acks/fails and other control traffic from the stream manager.
      readTuplesAndExecute(streamInQueue);
      if (ackEnabled) {
        spoutMetrics.updatePendingTuplesCount(collector.numInFlight());
      } else {
        // Without acking, tuples are acked immediately after emit.
        doImmediateAcks();
      }
      // Re-wake the looper so this task runs again while work remains.
      if (isContinueWork()) {
        spoutMetrics.updateContinueWorkCount();
        looper.wakeUp();
      }
    }
  };
  looper.addTasksOnWakeup(spoutTasks);
  if (enableMessageTimeouts) {
    lookForTimeouts();
  }
  InstanceUtils.prepareTimerEvents(looper, helper);
}
Tasks that happen every time the looper is woken up.
20,643
/**
 * Return a Config view of this config in the requested mode, creating and
 * caching it on first use. The current instance is slotted into the triple of
 * (raw, local, cluster) references so all views share each other. Only LOCAL
 * and CLUSTER can be created lazily; asking for RAW (or an unknown mode) is
 * an error.
 *
 * @param newMode mode of the view to return
 * @return the cached or newly built view for {@code newMode}
 * @throws IllegalArgumentException on an unrecognized current or requested mode
 */
private Config lazyCreateConfig(Mode newMode) {
  // Already in the requested mode: nothing to do.
  if (newMode == this.mode) {
    return this;
  }
  Config newRawConfig = this.rawConfig;
  Config newLocalConfig = this.localConfig;
  Config newClusterConfig = this.clusterConfig;
  // Register 'this' as the view for its own mode so the new view links back.
  switch (this.mode) {
    case RAW:
      newRawConfig = this;
      break;
    case LOCAL:
      newLocalConfig = this;
      break;
    case CLUSTER:
      newClusterConfig = this;
      break;
    default:
      throw new IllegalArgumentException("Unrecognized mode found in config: " + this.mode);
  }
  switch (newMode) {
    case LOCAL:
      if (this.localConfig == null) {
        // Expand tokens against the raw key/value map.
        Config tempConfig = Config.expand(
            Config.newBuilder().putAll(rawConfig.cfgMap).build());
        this.localConfig = new Config(Mode.LOCAL, newRawConfig, tempConfig, newClusterConfig);
      }
      return this.localConfig;
    case CLUSTER:
      if (this.clusterConfig == null) {
        // Cluster view overrides home/conf with their cluster-side values
        // before expansion.
        Config.Builder bc = Config.newBuilder()
            .putAll(rawConfig.cfgMap)
            .put(Key.HERON_HOME, get(Key.HERON_CLUSTER_HOME))
            .put(Key.HERON_CONF, get(Key.HERON_CLUSTER_CONF));
        Config tempConfig = Config.expand(bc.build());
        this.clusterConfig = new Config(Mode.CLUSTER, newRawConfig, newLocalConfig, tempConfig);
      }
      return this.clusterConfig;
    case RAW:
    default:
      throw new IllegalArgumentException("Unrecognized mode passed to lazyCreateConfig: " + newMode);
  }
}
what gets generated during toClusterMode
20,644
/**
 * Initialize java.util.logging: set the level on the root logger and all of
 * its handlers, quiet down noisy third-party loggers, and optionally redirect
 * System.out/System.err into dedicated loggers.
 *
 * @param level level for the root logger and its handlers
 * @param isRedirectStdOutErr if true, replace console handlers and route
 *        stdout/stderr through "stdout"/"stderr" loggers
 * @param format logging format string applied first
 * @throws IOException declared for subclasses/overloads
 */
public static void loggerInit(Level level, boolean isRedirectStdOutErr, String format)
    throws IOException {
  setLoggingFormat(format);
  Logger rootLogger = Logger.getLogger("");
  for (Handler handler : rootLogger.getHandlers()) {
    handler.setLevel(level);
  }
  rootLogger.setLevel(level);
  // When logging below WARNING, cap zookeeper's chatter at WARNING.
  if (rootLogger.getLevel().intValue() < Level.WARNING.intValue()) {
    Logger.getLogger("org.apache.zookeeper").setLevel(Level.WARNING);
  }
  // Silence commons-logging / httpclient wire noise.
  System.setProperty("org.apache.commons.logging.Log",
      "org.apache.commons.logging.impl.SimpleLog");
  System.setProperty("org.apache.commons.logging.simplelog.log.httpclient.wire", "ERROR");
  System.setProperty("org.apache.commons.logging.simplelog.log.org.apache.http", "ERROR");
  System.setProperty("org.apache.commons.logging.simplelog.log.org.apache.http.headers",
      "ERROR");
  if (isRedirectStdOutErr) {
    // Remove console handlers first to avoid an infinite loop:
    // stdout -> logger -> console handler -> stdout.
    for (Handler handler : rootLogger.getHandlers()) {
      if (handler instanceof ConsoleHandler) {
        rootLogger.removeHandler(handler);
      }
    }
    Logger logger;
    LoggingOutputStream los;
    logger = Logger.getLogger("stdout");
    los = new LoggingOutputStream(logger, StdOutErrLevel.STDOUT);
    System.setOut(new PrintStream(los, true));
    logger = Logger.getLogger("stderr");
    los = new LoggingOutputStream(logger, StdOutErrLevel.STDERR);
    System.setErr(new PrintStream(los, true));
  }
}
Init java util logging
20,645
/**
 * Register a wakeup task on the slave looper that drains the sink's incoming
 * queue, handing every record to the metrics sink.
 */
private void addSinkTasks() {
  Runnable drainQueue = new Runnable() {
    public void run() {
      while (!metricsInSinkQueue.isEmpty()) {
        metricsSink.processRecord(metricsInSinkQueue.poll());
      }
    }
  };
  slaveLooper.addTasksOnWakeup(drainQueue);
}
Add a task to invoke the processRecord method when the WakeableLooper is woken up.
20,646
/**
 * Apply component parallelism changes to the plan using the
 * ResourceCompliantRR algorithm: first remove instances for components
 * scaling down, then add instances for components scaling up, restarting
 * the round-robin cursor at the first container before each phase.
 *
 * @param planBuilder builder holding the current plan
 * @param componentChanges parallelism deltas per component
 * @return the same builder, mutated
 * @throws ConstraintViolationException if a placement violates constraints
 */
private PackingPlanBuilder getResourceCompliantRRAllocation(
    PackingPlanBuilder planBuilder, Map<String, Integer> componentChanges)
    throws ConstraintViolationException {
  Map<String, Integer> scaleDown = PackingUtils.getComponentsToScale(
      componentChanges, PackingUtils.ScalingDirection.DOWN);
  Map<String, Integer> scaleUp = PackingUtils.getComponentsToScale(
      componentChanges, PackingUtils.ScalingDirection.UP);
  if (!scaleDown.isEmpty()) {
    resetToFirstContainer();
    removeInstancesFromContainers(planBuilder, scaleDown);
  }
  if (!scaleUp.isEmpty()) {
    resetToFirstContainer();
    assignInstancesToContainers(planBuilder, scaleUp, PolicyType.FLEXIBLE);
  }
  return planBuilder;
}
Get the instances allocation based on the ResourceCompliantRR packing algorithm
20,647
/**
 * Assign the requested number of instances of each component to containers,
 * delegating each placement to the given policy.
 *
 * @param planBuilder builder to place instances into
 * @param parallelismMap instances to add, per component
 * @param policyType placement policy (strict or flexible round-robin)
 * @throws ConstraintViolationException if a placement violates constraints
 */
private void assignInstancesToContainers(PackingPlanBuilder planBuilder,
    Map<String, Integer> parallelismMap, PolicyType policyType)
    throws ConstraintViolationException {
  // Iterate entries directly instead of keySet()+get() (one lookup, no rehash).
  for (Map.Entry<String, Integer> entry : parallelismMap.entrySet()) {
    String componentName = entry.getKey();
    int numInstance = entry.getValue();
    for (int i = 0; i < numInstance; ++i) {
      policyType.assignInstance(planBuilder, componentName, this);
    }
  }
}
Assigns instances to containers .
20,648
/**
 * Strict round-robin placement: put the instance on the current container
 * unconditionally, then advance the cursor to the next container.
 *
 * @param planBuilder builder to place the instance into
 * @param componentName component the instance belongs to
 * @throws ConstraintViolationException if the placement violates constraints
 */
private void strictRRpolicy(PackingPlanBuilder planBuilder, String componentName)
    throws ConstraintViolationException {
  planBuilder.addInstance(this.containerId, componentName);
  // Advance regardless of fit — strict RR never searches for space.
  this.containerId = nextContainerId(this.containerId);
}
Attempts to place the instance the current containerId .
20,649
/**
 * Flexible round-robin placement: starting at the current container and
 * cycling through the whole container set, place the instance on the first
 * container with room, then advance the cursor past it.
 *
 * @param planBuilder builder to place the instance into
 * @param componentName component the instance belongs to
 * @throws ResourceExceededException if no container can hold the instance
 */
private void flexibleRRpolicy(PackingPlanBuilder planBuilder, String componentName)
    throws ResourceExceededException {
  ContainerIdScorer scorer = new ContainerIdScorer(this.containerId, this.numContainers);
  int chosenContainer = planBuilder.addInstance(scorer, componentName);
  this.containerId = nextContainerId(chosenContainer);
}
Performs a RR placement . Tries to place the instance on any container with space starting at containerId and cycling through the container set until it can be placed .
20,650
/**
 * Remove one instance of the component from the plan, preferring (in order)
 * containers holding only this component, then those with more instances,
 * then mixed containers, then lower container ids; the cursor advances past
 * the container the instance was removed from.
 *
 * @param packingPlanBuilder builder to remove the instance from
 * @param componentName component whose instance is removed
 * @throws RuntimeException if no instance of the component can be removed
 */
private void removeRRInstance(PackingPlanBuilder packingPlanBuilder, String componentName)
    throws RuntimeException {
  List<Scorer<Container>> scorers = new ArrayList<>();
  // Preference order for which container loses the instance.
  scorers.add(new HomogeneityScorer(componentName, true));
  scorers.add(new InstanceCountScorer());
  scorers.add(new HomogeneityScorer(componentName, false));
  scorers.add(new ContainerIdScorer(false));
  int removedFrom = packingPlanBuilder.removeInstance(scorers, componentName);
  this.containerId = nextContainerId(removedFrom);
}
Remove an instance of a particular component from the containers
20,651
/**
 * Notify every registered task hook that a spout tuple was acked.
 *
 * @param messageId id of the acked message
 * @param completeLatency end-to-end latency of the tuple
 */
public void invokeHookSpoutAck(Object messageId, Duration completeLatency) {
  if (taskHooks.isEmpty()) {
    return;
  }
  SpoutAckInfo info = new SpoutAckInfo(messageId, getThisTaskId(), completeLatency);
  for (ITaskHook hook : taskHooks) {
    hook.spoutAck(info);
  }
}
Task hook called in spout every time a tuple gets acked
20,652
/**
 * Notify every registered task hook that a spout tuple failed.
 *
 * @param messageId id of the failed message
 * @param failLatency time the tuple was in flight before failing
 */
public void invokeHookSpoutFail(Object messageId, Duration failLatency) {
  if (taskHooks.isEmpty()) {
    return;
  }
  SpoutFailInfo info = new SpoutFailInfo(messageId, getThisTaskId(), failLatency);
  for (ITaskHook hook : taskHooks) {
    hook.spoutFail(info);
  }
}
Task hook called in spout every time a tuple gets failed
20,653
/**
 * Notify every registered task hook that a bolt executed a tuple.
 *
 * @param tuple the executed tuple
 * @param executeLatency time spent executing it
 */
public void invokeHookBoltExecute(Tuple tuple, Duration executeLatency) {
  if (taskHooks.isEmpty()) {
    return;
  }
  BoltExecuteInfo info = new BoltExecuteInfo(tuple, getThisTaskId(), executeLatency);
  for (ITaskHook hook : taskHooks) {
    hook.boltExecute(info);
  }
}
Task hook called in bolt every time a tuple gets executed
20,654
/**
 * Notify every registered task hook that a bolt acked a tuple.
 *
 * @param tuple the acked tuple
 * @param processLatency time spent processing it
 */
public void invokeHookBoltAck(Tuple tuple, Duration processLatency) {
  if (taskHooks.isEmpty()) {
    return;
  }
  BoltAckInfo info = new BoltAckInfo(tuple, getThisTaskId(), processLatency);
  for (ITaskHook hook : taskHooks) {
    hook.boltAck(info);
  }
}
Task hook called in bolt every time a tuple gets acked
20,655
/**
 * Notify every registered task hook that a bolt failed a tuple.
 *
 * @param tuple the failed tuple
 * @param failLatency time the tuple was in flight before failing
 */
public void invokeHookBoltFail(Tuple tuple, Duration failLatency) {
  if (taskHooks.isEmpty()) {
    return;
  }
  BoltFailInfo info = new BoltFailInfo(tuple, getThisTaskId(), failLatency);
  for (ITaskHook hook : taskHooks) {
    hook.boltFail(info);
  }
}
Task hook called in bolt every time a tuple gets failed
20,656
/**
 * Copy the uploaded stream to the given file location, creating parent
 * directories as needed.
 *
 * @param uploadedInputStream stream to read; not closed by this method
 * @param uploadedFileLocation destination file path
 * @throws IOException if the parent directory cannot be created or copying fails
 */
public static void writeToFile(InputStream uploadedInputStream, String uploadedFileLocation)
    throws IOException {
  File file = new File(uploadedFileLocation);
  File parent = file.getParentFile();
  // mkdirs() returns false both on failure AND when the directory already
  // exists, so verify presence instead of ignoring the return value
  // (previously a failed mkdirs was silent and the write failed obscurely).
  if (parent != null && !parent.mkdirs() && !parent.isDirectory()) {
    throw new IOException("Failed to create parent directory: " + parent);
  }
  byte[] buffer = new byte[1024];
  int read;
  try (OutputStream out = new FileOutputStream(file)) {
    while ((read = uploadedInputStream.read(buffer)) != -1) {
      out.write(buffer, 0, read);
    }
    out.flush();
  }
}
save uploaded file to new location
20,657
/**
 * Artifact-upload endpoint: validates the multipart input, stores the file
 * under the configured directory with a UUID-prefixed name, and returns the
 * download URL for the stored artifact.
 *
 * @param uploadedInputStream file content from the multipart form
 * @param fileDetail form-data metadata (original file name)
 * @return 200 with the download URI, 400 on missing parts, 500 on write failure
 */
@Path("/upload")
@Consumes(MediaType.MULTIPART_FORM_DATA)
public Response uploadFile(
    @FormDataParam("file") InputStream uploadedInputStream,
    @FormDataParam("file") FormDataContentDisposition fileDetail) {
  Config config = createConfig();
  if (uploadedInputStream == null) {
    String msg = "input stream is null";
    LOG.error(msg);
    return Response.status(Response.Status.BAD_REQUEST)
        .type(MediaType.APPLICATION_JSON)
        .entity(Utils.createMessage(msg))
        .build();
  }
  if (fileDetail == null) {
    String msg = "form data content disposition is null";
    LOG.error(msg);
    return Response.status(Response.Status.BAD_REQUEST)
        .type(MediaType.APPLICATION_JSON)
        .entity(Utils.createMessage(msg))
        .build();
  }
  String uploadDir = config.getStringValue(FILE_SYSTEM_DIRECTORY);
  // Random prefix prevents collisions between equal-named uploads.
  final String fileName = UUID.randomUUID() + "-" + fileDetail.getFileName();
  final String uploadedFileLocation = uploadDir + "/" + fileName;
  try {
    FileHelper.writeToFile(uploadedInputStream, uploadedFileLocation);
  } catch (IOException e) {
    LOG.error("error uploading file {}", fileDetail.getFileName(), e);
    return Response.serverError()
        .type(MediaType.APPLICATION_JSON)
        .entity(Utils.createMessage(e.getMessage()))
        .build();
  }
  // Hand the caller the matching download URL.
  String uri = String.format("http://%s:%s/api/v1/file/download/%s",
      getHostNameOrIP(), getPort(), fileName);
  return Response.status(Response.Status.OK).entity(uri).build();
}
Endpoints for artifacts upload
20,658
/**
 * Endpoint for downloading a previously uploaded artifact by file name.
 * Resolves the file under the configured upload directory and streams it back.
 */
@Path("/download/{file}")
public Response downloadFile(final @PathParam("file") String file) {
  final Config config = createConfig();
  final String directory = config.getStringValue(FILE_SYSTEM_DIRECTORY);
  return getResponseByFile(directory + "/" + file);
}
Endpoints for artifacts download
20,659
/**
 * Submits a topology to Marathon as a group; each container becomes an app
 * in the group.
 *
 * @param appConf the Marathon group definition (JSON) to POST
 * @return true if Marathon accepted the group (HTTP 201), false otherwise
 */
public boolean submitTopology(String appConf) {
  if (this.isVerbose) {
    LOG.log(Level.INFO, "Topology conf is: " + appConf);
  }
  // Marathon requires lower-case ids; reject mixed/upper-case names up front.
  if (!this.topologyName.equals(this.topologyName.toLowerCase())) {
    LOG.log(Level.SEVERE, "Marathon scheduler does not allow upper case topologies");
    return false;
  }
  String schedulerURI = String.format("%s/v2/groups", this.marathonURI);
  HttpURLConnection conn = NetworkUtils.getHttpConnection(schedulerURI);
  // BUG FIX: the null check must happen before the connection is used;
  // previously the auth header was set before the null check, which could NPE.
  if (conn == null) {
    LOG.log(Level.SEVERE, "Failed to find marathon scheduler");
    return false;
  }
  if (this.marathonAuthToken != null) {
    conn.setRequestProperty("Authorization",
        String.format("token=%s", this.marathonAuthToken));
  }
  try {
    if (!NetworkUtils.sendHttpPostRequest(conn, NetworkUtils.JSON_TYPE, appConf.getBytes())) {
      LOG.log(Level.SEVERE, "Failed to send post request");
      return false;
    }
    if (NetworkUtils.checkHttpResponseCode(conn, HttpURLConnection.HTTP_CREATED)) {
      LOG.log(Level.INFO, "Topology submitted successfully");
      return true;
    } else if (NetworkUtils.checkHttpResponseCode(conn, HttpURLConnection.HTTP_UNAUTHORIZED)) {
      LOG.log(Level.SEVERE, "Marathon requires authentication");
      return false;
    } else {
      LOG.log(Level.SEVERE, "Failed to submit topology");
      return false;
    }
  } finally {
    conn.disconnect();
  }
}
Submits a topology as a Marathon group, with its containers as apps in the group.
20,660
// Schedules a periodic task that watches the MetricsCache location published
// in the SingletonRegistry. When the location first appears or changes, the
// metricsCacheClientService is pointed at the new location and restarted.
private void startMetricsCacheChecker() {
  final int checkIntervalSec =
      TypeUtils.getInteger(sinkConfig.get(KEY_TMASTER_LOCATION_CHECK_INTERVAL_SEC));
  Runnable runnable = new Runnable() {
    public void run() {
      TopologyMaster.MetricsCacheLocation location =
          (TopologyMaster.MetricsCacheLocation) SingletonRegistry.INSTANCE.getSingleton(
              MetricsManagerServer.METRICSCACHE_LOCATION_BEAN_NAME);
      if (location != null) {
        // Restart the client only when the location is seen for the first
        // time or differs from the one currently in use.
        if (currentMetricsCacheLocation == null
            || !location.equals(currentMetricsCacheLocation)) {
          LOG.info("Update current MetricsCacheLocation to: " + location);
          currentMetricsCacheLocation = location;
          metricsCacheClientService.updateMetricsCacheLocation(currentMetricsCacheLocation);
          metricsCacheClientService.startNewMasterClient();
          sinkContext.exportCountMetric(METRICSMGR_LOCATION_UPDATE_COUNT, 1);
        }
      }
      // Re-schedule this task so the check repeats every checkIntervalSec seconds.
      tMasterLocationStarter.schedule(this, checkIntervalSec, TimeUnit.SECONDS);
    }
  };
  // Initial schedule; the runnable re-arms itself from then on.
  tMasterLocationStarter.schedule(runnable, checkIntervalSec, TimeUnit.SECONDS);
  LOG.info("MetricsCacheChecker started with interval: " + checkIntervalSec);
}
Periodically checks whether the MetricsCache location has changed; if so, restarts the metricsCacheClientService with the new MetricsCacheLocation.
20,661
/**
 * Loads properties from the given YAML file.
 *
 * @param fileName path of the YAML config file; may be null or empty
 * @return the parsed key/value map, or an empty map when the file name is
 *         null/empty, the file is missing or not a regular file, or reading fails
 */
@SuppressWarnings("unchecked")
public static Map<String, Object> loadFile(String fileName) {
  Map<String, Object> props = new HashMap<>();
  if (fileName == null) {
    LOG.warning("Config file name cannot be null");
    return props;
  } else if (fileName.isEmpty()) {
    LOG.warning("Config file name is empty");
    return props;
  } else {
    Path path = Paths.get(fileName);
    if (!Files.exists(path)) {
      LOG.fine("Config file " + fileName + " does not exist");
      return props;
    }
    if (!Files.isRegularFile(path)) {
      LOG.warning("Config file " + fileName + " might be a directory.");
      return props;
    }
    LOG.log(Level.FINE, "Reading config file {0}", fileName);
    Map<String, Object> propsYaml = null;
    // try-with-resources replaces the original manual try/finally close.
    try (FileInputStream fin = new FileInputStream(new File(fileName))) {
      Yaml yaml = new Yaml();
      propsYaml = (Map<String, Object>) yaml.load(fin);
      LOG.log(Level.FINE, "Successfully read config file {0}", fileName);
    } catch (IOException e) {
      LOG.log(Level.SEVERE, "Failed to load config file: " + fileName, e);
    }
    return propsYaml != null ? propsYaml : props;
  }
}
Load properties from the given YAML file
20,662
/**
 * Loads config from the given YAML stream, stringifying all top-level keys.
 *
 * @param inputStream stream containing a YAML document
 * @return map of key name to parsed value; empty when the document is empty
 */
@SuppressWarnings("unchecked")
public static Map<String, Object> loadStream(InputStream inputStream) {
  LOG.fine("Reading config stream");
  Yaml yaml = new Yaml();
  Map<Object, Object> propsYaml = (Map<Object, Object>) yaml.load(inputStream);
  LOG.fine("Successfully read config");
  Map<String, Object> typedMap = new HashMap<>();
  // BUG FIX: yaml.load returns null for an empty document; the original
  // code then NPE'd on propsYaml.keySet().
  if (propsYaml != null) {
    for (Map.Entry<Object, Object> entry : propsYaml.entrySet()) {
      typedMap.put(entry.getKey().toString(), entry.getValue());
    }
  }
  return typedMap;
}
Load config from the given YAML stream
20,663
/**
 * Retrieves and removes the head of the queue, or returns null if the queue
 * is empty. The producer (if registered) is woken afterwards.
 */
public E poll() {
  final E head = buffer.poll();
  if (producer != null) {
    producer.wakeUp();
  }
  return head;
}
Retrieves and removes the head of the queue, returning null if the queue is empty; wakes the producer afterwards.
20,664
/**
 * Inserts the given element. Since the underlying buffer is unbounded this
 * always returns true; the consumer (if registered) is woken afterwards.
 */
public boolean offer(E e) {
  buffer.offer(e);
  if (consumer != null) {
    consumer.wakeUp();
  }
  return true;
}
Since it is an unbounded queue the offer will always return true .
20,665
/**
 * Drains every available element into the given collection and returns the
 * number of elements transferred. The producer (if registered) is woken
 * afterwards, since the queue has been emptied.
 */
public int drainTo(Collection<? super E> c) {
  final int drained = buffer.drainTo(c);
  if (producer != null) {
    producer.wakeUp();
  }
  return drained;
}
Removes all available elements from this queue and adds them to the given collection . This operation may be more efficient than repeatedly polling this queue . A failure encountered while attempting to add elements to collection c may result in elements being in neither either or both collections when the associated exception is thrown . Attempts to drain a queue to itself result in IllegalArgumentException . Further the behavior of this operation is undefined if the specified collection is modified while the operation is in progress .
20,666
// Builds the Nomad task spec that runs a Heron executor via the "docker"
// driver. The container runs a single shell command that downloads the
// topology package, writes the metrics port file, then starts the executor.
Task getTaskSpecDockerDriver(Task task, String taskName, int containerIndex) {
  String executorBinary = Context.executorBinary(this.clusterConfig);
  String[] executorArgs = SchedulerUtils.executorCommandArgs(
      this.clusterConfig, this.runtimeConfig, NomadConstants.EXECUTOR_PORTS,
      String.valueOf(containerIndex));
  String executorCmd = executorBinary + " " + String.join(" ", executorArgs);
  // NOTE(review): the raw-exec variant passes localConfig as the first
  // argument here, but this passes clusterConfig — confirm this is intentional.
  String topologyDownloadCmd =
      getFetchCommand(this.clusterConfig, this.clusterConfig, this.runtimeConfig);
  task.setName(taskName);
  task.setDriver(NomadConstants.NomadDriver.DOCKER.getName());
  task.addConfig(NomadConstants.NOMAD_IMAGE,
      NomadContext.getHeronExecutorDockerImage(this.localConfig));
  task.addConfig(NomadConstants.NOMAD_TASK_COMMAND, NomadConstants.SHELL_CMD);
  task.addConfig(NomadConstants.NETWORK_MODE,
      NomadContext.getHeronNomadNetworkMode(this.localConfig));
  String setMetricsPortFileCmd = getSetMetricsPortFileCmd();
  // Chain download, port-file setup and executor start in one shell -c invocation.
  String[] args = {"-c",
      String.format("%s && %s && %s", topologyDownloadCmd, setMetricsPortFileCmd, executorCmd)};
  task.addConfig(NomadConstants.NOMAD_TASK_COMMAND_ARGS, args);
  // Expose the Nomad client's IP address to the container via the HOST env var.
  Map<String, String> envVars = new HashMap<>();
  envVars.put(NomadConstants.HOST, "${attr.unique.network.ip-address}");
  task.setEnv(envVars);
  return task;
}
Gets the task spec for the Docker driver in Nomad. In Docker mode, Heron runs inside Docker containers.
20,667
// Builds the Nomad task spec that runs a Heron executor directly on the host
// via the "raw_exec" driver. The startup logic lives in an embedded shell
// script delivered as a Nomad template; configuration is passed to the
// script through environment variables.
Task getTaskSpecRawDriver(Task task, String taskName, int containerIndex) {
  String executorBinary = Context.executorBinary(this.clusterConfig);
  String[] executorArgs = SchedulerUtils.executorCommandArgs(
      this.clusterConfig, this.runtimeConfig, NomadConstants.EXECUTOR_PORTS,
      String.valueOf(containerIndex));
  String executorCmd = executorBinary + " " + String.join(" ", executorArgs);
  String topologyDownloadCmd =
      getFetchCommand(this.localConfig, this.clusterConfig, this.runtimeConfig);
  String heronNomadScript = getHeronNomadScript(this.localConfig);
  task.setName(taskName);
  task.setDriver(NomadConstants.NomadDriver.RAW_EXEC.getName());
  task.addConfig(NomadConstants.NOMAD_TASK_COMMAND, NomadConstants.SHELL_CMD);
  String[] args = {NomadConstants.NOMAD_HERON_SCRIPT_NAME};
  task.addConfig(NomadConstants.NOMAD_TASK_COMMAND_ARGS, args);
  // Deliver the startup script as a template rendered next to the task.
  Template template = new Template();
  template.setEmbeddedTmpl(heronNomadScript);
  template.setDestPath(NomadConstants.NOMAD_HERON_SCRIPT_NAME);
  task.addTemplates(template);
  // Build one port entry per executor port; '-' is replaced since it is not
  // allowed in port labels.
  // NOTE(review): this 'ports' array is populated but never attached to the
  // task — confirm whether this is dead code or a missing call.
  Port[] ports = new Port[NomadConstants.EXECUTOR_PORTS.size()];
  int i = 0;
  for (SchedulerUtils.ExecutorPort port : NomadConstants.EXECUTOR_PORTS.keySet()) {
    ports[i] = new Port().setLabel(port.getName().replace("-", "_"));
    i++;
  }
  // Environment variables consumed by the embedded script.
  Map<String, String> envVars = new HashMap<>();
  envVars.put(NomadConstants.HERON_NOMAD_WORKING_DIR,
      NomadContext.workingDirectory(this.localConfig) + "/container-"
          + String.valueOf(containerIndex));
  if (NomadContext.useCorePackageUri(this.localConfig)) {
    // heron-core is fetched from a URI at runtime.
    envVars.put(NomadConstants.HERON_USE_CORE_PACKAGE_URI, "true");
    envVars.put(NomadConstants.HERON_CORE_PACKAGE_URI,
        NomadContext.corePackageUri(this.localConfig));
  } else {
    // heron-core is taken from a directory already present on the host.
    envVars.put(NomadConstants.HERON_USE_CORE_PACKAGE_URI, "false");
    envVars.put(NomadConstants.HERON_CORE_PACKAGE_DIR,
        NomadContext.corePackageDirectory(this.localConfig));
  }
  envVars.put(NomadConstants.HERON_TOPOLOGY_DOWNLOAD_CMD, topologyDownloadCmd);
  envVars.put(NomadConstants.HERON_EXECUTOR_CMD, executorCmd);
  task.setEnv(envVars);
  return task;
}
Get the task spec for using raw_exec driver in Nomad In raw exec mode Heron will be run directly on the machine
20,668
/**
 * Builds the shell command used to fetch the topology package:
 * downloader -u &lt;uri&gt; -f . -m local -p &lt;heron.conf&gt; -d &lt;heron.home&gt;
 */
static String getFetchCommand(Config localConfig, Config clusterConfig, Config runtime) {
  final String downloader = Context.downloaderBinary(clusterConfig);
  final String packageUri = Runtime.topologyPackageUri(runtime).toString();
  final String heronConf = Context.heronConf(localConfig);
  final String heronHome = Context.heronHome(clusterConfig);
  return String.format("%s -u %s -f . -m local -p %s -d %s",
      downloader, packageUri, heronConf, heronHome);
}
Get the command that will be used to retrieve the topology JAR
20,669
/**
 * Builds the GCS storage object name for a topology artifact:
 * "&lt;topologyName&gt;/&lt;filename&gt;".
 */
private static String generateStorageObjectName(String topologyName, String filename) {
  return topologyName + "/" + filename;
}
Generate the storage object name in gcs given the topologyName and filename .
20,670
// Returns the download URL for a GCS object in the given bucket, built from
// the GCS_URL_FORMAT template.
private static String getDownloadUrl(String bucket, String objectName) {
  return String.format(GCS_URL_FORMAT, bucket, objectName);
}
Returns a URL to download a GCS object, given the bucket and object name.
20,671
/**
 * Sets up the working directory: cleans/creates it, then downloads and
 * extracts both the heron-core release and the topology package into it.
 *
 * @return true only if the directory was prepared and both packages extracted
 */
protected boolean setupWorkingDirectory() {
  String corePackageUri = SlurmContext.corePackageUri(config);
  String coreDestination =
      Paths.get(topologyWorkingDirectory, "heron-core.tar.gz").toString();
  String topologyPackageUri = Runtime.topologyPackageUri(runtime).toString();
  String topologyDestination =
      Paths.get(topologyWorkingDirectory, "topology.tar.gz").toString();
  if (!SchedulerUtils.createOrCleanDirectory(topologyWorkingDirectory)) {
    return false;
  }
  final boolean verbose = Context.verbose(config);
  // Short-circuits: the topology package is only extracted if heron-core succeeded.
  return SchedulerUtils.extractPackage(
          topologyWorkingDirectory, corePackageUri, coreDestination, true, verbose)
      && SchedulerUtils.extractPackage(
          topologyWorkingDirectory, topologyPackageUri, topologyDestination, true, verbose);
}
Sets up the working directory: downloads and extracts the heron-core release and the topology package into it.
20,672
// Registers all JVM-level gauges with the given metrics collector, using the
// system-wide metrics export interval as the reporting period for each one.
public void registerMetrics(MetricsCollector metricsCollector) {
  SystemConfig systemConfig = (SystemConfig)
      SingletonRegistry.INSTANCE.getSingleton(SystemConfig.HERON_SYSTEM_CONFIG);
  // All metrics share the configured export interval (seconds).
  int interval = (int) systemConfig.getHeronMetricsExportInterval().getSeconds();
  // Garbage-collection metrics.
  metricsCollector.registerMetric("__jvm-gc-collection-time-ms", jvmGCTimeMs, interval);
  metricsCollector.registerMetric("__jvm-gc-collection-count", jvmGCCount, interval);
  metricsCollector.registerMetric("__jvm-gc-time-ms", jvmGCTimeMsPerGCType, interval);
  metricsCollector.registerMetric("__jvm-gc-count", jvmGCCountPerGCType, interval);
  // Process / thread metrics.
  metricsCollector.registerMetric("__jvm-uptime-secs", jvmUpTimeSecs, interval);
  metricsCollector.registerMetric("__jvm-thread-count", jvmThreadCount, interval);
  metricsCollector.registerMetric("__jvm-daemon-thread-count", jvmDaemonThreadCount, interval);
  metricsCollector.registerMetric("__jvm-process-cpu-time-nanos", processCPUTimeNs, interval);
  metricsCollector.registerMetric("__jvm-threads-cpu-time-nanos", threadsCPUTimeNs, interval);
  metricsCollector.registerMetric(
      "__jvm-other-threads-cpu-time-nanos", otherThreadsCPUTimeNs, interval);
  metricsCollector.registerMetric(
      "__jvm-threads-user-cpu-time-nanos", threadsUserCPUTimeNs, interval);
  metricsCollector.registerMetric(
      "__jvm-other-threads-user-cpu-time-nanos", otherThreadsUserCPUTimeNs, interval);
  metricsCollector.registerMetric("__jvm-process-cpu-load", processCPULoad, interval);
  // File-descriptor metrics.
  metricsCollector.registerMetric("__jvm-fd-count", fdCount, interval);
  metricsCollector.registerMetric("__jvm-fd-limit", fdLimit, interval);
  // Memory metrics (all values in MB).
  metricsCollector.registerMetric("__jvm-memory-free-mb", jvmMemoryFreeMB, interval);
  metricsCollector.registerMetric("__jvm-memory-used-mb", jvmMemoryUsedMB, interval);
  metricsCollector.registerMetric("__jvm-memory-mb-total", jvmMemoryTotalMB, interval);
  metricsCollector.registerMetric("__jvm-memory-heap-mb-used", jvmMemoryHeapUsedMB, interval);
  metricsCollector.registerMetric(
      "__jvm-memory-heap-mb-committed", jvmMemoryHeapCommittedMB, interval);
  metricsCollector.registerMetric("__jvm-memory-heap-mb-max", jvmMemoryHeapMaxMB, interval);
  metricsCollector.registerMetric(
      "__jvm-memory-non-heap-mb-used", jvmMemoryNonHeapUsedMB, interval);
  metricsCollector.registerMetric(
      "__jvm-memory-non-heap-mb-committed", jvmMemoryNonHeapCommittedMB, interval);
  metricsCollector.registerMetric(
      "__jvm-memory-non-heap-mb-max", jvmMemoryNonHeapMaxMB, interval);
  // Per-memory-pool and buffer-pool metrics.
  metricsCollector.registerMetric("__jvm-peak-usage", jvmPeakUsagePerMemoryPool, interval);
  metricsCollector.registerMetric(
      "__jvm-collection-usage", jvmCollectionUsagePerMemoryPool, interval);
  metricsCollector.registerMetric(
      "__jvm-estimated-usage", jvmEstimatedUsagePerMemoryPool, interval);
  metricsCollector.registerMetric("__jvm-buffer-pool", jvmBufferPoolMemoryUsage, interval);
}
Register metrics with the metrics collector
20,673
/**
 * Publishes memory-used, total-capacity and count gauges for every JVM
 * buffer pool; useful for diagnosing native memory usage. Pool names are
 * normalized so that non-word characters become '-'.
 */
private void updateBufferPoolMetrics() {
  for (BufferPoolMXBean pool : bufferPoolMXBeanList) {
    final String scope = pool.getName().replaceAll("[^\\w]", "-");
    jvmBufferPoolMemoryUsage.safeScope(scope + "-memory-used")
        .setValue(ByteAmount.fromBytes(pool.getMemoryUsed()).asMegabytes());
    jvmBufferPoolMemoryUsage.safeScope(scope + "-total-capacity")
        .setValue(ByteAmount.fromBytes(pool.getTotalCapacity()).asMegabytes());
    // NOTE(review): the buffer count is also routed through
    // ByteAmount.fromBytes(...).asMegabytes() even though it is not a byte
    // quantity — preserved as-is; confirm whether this is intentional.
    jvmBufferPoolMemoryUsage.safeScope(scope + "-count")
        .setValue(ByteAmount.fromBytes(pool.getCount()).asMegabytes());
  }
}
These metrics can be useful for diagnosing native memory usage .
20,674
// Publishes peak, collection and estimated usage (used/committed/max, in MB)
// for every JVM memory pool. Pool names are normalized so that non-word
// characters become '-' to form valid metric scopes.
private void updateMemoryPoolMetrics() {
  for (MemoryPoolMXBean memoryPoolMXBean : memoryPoolMXBeanList) {
    String normalizedKeyName = memoryPoolMXBean.getName().replaceAll("[^\\w]", "-");
    // Peak usage of the pool; may be null if unsupported.
    MemoryUsage peakUsage = memoryPoolMXBean.getPeakUsage();
    if (peakUsage != null) {
      jvmPeakUsagePerMemoryPool.safeScope(normalizedKeyName + "-used")
          .setValue(ByteAmount.fromBytes(peakUsage.getUsed()).asMegabytes());
      jvmPeakUsagePerMemoryPool.safeScope(normalizedKeyName + "-committed")
          .setValue(ByteAmount.fromBytes(peakUsage.getCommitted()).asMegabytes());
      jvmPeakUsagePerMemoryPool.safeScope(normalizedKeyName + "-max")
          .setValue(ByteAmount.fromBytes(peakUsage.getMax()).asMegabytes());
    }
    // Usage after the most recent garbage collection; may be null if unsupported.
    MemoryUsage collectionUsage = memoryPoolMXBean.getCollectionUsage();
    if (collectionUsage != null) {
      jvmCollectionUsagePerMemoryPool.safeScope(normalizedKeyName + "-used")
          .setValue(ByteAmount.fromBytes(collectionUsage.getUsed()).asMegabytes());
      jvmCollectionUsagePerMemoryPool.safeScope(normalizedKeyName + "-committed")
          .setValue(ByteAmount.fromBytes(collectionUsage.getCommitted()).asMegabytes());
      jvmCollectionUsagePerMemoryPool.safeScope(normalizedKeyName + "-max")
          .setValue(ByteAmount.fromBytes(collectionUsage.getMax()).asMegabytes());
    }
    // Current (estimated) usage snapshot of the pool.
    MemoryUsage estimatedUsage = memoryPoolMXBean.getUsage();
    if (estimatedUsage != null) {
      jvmEstimatedUsagePerMemoryPool.safeScope(normalizedKeyName + "-used")
          .setValue(ByteAmount.fromBytes(estimatedUsage.getUsed()).asMegabytes());
      jvmEstimatedUsagePerMemoryPool.safeScope(normalizedKeyName + "-committed")
          .setValue(ByteAmount.fromBytes(estimatedUsage.getCommitted()).asMegabytes());
      jvmEstimatedUsagePerMemoryPool.safeScope(normalizedKeyName + "-max")
          .setValue(ByteAmount.fromBytes(estimatedUsage.getMax()).asMegabytes());
    }
  }
}
Covers memory pools such as Par Eden Space, Par Survivor Space, CMS Old Gen, and CMS Perm Gen.
20,675
/**
 * Updates the open and maximum file-descriptor gauges. These counters are
 * only exposed on Unix-like platforms; elsewhere this is a no-op.
 */
private void updateFdMetrics() {
  if (!(osMbean instanceof com.sun.management.UnixOperatingSystemMXBean)) {
    return;
  }
  com.sun.management.UnixOperatingSystemMXBean unixBean =
      (com.sun.management.UnixOperatingSystemMXBean) osMbean;
  fdCount.setValue(unixBean.getOpenFileDescriptorCount());
  fdLimit.setValue(unixBean.getMaxFileDescriptorCount());
}
Update file descriptor metrics
20,676
/**
 * Kills the Aurora job for this job spec by invoking "aurora job killall".
 *
 * @return true if the aurora process exited successfully
 */
public boolean killJob() {
  List<String> cmd = new ArrayList<>();
  cmd.add("aurora");
  cmd.add("job");
  cmd.add("killall");
  cmd.add(jobSpec);
  appendAuroraCommandOptions(cmd, isVerbose);
  return runProcess(cmd);
}
Kill an aurora job
20,677
/**
 * Restarts the Aurora job, or only a single container of it.
 *
 * @param containerId container to restart, or null to restart the whole job
 * @return true if the aurora process exited successfully
 */
public boolean restart(Integer containerId) {
  List<String> cmd = new ArrayList<>(Arrays.asList("aurora", "job", "restart"));
  // Target a single instance ("jobSpec/id") when a container id is given.
  final String target =
      (containerId == null) ? jobSpec : String.format("%s/%d", jobSpec, containerId);
  cmd.add(target);
  appendAuroraCommandOptions(cmd, isVerbose);
  return runProcess(cmd);
}
Restart an aurora job
20,678
/**
 * Appends shared aurora CLI options: --verbose when requested, and the
 * maximum batch size so operations are not throttled into small batches.
 */
private static void appendAuroraCommandOptions(List<String> auroraCmd, boolean isVerbose) {
  if (isVerbose) {
    auroraCmd.add("--verbose");
  }
  auroraCmd.add("--batch-size");
  auroraCmd.add(String.valueOf(Integer.MAX_VALUE));
}
Static method to append verbose and batching options if needed
20,679
/**
 * Extracts the names of all bolt components from the physical plan's topology.
 */
public List<String> getBoltNames(PhysicalPlan pp) {
  List<String> names = new ArrayList<>();
  for (TopologyAPI.Bolt bolt : pp.getTopology().getBoltsList()) {
    names.add(bolt.getComp().getName());
  }
  return names;
}
A utility method to extract bolt component names from the topology .
20,680
/**
 * Extracts the names of all spout components from the physical plan's topology.
 */
public List<String> getSpoutNames(PhysicalPlan pp) {
  List<String> names = new ArrayList<>();
  for (TopologyAPI.Spout spout : pp.getTopology().getSpoutsList()) {
    names.add(spout.getComp().getName());
  }
  return names;
}
A utility method to extract spout component names from the topology .
20,681
// Builds a heron Config from an ECO topology definition. Component resource
// and JVM-option entries receive special handling; every other entry is
// copied through unchanged.
public Config buildConfig(EcoTopologyDefinition topologyDefinition)
    throws IllegalArgumentException {
  Map<String, Object> configMap = topologyDefinition.getConfig();
  Config config = new Config();
  for (Map.Entry<String, Object> entry : configMap.entrySet()) {
    if (entry.getKey().equals(COMPONENT_RESOURCE_MAP)) {
      setComponentLevelResource(config, entry);
    } else if (entry.getKey().equals(COMPONENT_JVM_OPTIONS)) {
      // Each list element is the string form of a map like {id=..., options=[...]}.
      List<Object> objects = (List<Object>) entry.getValue();
      for (Object obj : objects) {
        String objString = obj.toString();
        // Strip the surrounding braces before scanning for the id/options keys.
        objString = objString.replace(LEFT_BRACE, WHITESPACE);
        objString = objString.replace(RIGHT_BRACE, WHITESPACE);
        int idIndex = objString.indexOf(ID);
        int optionsIndex = objString.indexOf(OPTIONS);
        String id = getIdValue(objString, idIndex);
        String jvmOptions;
        if (optionsIndex != -1) {
          // Take everything after the '=' that follows "options", minus the
          // list brackets.
          int equalsIndex = objString.indexOf(EQUALS, optionsIndex);
          jvmOptions = objString.substring(equalsIndex + 1, objString.length());
          jvmOptions = jvmOptions.replace(LEFT_BRACKET, "").replace(RIGHT_BRACKET, "");
        } else {
          throw new IllegalArgumentException(
              "You must specify the JVM options for your component");
        }
        config.setComponentJvmOptions(id, jvmOptions);
      }
    } else {
      config.put(entry.getKey(), entry.getValue());
    }
  }
  return config;
}
Build the config for a ECO topology definition
20,682
/**
 * Verifies that a topology definition is well-formed: it has a legal name,
 * a parseable component RAM map, and every bolt input stream refers to a
 * stream that some spout or bolt actually produces.
 *
 * @param topology the topology to validate
 * @return true if the topology passes all checks, false otherwise
 */
public static boolean verifyTopology(TopologyAPI.Topology topology) {
  if (!topology.hasName() || topology.getName().isEmpty()) {
    LOG.severe("Missing topology name");
    return false;
  }
  final String name = topology.getName();
  if (name.contains(".") || name.contains("/")) {
    LOG.severe("Invalid topology name. Topology name shouldn't have . or /");
    return false;
  }
  // Parse the component RAM map; a malformed map raises here.
  getComponentRamMapConfig(topology);
  // Collect every stream produced by any spout or bolt, keyed "component/streamId".
  Set<String> outputStreams = new HashSet<>();
  for (TopologyAPI.Spout spout : topology.getSpoutsList()) {
    for (TopologyAPI.OutputStream out : spout.getOutputsList()) {
      outputStreams.add(out.getStream().getComponentName() + "/" + out.getStream().getId());
    }
  }
  for (TopologyAPI.Bolt bolt : topology.getBoltsList()) {
    for (TopologyAPI.OutputStream out : bolt.getOutputsList()) {
      outputStreams.add(out.getStream().getComponentName() + "/" + out.getStream().getId());
    }
  }
  // Every bolt input must match one of the produced streams.
  for (TopologyAPI.Bolt bolt : topology.getBoltsList()) {
    for (TopologyAPI.InputStream in : bolt.getInputsList()) {
      String key = in.getStream().getComponentName() + "/" + in.getStream().getId();
      if (!outputStreams.contains(key)) {
        LOG.severe("Invalid input stream " + key + " existing streams are " + outputStreams);
        return false;
      }
    }
  }
  return true;
}
Verify if the given topology has all the necessary information
20,683
/**
 * Parses Config.TOPOLOGY_COMPONENT_CPUMAP into a map of component name to
 * requested CPU. Components without an entry are absent; an empty map is
 * returned when the config is not set.
 */
public static Map<String, Double> getComponentCpuMapConfig(TopologyAPI.Topology topology)
    throws RuntimeException {
  Map<String, Double> cpuMap = new HashMap<>();
  for (Map.Entry<String, String> entry
      : getComponentConfigMap(topology, Config.TOPOLOGY_COMPONENT_CPUMAP).entrySet()) {
    cpuMap.put(entry.getKey(), Double.parseDouble(entry.getValue()));
  }
  return cpuMap;
}
Parses the value in Config.TOPOLOGY_COMPONENT_CPUMAP and returns a map containing only the components specified. Returns an empty map if the config is not set.
20,684
/**
 * Parses Config.TOPOLOGY_COMPONENT_RAMMAP into a map of component name to
 * requested RAM. Components without an entry are absent; an empty map is
 * returned when the config is not set.
 */
public static Map<String, ByteAmount> getComponentRamMapConfig(TopologyAPI.Topology topology)
    throws RuntimeException {
  Map<String, ByteAmount> ramMap = new HashMap<>();
  for (Map.Entry<String, String> entry
      : getComponentConfigMap(topology, Config.TOPOLOGY_COMPONENT_RAMMAP).entrySet()) {
    ramMap.put(entry.getKey(), ByteAmount.fromBytes(Long.parseLong(entry.getValue())));
  }
  return ramMap;
}
Parses the value in Config.TOPOLOGY_COMPONENT_RAMMAP and returns a map containing only the components specified. Returns an empty map if the config is not set.
20,685
/**
 * Parses Config.TOPOLOGY_COMPONENT_DISKMAP into a map of component name to
 * requested disk. Components without an entry are absent; an empty map is
 * returned when the config is not set.
 */
public static Map<String, ByteAmount> getComponentDiskMapConfig(TopologyAPI.Topology topology)
    throws RuntimeException {
  Map<String, ByteAmount> diskMap = new HashMap<>();
  for (Map.Entry<String, String> entry
      : getComponentConfigMap(topology, Config.TOPOLOGY_COMPONENT_DISKMAP).entrySet()) {
    diskMap.put(entry.getKey(), ByteAmount.fromBytes(Long.parseLong(entry.getValue())));
  }
  return diskMap;
}
Parses the value in Config.TOPOLOGY_COMPONENT_DISKMAP and returns a map containing only the components specified. Returns an empty map if the config is not set.
20,686
// Scales the topology out or in based on the proposed packing plan. Takes a
// distributed lock so only one actor updates the topology at a time, and
// verifies the caller's view of the current packing plan still matches the
// one in the state manager before applying the change.
public void updateTopology(final PackingPlans.PackingPlan existingProtoPackingPlan,
    final PackingPlans.PackingPlan proposedProtoPackingPlan)
    throws ExecutionException, InterruptedException, ConcurrentModificationException {
  String topologyName = Runtime.topologyName(runtime);
  SchedulerStateManagerAdaptor stateManager = Runtime.schedulerStateManagerAdaptor(runtime);
  Lock lock = stateManager.getLock(topologyName, IStateManager.LockName.UPDATE_TOPOLOGY);
  // Bounded wait: fail fast instead of blocking forever behind another update.
  if (lock.tryLock(5, TimeUnit.SECONDS)) {
    try {
      PackingPlans.PackingPlan foundPackingPlan = getPackingPlan(stateManager, topologyName);
      // Optimistic-concurrency check: compare deserialized plans rather than
      // raw proto messages.
      if (!deserializer.fromProto(existingProtoPackingPlan)
          .equals(deserializer.fromProto(foundPackingPlan))) {
        throw new ConcurrentModificationException(String.format(
            "The packing plan in state manager is not the same as the submitted existing "
                + "packing plan for topology %s. Another actor has changed it and has likely"
                + "performed an update on it. Failing this request, try again once other "
                + "update is complete", topologyName));
      }
      updateTopology(existingProtoPackingPlan, proposedProtoPackingPlan, stateManager);
    } finally {
      lock.unlock();
    }
  } else {
    throw new ConcurrentModificationException(String.format(
        "The update lock can not be obtained for topology %s. Another actor is performing an "
            + "update on it. Failing this request, try again once current update is complete",
        topologyName));
  }
}
Scales the topology out or in based on the proposedPackingPlan
20,687
/**
 * Defines a new spout in this topology with the specified parallelism hint
 * and registers it under the given id.
 *
 * @param id unique component id for the spout
 * @param spout the spout implementation
 * @param parallelismHint requested number of tasks for this component
 * @return the declarer for further configuration of this spout
 */
public SpoutDeclarer setSpout(String id, IRichSpout spout, Number parallelismHint) {
  validateComponentName(id);
  SpoutDeclarer declarer = new SpoutDeclarer(id, spout, parallelismHint);
  spouts.put(id, declarer);
  return declarer;
}
Define a new spout in this topology with the specified parallelism . If the spout declares itself as non - distributed the parallelismHint will be ignored and only one task will be allocated to this component .
20,688
// Detects components whose per-instance averages for the tracked metric are
// skewed: the max instance average exceeds skewRatio times the min. Emits a
// component-level symptom, plus POSITIVE/NEGATIVE instance-level symptoms
// for instances near the max (>= 90% of it) or near the min (<= 110% of it).
public Collection<Symptom> detect(Collection<Measurement> measurements) {
  Collection<Symptom> result = new ArrayList<>();
  MeasurementsTable metrics = MeasurementsTable.of(measurements).type(metricName);
  Instant now = context.checkpoint();
  for (String component : metrics.uniqueComponents()) {
    Set<String> addresses = new HashSet<>();
    Set<String> positiveAddresses = new HashSet<>();
    Set<String> negativeAddresses = new HashSet<>();
    double componentMax = getMaxOfAverage(metrics.component(component));
    double componentMin = getMinOfAverage(metrics.component(component));
    // Skew is declared when max > skewRatio * min across the component's instances.
    if (componentMax > skewRatio * componentMin) {
      addresses.add(component);
      result.add(new Symptom(symptomType.text(), now, addresses));
      for (String instance : metrics.component(component).uniqueInstances()) {
        // Instances close to the component max are "positive" outliers...
        if (metrics.instance(instance).mean() >= 0.90 * componentMax) {
          positiveAddresses.add(instance);
        }
        // ...and those close to the component min are "negative" outliers.
        if (metrics.instance(instance).mean() <= 1.10 * componentMin) {
          negativeAddresses.add(instance);
        }
      }
      if (!positiveAddresses.isEmpty()) {
        result.add(new Symptom("POSITIVE " + symptomType.text(), now, positiveAddresses));
      }
      if (!negativeAddresses.isEmpty()) {
        result.add(new Symptom("NEGATIVE " + symptomType.text(), now, negativeAddresses));
      }
    }
  }
  return result;
}
Detects components experiencing skew on a specific metric
20,689
/**
 * Launches the topology on the local cluster: prepares the working directory,
 * extracts the packages, and starts the scheduler process.
 *
 * @param packing the packing plan for the topology (not used directly here)
 * @return true if the scheduler process was started successfully
 */
public boolean launch(PackingPlan packing) {
  LOG.log(Level.FINE, "Launching topology for local cluster {0}",
      LocalContext.cluster(config));
  if (!setupWorkingDirectoryAndExtractPackages()) {
    LOG.severe("Failed to setup working directory");
    return false;
  }
  String[] schedulerCmd = getSchedulerCommand();
  Process schedulerProcess = startScheduler(schedulerCmd);
  if (schedulerProcess == null) {
    LOG.severe("Failed to start SchedulerMain using: " + Arrays.toString(schedulerCmd));
    return false;
  }
  LOG.log(Level.FINE, String.format(
      "To check the status and logs of the topology, use the working directory %s",
      LocalContext.workingDirectory(config)));
  return true;
}
Launch the topology
20,690
// Starts the MetricsCache manager: instantiates the configured state
// manager, registers this MetricsCache's location with it, then starts the
// HTTP server and the MetricsCache server and enters the server event loop.
public void start() throws Exception {
  String statemgrClass = Context.stateManagerClass(config);
  LOG.info("Context.stateManagerClass " + statemgrClass);
  IStateManager statemgr;
  try {
    statemgr = ReflectionUtils.newInstance(statemgrClass);
  } catch (IllegalAccessException | InstantiationException | ClassNotFoundException e) {
    throw new Exception(
        String.format("Failed to instantiate state manager class '%s'", statemgrClass), e);
  }
  try {
    statemgr.initialize(config);
    // Publish this MetricsCache's location; bounded wait so startup cannot hang.
    Boolean b = statemgr.setMetricsCacheLocation(metricsCacheLocation, topologyName)
        .get(5000, TimeUnit.MILLISECONDS);
    if (b != null && b) {
      LOG.info("metricsCacheLocation " + metricsCacheLocation.toString());
      LOG.info("topologyName " + topologyName.toString());
      LOG.info("Starting Metrics Cache HTTP Server");
      metricsCacheManagerHttpServer.start();
      LOG.info("Starting Metrics Cache Server");
      metricsCacheManagerServer.start();
      // Blocks here, driving the server until shutdown.
      metricsCacheManagerServerLoop.loop();
    } else {
      throw new RuntimeException("Failed to set metricscahe location.");
    }
  } finally {
    // Always release the state manager, even on failure.
    SysUtils.closeIgnoringExceptions(statemgr);
  }
}
Starts the state manager client, the MetricsCache server, and the HTTP server.
20,691
/**
 * Builds the S3 key for a topology artifact:
 * "&lt;pathPrefixParent&gt;/&lt;topologyName&gt;/&lt;filename&gt;".
 */
private String generateS3Path(String pathPrefixParent, String topologyName, String filename) {
  // Splitting and rejoining the prefix drops any trailing '/' before the
  // topology name and file name are appended.
  String normalizedPrefix = String.join("/", pathPrefixParent.split("/"));
  return String.join("/", normalizedPrefix, topologyName, filename);
}
Generate the path to a file in s3 given a prefix topologyName and filename
20,692
/**
 * Submits a topology to the simulator and starts running it on local threads.
 *
 * Verifies the topology, rejects stateful topologies (unsupported here),
 * builds the physical plan, wires one InstanceExecutor per instance into the
 * stream and metrics executors, and schedules all executors on the pool.
 *
 * @param name the topology name
 * @param heronConfig the topology configuration
 * @param heronTopology the topology to run
 * @throws RuntimeException if the topology is malformed or stateful
 */
public void submitTopology(String name, Config heronConfig, HeronTopology heronTopology) {
  TopologyAPI.Topology topology = heronTopology
      .setConfig(heronConfig)
      .setName(name)
      .setState(TopologyAPI.TopologyState.RUNNING)
      .getTopology();

  if (!TopologyUtils.verifyTopology(topology)) {
    throw new RuntimeException("Topology object is Malformed");
  }
  if (isTopologyStateful(heronConfig)) {
    throw new RuntimeException("Stateful topology is not supported");
  }

  TopologyManager manager = new TopologyManager(topology);
  LOG.info("Physical Plan: \n" + manager.getPhysicalPlan());

  streamExecutor = new StreamExecutor(manager);
  metricsExecutor = new MetricsExecutor(systemConfig);

  // One executor per instance, registered with both stream and metrics executors.
  for (PhysicalPlans.Instance instance : manager.getPhysicalPlan().getInstancesList()) {
    InstanceExecutor executor =
        new InstanceExecutor(manager.getPhysicalPlan(), instance.getInstanceId());
    streamExecutor.addInstanceExecutor(executor);
    metricsExecutor.addInstanceExecutor(executor);
    instanceExecutors.add(executor);
  }

  Thread.setDefaultUncaughtExceptionHandler(new DefaultExceptionHandler());

  // Start the metrics and stream executors first, then the instances.
  threadsPool.execute(metricsExecutor);
  threadsPool.execute(streamExecutor);
  for (InstanceExecutor executor : instanceExecutors) {
    threadsPool.execute(executor);
  }
}
Submit and run topology in simulator
20,693
/**
 * Converts a Heron {@link Config} into a {@code TopologyAPI.Config.Builder}.
 *
 * Keys listed in the config's API vars are stored as their string
 * representation; all other values are Java-serialized. Entries with a null
 * key or a null value are skipped with a warning.
 *
 * @param config the Heron config to convert
 * @return a builder holding one key-value pair per usable config entry
 */
public static TopologyAPI.Config.Builder getConfigBuilder(Config config) {
  TopologyAPI.Config.Builder builder = TopologyAPI.Config.newBuilder();
  Set<String> apiVars = config.getApiVars();

  for (String key : config.keySet()) {
    if (key == null) {
      LOG.warning("ignore: null config key found");
      continue;
    }
    Object value = config.get(key);
    if (value == null) {
      LOG.warning("ignore: config key " + key + " has null value");
      continue;
    }

    TopologyAPI.Config.KeyValue.Builder kv = TopologyAPI.Config.KeyValue.newBuilder();
    kv.setKey(key);
    if (apiVars.contains(key)) {
      // API-level variables are passed through as plain strings.
      kv.setType(TopologyAPI.ConfigValueType.STRING_VALUE);
      kv.setValue(value.toString());
    } else {
      // Everything else round-trips through Java serialization.
      kv.setType(TopologyAPI.ConfigValueType.JAVA_SERIALIZED_VALUE);
      kv.setSerializedValue(ByteString.copyFrom(serialize(value)));
    }
    builder.addKvs(kv);
  }
  return builder;
}
Converts a Heron Config object into a TopologyAPI . Config . Builder . Config entries with null keys or values are ignored .
20,694
/**
 * Computes the per-dimension maximum resource over all containers in the plan.
 *
 * For each container the scheduled resource is used when present, falling
 * back to the required resource. The result combines the largest CPU, RAM
 * and disk values seen — possibly taken from different containers.
 *
 * @return a Resource holding the maximum cpu, ram and disk of any container
 */
public Resource getMaxContainerResources() {
  double cpuMax = 0;
  ByteAmount ramMax = ByteAmount.ZERO;
  ByteAmount diskMax = ByteAmount.ZERO;

  for (ContainerPlan container : getContainers()) {
    Resource resource = container.getScheduledResource().or(container.getRequiredResource());
    cpuMax = Math.max(cpuMax, resource.getCpu());
    ramMax = ramMax.max(resource.getRam());
    diskMax = diskMax.max(resource.getDisk());
  }
  return new Resource(cpuMax, ramMax, diskMax);
}
Computes the maximum of all the resources required by the containers in the packing plan . If the PackingPlan has already been scheduled the scheduled resources will be used over the required resources .
20,695
/**
 * Returns the number of instances of each component across the whole plan,
 * keyed by component name.
 *
 * @return map from component name to its instance count
 */
public Map<String, Integer> getComponentCounts() {
  Map<String, Integer> componentCounts = new HashMap<>();
  for (ContainerPlan containerPlan : getContainers()) {
    for (InstancePlan instancePlan : containerPlan.getInstances()) {
      // merge() replaces the manual containsKey/get/put sequence and the
      // boxed ++count increment.
      componentCounts.merge(instancePlan.getComponentName(), 1, Integer::sum);
    }
  }
  return componentCounts;
}
Return a map containing the count of all of the components keyed by name
20,696
/**
 * Returns a formatted string describing the RAM assigned to each component,
 * in the "component:bytes,component:bytes" form consumed by the executor.
 *
 * When a component appears in multiple containers with different RAM values,
 * the smallest value is reported.
 *
 * @return the comma-separated component:ram list, or an empty string if the
 *     plan contains no instances
 */
public String getComponentRamDistribution() {
  Map<String, ByteAmount> ramMap = new HashMap<>();
  for (ContainerPlan containerPlan : this.getContainers()) {
    for (InstancePlan instancePlan : containerPlan.getInstances()) {
      ByteAmount newRam = instancePlan.getResource().getRam();
      ByteAmount currentRam = ramMap.get(instancePlan.getComponentName());
      // Keep the minimum RAM observed for each component.
      if (currentRam == null || currentRam.asBytes() > newRam.asBytes()) {
        ramMap.put(instancePlan.getComponentName(), newRam);
      }
    }
  }

  StringBuilder ramMapBuilder = new StringBuilder();
  for (Map.Entry<String, ByteAmount> entry : ramMap.entrySet()) {
    ramMapBuilder.append(String.format("%s:%d,", entry.getKey(), entry.getValue().asBytes()));
  }
  // Guard the empty-plan case: the unconditional deleteCharAt(length - 1)
  // previously threw StringIndexOutOfBoundsException when ramMap was empty.
  if (ramMapBuilder.length() > 0) {
    ramMapBuilder.deleteCharAt(ramMapBuilder.length() - 1);
  }
  return ramMapBuilder.toString();
}
Gets the formatted String describing the per-component RAM distribution of the PackingPlan , in the form used by the executor .
20,697
/**
 * Declares a method or constructor on its declaring type and returns the
 * {@link Code} object used to emit its body.
 *
 * Supported flags are the visibility modifiers plus STATIC, FINAL,
 * SYNCHRONIZED, ACC_SYNTHETIC and ACC_BRIDGE. The source-level SYNCHRONIZED
 * bit is translated to the dex-level ACC_DECLARED_SYNCHRONIZED flag;
 * constructors and static initializers automatically receive ACC_CONSTRUCTOR.
 *
 * @param method the method or constructor to declare
 * @param flags modifier bit mask for the declaration
 * @return the Code of the newly declared method
 * @throws IllegalStateException if the method was already declared
 * @throws IllegalArgumentException if an unsupported flag bit is set
 */
public Code declare(MethodId<?, ?> method, int flags) {
  TypeDeclaration declaringType = getTypeDeclaration(method.declaringType);
  if (declaringType.methods.containsKey(method)) {
    throw new IllegalStateException("already declared: " + method);
  }

  int supportedFlags = Modifier.PUBLIC
      | Modifier.PRIVATE
      | Modifier.PROTECTED
      | Modifier.STATIC
      | Modifier.FINAL
      | Modifier.SYNCHRONIZED
      | AccessFlags.ACC_SYNTHETIC
      | AccessFlags.ACC_BRIDGE;
  if ((flags & ~supportedFlags) != 0) {
    throw new IllegalArgumentException("Unexpected flag: " + Integer.toHexString(flags));
  }

  // SYNCHRONIZED is expressed differently in the dex access flags.
  if ((flags & Modifier.SYNCHRONIZED) != 0) {
    flags = (flags & ~Modifier.SYNCHRONIZED) | AccessFlags.ACC_DECLARED_SYNCHRONIZED;
  }

  if (method.isConstructor() || method.isStaticInitializer()) {
    flags |= ACC_CONSTRUCTOR;
  }

  MethodDeclaration declaration = new MethodDeclaration(method, flags);
  declaringType.methods.put(method, declaration);
  return declaration.code;
}
Declares a method or constructor .
20,698
/**
 * Declares a field on its declaring type.
 *
 * Supported flags are the visibility modifiers plus STATIC, FINAL, VOLATILE,
 * TRANSIENT and ACC_SYNTHETIC. A non-null {@code staticValue} is only valid
 * for static fields.
 *
 * @param fieldId the field to declare
 * @param flags modifier bit mask for the declaration
 * @param staticValue initial value for a static field, or null
 * @throws IllegalStateException if the field was already declared
 * @throws IllegalArgumentException if an unsupported flag bit is set, or if
 *     staticValue is non-null for a non-static field
 */
public void declare(FieldId<?, ?> fieldId, int flags, Object staticValue) {
  TypeDeclaration declaringType = getTypeDeclaration(fieldId.declaringType);
  if (declaringType.fields.containsKey(fieldId)) {
    throw new IllegalStateException("already declared: " + fieldId);
  }

  int supportedFlags = Modifier.PUBLIC
      | Modifier.PRIVATE
      | Modifier.PROTECTED
      | Modifier.STATIC
      | Modifier.FINAL
      | Modifier.VOLATILE
      | Modifier.TRANSIENT
      | AccessFlags.ACC_SYNTHETIC;
  if ((flags & ~supportedFlags) != 0) {
    throw new IllegalArgumentException("Unexpected flag: " + Integer.toHexString(flags));
  }

  // An initial value only makes sense for static fields.
  if ((flags & Modifier.STATIC) == 0 && staticValue != null) {
    throw new IllegalArgumentException("staticValue is non-null, but field is not static");
  }

  FieldDeclaration declaration = new FieldDeclaration(fieldId, flags, staticValue);
  declaringType.fields.put(fieldId, declaration);
}
Declares a field .
20,699
/**
 * Generates the dex file containing every declared type and returns its bytes.
 *
 * The output DexFile is created lazily on first use, with extended opcodes
 * disabled (API_NO_EXTENDED_OPCODES).
 *
 * @return the serialized dex file contents
 * @throws RuntimeException wrapping any IOException from dex serialization
 */
public byte[] generate() {
  if (outputDex == null) {
    DexOptions options = new DexOptions();
    options.minSdkVersion = DexFormat.API_NO_EXTENDED_OPCODES;
    outputDex = new DexFile(options);
  }

  for (TypeDeclaration declaration : types.values()) {
    outputDex.add(declaration.toClassDefItem());
  }

  try {
    return outputDex.toDex(null, false);
  } catch (IOException e) {
    throw new RuntimeException(e);
  }
}
Generates a dex file and returns its bytes .