idx
int64
0
41.2k
question
stringlengths
74
4.21k
target
stringlengths
5
888
20,500
/**
 * Reads the HTTP request body of the given exchange, sized by the Content-Length header.
 *
 * @param exchange the exchange whose request body is read
 * @return the body bytes, or an empty array when Content-Length is non-positive
 *         or an I/O error occurs
 */
public static byte[] readHttpRequestBody(HttpExchange exchange) {
  int contentLength =
      Integer.parseInt(exchange.getRequestHeaders().getFirst(CONTENT_LENGTH));
  if (contentLength <= 0) {
    LOG.log(Level.SEVERE,
        "Failed to read content length http request body: " + contentLength);
    return new byte[0];
  }
  byte[] requestBody = new byte[contentLength];
  // try-with-resources replaces the manual close in finally (which also
  // contained a return, masking any pending result).
  try (InputStream is = exchange.getRequestBody()) {
    int off = 0;
    int bRead;
    // Bug fix: the original guard was `off != contentLength - 1`, which could
    // leave the last byte unread; read until the buffer is full or EOF.
    while (off < contentLength
        && (bRead = is.read(requestBody, off, contentLength - off)) != -1) {
      off += bRead;
    }
  } catch (IOException e) {
    LOG.log(Level.SEVERE, "Failed to read http request body: ", e);
    return new byte[0];
  }
  return requestBody;
}
Read the request body of HTTP request from a given HttpExchange
20,501
/**
 * Sends an HTTP response: HTTP_OK on success, HTTP_UNAVAILABLE otherwise,
 * followed by the given body bytes.
 *
 * @param isSuccess selects the status code
 * @param exchange the exchange to respond on
 * @param response the response body
 * @return true if headers and body were sent and the stream closed cleanly
 */
public static boolean sendHttpResponse(boolean isSuccess, HttpExchange exchange, byte[] response) {
  int returnCode = isSuccess ? HttpURLConnection.HTTP_OK : HttpURLConnection.HTTP_UNAVAILABLE;
  try {
    exchange.sendResponseHeaders(returnCode, response.length);
  } catch (IOException e) {
    LOG.log(Level.SEVERE, "Failed to send response headers: ", e);
    return false;
  }
  // try-with-resources replaces the manual close in finally, which contained a
  // return statement (an anti-pattern that can mask pending results/exceptions).
  try (OutputStream os = exchange.getResponseBody()) {
    os.write(response);
  } catch (IOException e) {
    LOG.log(Level.SEVERE, "Failed to send http response: ", e);
    return false;
  }
  return true;
}
Send an HTTP response with an HTTP_OK (on success) or HTTP_UNAVAILABLE (on failure) return code and the given response body
20,502
/**
 * Sends an HTTP POST request on the given connection with the given body.
 *
 * @param connection an already-opened connection
 * @param contentType value for the Content-Type header (only set when data is non-empty)
 * @param data request body; when empty, only the method is set and no body is written
 * @return true on success, false on any protocol or I/O failure
 */
public static boolean sendHttpPostRequest(HttpURLConnection connection, String contentType, byte[] data) {
  try {
    connection.setRequestMethod("POST");
  } catch (ProtocolException e) {
    LOG.log(Level.SEVERE, "Failed to set post request: ", e);
    return false;
  }
  if (data.length > 0) {
    connection.setRequestProperty(CONTENT_TYPE, contentType);
    connection.setRequestProperty(CONTENT_LENGTH, Integer.toString(data.length));
    connection.setUseCaches(false);
    connection.setDoOutput(true);
    // try-with-resources replaces the null-checked manual close in finally
    // (which contained a return, masking pending results).
    try (OutputStream os = connection.getOutputStream()) {
      os.write(data);
      os.flush();
    } catch (IOException e) {
      LOG.log(Level.SEVERE, "Failed to send request: ", e);
      return false;
    }
  }
  return true;
}
Send Http POST Request to a connection with given data in request body
20,503
/**
 * Reads the HTTP response body from the given connection, sized by Content-Length.
 *
 * @param connection connection whose response is read; a non-OK status is only logged
 * @return the response bytes, or an empty array on abnormal length or I/O failure
 */
public static byte[] readHttpResponse(HttpURLConnection connection) {
  try {
    if (connection.getResponseCode() != HttpURLConnection.HTTP_OK) {
      LOG.log(Level.WARNING, "Http Response not OK: " + connection.getResponseCode());
    }
  } catch (IOException e) {
    LOG.log(Level.SEVERE, "Failed to get response code", e);
    return new byte[0];
  }
  int responseLength = connection.getContentLength();
  if (responseLength <= 0) {
    LOG.log(Level.SEVERE, "Response length abnormal: " + responseLength);
    return new byte[0];
  }
  byte[] res = new byte[responseLength];
  // Bug fixes: (1) the loop guard was `off != responseLength - 1`, which could
  // leave the final byte unread; (2) the finally block called
  // connection.getInputStream() a second time just to close it, and contained a
  // return. try-with-resources closes the same stream exactly once.
  try (InputStream is = connection.getInputStream()) {
    int off = 0;
    int bRead;
    while (off < responseLength
        && (bRead = is.read(res, off, responseLength - off)) != -1) {
      off += bRead;
    }
    return res;
  } catch (IOException e) {
    LOG.log(Level.SEVERE, "Failed to read response: ", e);
    return new byte[0];
  }
}
Read http response from a given http connection
20,504
/**
 * Sends a command to the TMaster with no extra arguments.
 *
 * @throws TMasterException if the underlying call fails
 */
public static void sendToTMaster(String command, String topologyName,
    SchedulerStateManagerAdaptor stateManager, NetworkUtils.TunnelConfig tunnelConfig)
    throws TMasterException {
  // Delegate to the argument-taking variant with an empty argument list.
  sendToTMasterWithArguments(
      command, topologyName, new ArrayList<String>(), stateManager, tunnelConfig);
}
Communicate with TMaster with command
20,505
/**
 * Looks up the current TopologyState from the topology's physical plan.
 *
 * @throws TMasterException if no physical plan exists for the topology
 */
private static TopologyAPI.TopologyState getRuntimeTopologyState(
    String topologyName, SchedulerStateManagerAdaptor statemgr) throws TMasterException {
  final PhysicalPlans.PhysicalPlan physicalPlan = statemgr.getPhysicalPlan(topologyName);
  if (physicalPlan == null) {
    throw new TMasterException(
        String.format("Failed to get physical plan for topology '%s'", topologyName));
  }
  return physicalPlan.getTopology().getState();
}
Get current running TopologyState
20,506
/**
 * Allocates instances to containers round-robin, iterating components in RAM-sorted order.
 *
 * @param numContainer number of containers; must be >= 1 and <= total instance count
 * @param parallelismMap component name -> parallelism
 * @return container id (1-based) -> instances assigned to it
 * @throws RuntimeException on an invalid container count
 */
private Map<Integer, List<InstanceId>> getRoundRobinAllocation(
    int numContainer, Map<String, Integer> parallelismMap) {
  Map<Integer, List<InstanceId>> allocation = new HashMap<>();
  int totalInstance = TopologyUtils.getTotalInstance(parallelismMap);
  if (numContainer < 1) {
    // Bug fix: corrected typo in the error message ("Invlaid" -> "Invalid").
    throw new RuntimeException(
        String.format("Invalid number of container: %d", numContainer));
  } else if (numContainer > totalInstance) {
    throw new RuntimeException(String.format(
        "More containers (%d) allocated than instances (%d).", numContainer, totalInstance));
  }
  for (int i = 1; i <= numContainer; ++i) {
    allocation.put(i, new ArrayList<>());
  }
  int index = 1;
  int globalTaskIndex = 1;
  // RAM-sorted order keeps the placement deterministic across runs.
  List<String> sortedInstances = getSortedRAMComponents(parallelismMap.keySet()).stream()
      .map(ResourceRequirement::getComponentName)
      .collect(Collectors.toList());
  for (String component : sortedInstances) {
    int numInstance = parallelismMap.get(component);
    for (int i = 0; i < numInstance; ++i) {
      allocation.get(index).add(new InstanceId(component, globalTaskIndex, i));
      // Wrap back to the first container after the last one.
      index = (index == numContainer) ? 1 : index + 1;
      globalTaskIndex++;
    }
  }
  return allocation;
}
Get the instance allocation based on a round-robin algorithm
20,507
/**
 * Verifies every instance in the packing plan is granted at least the minimum RAM.
 *
 * @throws PackingException if any instance plan falls below MIN_RAM_PER_INSTANCE
 */
private void validatePackingPlan(PackingPlan plan) throws PackingException {
  for (PackingPlan.ContainerPlan container : plan.getContainers()) {
    for (PackingPlan.InstancePlan instance : container.getInstances()) {
      if (instance.getResource().getRam().lessThan(MIN_RAM_PER_INSTANCE)) {
        throw new PackingException(String.format(
            "Invalid packing plan generated. A minimum of "
                + "%s RAM is required, but InstancePlan for component '%s' has %s",
            MIN_RAM_PER_INSTANCE, instance.getComponentName(),
            instance.getResource().getRam()));
      }
    }
  }
}
Check whether the PackingPlan generated is valid
20,508
/**
 * Subtracts the given resource from this one, clamping each dimension at zero
 * so the result is never negative.
 */
public Resource subtractAbsolute(Resource other) {
  final double remainingCpu = Math.max(0, this.getCpu() - other.getCpu());
  final ByteAmount remainingRam = ByteAmount.ZERO.max(this.getRam().minus(other.getRam()));
  final ByteAmount remainingDisk = ByteAmount.ZERO.max(this.getDisk().minus(other.getDisk()));
  return new Resource(remainingCpu, remainingRam, remainingDisk);
}
Subtracts a given resource from the current resource. The result is never negative.
20,509
/** Returns a new Resource whose CPU, RAM and disk are the sums of this and {@code other}. */
public Resource plus(Resource other) {
  return new Resource(
      this.getCpu() + other.getCpu(),
      this.getRam().plus(other.getRam()),
      this.getDisk().plus(other.getDisk()));
}
Adds a given resource to the current resource.
20,510
/**
 * Divides this resource by another, per dimension (CPU, RAM, disk), each rounded up,
 * and returns the largest of the three factors.
 *
 * @throws RuntimeException if any dimension of {@code other} is zero
 */
public double divideBy(Resource other) throws RuntimeException {
  if (other.getCpu() == 0 || other.getRam().isZero() || other.getDisk().isZero()) {
    throw new RuntimeException("Division by 0.");
  }
  final double cpuFactor = Math.ceil(this.getCpu() / other.getCpu());
  final double ramFactor =
      Math.ceil((double) this.getRam().asBytes() / other.getRam().asBytes());
  final double diskFactor =
      Math.ceil((double) this.getDisk().asBytes() / other.getDisk().asBytes());
  return Math.max(cpuFactor, Math.max(ramFactor, diskFactor));
}
Divides a resource by another resource by dividing the CPU memory and disk values of the resources . It returns the maximum of the three results .
20,511
/**
 * Verifies the given text is not blank and returns it unchanged.
 *
 * @throws IllegalArgumentException with {@code errorMessage} when the text is blank
 */
public static String checkNotBlank(String text, String errorMessage) {
  if (StringUtils.isBlank(text)) {
    throw new IllegalArgumentException(errorMessage);
  }
  return text;
}
Utility function that verifies the given text is not blank.
20,512
/**
 * Translates a Storm topology config into a Heron config, applying serialization,
 * generic Storm, task-hook, and topology-level translations.
 */
@SuppressWarnings({"rawtypes", "unchecked"})
public static Config translateConfig(Map stormConfig) {
  // A null Storm config yields an empty Heron config.
  final Config heronConfig = stormConfig == null
      ? new Config()
      : new Config((Map<String, Object>) stormConfig);
  doSerializationTranslation(heronConfig);
  doStormTranslation(heronConfig);
  doTaskHooksTranslation(heronConfig);
  doTopologyLevelTranslation(heronConfig);
  return heronConfig;
}
Translate storm config to heron config for topology
20,513
/**
 * Translates a Storm component-level config into a Heron config; only the
 * generic Storm translation applies at component scope.
 */
@SuppressWarnings({"rawtypes", "unchecked"})
public static Config translateComponentConfig(Map stormConfig) {
  final Config heronConfig = stormConfig == null
      ? new Config()
      : new Config((Map<String, Object>) stormConfig);
  doStormTranslation(heronConfig);
  return heronConfig;
}
Translate storm config to heron config for components
20,514
/**
 * Maps Storm's acker-executor count onto Heron's reliability mode:
 * a positive acker count means ATLEAST_ONCE, anything else ATMOST_ONCE.
 */
private static void doTopologyLevelTranslation(Config heronConfig) {
  org.apache.heron.api.Config.TopologyReliabilityMode mode =
      org.apache.heron.api.Config.TopologyReliabilityMode.ATMOST_ONCE;
  if (heronConfig.containsKey(org.apache.storm.Config.TOPOLOGY_ACKER_EXECUTORS)) {
    Integer nAckers =
        Utils.getInt(heronConfig.get(org.apache.storm.Config.TOPOLOGY_ACKER_EXECUTORS));
    if (nAckers > 0) {
      mode = org.apache.heron.api.Config.TopologyReliabilityMode.ATLEAST_ONCE;
    }
  }
  org.apache.heron.api.Config.setTopologyReliabilityMode(heronConfig, mode);
}
Translate topology config .
20,515
/** Builds a publisher-register request for this instance and sends it to the metrics manager. */
private void sendRegisterRequest() {
  // Describe this instance as a metric publisher.
  Metrics.MetricPublisher publisher = Metrics.MetricPublisher.newBuilder()
      .setHostname(hostname)
      .setPort(instance.getInfo().getTaskId())
      .setComponentName(instance.getInfo().getComponentName())
      .setInstanceId(instance.getInstanceId())
      .setInstanceIndex(instance.getInfo().getComponentIndex())
      .build();
  Metrics.MetricPublisherRegisterRequest request =
      Metrics.MetricPublisherRegisterRequest.newBuilder()
          .setPublisher(publisher)
          .build();
  // No context object is attached to this request (second argument is null).
  sendRequest(request, null,
      Metrics.MetricPublisherRegisterResponse.newBuilder(),
      systemConfig.getInstanceReconnectMetricsmgrInterval());
}
Build register request and send to metrics mgr
20,516
/**
 * Performs a GET against the endpoint and parses the response body as JSON.
 *
 * @param expectedResponseCode the response code to require
 * @return the parsed JSON tree
 * @throws IOException if the request fails, the code mismatches, or the body is not JSON
 */
public JsonNode get(Integer expectedResponseCode) throws IOException {
  HttpURLConnection conn = getUrlConnection();
  byte[] responseData;
  try {
    if (!NetworkUtils.sendHttpGetRequest(conn)) {
      throw new IOException("Failed to send get request to " + endpointURI);
    }
    if (!NetworkUtils.checkHttpResponseCode(conn, expectedResponseCode)) {
      throw new IOException("Unexpected response from connection. Expected "
          + expectedResponseCode + " but received " + conn.getResponseCode());
    }
    responseData = NetworkUtils.readHttpResponse(conn);
  } finally {
    conn.disconnect();
  }
  JsonNode podDefinition;
  try {
    ObjectMapper mapper = new ObjectMapper();
    podDefinition = mapper.readTree(responseData);
  } catch (IOException ioe) {
    // Bug fix: preserve the parse failure as the cause instead of dropping it.
    throw new IOException("Failed to parse JSON response from API", ioe);
  }
  return podDefinition;
}
Make a GET request to a JSON - based API endpoint
20,517
/**
 * Performs a DELETE against the endpoint, requiring the expected response code.
 *
 * @throws IOException if the request fails or the response code mismatches
 */
public void delete(Integer expectedResponseCode) throws IOException {
  HttpURLConnection conn = getUrlConnection();
  try {
    if (!NetworkUtils.sendHttpDeleteRequest(conn)) {
      throw new IOException("Failed to send delete request to " + endpointURI);
    }
    if (!NetworkUtils.checkHttpResponseCode(conn, expectedResponseCode)) {
      throw new IOException("Unexpected response from connection. Expected "
          + expectedResponseCode + " but received " + conn.getResponseCode());
    }
  } finally {
    // Always release the connection, success or failure.
    conn.disconnect();
  }
}
Make a DELETE request to a URI
20,518
/**
 * Performs a POST with a JSON body against the endpoint, requiring the expected
 * response code; on mismatch, logs the response body and throws.
 *
 * @throws IOException if the request fails or the response code mismatches
 */
public void post(String jsonBody, Integer expectedResponseCode) throws IOException {
  HttpURLConnection conn = getUrlConnection();
  try {
    // Bug fix: encode the body explicitly as UTF-8 instead of the platform charset.
    if (!NetworkUtils.sendHttpPostRequest(conn, NetworkUtils.JSON_TYPE,
        jsonBody.getBytes(java.nio.charset.StandardCharsets.UTF_8))) {
      throw new IOException("Failed to send POST to " + endpointURI);
    }
    if (!NetworkUtils.checkHttpResponseCode(conn, expectedResponseCode)) {
      byte[] bytes = NetworkUtils.readHttpResponse(conn);
      LOG.log(Level.SEVERE, "Failed to send POST request to endpoint");
      // Bug fix: decode explicitly as UTF-8 instead of the platform charset.
      LOG.log(Level.SEVERE, new String(bytes, java.nio.charset.StandardCharsets.UTF_8));
      throw new IOException("Unexpected response from connection. Expected "
          + expectedResponseCode + " but received " + conn.getResponseCode());
    }
  } finally {
    conn.disconnect();
  }
}
Make a POST request to a URI
20,519
/**
 * Drains complete packets from the channel, bounded by a time budget and a byte budget.
 * Returns an empty list if a channel error occurs (the select handler is notified).
 */
public List<IncomingPacket> read() {
  final long cycleStart = System.nanoTime();
  long cycleBytes = 0;
  long cyclePackets = 0;
  List<IncomingPacket> completed = new ArrayList<IncomingPacket>();
  while ((System.nanoTime() - cycleStart - readReadBatchTime.toNanos()) < 0
      && (cycleBytes < readBatchSize.asBytes())) {
    int readState = incomingPacket.readFromChannel(socketChannel, maximumPacketSize.asBytes());
    if (readState > 0) {
      // Packet not yet complete; stop and resume on the next read cycle.
      break;
    } else if (readState < 0) {
      // Channel error: notify the handler and discard anything read this cycle.
      LOG.severe("Something bad happened while reading from channel: "
          + socketChannel.socket().getRemoteSocketAddress());
      selectHandler.handleError(socketChannel);
      completed.clear();
      break;
    } else {
      // A full packet arrived; hand it off and start buffering a fresh one.
      cyclePackets++;
      cycleBytes += incomingPacket.size();
      completed.add(incomingPacket);
      incomingPacket = new IncomingPacket();
    }
  }
  totalPacketsRead += cyclePackets;
  totalBytesRead += cycleBytes;
  return completed;
}
It would return an empty list if something bad happens
20,520
/**
 * Flushes queued outgoing packets to the socket, bounded by a time budget and a
 * byte budget; disables write interest once the queue is empty.
 */
public void write() {
  final long cycleStart = System.nanoTime();
  long cycleBytes = 0;
  long cyclePackets = 0;
  while ((System.nanoTime() - cycleStart - writeBatchTime.toNanos()) < 0
      && (cycleBytes < writeBatchSize.asBytes())) {
    OutgoingPacket head = outgoingPacketsToWrite.peek();
    if (head == null) {
      break;  // Nothing left to send.
    }
    int writeState = head.writeToChannel(socketChannel);
    if (writeState > 0) {
      // Packet only partially written; retry on the next write cycle.
      break;
    } else if (writeState < 0) {
      LOG.severe("Something bad happened while writing to channel");
      selectHandler.handleError(socketChannel);
      return;
    } else {
      // Fully written; account for it and drop it from the queue.
      cycleBytes += head.size();
      cyclePackets++;
      outgoingPacketsToWrite.remove();
    }
  }
  totalPacketsWritten += cyclePackets;
  totalBytesWritten += cycleBytes;
  if (getOutstandingPackets() == 0) {
    disableWriting();
  }
}
Write the outgoingPackets in buffer to socket
20,521
/**
 * Joins the given path segments into one platform-specific file system path.
 * Assumes the list has at least one element.
 */
private static String combinePaths(List<String> paths) {
  // Seed with the first segment, then fold each remaining one in as a child.
  File combined = new File(paths.get(0));
  for (String segment : paths.subList(1, paths.size())) {
    combined = new File(combined, segment);
  }
  return combined.getPath();
}
Given a list of strings concatenate them to form a file system path
20,522
/**
 * Applies pending control flags in order: activate, deactivate, stop.
 * Each flag is cleared after its action runs.
 */
protected void handleControlSignal() {
  if (toActivate) {
    // An instance must be started before it can be activated.
    if (!isInstanceStarted) {
      startInstance();
    }
    instance.activate();
    LOG.info("Activated instance: " + physicalPlanHelper.getMyInstanceId());
    toActivate = false;
  }
  if (toDeactivate) {
    instance.deactivate();
    LOG.info("Deactivated instance: " + physicalPlanHelper.getMyInstanceId());
    toDeactivate = false;
  }
  if (toStop) {
    instance.shutdown();
    LOG.info("Stopped instance: " + physicalPlanHelper.getMyInstanceId());
    toStop = false;
  }
}
Handle the pending control flags (activate, deactivate, stop); these flags have to be handled inside the WakeableLooper thread
20,523
/**
 * Sets the topology's reliability mode from the acking flag:
 * acking enabled maps to ATLEAST_ONCE, disabled to ATMOST_ONCE.
 */
public static void setEnableAcking(Map<String, Object> conf, boolean acking) {
  setTopologyReliabilityMode(conf, acking
      ? Config.TopologyReliabilityMode.ATLEAST_ONCE
      : Config.TopologyReliabilityMode.ATMOST_ONCE);
}
Sets whether the topology runs with acking enabled
20,524
/**
 * Registers a named timer event that runs {@code task} every {@code interval}.
 *
 * @throws IllegalArgumentException if the interval is non-positive or the name is taken
 */
@SuppressWarnings("unchecked")
public static void registerTopologyTimerEvents(Map<String, Object> conf, String name,
    Duration interval, Runnable task) {
  if (interval.isZero() || interval.isNegative()) {
    throw new IllegalArgumentException("Timer duration needs to be positive");
  }
  // Idiom: computeIfAbsent replaces the manual containsKey/put initialization.
  Map<String, Pair<Duration, Runnable>> timers =
      (Map<String, Pair<Duration, Runnable>>) conf.computeIfAbsent(
          Config.TOPOLOGY_TIMER_EVENTS,
          k -> new HashMap<String, Pair<Duration, Runnable>>());
  if (timers.containsKey(name)) {
    throw new IllegalArgumentException("Timer with name " + name + " already exists");
  }
  timers.put(name, Pair.of(interval, task));
}
Registers a timer event that executes periodically
20,525
/** Builds the topology from the builder and submits it under the given name and config. */
public void run(String name, Config config, Builder builder) {
  TopologyBuilder topologyBuilder = ((BuilderImpl) builder).build();
  try {
    HeronSubmitter.submitTopology(name, config.getHeronConfig(),
        topologyBuilder.createTopology());
  } catch (AlreadyAliveException | InvalidTopologyException e) {
    // NOTE(review): failures are only printed, not rethrown or logged via a logger.
    e.printStackTrace();
  }
}
Runs the computation
20,526
/**
 * Fetches the next log record from the reader; returns null and marks
 * end-of-stream once the reader is exhausted.
 */
private LogRecordWithInputStream nextLogRecord() throws IOException {
  try {
    return nextLogRecord(reader);
  } catch (EndOfStreamException e) {
    // Remember we hit the end so callers stop asking for more records.
    eos = true;
    LOG.info(() -> "end of stream is reached");
    return null;
  }
}
Get input stream representing next entry in the ledger .
20,527
/**
 * Returns a copy of the topology with all serialized spout/bolt objects stripped,
 * since user objects can be huge and need not be stored in the state manager.
 */
public TopologyAPI.Topology trimTopology(TopologyAPI.Topology topology) {
  TopologyAPI.Topology.Builder trimmed = TopologyAPI.Topology.newBuilder().mergeFrom(topology);
  for (TopologyAPI.Spout.Builder spoutBuilder : trimmed.getSpoutsBuilderList()) {
    spoutBuilder.getCompBuilder().clearSerializedObject();
  }
  for (TopologyAPI.Bolt.Builder boltBuilder : trimmed.getBoltsBuilderList()) {
    boltBuilder.getCompBuilder().clearSerializedObject();
  }
  return trimmed.build();
}
Trim the topology definition for storing into state manager . This is because the user generated spouts and bolts might be huge .
20,528
/**
 * Drives a topology launch: stores the topology definition, packing plan and
 * execution state, then invokes the launcher. On any failure, previously stored
 * state is rolled back in reverse order before throwing.
 *
 * @throws SubmitDryRunResponse when dry-run is requested (carries the packed plan)
 */
public void call() throws LauncherException, PackingException, SubmitDryRunResponse {
  SchedulerStateManagerAdaptor statemgr = Runtime.schedulerStateManagerAdaptor(runtime);
  TopologyAPI.Topology topology = Runtime.topology(runtime);
  String topologyName = Context.topologyName(config);
  PackingPlan packedPlan = LauncherUtils.getInstance().createPackingPlan(config, runtime);
  if (Context.dryRun(config)) {
    // Dry-run short-circuits before any state is written.
    throw new SubmitDryRunResponse(topology, config, packedPlan);
  }
  launcher.initialize(config, runtime);
  Boolean ok = statemgr.setTopology(trimTopology(topology), topologyName);
  if (ok == null || !ok) {
    throw new LauncherException(String.format(
        "Failed to set topology definition for topology '%s'", topologyName));
  }
  ok = statemgr.setPackingPlan(createPackingPlan(packedPlan), topologyName);
  if (ok == null || !ok) {
    statemgr.deleteTopology(topologyName);
    throw new LauncherException(String.format(
        "Failed to set packing plan for topology '%s'", topologyName));
  }
  ExecutionEnvironment.ExecutionState executionState = createExecutionState();
  ok = statemgr.setExecutionState(executionState, topologyName);
  if (ok == null || !ok) {
    statemgr.deletePackingPlan(topologyName);
    statemgr.deleteTopology(topologyName);
    throw new LauncherException(String.format(
        "Failed to set execution state for topology '%s'", topologyName));
  }
  if (!launcher.launch(packedPlan)) {
    statemgr.deleteExecutionState(topologyName);
    statemgr.deletePackingPlan(topologyName);
    statemgr.deleteTopology(topologyName);
    throw new LauncherException(String.format(
        "Failed to launch topology '%s'", topologyName));
  }
}
Call launcher to launch topology
20,529
/** Launches the executor for the given container as an async shell process. */
protected Process startExecutorProcess(int container, Set<PackingPlan.InstancePlan> instances) {
  return ShellUtils.runASyncProcess(
      getExecutorCommand(container, instances),
      new File(LocalContext.workingDirectory(config)),
      Integer.toString(container));
}
Start executor process via running an async shell process
20,530
/**
 * Starts the executor process for a container, records it, and attaches a
 * monitor that restarts it if it exits unexpectedly.
 */
protected void startExecutor(final int container, Set<PackingPlan.InstancePlan> instances) {
  LOG.info("Starting a new executor for container: " + container);
  final Process executorProcess = startExecutorProcess(container, instances);
  processToContainer.put(executorProcess, container);
  LOG.info("Started the executor for container: " + container);
  startExecutorMonitor(container, executorProcess, instances);
}
Start the executor for the given container
20,531
/**
 * Watches an executor process; when it exits, restarts it unless the topology
 * was killed or the container was deliberately removed.
 */
protected void startExecutorMonitor(final int container, final Process containerExecutor,
    Set<PackingPlan.InstancePlan> instances) {
  // Lambda instead of an anonymous Runnable; it never references itself.
  Runnable monitor = () -> {
    try {
      LOG.info("Waiting for container " + container + " to finish.");
      containerExecutor.waitFor();
      LOG.log(Level.INFO, "Container {0} is completed. Exit status: {1}",
          new Object[]{container, containerExecutor.exitValue()});
      if (isTopologyKilled) {
        LOG.info("Topology is killed. Not to start new executors.");
        return;
      } else if (!processToContainer.containsKey(containerExecutor)) {
        // The process was removed from the map, i.e. intentionally killed.
        LOG.log(Level.INFO, "Container {0} is killed. No need to relaunch.", container);
        return;
      }
      LOG.log(Level.INFO, "Trying to restart container {0}", container);
      // Remove the dead process entry before spawning the replacement.
      startExecutor(processToContainer.remove(containerExecutor), instances);
    } catch (InterruptedException e) {
      if (!isTopologyKilled) {
        LOG.log(Level.SEVERE, "Process is interrupted: ", e);
      }
    }
  };
  monitorService.submit(monitor);
}
Start the monitor of a given executor
20,532
/**
 * Deploys the packed plan: starts the TMaster executor (container 0) first,
 * then one executor per container in the plan.
 */
public boolean onSchedule(PackingPlan packing) {
  LOG.info("Starting to deploy topology: " + LocalContext.topologyName(config));
  synchronized (processToContainer) {
    LOG.info("Starting executor for TMaster");
    // Container 0 is reserved for the TMaster and carries no instance plans.
    startExecutor(0, null);
    for (PackingPlan.ContainerPlan containerPlan : packing.getContainers()) {
      startExecutor(containerPlan.getId(), containerPlan.getInstances());
    }
  }
  LOG.info("Executor for each container have been started.");
  return true;
}
Schedule the provided packed plan
20,533
/**
 * Kills the topology: flags it as killed, destroys every tracked executor
 * process, and clears the tracking map.
 */
public boolean onKill(Scheduler.KillTopologyRequest request) {
  String topologyName = LocalContext.topologyName(config);
  LOG.info("Command to kill topology: " + topologyName);
  // Set the flag first so monitors do not relaunch the dying executors.
  isTopologyKilled = true;
  synchronized (processToContainer) {
    for (Process executor : processToContainer.keySet()) {
      int containerIndex = processToContainer.get(executor);
      LOG.info("Killing executor for container: " + containerIndex);
      executor.destroy();
      LOG.info("Killed executor for container: " + containerIndex);
    }
    processToContainer.clear();
  }
  return true;
}
Handler to kill topology
20,534
/**
 * Restarts either the whole topology (container index -1) or one container by
 * destroying the matching executor processes; the monitors relaunch them.
 */
public boolean onRestart(Scheduler.RestartTopologyRequest request) {
  int containerId = request.getContainerIndex();
  List<Process> targets = new LinkedList<>();
  if (containerId == -1) {
    LOG.info("Command to restart the entire topology: " + LocalContext.topologyName(config));
    targets.addAll(processToContainer.keySet());
  } else {
    LOG.info("Command to restart a container of topology: " + LocalContext.topologyName(config));
    LOG.info("Restart container requested: " + containerId);
    for (Process executor : processToContainer.keySet()) {
      if (containerId == processToContainer.get(executor)) {
        targets.add(executor);
      }
    }
  }
  if (targets.isEmpty()) {
    LOG.severe("Container not exist.");
    return false;
  }
  for (Process executor : targets) {
    executor.destroy();
  }
  return true;
}
Handler to restart topology
20,535
/**
 * Restarts a container: containers 0/null go straight to the CLI; otherwise
 * the container's heron-shell /killexecutor endpoint is tried first, falling
 * back to the CLI if the POST cannot be sent.
 */
public boolean restart(Integer containerId) {
  if (containerId == null || containerId == 0) {
    return cliController.restart(containerId);
  }
  if (stateMgrAdaptor == null) {
    LOG.warning("SchedulerStateManagerAdaptor not initialized");
    return false;
  }
  StMgr stmgr = searchContainer(containerId);
  if (stmgr == null) {
    LOG.warning("container not found in pplan " + containerId);
    return false;
  }
  String url = "http://" + stmgr.getHostName() + ":" + stmgr.getShellPort() + "/killexecutor";
  String payload = "secret=" + stateMgrAdaptor.getExecutionState(topologyName).getTopologyId();
  LOG.info("sending `kill container` to " + url + "; payload: " + payload);
  // NOTE(review): presumably getHttpConnection never returns null here;
  // otherwise the finally block would NPE — confirm against NetworkUtils.
  HttpURLConnection con = NetworkUtils.getHttpConnection(url);
  try {
    if (NetworkUtils.sendHttpPostRequest(con, "X", payload.getBytes())) {
      return NetworkUtils.checkHttpResponseCode(con, 200);
    } else {
      LOG.info("heron-shell killexecutor failed; try aurora client ..");
      return cliController.restart(containerId);
    }
  } finally {
    con.disconnect();
  }
}
Restart an aurora container
20,536
/** Returns true when every dimension of this resource covers the requested amount. */
public boolean canSatisfy(TaskResources needed) {
  return this.cpu >= needed.cpu
      && this.mem >= needed.mem
      && this.disk >= needed.disk
      && this.ports >= needed.ports;
}
Whether this resource can satisfy the TaskResources needed from parameter
20,537
/**
 * Builds a TaskResources from a Mesos offer, considering only resources whose
 * role is unset, "*", or matches the given role.
 */
public static TaskResources apply(Protos.Offer offer, String role) {
  double cpu = 0;
  double mem = 0;
  double disk = 0;
  List<Range> portsResource = new ArrayList<>();
  for (Protos.Resource resource : offer.getResourcesList()) {
    boolean roleMatches = !resource.hasRole()
        || resource.getRole().equals("*")
        || resource.getRole().equals(role);
    if (!roleMatches) {
      continue;
    }
    switch (resource.getName()) {
      case CPUS_RESOURCE_NAME:
        cpu = resource.getScalar().getValue();
        break;
      case MEM_RESOURCE_NAME:
        mem = resource.getScalar().getValue();
        break;
      case DISK_RESOURCE_NAME:
        disk = resource.getScalar().getValue();
        break;
      case PORT_RESOURCE_NAME:
        for (Protos.Value.Range range : resource.getRanges().getRangeList()) {
          portsResource.add(new Range(range.getBegin(), range.getEnd()));
        }
        break;
      default:
        break;
    }
  }
  return new TaskResources(cpu, mem, disk, portsResource);
}
A static method to construct a TaskResources from mesos Protos . Offer
20,538
/**
 * Registers the default gateway metrics with the collector, all sharing the
 * system-configured export interval.
 */
public void registerMetrics(MetricsCollector metricsCollector) {
  SystemConfig systemConfig = (SystemConfig) SingletonRegistry.INSTANCE
      .getSingleton(SystemConfig.HERON_SYSTEM_CONFIG);
  // All gateway metrics share one export interval from the system config.
  int interval = (int) systemConfig.getHeronMetricsExportInterval().getSeconds();
  metricsCollector.registerMetric("__gateway-received-packets-size", receivedPacketsSize, interval);
  metricsCollector.registerMetric("__gateway-sent-packets-size", sentPacketsSize, interval);
  metricsCollector.registerMetric("__gateway-received-packets-count", receivedPacketsCount, interval);
  metricsCollector.registerMetric("__gateway-sent-packets-count", sentPacketsCount, interval);
  metricsCollector.registerMetric("__gateway-sent-metrics-size", sentMetricsSize, interval);
  metricsCollector.registerMetric("__gateway-sent-metrics-packets-count", sentMetricsPacketsCount, interval);
  metricsCollector.registerMetric("__gateway-sent-metrics-count", sentMetricsCount, interval);
  metricsCollector.registerMetric("__gateway-sent-exceptions-count", sentExceptionsCount, interval);
  metricsCollector.registerMetric("__gateway-in-stream-queue-size", inStreamQueueSize, interval);
  metricsCollector.registerMetric("__gateway-out-stream-queue-size", outStreamQueueSize, interval);
  metricsCollector.registerMetric("__gateway-in-stream-queue-expected-capacity", inStreamQueueExpectedCapacity, interval);
  metricsCollector.registerMetric("__gateway-out-stream-queue-expected-capacity", outStreamQueueExpectedCapacity, interval);
  metricsCollector.registerMetric("__gateway-in-queue-full-count", inQueueFullCount, interval);
}
Register default Gateway Metrics to given MetricsCollector
20,539
// Handles an internal publish message by delegating to the publisher handler.
// Thread-safe per the surrounding design: messages are pushed into a concurrent
// queue downstream — TODO confirm against handlePublisherPublishMessage.
public void onInternalMessage ( Metrics . MetricPublisher request , Metrics . MetricPublisherPublishMessage message ) { handlePublisherPublishMessage ( request , message ) ; }
This method is thread - safe since we would push Messages into a Concurrent Queue .
20,540
private void startTMasterChecker ( ) { final int checkIntervalSec = TypeUtils . getInteger ( sinkConfig . get ( KEY_TMASTER_LOCATION_CHECK_INTERVAL_SEC ) ) ; Runnable runnable = new Runnable ( ) { public void run ( ) { TopologyMaster . TMasterLocation location = ( TopologyMaster . TMasterLocation ) SingletonRegistry . INSTANCE . getSingleton ( TMASTER_LOCATION_BEAN_NAME ) ; if ( location != null ) { if ( currentTMasterLocation == null || ! location . equals ( currentTMasterLocation ) ) { LOG . info ( "Update current TMasterLocation to: " + location ) ; currentTMasterLocation = location ; tMasterClientService . updateTMasterLocation ( currentTMasterLocation ) ; tMasterClientService . startNewMasterClient ( ) ; sinkContext . exportCountMetric ( TMASTER_LOCATION_UPDATE_COUNT , 1 ) ; } } tMasterLocationStarter . schedule ( this , checkIntervalSec , TimeUnit . SECONDS ) ; } } ; tMasterLocationStarter . schedule ( runnable , checkIntervalSec , TimeUnit . SECONDS ) ; LOG . info ( "TMasterChecker started with interval: " + checkIntervalSec ) ; }
Periodically checks whether the TMasterLocation has changed; if so, restarts the TMasterClientService with the new TMasterLocation
20,541
/**
 * Establishes SSH tunnels (where needed) to each ZooKeeper endpoint in the
 * state-manager connection string, and rewrites the connection string to
 * point at the resulting (possibly local tunnel) endpoints.
 *
 * @param config heron config holding the state-manager connection string
 * @param tunnelConfig tunnel settings used for each endpoint
 * @return pair of the rewritten connection string and the spawned tunnel processes
 */
public static Pair<String, List<Process>> setupZkTunnel(Config config,
    NetworkUtils.TunnelConfig tunnelConfig) {
  // Strip whitespace so "host1:p1, host2:p2" splits cleanly on commas
  String connectionString = Context.stateManagerConnectionString(config).replaceAll("\\s+", "");
  List<Pair<InetSocketAddress, Process>> ret = new ArrayList<>();
  String[] endpoints = connectionString.split(",");
  for (String endpoint : endpoints) {
    InetSocketAddress address = NetworkUtils.getInetSocketAddress(endpoint);
    Pair<InetSocketAddress, Process> pair = NetworkUtils.establishSSHTunnelIfNeeded(
        address, tunnelConfig, NetworkUtils.TunnelType.PORT_FORWARD);
    ret.add(pair);
  }
  // Rebuild the comma-separated connection string from the tunneled addresses
  StringBuilder connectionStringBuilder = new StringBuilder();
  List<Process> tunnelProcesses = new ArrayList<>();
  String delim = "";
  for (Pair<InetSocketAddress, Process> pair : ret) {
    if (pair.first != null) {
      connectionStringBuilder.append(delim)
          .append(pair.first.getHostName()).append(":").append(pair.first.getPort());
      delim = ",";
      if (pair.second != null) {
        tunnelProcesses.add(pair.second);
      }
    }
  }
  String newConnectionString = connectionStringBuilder.toString();
  return new Pair<String, List<Process>>(newConnectionString, tunnelProcesses);
}
Setup the tunnel if needed
20,542
/**
 * Auto-tunes max spout pending based on observed topology progress.
 * Implements a hill-climbing state machine keyed on the last action taken
 * (NOOP/INCREASE/DECREASE/RESTORE): if progress improved beyond
 * progressBound, keep pushing in the same direction; if it dropped, back off
 * (proportionally to the drop where applicable) or restore the previous
 * setting. After NOOP_THRESHOLD flat rounds a speculative probe is made,
 * alternating between increase and decrease.
 *
 * @param progress the latest observed progress measure
 */
public void autoTune(Long progress) {
  if (lastAction == ACTION.NOOP) {
    if (prevProgress == -1) {
      // No baseline yet: probe upward first
      doAction(ACTION.INCREASE, autoTuneFactor, progress);
    } else if (moreThanNum(progress, prevProgress, progressBound)) {
      doAction(ACTION.INCREASE, autoTuneFactor, progress);
    } else if (lessThanNum(progress, prevProgress, progressBound)) {
      // Back off proportionally to the relative drop, at least by autoTuneFactor
      doAction(ACTION.DECREASE,
          Math.max((prevProgress - progress) / (float) prevProgress, autoTuneFactor), progress);
    } else {
      ++callsInNoop;
      if (callsInNoop >= NOOP_THRESHOLD) {
        // Progress is flat: make a speculative probe, alternating direction each time
        doAction(speculativeAction, autoTuneFactor, progress);
        speculativeAction =
            speculativeAction == ACTION.INCREASE ? ACTION.DECREASE : ACTION.INCREASE;
      }
    }
  } else if (lastAction == ACTION.INCREASE) {
    if (moreThanNum(progress, prevProgress, autoTuneFactor - progressBound)) {
      doAction(ACTION.INCREASE, autoTuneFactor, progress);
    } else if (lessThanNum(progress, prevProgress, progressBound)) {
      float drop = Math.max((prevProgress - progress) / (float) prevProgress, autoTuneFactor);
      if (drop > autoTuneFactor) {
        doAction(ACTION.DECREASE, drop, progress);
      } else {
        doAction(ACTION.RESTORE, autoTuneFactor, progress);
      }
    } else {
      // The increase did not help enough: undo it
      doAction(ACTION.RESTORE, autoTuneFactor, progress);
    }
  } else if (lastAction == ACTION.DECREASE) {
    if (moreThanNum(progress, prevProgress, progressBound)) {
      // Decreasing helped: keep decreasing
      doAction(ACTION.DECREASE, autoTuneFactor, progress);
    } else {
      doAction(ACTION.NOOP, autoTuneFactor, progress);
    }
  } else if (lastAction == ACTION.RESTORE) {
    doAction(ACTION.NOOP, autoTuneFactor, progress);
  }
}
Tune max default max spout pending based on progress
20,543
/**
 * Annotation-processor round hook: for every element carrying one of the
 * handled audience/stability annotations, emits a compiler warning when the
 * element is not under the org.apache.heron namespace, i.e. a non-heron
 * class depends on an Unstable/Private/LimitedPrivate API.
 *
 * @return true to claim the annotations so no other processor handles them
 */
public boolean process(Set<? extends TypeElement> annotations, RoundEnvironment roundEnv) {
  if (!roundEnv.processingOver()) {
    for (TypeElement te : annotations) {
      for (Element elt : roundEnv.getElementsAnnotatedWith(te)) {
        if (!elt.toString().startsWith("org.apache.heron")) {
          env.getMessager().printMessage(Kind.WARNING,
              String.format("%s extends from a class annotated with %s", elt, te), elt);
        }
      }
    }
  }
  return true;
}
If a non - heron class extends from a class annotated as Unstable Private or LimitedPrivate emit a warning .
20,544
/**
 * Applies key overrides from the overrides yaml onto the state manager yaml,
 * rewriting the state manager file in place. Only keys already present in the
 * state manager config are overridden.
 *
 * @param overridesPath path to the overrides yaml
 * @param stateManagerPath path to the state manager yaml that is rewritten
 * @throws IOException on any read/write failure
 */
@SuppressWarnings("unchecked")
public static void applyOverridesToStateManagerConfig(Path overridesPath, Path stateManagerPath)
    throws IOException {
  final Path tempStateManagerPath = Files.createTempFile("statemgr-", CONFIG_SUFFIX);
  Reader stateManagerReader = null;
  try (Reader overrideReader = Files.newBufferedReader(overridesPath);
       Writer writer = Files.newBufferedWriter(tempStateManagerPath)) {
    stateManagerReader = Files.newBufferedReader(stateManagerPath);
    final Map<String, Object> overrides =
        (Map<String, Object>) new Yaml().load(overrideReader);
    final Map<String, Object> stateMangerConfig =
        (Map<String, Object>) new Yaml().load(stateManagerReader);
    // Yaml.load returns null for an empty document; without this guard an
    // empty overrides (or state manager) file caused a NullPointerException
    if (overrides != null && stateMangerConfig != null) {
      for (Map.Entry<String, Object> entry : overrides.entrySet()) {
        // Only override keys that the state manager config already declares
        if (stateMangerConfig.containsKey(entry.getKey())) {
          stateMangerConfig.put(entry.getKey(), entry.getValue());
        }
      }
    }
    newYaml().dump(stateMangerConfig, writer);
    stateManagerReader.close();
    FileHelper.copy(tempStateManagerPath, stateManagerPath);
  } finally {
    tempStateManagerPath.toFile().delete();
    // Safe even after the explicit close above; swallows secondary errors
    SysUtils.closeIgnoringExceptions(stateManagerReader);
  }
}
Applies override keys onto the state manager config file in place; this is needed because the heron executor ignores the override.yaml.
20,545
/**
 * Returns a new ByteAmount equal to this minus {@code other}, failing fast
 * if the subtraction would underflow a long.
 */
public ByteAmount minus(ResourceMeasure<Long> other) {
  final long subtrahend = other.value;
  checkArgument(Long.MIN_VALUE + subtrahend <= value,
      String.format("Subtracting %s from %s would overshoot Long.MIN_LONG", other, this));
  return ByteAmount.fromBytes(value - subtrahend);
}
Subtracts other from this .
20,546
/**
 * Returns a new ByteAmount equal to this plus {@code other}, failing fast
 * if the addition would overflow a long.
 */
public ByteAmount plus(ResourceMeasure<Long> other) {
  final long addend = other.value;
  checkArgument(Long.MAX_VALUE - value >= addend,
      String.format("Adding %s to %s would exceed Long.MAX_LONG", other, this));
  return ByteAmount.fromBytes(value + addend);
}
Adds other to this .
20,547
/**
 * Returns a new ByteAmount equal to this multiplied by {@code factor}.
 *
 * @param factor non-negative multiplier
 * @throws IllegalArgumentException if factor is negative or the product
 *         would exceed Long.MAX_VALUE
 */
public ByteAmount multiply(int factor) {
  // The original overflow check divided by factor unconditionally, which
  // threw ArithmeticException for factor == 0 and was wrong for negatives.
  checkArgument(factor >= 0,
      String.format("Multiplying %s by negative factor %d not supported", this, factor));
  if (factor == 0) {
    return ByteAmount.fromBytes(0);
  }
  checkArgument(value <= Long.MAX_VALUE / factor,
      String.format("Multiplying %s by %d would exceed Long.MAX_LONG", this, factor));
  return ByteAmount.fromBytes(value * factor);
}
Multiplies by factor
20,548
/**
 * Returns a new ByteAmount increased by the given percentage, rounding the
 * result to the nearest byte. Only non-negative percentages are supported.
 *
 * @param percentage non-negative percent to grow by
 * @throws IllegalArgumentException if percentage is negative or the result
 *         would exceed Long.MAX_VALUE
 */
public ByteAmount increaseBy(int percentage) {
  checkArgument(percentage >= 0,
      String.format("Increasing by negative percent (%d) not supported", percentage));
  double factor = 1.0 + ((double) percentage / 100);
  // Largest value that can be scaled by factor without overflowing a long
  long max = Math.round(Long.MAX_VALUE / factor);
  checkArgument(value <= max,
      String.format("Increasing %s by %d percent would exceed Long.MAX_LONG", this, percentage));
  return ByteAmount.fromBytes(Math.round(value.doubleValue() * factor));
}
Increases by a percentage rounding any remainder . Be aware that because of rounding increases will be approximate to the nearest byte .
20,549
/**
 * Creates a slurm job via sbatch: allocates the nodes and runs slurmScript,
 * which in turn starts the heron executor on each allocated node.
 *
 * @param slurmScript the job script handed to sbatch
 * @param heronExec path of the heron executor binary
 * @param commandArgs arguments passed through to the executor; blank/null
 *        args are replaced with "\"\"" so positional slots are preserved
 * @param topologyWorkingDirectory working directory for the sbatch process
 * @param containers number of nodes/tasks to allocate
 * @param partition slurm partition to submit to, or null for the default
 * @return true if the sbatch process exited successfully
 */
public boolean createJob(String slurmScript, String heronExec, String[] commandArgs,
    String topologyWorkingDirectory, long containers, String partition) {
  List<String> slurmCmd = slurmCommand(slurmScript, heronExec, containers, partition);
  List<String> transformedArgs = new ArrayList<>();
  for (int i = 0; i < commandArgs.length; i++) {
    String arg = commandArgs[i];
    if (arg == null || arg.trim().equals("")) {
      // Keep positional slots: empty args become explicit "" on the command line
      transformedArgs.add("\"\"");
    } else {
      transformedArgs.add(arg);
    }
  }
  slurmCmd.addAll(transformedArgs);
  String[] slurmCmdArray = slurmCmd.toArray(new String[0]);
  LOG.log(Level.INFO, "Executing job [" + topologyWorkingDirectory + "]:",
      Arrays.toString(slurmCmdArray));
  StringBuilder stderr = new StringBuilder();
  boolean ret = runProcess(topologyWorkingDirectory, slurmCmdArray, stderr);
  return ret;
}
Create a slurm job. Use the slurm scheduler's sbatch command to submit the job. sbatch allocates the nodes and runs the script specified by slurmScript. This script runs the heron executor on each of the nodes allocated.
20,550
/**
 * Builds the sbatch command line: node count, task count, optional
 * partition, then the job script and the heron executor path.
 */
private List<String> slurmCommand(String slurmScript, String heronExec,
    long containers, String partition) {
  String nTasks = String.format("--ntasks=%d", containers);
  List<String> command = new ArrayList<>(
      Arrays.asList("sbatch", "-N", Long.toString(containers), nTasks));
  if (partition != null) {
    command.add("-p");
    command.add(partition);
  }
  command.add(slurmScript);
  command.add(heronExec);
  return command;
}
Construct the SLURM Command
20,551
/**
 * Creates a slurm job on the default partition; convenience overload that
 * delegates to the six-argument createJob with a null partition.
 */
public boolean createJob(String slurmScript, String heronExec, String[] commandArgs,
    String topologyWorkingDirectory, long containers) {
  return createJob(slurmScript, heronExec, commandArgs, topologyWorkingDirectory,
      containers, null);
}
Create a slurm job on the default partition. Use the slurm scheduler's sbatch command to submit the job. sbatch allocates the nodes and runs the script specified by slurmScript. This script runs the heron executor on each of the nodes allocated.
20,552
/**
 * Runs the given command synchronously, capturing stderr. Protected so unit
 * tests can override it.
 *
 * @param topologyWorkingDirectory working directory, or null for the default
 * @return true if the process exited with status 0
 */
protected boolean runProcess(String topologyWorkingDirectory, String[] slurmCmd,
    StringBuilder stderr) {
  final File workingDir =
      (topologyWorkingDirectory == null) ? null : new File(topologyWorkingDirectory);
  int exitCode = ShellUtils.runSyncProcess(true, false, slurmCmd, stderr, workingDir);
  return exitCode == 0;
}
This is for unit testing
20,553
/**
 * Cancels the slurm job via scancel. The job id is read from jobIdFile, a
 * single-line file written by the slurm job script after allocation.
 *
 * @param jobIdFile file containing the slurm job id on its first line
 * @return true if scancel ran successfully, false if the id could not be read
 */
public boolean killJob(String jobIdFile) {
  List<String> jobIdFileContent = readFromFile(jobIdFile);
  if (jobIdFileContent.size() > 0) {
    // Only the first line matters: it holds the job id
    String[] slurmCmd = new String[]{"scancel", jobIdFileContent.get(0)};
    return runProcess(null, slurmCmd, new StringBuilder());
  } else {
    LOG.log(Level.SEVERE, "Failed to read the Slurm Job id from file: {0}", jobIdFile);
    return false;
  }
}
Cancel the Slurm job by reading the jobid from the jobIdFile . Uses scancel command to cancel the job . The file contains a single line with the job id . This file is written by the slurm job script after the job is allocated .
20,554
/**
 * Reads all lines of a text file.
 *
 * @param filename path of the file to read
 * @return the lines of the file, or an empty list if the file cannot be read
 */
protected List<String> readFromFile(String filename) {
  Path path = new File(filename).toPath();
  List<String> result = new ArrayList<>();
  try {
    // Files.readAllLines never returns null, so the former null check was dead code
    result.addAll(Files.readAllLines(path));
  } catch (IOException e) {
    LOG.log(Level.SEVERE, "Failed to read from file. ", e);
  }
  return result;
}
Read all the data from a text file line by line For now lets keep this util function here . We need to move it to a util location
20,555
/**
 * Entry point: downloads a topology package from the given URI and extracts
 * it to the destination directory. Configuration is loaded in cluster mode
 * by default, or in local mode when --heron_home/--config_path are given.
 * The URI and destination may be supplied as named options or as two
 * positional arguments.
 */
public static void main(String[] args) throws Exception {
  CommandLineParser parser = new DefaultParser();
  Options slaManagerCliOptions = constructCliOptions();
  Options helpOptions = constructHelpOptions();
  // Parse only the help options first so -h works without the required args
  CommandLine cmd = parser.parse(helpOptions, args, true);
  if (cmd.hasOption("h")) {
    usage(slaManagerCliOptions);
    return;
  }
  try {
    cmd = parser.parse(slaManagerCliOptions, args);
  } catch (ParseException e) {
    usage(slaManagerCliOptions);
    throw new RuntimeException("Error parsing command line options: ", e);
  }
  DownloaderMode mode = DownloaderMode.cluster;
  if (cmd.hasOption(CliArgs.MODE.text)) {
    mode = DownloaderMode.valueOf(cmd.getOptionValue(CliArgs.MODE.text, null));
  }
  Config config;
  switch (mode) {
    case cluster:
      config = Config.toClusterMode(Config.newBuilder()
          .putAll(ConfigLoader.loadClusterConfig()).build());
      break;
    case local:
      // Local mode requires an explicit heron home and config path
      if (!cmd.hasOption(CliArgs.HERON_HOME.text) || !cmd.hasOption(CliArgs.CONFIG_PATH.text)) {
        throw new IllegalArgumentException("Missing heron_home or config_path argument");
      }
      String heronHome = cmd.getOptionValue(CliArgs.HERON_HOME.text, null);
      String configPath = cmd.getOptionValue(CliArgs.CONFIG_PATH.text, null);
      config = Config.toLocalMode(Config.newBuilder()
          .putAll(ConfigLoader.loadConfig(heronHome, configPath, null, null)).build());
      break;
    default:
      throw new IllegalArgumentException("Invalid mode: "
          + cmd.getOptionValue(CliArgs.MODE.text));
  }
  String uri = cmd.getOptionValue(CliArgs.TOPOLOGY_PACKAGE_URI.text, null);
  String destination = cmd.getOptionValue(CliArgs.EXTRACT_DESTINATION.text, null);
  if (uri == null && destination == null) {
    // Fall back to positional arguments: <uri> <destination>
    String[] leftOverArgs = cmd.getArgs();
    if (leftOverArgs.length != 2) {
      System.err.println("Usage: downloader <topology-package-uri> <extract-destination>");
      return;
    }
    uri = leftOverArgs[0];
    destination = leftOverArgs[1];
  }
  final URI topologyLocation = new URI(uri);
  final Path topologyDestination = Paths.get(destination);
  final File file = topologyDestination.toFile();
  if (!file.exists()) {
    file.mkdirs();
  }
  // The downloader implementation is selected by the URI scheme
  Class clazz = Registry.UriToClass(config, topologyLocation);
  final Downloader downloader = Registry.getDownloader(clazz, topologyLocation);
  downloader.download(topologyLocation, topologyDestination);
}
takes topology package URI and extracts it to a directory
20,556
/**
 * Parses and submits an ECO topology. The yaml definition's "type" field
 * selects the pipeline: "storm" topologies are assembled with the
 * storm-compatible builders and submitted via submitStormTopology; "heron"
 * topologies use the native heron builders and submitHeronTopology. Any
 * other type is logged as an error and not submitted.
 *
 * @param fileInputStream the ECO topology definition yaml
 * @param propertiesFile properties stream for placeholder substitution
 * @param envFilter whether environment-variable substitution is applied
 * @throws Exception on parse or submission failure
 */
public void submit(FileInputStream fileInputStream, FileInputStream propertiesFile,
    boolean envFilter) throws Exception {
  EcoTopologyDefinition topologyDefinition =
      ecoParser.parseFromInputStream(fileInputStream, propertiesFile, envFilter);
  String topologyName = topologyDefinition.getName();
  String topologyType = topologyDefinition.getType();
  if ("storm".equals(topologyType)) {
    System.out.println("topology type is Storm");
    org.apache.heron.eco.builder.storm.EcoBuilder ecoBuilder =
        new org.apache.heron.eco.builder.storm.EcoBuilder(
            new org.apache.heron.eco.builder.storm.SpoutBuilder(),
            new BoltBuilder(),
            new org.apache.heron.eco.builder.storm.StreamBuilder(),
            new ComponentBuilder(),
            new ConfigBuilder());
    Config topologyConfig = ecoBuilder.buildConfig(topologyDefinition);
    EcoExecutionContext executionContext =
        new EcoExecutionContext(topologyDefinition, topologyConfig);
    printTopologyInfo(executionContext);
    ObjectBuilder objectBuilder = new ObjectBuilder();
    objectBuilder.setBuilderUtility(new BuilderUtility());
    org.apache.storm.topology.TopologyBuilder builder =
        ecoBuilder.buildTopologyBuilder(executionContext, objectBuilder);
    ecoSubmitter.submitStormTopology(topologyName, topologyConfig, builder.createTopology());
  } else if ("heron".equals(topologyType)) {
    System.out.println("topology type is Heron");
    org.apache.heron.eco.builder.heron.EcoBuilder ecoBuilder =
        new org.apache.heron.eco.builder.heron.EcoBuilder(
            new org.apache.heron.eco.builder.heron.SpoutBuilder(),
            new BoltBuilder(),
            new org.apache.heron.eco.builder.heron.StreamBuilder(),
            new ComponentBuilder(),
            new ConfigBuilder());
    Config topologyConfig = ecoBuilder.buildConfig(topologyDefinition);
    EcoExecutionContext executionContext =
        new EcoExecutionContext(topologyDefinition, topologyConfig);
    printTopologyInfo(executionContext);
    ObjectBuilder objectBuilder = new ObjectBuilder();
    objectBuilder.setBuilderUtility(new BuilderUtility());
    org.apache.heron.api.topology.TopologyBuilder builder =
        ecoBuilder.buildTopologyBuilder(executionContext, objectBuilder);
    ecoSubmitter.submitHeronTopology(topologyName, topologyConfig, builder.createTopology());
  } else {
    LOG.log(Level.SEVERE, String.format(
        "Unknown topology type \'%s\' for topology %s, not submitted",
        topologyType, topologyName));
  }
}
Submit an ECO topology
20,557
/**
 * Closes the given resource, swallowing (but logging) any exception.
 * Intended for cleanup paths and finally blocks. Null is a no-op.
 */
public static void closeIgnoringExceptions(AutoCloseable closeable) {
  if (closeable == null) {
    return;
  }
  try {
    closeable.close();
  } catch (Exception e) {
    LOG.log(Level.WARNING, String.format("Failed to close %s", closeable), e);
  }
}
Close a closable ignoring any exceptions . This method is used during cleanup or in a finally block .
20,558
/**
 * Builds the topology-level config: id, name, definition file, binary file,
 * and the package type inferred from the binary file name.
 */
private static Config topologyConfigs(String topologyBinaryFile, String topologyDefnFile,
    TopologyAPI.Topology topology) {
  PackageType packageType = PackageType.getPackageType(topologyBinaryFile);
  return Config.newBuilder()
      .put(Key.TOPOLOGY_ID, topology.getId())
      .put(Key.TOPOLOGY_NAME, topology.getName())
      .put(Key.TOPOLOGY_DEFINITION_FILE, topologyDefnFile)
      .put(Key.TOPOLOGY_BINARY_FILE, topologyBinaryFile)
      .put(Key.TOPOLOGY_PACKAGE_TYPE, packageType)
      .build();
}
Load the topology config
20,559
/**
 * Builds the full cluster-mode config by merging, in order, the cluster
 * config, the command-line overrides, and the topology-specific config.
 */
public static Config loadConfig(String cluster, String role, String environ,
    String topologyBinaryFile, String topologyDefnFile, Boolean verbose,
    TopologyAPI.Topology topology) {
  return Config.toClusterMode(Config.newBuilder()
      .putAll(ConfigLoader.loadClusterConfig())
      .putAll(commandLineConfigs(cluster, role, environ, verbose))
      .putAll(topologyConfigs(topologyBinaryFile, topologyDefnFile, topology))
      .build());
}
build the config by expanding all the variables
20,560
/**
 * Builds a Kryo instance from the topology conf: instantiates the configured
 * IKryoFactory, registers the built-in serializers, then the user-declared
 * registrations (optionally skipping unresolvable classes when
 * TOPOLOGY_SKIP_MISSING_KRYO_REGISTRATIONS is set), and finally applies any
 * configured decorators.
 *
 * @param conf topology configuration map
 * @return a fully configured Kryo instance
 */
@SuppressWarnings({"rawtypes", "unchecked"})
public static Kryo getKryo(Map conf) {
  IKryoFactory kryoFactory =
      (IKryoFactory) Utils.newInstance((String) conf.get(Config.TOPOLOGY_KRYO_FACTORY));
  Kryo k = kryoFactory.getKryo(conf);
  // Built-in registrations needed by the runtime
  k.register(byte[].class);
  k.register(ListDelegate.class);
  k.register(ArrayList.class, new ArrayListSerializer());
  k.register(HashMap.class, new HashMapSerializer());
  k.register(HashSet.class, new HashSetSerializer());
  k.register(BigInteger.class, new BigIntegerSerializer());
  k.register(Values.class);
  Map<String, String> registrations = normalizeKryoRegister(conf);
  kryoFactory.preRegister(k, conf);
  boolean skipMissing = (Boolean) conf.get(Config.TOPOLOGY_SKIP_MISSING_KRYO_REGISTRATIONS);
  // User-declared class -> serializer registrations
  for (String klassName : registrations.keySet()) {
    String serializerClassName = registrations.get(klassName);
    try {
      Class klass = Class.forName(klassName);
      Class serializerClass = null;
      if (serializerClassName != null) {
        serializerClass = Class.forName(serializerClassName);
      }
      LOG.info("Doing kryo.register for class " + klass);
      if (serializerClass == null) {
        k.register(klass);
      } else {
        k.register(klass, resolveSerializerInstance(k, klass, serializerClass));
      }
    } catch (ClassNotFoundException e) {
      // Missing classes are either skipped or fatal, per configuration
      if (skipMissing) {
        LOG.info("Could not find serialization or class for " + serializerClassName
            + ". Skipping registration...");
      } else {
        throw new RuntimeException(e);
      }
    }
  }
  kryoFactory.postRegister(k, conf);
  // Optional decorators get a chance to further customize the Kryo instance
  if (conf.get(Config.TOPOLOGY_KRYO_DECORATORS) != null) {
    for (String klassName : (List<String>) conf.get(Config.TOPOLOGY_KRYO_DECORATORS)) {
      try {
        Class klass = Class.forName(klassName);
        IKryoDecorator decorator = (IKryoDecorator) klass.newInstance();
        decorator.decorate(k);
      } catch (ClassNotFoundException e) {
        if (skipMissing) {
          LOG.info("Could not find kryo decorator named " + klassName
              + ". Skipping registration...");
        } else {
          throw new RuntimeException(e);
        }
      } catch (InstantiationException e) {
        throw new RuntimeException(e);
      } catch (IllegalAccessException e) {
        throw new RuntimeException(e);
      }
    }
  }
  kryoFactory.postDecorate(k, conf);
  return k;
}
Get kryo based on conf
20,561
/**
 * Detects instances whose average wait queue (pending buffer) size exceeds
 * the configured limit, and reports one symptom per affected component
 * listing its backed-up instances.
 *
 * @param measurements metrics to analyze
 * @return one symptom per component that has at least one backed-up instance
 */
public Collection<Symptom> detect(Collection<Measurement> measurements) {
  Collection<Symptom> result = new ArrayList<>();
  MeasurementsTable waitQueueMetrics =
      MeasurementsTable.of(measurements).type(METRIC_WAIT_Q_SIZE.text());
  for (String component : waitQueueMetrics.uniqueComponents()) {
    Set<String> addresses = new HashSet<>();
    MeasurementsTable instanceMetrics = waitQueueMetrics.component(component);
    for (String instance : instanceMetrics.uniqueInstances()) {
      double avgWaitQSize = instanceMetrics.instance(instance).mean();
      if (avgWaitQSize > sizeLimit) {
        // Fixed garbled log message: was "...for instance%s, smallest queue is + %f"
        LOG.info(String.format(
            "Detected large wait queue for instance %s, average queue size is %f",
            instance, avgWaitQSize));
        addresses.add(instance);
      }
    }
    if (addresses.size() > 0) {
      result.add(new Symptom(SYMPTOM_LARGE_WAIT_Q.text(), context.checkpoint(), addresses));
    }
  }
  return result;
}
Detects all components having a large pending buffer or wait queue
20,562
/**
 * Writes the topology definition protobuf to a definition file under the
 * temp directory given by the CMD_TOPOLOGY_DEFN_TEMPDIR cmdline option.
 *
 * @throws TopologySubmissionException if the directory option is missing or
 *         the file cannot be written
 */
private static void submitTopologyToFile(TopologyAPI.Topology fTopology,
    Map<String, String> heronCmdOptions) {
  String dirName = heronCmdOptions.get(CMD_TOPOLOGY_DEFN_TEMPDIR);
  if (dirName == null || dirName.isEmpty()) {
    throw new TopologySubmissionException(
        "Topology definition temp directory not specified. "
            + "Please set cmdline option: " + CMD_TOPOLOGY_DEFN_TEMPDIR);
  }
  String fileName =
      Paths.get(dirName, fTopology.getName() + TOPOLOGY_DEFINITION_SUFFIX).toString();
  // try-with-resources guarantees the streams are closed even on write failure
  try (FileOutputStream fos = new FileOutputStream(new File(fileName));
       BufferedOutputStream bos = new BufferedOutputStream(fos)) {
    byte[] topEncoding = fTopology.toByteArray();
    bos.write(topEncoding);
  } catch (IOException e) {
    throw new TopologySubmissionException(
        "Error writing topology definition to temp directory: " + dirName, e);
  }
}
Submits a topology to definition file
20,563
/**
 * Creates the scheduler client. When the scheduler runs as a service, its
 * location is looked up in the state manager and an HttpServiceSchedulerClient
 * is returned; otherwise the scheduler is invoked in-process via a
 * LibrarySchedulerClient.
 *
 * @throws SchedulerException if the service location cannot be found
 */
public ISchedulerClient getSchedulerClient() throws SchedulerException {
  LOG.fine("Creating scheduler client");
  ISchedulerClient schedulerClient;
  if (Context.schedulerService(config)) {
    SchedulerStateManagerAdaptor statemgr = Runtime.schedulerStateManagerAdaptor(runtime);
    Scheduler.SchedulerLocation schedulerLocation =
        statemgr.getSchedulerLocation(Runtime.topologyName(runtime));
    if (schedulerLocation == null) {
      throw new SchedulerException("Failed to get scheduler location from state manager");
    }
    LOG.log(Level.FINE, "Scheduler is listening on location: {0} ",
        schedulerLocation.toString());
    schedulerClient =
        new HttpServiceSchedulerClient(config, runtime, schedulerLocation.getHttpEndpoint());
  } else {
    final IScheduler scheduler = LauncherUtils.getInstance().getSchedulerInstance(config, runtime);
    LOG.fine("Invoke scheduler as a library");
    schedulerClient = new LibrarySchedulerClient(config, runtime, scheduler);
  }
  return schedulerClient;
}
Implementation of getSchedulerClient - Used to create objects Currently it creates either HttpServiceSchedulerClient or LibrarySchedulerClient
20,564
/**
 * Sets the (non-blank) name of this Streamlet and returns it for chaining.
 */
public Streamlet<R> setName(String sName) {
  checkNotBlank(sName, "Streamlet name cannot be null/blank");
  name = sName;
  return this;
}
Sets the name of the Streamlet .
20,565
/**
 * Assigns a default type-based name if none was set, then enforces that the
 * resulting name is unique across the topology's stages.
 */
protected void setDefaultNameIfNone(StreamletNamePrefix prefix, Set<String> stageNames) {
  if (getName() == null) {
    setName(defaultNameCalculator(prefix, stageNames));
  }
  final String chosen = getName();
  if (stageNames.contains(chosen)) {
    throw new RuntimeException(
        String.format("The stage name %s is used multiple times in the same topology", chosen));
  }
  stageNames.add(chosen);
}
Sets a default unique name to the Streamlet by type if it is not set . Otherwise just checks its uniqueness .
20,566
/**
 * Returns the stream ids available on this Streamlet. Most Streamlets carry
 * exactly one internal stream, so the set has a single element.
 */
protected Set<String> getAvailableStreamIds() {
  Set<String> ids = new HashSet<>();
  ids.add(getStreamId());
  return ids;
}
Get the available stream ids in the Streamlet . For most Streamlets there is only one internal stream id therefore the function returns a set of one single stream id .
20,567
/**
 * Returns a new Streamlet obtained by applying mapFn to every element of
 * this Streamlet.
 */
public <T> Streamlet<T> map(SerializableFunction<R, ? extends T> mapFn) {
  checkNotNull(mapFn, "mapFn cannot be null");
  MapStreamlet<R, T> child = new MapStreamlet<>(this, mapFn);
  addChild(child);
  return child;
}
Return a new Streamlet by applying mapFn to each element of this Streamlet
20,568
/**
 * Returns a new Streamlet obtained by applying flatMapFn to every element
 * of this Streamlet and flattening the results.
 */
public <T> Streamlet<T> flatMap(
    SerializableFunction<R, ? extends Iterable<? extends T>> flatMapFn) {
  checkNotNull(flatMapFn, "flatMapFn cannot be null");
  FlatMapStreamlet<R, T> child = new FlatMapStreamlet<>(this, flatMapFn);
  addChild(child);
  return child;
}
Return a new Streamlet by applying flatMapFn to each element of this Streamlet and flattening the result
20,569
/**
 * Returns a new Streamlet containing only the elements of this Streamlet
 * that satisfy filterFn.
 */
public Streamlet<R> filter(SerializablePredicate<R> filterFn) {
  checkNotNull(filterFn, "filterFn cannot be null");
  FilterStreamlet<R> child = new FilterStreamlet<>(this, filterFn);
  addChild(child);
  return child;
}
Return a new Streamlet by applying the filterFn on each element of this streamlet and including only those elements that satisfy the filterFn
20,570
/**
 * Repartitions this streamlet with a custom partitioner: for each tuple,
 * partitionFn returns the list of target partition indices.
 *
 * @param numPartitions number of partitions of the resulting streamlet
 * @param partitionFn maps (tuple, numPartitions) to the target partition indices
 */
public Streamlet<R> repartition(int numPartitions,
    SerializableBiFunction<R, Integer, List<Integer>> partitionFn) {
  checkNotNull(partitionFn, "partitionFn cannot be null");
  RemapStreamlet<R> retval = new RemapStreamlet<>(this, partitionFn);
  retval.setNumPartitions(numPartitions);
  addChild(retval);
  return retval;
}
A more generalized version of repartition where a user can determine which partitions any particular tuple should go to
20,571
/**
 * Windows this streamlet by windowCfg, groups tuples by keyExtractor, maps
 * each tuple to a value with valueExtractor, and reduces the values of each
 * (key, window) group with reduceFn.
 *
 * @return a key-value streamlet of (KeyedWindow, reduced value)
 */
public <K, T> KVStreamlet<KeyedWindow<K>, T> reduceByKeyAndWindow(
    SerializableFunction<R, K> keyExtractor,
    SerializableFunction<R, T> valueExtractor,
    WindowConfig windowCfg,
    SerializableBinaryOperator<T> reduceFn) {
  checkNotNull(keyExtractor, "keyExtractor cannot be null");
  checkNotNull(valueExtractor, "valueExtractor cannot be null");
  checkNotNull(windowCfg, "windowCfg cannot be null");
  checkNotNull(reduceFn, "reduceFn cannot be null");
  ReduceByKeyAndWindowStreamlet<R, K, T> retval =
      new ReduceByKeyAndWindowStreamlet<>(this, keyExtractor, valueExtractor,
          windowCfg, reduceFn);
  addChild(retval);
  return new KVStreamletShadow<KeyedWindow<K>, T>(retval);
}
Return a new Streamlet accumulating tuples of this streamlet over a Window defined by windowCfg and applying reduceFn on those tuples .
20,572
/**
 * Windows this streamlet by windowCfg, groups tuples by keyExtractor, and
 * folds each (key, window) group with reduceFn, starting each window from
 * the given identity value.
 *
 * @return a key-value streamlet of (KeyedWindow, accumulated value)
 */
public <K, T> KVStreamlet<KeyedWindow<K>, T> reduceByKeyAndWindow(
    SerializableFunction<R, K> keyExtractor,
    WindowConfig windowCfg,
    T identity,
    SerializableBiFunction<T, R, ? extends T> reduceFn) {
  checkNotNull(keyExtractor, "keyExtractor cannot be null");
  checkNotNull(windowCfg, "windowCfg cannot be null");
  checkNotNull(identity, "identity cannot be null");
  checkNotNull(reduceFn, "reduceFn cannot be null");
  GeneralReduceByKeyAndWindowStreamlet<R, K, T> retval =
      new GeneralReduceByKeyAndWindowStreamlet<>(this, keyExtractor, windowCfg,
          identity, reduceFn);
  addChild(retval);
  return new KVStreamletShadow<KeyedWindow<K>, T>(retval);
}
Return a new Streamlet accumulating tuples of this streamlet over a Window defined by windowCfg and applying reduceFn on those tuples . For each window the value identity is used as a initial value . All the matching tuples are reduced using reduceFn starting from this initial value .
20,573
/**
 * Applies the given consumer to every element of this streamlet (terminal
 * operation: no streamlet is returned).
 */
public void consume(SerializableConsumer<R> consumer) {
  checkNotNull(consumer, "consumer cannot be null");
  addChild(new ConsumerStreamlet<>(this, consumer));
}
Applies the consumer function for every element of this streamlet
20,574
/**
 * Sends every element of this streamlet to the given sink (terminal
 * operation: no streamlet is returned).
 */
public void toSink(Sink<R> sink) {
  checkNotNull(sink, "sink cannot be null");
  addChild(new SinkStreamlet<>(this, sink));
}
Uses the sink to consume every element of this streamlet
20,575
/**
 * Splits this streamlet into multiple named streams; each tuple is routed to
 * the stream(s) whose predicate it satisfies. Validates that at least one
 * entry exists and that no stream id is blank.
 *
 * @param splitFns map of stream id to the predicate selecting tuples for it
 */
public Streamlet<R> split(Map<String, SerializablePredicate<R>> splitFns) {
  require(splitFns.size() > 0, "At least one entry is required");
  require(splitFns.keySet().stream().allMatch(stream -> StringUtils.isNotBlank(stream)),
      "Stream Id can not be blank");
  SplitStreamlet<R> splitStreamlet = new SplitStreamlet<R>(this, splitFns);
  addChild(splitStreamlet);
  return splitStreamlet;
}
Returns multiple streams by splitting incoming stream .
20,576
/**
 * Rotates the buckets: the oldest (last) map is discarded and a fresh empty
 * map is placed at the front of the list.
 */
public void rotate() {
  // The evicted map is intentionally dropped (was stored in an unused local)
  buckets.removeLast();
  buckets.addFirst(new HashMap<Long, Long>());
}
Rotates the buckets: discards the oldest map and instantiates a new empty map at the front of the list.
20,577
/**
 * XORs {@code value} into the stored value for {@code key}, searching every
 * bucket for the key.
 *
 * @return true if the key was found and the XOR result became zero;
 *         false if the result is non-zero or the key is not in any bucket
 */
public boolean anchor(long key, long value) {
  for (Map<Long, Long> m : buckets) {
    if (m.containsKey(key)) {
      long currentValue = m.get(key);
      long newValue = currentValue ^ value;
      m.put(key, newValue);
      // Zero means every anchored value has been XORed back out
      return newValue == 0;
    }
  }
  return false;
}
Returns true if XORing the given value into the stored value for the key turns it to zero; false otherwise.
20,578
/**
 * Removes the key from whichever bucket currently holds it.
 *
 * @return true if the key was present in some bucket, false otherwise
 */
public boolean remove(long key) {
  for (Map<Long, Long> bucket : buckets) {
    if (bucket.remove(key) != null) {
      return true;
    }
  }
  return false;
}
Returns true if the key was removed from some map; false otherwise.
20,579
/**
 * Starts an HTTP server on the given port that serves the generated metrics
 * response at the given context path.
 *
 * @param path context path the metrics are served on
 * @param port port to bind to
 * @throws RuntimeException if the server cannot be created on the port
 */
protected void startHttpServer(String path, int port) {
  try {
    httpServer = HttpServer.create(new InetSocketAddress(port), 0);
    httpServer.createContext(path, httpExchange -> {
      // Each request serves a freshly generated snapshot of the metrics
      byte[] response = generateResponse();
      httpExchange.sendResponseHeaders(HTTP_STATUS_OK, response.length);
      OutputStream os = httpExchange.getResponseBody();
      os.write(response);
      os.close();
      LOG.log(Level.INFO, "Received metrics request.");
    });
    LOG.info("Starting web sink server on port: " + port);
    httpServer.start();
  } catch (IOException e) {
    throw new RuntimeException("Failed to create Http server on port " + port, e);
  }
}
Start a http server on supplied port that will serve the metrics as json on the specified path .
20,580
/**
 * Convenience factory for the metrics cache: entries are bounded by
 * cacheMaxSize and expire cacheTtlSeconds after write. The injected ticker
 * drives the expiry clock.
 */
<K, V> Cache<K, V> createCache() {
  return CacheBuilder.newBuilder()
      .maximumSize(cacheMaxSize)
      .expireAfterWrite(cacheTtlSeconds, TimeUnit.SECONDS)
      .ticker(cacheTicker)
      .build();
}
a convenience method for creating a metrics cache
20,581
/**
 * Detects components that cannot keep up with their input load, i.e. whose
 * wait queue (pending buffer) keeps growing: the max slope of the wait
 * queue size trend must exceed rateLimit.
 *
 * @param measurements metrics to analyze
 * @return one symptom per component with a growing wait queue
 */
public Collection<Symptom> detect(Collection<Measurement> measurements) {
  Collection<Symptom> result = new ArrayList<>();
  MeasurementsTable waitQueueMetrics =
      MeasurementsTable.of(measurements).type(METRIC_WAIT_Q_SIZE.text());
  for (String component : waitQueueMetrics.uniqueComponents()) {
    double maxSlope = computeWaitQueueSizeTrend(waitQueueMetrics.component(component));
    if (maxSlope > rateLimit) {
      LOG.info(String.format("Detected growing wait queues for %s, max rate %f",
          component, maxSlope));
      Collection<String> addresses = Collections.singletonList(component);
      result.add(new Symptom(SYMPTOM_GROWING_WAIT_Q.text(), context.checkpoint(), addresses));
    }
  }
  return result;
}
Detects all components unable to keep up with input load hence having a growing pending buffer or wait queue
20,582
/**
 * Prefixes each metric name, converts its value to double, and returns the
 * results as a map. Unparseable values are logged and skipped.
 */
static Map<String, Double> processMetrics(String prefix, Iterable<MetricsInfo> metrics) {
  Map<String, Double> converted = new HashMap<>();
  for (MetricsInfo info : metrics) {
    try {
      converted.put(prefix + info.getName(), Double.valueOf(info.getValue()));
    } catch (NumberFormatException ne) {
      LOG.log(Level.SEVERE,
          "Could not parse metric, Name: " + info.getName() + " Value: " + info.getValue(), ne);
    }
  }
  return converted;
}
Helper to prefix metric names convert metric value to double and return as map
20,583
/**
 * Checks a wire request against the known-fraudulent customer list.
 *
 * @return true if the request is accepted, false if it is rejected as fraudulent
 */
private static boolean fraudDetect(WireRequest request) {
  final boolean fraudulent = FRAUDULENT_CUSTOMERS.contains(request.getCustomerId());
  if (fraudulent) {
    LOG.warning(String.format("Rejected fraudulent customer %s", request.getCustomerId()));
  } else {
    LOG.info(String.format("Accepted request for $%d from customer %s",
        request.getAmount(), request.getCustomerId()));
  }
  return !fraudulent;
}
Each request is checked to make sure that requests from untrustworthy customers are rejected .
20,584
/**
 * Assigns a new instance plan to this container.
 *
 * @throws PackingException if an identical instance plan is already present
 */
void add(PackingPlan.InstancePlan instancePlan) {
  boolean duplicate = this.instances.contains(instancePlan);
  if (duplicate) {
    throw new PackingException(
        String.format("Instance %s already exists in container %s", instancePlan, toString()));
  }
  this.instances.add(instancePlan);
}
Update the resources currently used by the container when a new instance with specific resource requirements has been assigned to the container .
20,585
/**
 * Removes any one instance of the given component from this container.
 *
 * @return the removed instance plan, or absent when the component has no instance here
 */
Optional<PackingPlan.InstancePlan> removeAnyInstanceOfComponent(String component) {
  Optional<PackingPlan.InstancePlan> found = getAnyInstanceOfComponent(component);
  if (!found.isPresent()) {
    return Optional.absent();
  }
  this.instances.remove(found.get());
  return found;
}
Remove an instance of a particular component from a container and update its corresponding resources .
20,586
/**
 * Returns any instance of the given component assigned to this container,
 * or absent when there is none.
 */
private Optional<PackingPlan.InstancePlan> getAnyInstanceOfComponent(String componentName) {
  for (PackingPlan.InstancePlan candidate : this.instances) {
    boolean matches = candidate.getComponentName().equals(componentName);
    if (matches) {
      return Optional.of(candidate);
    }
  }
  return Optional.absent();
}
Find whether any instance of a particular component is assigned to the container
20,587
/**
 * Returns the instance of componentName with a matching componentIndex,
 * or absent when no such instance exists in this container.
 */
Optional<PackingPlan.InstancePlan> getInstance(String componentName, int componentIndex) {
  for (PackingPlan.InstancePlan candidate : this.instances) {
    // Keep equals() invoked on the stored name so a null argument compares false.
    boolean sameComponent = candidate.getComponentName().equals(componentName);
    if (sameComponent && candidate.getComponentIndex() == componentIndex) {
      return Optional.of(candidate);
    }
  }
  return Optional.absent();
}
Return the instance of componentName with a matching componentIndex if it exists
20,588
/**
 * Returns the instance with the given taskId, or absent when no instance
 * in this container carries that id.
 */
Optional<PackingPlan.InstancePlan> getInstance(int taskId) {
  for (PackingPlan.InstancePlan candidate : this.instances) {
    boolean idMatches = candidate.getTaskId() == taskId;
    if (idMatches) {
      return Optional.of(candidate);
    }
  }
  return Optional.absent();
}
Return the instance with the given taskId if it exists.
20,589
/**
 * Computes the resources used by this container: the sum of every instance's
 * resource allocation plus the container padding.
 */
public Resource getTotalUsedResources() {
  // Explicit left fold — same accumulation order as a sequential stream reduce.
  Resource used = Resource.EMPTY_RESOURCE;
  for (PackingPlan.InstancePlan plan : getInstances()) {
    used = used.plus(plan.getResource());
  }
  return used.plus(getPadding());
}
Computes the used resources of the container by taking into account the resources allocated for each instance .
20,590
/**
 * Fetches the total wait-queue size for every bolt instance, summed from the
 * stream manager's measurements over the configured duration.
 */
public Collection<Measurement> fetch() {
  Collection<Measurement> measurements = new ArrayList<>();
  Instant now = context.checkpoint();
  List<String> bolts = physicalPlanProvider.getBoltNames();
  Duration duration = getDuration();
  for (String component : bolts) {
    for (String instance : packingPlanProvider.getBoltInstanceNames(component)) {
      String metric = getMetricName() + instance + MetricName.METRIC_WAIT_Q_SIZE_SUFFIX;
      Collection<Measurement> stmgrResult =
          metricsProvider.getMeasurements(now, duration, metric, COMPONENT_STMGR);
      if (stmgrResult.isEmpty()) {
        continue;
      }
      MeasurementsTable table = MeasurementsTable.of(stmgrResult).component(COMPONENT_STMGR);
      if (table.size() == 0) {
        continue;
      }
      double totalSize = table.type(metric).sum();
      measurements.add(new Measurement(component, instance, getMetricName(), now, totalSize));
    }
  }
  return measurements;
}
The pending buffer (wait queue) size as provided by the tracker.
20,591
/**
 * Loads raw configurations from files under heronHome and configPath, layering
 * each config file over the defaults in order, with the release file and then
 * the override file applied last.
 */
public static Config loadConfig(String heronHome, String configPath,
    String releaseFile, String overrideConfigFile) {
  Config defaults = loadDefaults(heronHome, configPath);
  Config localMode = Config.toLocalMode(defaults);
  Config.Builder builder = Config.newBuilder().putAll(defaults);
  // Later putAll calls override earlier keys; order must be preserved.
  builder.putAll(loadConfig(Context.clusterFile(localMode)));
  builder.putAll(loadConfig(Context.clientFile(localMode)));
  builder.putAll(loadConfig(Context.healthmgrFile(localMode)));
  builder.putAll(loadConfig(Context.packingFile(localMode)));
  builder.putAll(loadConfig(Context.schedulerFile(localMode)));
  builder.putAll(loadConfig(Context.stateManagerFile(localMode)));
  builder.putAll(loadConfig(Context.uploaderFile(localMode)));
  builder.putAll(loadConfig(Context.downloaderFile(localMode)));
  builder.putAll(loadConfig(Context.statefulConfigFile(localMode)));
  builder.putAll(loadConfig(releaseFile));
  builder.putAll(loadConfig(overrideConfigFile));
  return builder.build();
}
Loads raw configurations from files under the heronHome and configPath . The returned config must be converted to either local or cluster mode to trigger pattern substitution of wildcards tokens .
20,592
/**
 * Loads raw configurations using the default heronHome and configPath on the
 * cluster, layering each config file over the defaults in order, with the
 * override file applied last.
 */
public static Config loadClusterConfig() {
  Config defaults = loadDefaults(
      Key.HERON_CLUSTER_HOME.getDefaultString(), Key.HERON_CLUSTER_CONF.getDefaultString());
  Config clusterMode = Config.toClusterMode(defaults);
  Config.Builder builder = Config.newBuilder().putAll(defaults);
  // Later putAll calls override earlier keys; order must be preserved.
  builder.putAll(loadConfig(Context.packingFile(clusterMode)));
  builder.putAll(loadConfig(Context.healthmgrFile(clusterMode)));
  builder.putAll(loadConfig(Context.schedulerFile(clusterMode)));
  builder.putAll(loadConfig(Context.stateManagerFile(clusterMode)));
  builder.putAll(loadConfig(Context.uploaderFile(clusterMode)));
  builder.putAll(loadConfig(Context.downloaderFile(clusterMode)));
  builder.putAll(loadConfig(Context.statefulConfigFile(clusterMode)));
  builder.putAll(loadConfig(Context.overrideFile(clusterMode)));
  return builder.build();
}
Loads raw configurations using the default configured heronHome and configPath on the cluster . The returned config must be converted to either local or cluster mode to trigger pattern substitution of wildcards tokens .
20,593
/**
 * Initializes state that is partitioned into the windowing framework's internal
 * state and the user-defined state, seeding an empty user state on first run.
 */
public void initState(State state) {
  this.state = state;
  super.initState(this.state);
  boolean hasUserState = this.state.containsKey(USER_STATE);
  if (!hasUserState) {
    this.state.put(USER_STATE, new HashMapState<K, V>());
  }
  // Unchecked cast: USER_STATE is only ever populated with a State<K, V> above.
  State<K, V> userState = (State<K, V>) this.state.get(USER_STATE);
  this.statefulWindowedBolt.initState(userState);
}
Initialize state that is partitioned into window-internal state and user-defined state.
20,594
/**
 * Records a thrown exception from a log record into the exception repository,
 * capping the number of stored exceptions and counting any drops. The stored
 * exceptions are flushed to the metrics manager later (during getValueAndReset).
 */
public void publish(LogRecord record) {
  Throwable thrown = record.getThrown();
  if (thrown == null) {
    return;
  }
  synchronized (ExceptionRepositoryAsMetrics.INSTANCE) {
    // Over the limit: count the drop instead of storing another exception.
    if (ExceptionRepositoryAsMetrics.INSTANCE.getExceptionsCount() >= exceptionsLimit) {
      droppedExceptionsCount.incr();
      return;
    }
    StringWriter traceWriter = new StringWriter();
    thrown.printStackTrace(new PrintWriter(traceWriter, true));
    String trace = traceWriter.toString();
    // The stack trace string keys the repository entry.
    Metrics.ExceptionData.Builder data =
        ExceptionRepositoryAsMetrics.INSTANCE.getExceptionInfo(trace);
    data.setCount(data.getCount() + 1);
    data.setLasttime(new Date().toString());
    data.setStacktrace(trace);
    String message = record.getMessage();
    data.setLogging(message == null ? "" : message);
  }
}
Will flush the exception to the metrics manager during the getValueAndReset call.
20,595
/**
 * Sets the execution state for the given topology by delegating to the underlying
 * state manager and waiting for the asynchronous result via awaitResult.
 */
public Boolean setExecutionState(ExecutionEnvironment.ExecutionState executionState, String topologyName) {
  return awaitResult(delegate.setExecutionState(executionState, topologyName));
}
Set the execution state for the given topology
20,596
/**
 * Sets the topology definition for the given topology by delegating to the underlying
 * state manager and waiting for the asynchronous result via awaitResult.
 */
public Boolean setTopology(TopologyAPI.Topology topology, String topologyName) {
  return awaitResult(delegate.setTopology(topology, topologyName));
}
Set the topology definition for the given topology
20,597
/**
 * Updates the topology definition for the given topology, creating it when it
 * does not already exist. An existing definition is deleted before the new one
 * is written.
 */
public Boolean updateTopology(TopologyAPI.Topology topology, String topologyName) {
  boolean exists = getTopology(topologyName) != null;
  if (exists) {
    deleteTopology(topologyName);
  }
  return setTopology(topology, topologyName);
}
Update the topology definition for the given topology. If the topology doesn't exist, create it; if it does, update it.
20,598
/**
 * Sets the scheduler location for the given topology by delegating to the underlying
 * state manager and waiting for the asynchronous result via awaitResult.
 */
public Boolean setSchedulerLocation(Scheduler.SchedulerLocation location, String topologyName) {
  return awaitResult(delegate.setSchedulerLocation(location, topologyName));
}
Set the scheduler location for the given topology
20,599
/**
 * Sets the packing plan for the given topology by delegating to the underlying
 * state manager and waiting for the asynchronous result via awaitResult.
 */
public Boolean setPackingPlan(PackingPlans.PackingPlan packingPlan, String topologyName) {
  return awaitResult(delegate.setPackingPlan(packingPlan, topologyName));
}
Set the packing plan for the given topology