_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3
values | text stringlengths 31 13.1k | language stringclasses 1
value | meta_information dict |
|---|---|---|---|---|---|
q253200 | get_version_number | validation | def get_version_number(zipped_pex=False):
"""Print version from release.yaml
:param zipped_pex: True if the PEX file is built with flag `zip_safe=False'.
"""
if zipped_pex:
release_file = get_zipped_heron_release_file()
else:
release_file = get_heron_release_file()
with open(release_file) as releas... | python | {
"resource": ""
} |
q253201 | ExceptionSummaryHandler.getComponentExceptionSummary | validation | def getComponentExceptionSummary(self, tmaster, component_name, instances=[], callback=None):
"""
Get the summary of exceptions for component_name and list of instances.
Empty instance list will fetch all exceptions.
"""
if not tmaster or not tmaster.host or not tmaster.stats_port:
return
... | python | {
"resource": ""
} |
q253202 | Topology.register_watch | validation | def register_watch(self, callback):
"""
Returns the UUID with which the watch is
registered. This UUID can be used to unregister
the watch.
Returns None if watch could not be registered.
The argument 'callback' must be a function that takes
exactly one argument, the topology on which
th... | python | {
"resource": ""
} |
q253203 | Topology.unregister_watch | validation | def unregister_watch(self, uid):
"""
Unregister the watch with the given UUID.
"""
# Do not raise an error if UUID is
# not present in the watches.
| python | {
"resource": ""
} |
q253204 | Topology.trigger_watches | validation | def trigger_watches(self):
"""
Call all the callbacks.
If any callback raises an Exception,
unregister the corresponding watch.
"""
to_remove = []
for uid, callback in self.watches.items():
try:
callback(self)
except Exception as e:
Log.error("Caught | python | {
"resource": ""
} |
q253205 | Topology.set_physical_plan | validation | def set_physical_plan(self, physical_plan):
""" set physical plan """
if not physical_plan:
self.physical_plan = None
self.id = None
else:
| python | {
"resource": ""
} |
q253206 | Topology.set_packing_plan | validation | def set_packing_plan(self, packing_plan):
""" set packing plan """
if not packing_plan:
self.packing_plan = None
self.id = None
else:
| python | {
"resource": ""
} |
q253207 | Topology.set_execution_state | validation | def set_execution_state(self, execution_state):
""" set execution state """
if not execution_state:
self.execution_state = None
self.cluster = None
self.environ = None
else:
self.execution_state = execution_state
| python | {
"resource": ""
} |
q253208 | Topology.num_instances | validation | def num_instances(self):
"""
Number of spouts + bolts
"""
num = 0
# Get all the components
components = self.spouts() + self.bolts()
# Get instances for each | python | {
"resource": ""
} |
q253209 | Topology.get_machines | validation | def get_machines(self):
"""
Get all the machines that this topology is running on.
These are the hosts of all the stmgrs.
"""
if self.physical_plan:
| python | {
"resource": ""
} |
q253210 | Topology.get_status | validation | def get_status(self):
"""
Get the current state of this topology.
The state values are from the topology.proto
RUNNING = 1, PAUSED = 2, KILLED = 3
if the state is None "Unknown" is returned.
| python | {
"resource": ""
} |
q253211 | convert_pb_kvs | validation | def convert_pb_kvs(kvs, include_non_primitives=True):
"""
converts pb kvs to dict
"""
config = {}
for kv in kvs:
if kv.value:
config[kv.key] = kv.value
elif kv.serialized_value:
# add serialized_value support for python values (fixme)
# is this a serialized java object
if topo... | python | {
"resource": ""
} |
q253212 | Tracker.synch_topologies | validation | def synch_topologies(self):
"""
Sync the topologies with the statemgrs.
"""
self.state_managers = statemanagerfactory.get_all_state_managers(self.config.statemgr_config)
try:
for state_manager in self.state_managers:
state_manager.start()
except Exception as ex:
Log.error("Fo... | python | {
"resource": ""
} |
q253213 | Tracker.getTopologyByClusterRoleEnvironAndName | validation | def getTopologyByClusterRoleEnvironAndName(self, cluster, role, environ, topologyName):
"""
Find and return the topology given its cluster, environ, topology name, and
an optional role.
Raises exception if topology is not found, or more than one are found.
"""
topologies = list(filter(lambda t: ... | python | {
"resource": ""
} |
q253214 | Tracker.getTopologiesForStateLocation | validation | def getTopologiesForStateLocation(self, name):
"""
Returns all the topologies for | python | {
"resource": ""
} |
q253215 | Tracker.addNewTopology | validation | def addNewTopology(self, state_manager, topologyName):
"""
Adds a topology in the local cache, and sets a watch
on any changes on the topology.
"""
topology = Topology(topologyName, state_manager.name)
Log.info("Adding new topology: %s, state_manager: %s",
topologyName, state_manage... | python | {
"resource": ""
} |
q253216 | Tracker.removeTopology | validation | def removeTopology(self, topology_name, state_manager_name):
"""
Removes the topology from the local cache.
"""
topologies = []
for top in self.topologies:
if (top.name == topology_name and
top.state_manager_name == state_manager_name):
# Remove topologyInfo
if (topol... | python | {
"resource": ""
} |
q253217 | Tracker.extract_execution_state | validation | def extract_execution_state(self, topology):
"""
Returns the representation of execution state that will
be returned from Tracker.
"""
execution_state = topology.execution_state
executionState = {
"cluster": execution_state.cluster,
"environ": execution_state.environ,
"ro... | python | {
"resource": ""
} |
q253218 | Tracker.extract_scheduler_location | validation | def extract_scheduler_location(self, topology):
"""
Returns the representation of scheduler location that will
be returned from Tracker.
"""
schedulerLocation = {
"name": None,
"http_endpoint": None,
"job_page_link": None,
}
if topology.scheduler_location:
sche... | python | {
"resource": ""
} |
q253219 | Tracker.extract_tmaster | validation | def extract_tmaster(self, topology):
"""
Returns the representation of tmaster that will
be returned from Tracker.
"""
tmasterLocation = {
"name": None,
"id": None,
"host": None,
"controller_port": None,
"master_port": None,
"stats_port": None,
}
... | python | {
"resource": ""
} |
q253220 | Tracker.extract_logical_plan | validation | def extract_logical_plan(self, topology):
"""
Returns the representation of logical plan that will
be returned from Tracker.
"""
logicalPlan = {
"spouts": {},
"bolts": {},
}
# Add spouts.
for spout in topology.spouts():
spoutName = spout.comp.name
spoutType =... | python | {
"resource": ""
} |
q253221 | Tracker.extract_packing_plan | validation | def extract_packing_plan(self, topology):
"""
Returns the representation of packing plan that will
be returned from Tracker.
"""
packingPlan = {
"id": "",
"container_plans": []
}
if not topology.packing_plan:
return packingPlan
container_plans = topology.packing_p... | python | {
"resource": ""
} |
q253222 | Tracker.setTopologyInfo | validation | def setTopologyInfo(self, topology):
"""
Extracts info from the stored proto states and
convert it into representation that is exposed using
the API.
This method is called on any change for the topology.
For example, when a container moves and its host or some
port changes. All the informati... | python | {
"resource": ""
} |
q253223 | Tracker.getTopologyInfo | validation | def getTopologyInfo(self, topologyName, cluster, role, environ):
"""
Returns the JSON representation of a topology
by its name, cluster, environ, and an optional role parameter.
Raises exception if no such topology is found.
"""
# Iterate over the values to filter the desired topology.
for (... | python | {
"resource": ""
} |
q253224 | Config.load_configs | validation | def load_configs(self):
"""load config files"""
self.statemgr_config.set_state_locations(self.configs[STATEMGRS_KEY])
| python | {
"resource": ""
} |
q253225 | Config.validate_extra_link | validation | def validate_extra_link(self, extra_link):
"""validate extra link"""
if EXTRA_LINK_NAME_KEY not in extra_link or EXTRA_LINK_FORMATTER_KEY not in extra_link:
raise Exception("Invalid extra.links format. " +
"Extra link | python | {
"resource": ""
} |
q253226 | Config.validated_formatter | validation | def validated_formatter(self, url_format):
"""validate visualization url format"""
# We try to create a string by substituting all known
# parameters. If an unknown parameter is present, an error
# will be thrown
valid_parameters = {
"${CLUSTER}": "cluster",
"${ENVIRON}": "environ",
... | python | {
"resource": ""
} |
q253227 | SpoutInstance.emit | validation | def emit(self, tup, tup_id=None, stream=Stream.DEFAULT_STREAM_ID,
direct_task=None, need_task_ids=False):
"""Emits a new tuple from this Spout
It is compatible with StreamParse API.
:type tup: list or tuple
:param tup: the new output Tuple to send from this spout,
should con... | python | {
"resource": ""
} |
q253228 | SpoutInstance._is_continue_to_work | validation | def _is_continue_to_work(self):
"""Checks whether we still need to do more work
When the topology state is RUNNING:
1. if the out_queue is not full and ack is not enabled, we could wake up next time to
produce more tuples and push to the out_queue
2. if the out_queue is not full but the acking i... | python | {
"resource": ""
} |
q253229 | to_table | validation | def to_table(components, topo_info):
""" normalize raw logical plan info to table """
inputs, outputs = defaultdict(list), defaultdict(list)
for ctype, component in components.items():
if ctype == 'bolts':
for component_name, component_info in component.items():
for input_stream in component_inf... | python | {
"resource": ""
} |
q253230 | filter_bolts | validation | def filter_bolts(table, header):
""" filter to keep bolts """
bolts_info = []
for row in table:
if row[0] == 'bolt':
| python | {
"resource": ""
} |
q253231 | filter_spouts | validation | def filter_spouts(table, header):
""" filter to keep spouts """
spouts_info = []
for row in table:
if row[0] == 'spout':
| python | {
"resource": ""
} |
q253232 | ZkStateManager._get_topologies_with_watch | validation | def _get_topologies_with_watch(self, callback, isWatching):
"""
Helper function to get topologies with
a callback. The future watch is placed
only if isWatching is True.
"""
path = self.get_topologies_path()
if isWatching:
LOG.info("Adding children watch for path: " + path)
# pyli... | python | {
"resource": ""
} |
q253233 | ZkStateManager._get_packing_plan_with_watch | validation | def _get_packing_plan_with_watch(self, topologyName, callback, isWatching):
"""
Helper function to get packing_plan with
a callback. The future watch is placed
only if isWatching is True.
"""
path = self.get_packing_plan_path(topologyName)
if isWatching:
LOG.info("Adding data watch for... | python | {
"resource": ""
} |
q253234 | ZkStateManager.get_pplan | validation | def get_pplan(self, topologyName, callback=None):
""" get physical plan """
isWatching = False
# Temp dict used to return result
# if callback is not provided.
ret = {
"result": None
}
if callback:
| python | {
"resource": ""
} |
q253235 | ZkStateManager.create_pplan | validation | def create_pplan(self, topologyName, pplan):
""" create physical plan """
if not pplan or not pplan.IsInitialized():
raise_(StateException("Physical Plan protobuf not init properly",
StateException.EX_TYPE_PROTOBUF_ERROR), sys.exc_info()[2])
path = self.get_pplan_path(topo... | python | {
"resource": ""
} |
q253236 | ZkStateManager.get_execution_state | validation | def get_execution_state(self, topologyName, callback=None):
""" get execution state """
isWatching = False
# Temp dict used to return result
# if callback is not provided.
ret = {
"result": None
}
| python | {
"resource": ""
} |
q253237 | ZkStateManager._get_execution_state_with_watch | validation | def _get_execution_state_with_watch(self, topologyName, callback, isWatching):
"""
Helper function to get execution state with
a callback. The future watch is placed
only if isWatching is True.
"""
path = self.get_execution_state_path(topologyName)
if isWatching:
LOG.info("Adding data ... | python | {
"resource": ""
} |
q253238 | ZkStateManager.create_execution_state | validation | def create_execution_state(self, topologyName, executionState):
""" create execution state """
if not executionState or not executionState.IsInitialized():
raise_(StateException("Execution State protobuf not init properly",
StateException.EX_TYPE_PROTOBUF_ERROR), sys.exc_info()... | python | {
"resource": ""
} |
q253239 | ZkStateManager.get_scheduler_location | validation | def get_scheduler_location(self, topologyName, callback=None):
""" get scheduler location """
isWatching = False
# Temp dict used to return result
# if callback is not provided.
ret = {
"result": None
}
if callback:
isWatching = True
else:
def callback(data):
| python | {
"resource": ""
} |
q253240 | ZkStateManager._get_scheduler_location_with_watch | validation | def _get_scheduler_location_with_watch(self, topologyName, callback, isWatching):
"""
Helper function to get scheduler location with
a callback. The future watch is placed
only if isWatching is True.
"""
path = self.get_scheduler_location_path(topologyName)
if isWatching:
LOG.info("Add... | python | {
"resource": ""
} |
q253241 | load | validation | def load(file_object):
"""
Deserializes Java primitive data and objects serialized by ObjectOutputStream
from a file-like object.
"""
marshaller | python | {
"resource": ""
} |
q253242 | loads | validation | def loads(string):
"""
Deserializes Java objects and primitive data serialized by ObjectOutputStream
from a string.
| python | {
"resource": ""
} |
q253243 | JavaObject.copy | validation | def copy(self, new_object):
"""copy an object"""
new_object.classdesc = self.classdesc
for name in | python | {
"resource": ""
} |
q253244 | Config.validate_state_locations | validation | def validate_state_locations(self):
"""
Names of all state locations must be unique.
""" | python | {
"resource": ""
} |
q253245 | PulsarSpout.initialize | validation | def initialize(self, config, context):
"""Implements Pulsar Spout's initialize method"""
self.logger.info("Initializing PulsarSpout with the following")
self.logger.info("Component-specific config: \n%s" % str(config))
self.logger.info("Context: \n%s" % str(context))
self.emit_count = 0
self.ac... | python | {
"resource": ""
} |
q253246 | JstackHandler.getInstanceJstack | validation | def getInstanceJstack(self, topology_info, instance_id):
"""
Fetches Instance jstack from heron-shell.
"""
pid_response = yield getInstancePid(topology_info, instance_id)
try:
http_client = tornado.httpclient.AsyncHTTPClient()
pid_json = json.loads(pid_response)
pid = pid_json['std... | python | {
"resource": ""
} |
q253247 | create_parser | validation | def create_parser(subparsers):
""" Create the parse for the update command """
parser = subparsers.add_parser(
'update',
help='Update a topology',
usage="%(prog)s [options] cluster/[role]/[env] <topology-name> "
+ "[--component-parallelism <name:value>] "
+ "[--container-number value] ... | python | {
"resource": ""
} |
q253248 | build_extra_args_dict | validation | def build_extra_args_dict(cl_args):
""" Build extra args map """
# Check parameters
component_parallelism = cl_args['component_parallelism']
runtime_configs = cl_args['runtime_config']
container_number = cl_args['container_number']
# Users need to provide either (component-parallelism || container_number) o... | python | {
"resource": ""
} |
q253249 | convert_args_dict_to_list | validation | def convert_args_dict_to_list(dict_extra_args):
""" flatten extra args """
list_extra_args = []
if 'component_parallelism' in dict_extra_args:
list_extra_args += ["--component_parallelism",
','.join(dict_extra_args['component_parallelism'])]
if 'runtime_config' in dict_extra_args:
... | python | {
"resource": ""
} |
q253250 | run | validation | def run(command, parser, cl_args, unknown_args):
""" run the update command """
Log.debug("Update Args: %s", cl_args)
# Build jar list
extra_lib_jars = jars.packing_jars()
action = "update topology%s" % (' in dry-run mode' if cl_args["dry_run"] else '')
# Build extra args
dict_extra_args = {}
try:
... | python | {
"resource": ""
} |
q253251 | getInstancePid | validation | def getInstancePid(topology_info, instance_id):
"""
This method is used by other modules, and so it
is not a part of the class.
Fetches Instance pid from heron-shell.
"""
try:
http_client = tornado.httpclient.AsyncHTTPClient()
endpoint = utils.make_shell_endpoint(topology_info, instance_id)
url ... | python | {
"resource": ""
} |
q253252 | Grouping.is_grouping_sane | validation | def is_grouping_sane(cls, gtype):
"""Checks if a given gtype is sane"""
if gtype == cls.SHUFFLE or gtype == cls.ALL or gtype == cls.LOWEST or gtype == cls.NONE:
return True
elif isinstance(gtype, cls.FIELDS):
return gtype.gtype == topology_pb2.Grouping.Value("FIELDS") and \
gtype.fi... | python | {
"resource": ""
} |
q253253 | Grouping.custom | validation | def custom(cls, customgrouper):
"""Custom grouping from a given implementation of ICustomGrouping
:param customgrouper: The ICustomGrouping implementation to use
"""
if customgrouper is None:
raise TypeError("Argument to custom() must be ICustomGrouping instance or classpath")
if not isinstance... | python | {
"resource": ""
} |
q253254 | Grouping.custom_serialized | validation | def custom_serialized(cls, serialized, is_java=True):
"""Custom grouping from a given serialized string
This class is created for compatibility with ``custom_serialized(cls, java_serialized)`` method
of StreamParse API, although its functionality is not yet implemented (Java-serialized).
Currently only... | python | {
"resource": ""
} |
q253255 | BaseMetricsHelper.register_metrics | validation | def register_metrics(self, metrics_collector, interval):
"""Registers its metrics to a given metrics collector with a given interval"""
| python | {
"resource": ""
} |
q253256 | BaseMetricsHelper.update_count | validation | def update_count(self, name, incr_by=1, key=None):
"""Update the value of CountMetric or MultiCountMetric
:type name: str
:param name: name of the registered metric to be updated.
:type incr_by: int
:param incr_by: specifies how much to increment. Default is 1.
:type key: str or None
:param... | python | {
"resource": ""
} |
q253257 | BaseMetricsHelper.update_reduced_metric | validation | def update_reduced_metric(self, name, value, key=None):
"""Update the value of ReducedMetric or MultiReducedMetric
:type name: str
:param name: name of the registered metric to be updated.
:param value: specifies a value to be reduced.
:type key: str or None
:param key: specifies a key for Mult... | python | {
"resource": ""
} |
q253258 | GatewayMetrics.update_received_packet | validation | def update_received_packet(self, received_pkt_size_bytes):
"""Update received packet metrics"""
self.update_count(self.RECEIVED_PKT_COUNT)
| python | {
"resource": ""
} |
q253259 | GatewayMetrics.update_sent_packet | validation | def update_sent_packet(self, sent_pkt_size_bytes):
"""Update sent packet metrics"""
self.update_count(self.SENT_PKT_COUNT)
| python | {
"resource": ""
} |
q253260 | ComponentMetrics.register_metrics | validation | def register_metrics(self, context):
"""Registers metrics to context
:param context: Topology Context
"""
sys_config = system_config.get_sys_config()
interval = float(sys_config[constants.HERON_METRICS_EXPORT_INTERVAL_SEC])
| python | {
"resource": ""
} |
q253261 | ComponentMetrics.serialize_data_tuple | validation | def serialize_data_tuple(self, stream_id, latency_in_ns):
"""Apply update to serialization metrics"""
| python | {
"resource": ""
} |
q253262 | SpoutMetrics.next_tuple | validation | def next_tuple(self, latency_in_ns):
"""Apply updates to the next tuple metrics"""
| python | {
"resource": ""
} |
q253263 | BoltMetrics.execute_tuple | validation | def execute_tuple(self, stream_id, source_component, latency_in_ns):
"""Apply updates to the execute metrics"""
self.update_count(self.EXEC_COUNT, key=stream_id)
self.update_reduced_metric(self.EXEC_LATENCY, latency_in_ns, stream_id)
self.update_count(self.EXEC_TIME_NS, incr_by=latency_in_ns, key=stream... | python | {
"resource": ""
} |
q253264 | BoltMetrics.deserialize_data_tuple | validation | def deserialize_data_tuple(self, stream_id, source_component, latency_in_ns):
"""Apply updates to the deserialization metrics"""
self.update_count(self.TUPLE_DESERIALIZATION_TIME_NS, incr_by=latency_in_ns, key=stream_id)
global_stream_id = source_component | python | {
"resource": ""
} |
q253265 | MetricsCollector.register_metric | validation | def register_metric(self, name, metric, time_bucket_in_sec):
"""Registers a given metric
:param name: name of the metric
:param metric: IMetric object to be registered
:param time_bucket_in_sec: time interval for update to the metrics manager
"""
if name in self.metrics_map:
raise Runtime... | python | {
"resource": ""
} |
q253266 | HeronCommunicator.poll | validation | def poll(self):
"""Poll from the buffer
It is a non-blocking operation, and when the buffer is empty, it raises Queue.Empty exception
"""
try:
# non-blocking
ret = self._buffer.get(block=False)
if self._producer_callback | python | {
"resource": ""
} |
q253267 | HeronCommunicator.offer | validation | def offer(self, item):
"""Offer to the buffer
It is a non-blocking operation, and when the buffer is full, it raises Queue.Full exception
"""
try:
# non-blocking
self._buffer.put(item, block=False)
if | python | {
"resource": ""
} |
q253268 | parse | validation | def parse(version):
"""
Parse version to major, minor, patch, pre-release, build parts.
"""
match = _REGEX.match(version)
if match is None:
raise ValueError('%s is not valid SemVer string' % version)
verinfo = match.groupdict() | python | {
"resource": ""
} |
q253269 | get_all_zk_state_managers | validation | def get_all_zk_state_managers(conf):
"""
Creates all the zookeeper state_managers and returns
them in a list
"""
state_managers = []
state_locations = conf.get_state_locations_of_type("zookeeper")
for location in state_locations:
name = location['name']
hostport = location['hostport']
hostport... | python | {
"resource": ""
} |
q253270 | get_all_file_state_managers | validation | def get_all_file_state_managers(conf):
"""
Returns all the file state_managers.
"""
state_managers = []
state_locations = conf.get_state_locations_of_type("file")
for location in state_locations:
name = location['name']
rootpath = os.path.expanduser(location['rootpath'])
| python | {
"resource": ""
} |
q253271 | MultiCountMetric.incr | validation | def incr(self, key, to_add=1):
"""Increments the value of a given key by ``to_add``""" | python | {
"resource": ""
} |
q253272 | MultiReducedMetric.update | validation | def update(self, key, value):
"""Updates a value of a given key and apply reduction"""
| python | {
"resource": ""
} |
q253273 | MultiReducedMetric.add_key | validation | def add_key(self, key):
"""Adds a new key to this metric"""
| python | {
"resource": ""
} |
q253274 | OutgoingTupleHelper.add_data_tuple | validation | def add_data_tuple(self, stream_id, new_data_tuple, tuple_size_in_bytes):
"""Add a new data tuple to the currently buffered set of tuples"""
if (self.current_data_tuple_set is None) or \
(self.current_data_tuple_set.stream.id != stream_id) or \
(len(self.current_data_tuple_set.tuples) >= self.da... | python | {
"resource": ""
} |
q253275 | OutgoingTupleHelper.add_ckpt_state | validation | def add_ckpt_state(self, ckpt_id, ckpt_state):
"""Add the checkpoint state message to be sent back the stmgr
:param ckpt_id: The id of the checkpoint
:ckpt_state: The checkpoint state
"""
# first flush any buffered tuples
self._flush_remaining()
| python | {
"resource": ""
} |
q253276 | valid_path | validation | def valid_path(path):
'''
Check if an entry in the class path exists as either a directory or a file
'''
# check if the suffix of the classpath entry exists as a directory
if path.endswith('*'):
Log.debug('Checking classpath entry suffix as directory: %s', path[:-1])
if os.path.isdir(path[:-1]):
retu... | python | {
"resource": ""
} |
q253277 | valid_java_classpath | validation | def valid_java_classpath(classpath):
'''
Given a java classpath, check whether the path entries are valid | python | {
"resource": ""
} |
q253278 | _get_deps_list | validation | def _get_deps_list(abs_path_to_pex):
"""Get a list of paths to included dependencies in the specified pex file
Note that dependencies are located under `.deps` | python | {
"resource": ""
} |
q253279 | load_pex | validation | def load_pex(path_to_pex, include_deps=True):
"""Loads pex file and its dependencies to the current python path"""
abs_path_to_pex = os.path.abspath(path_to_pex)
Log.debug("Add a pex to the path: %s" % abs_path_to_pex)
if abs_path_to_pex not in sys.path:
sys.path.insert(0, os.path.dirname(abs_path_to_pex))
... | python | {
"resource": ""
} |
q253280 | resolve_heron_suffix_issue | validation | def resolve_heron_suffix_issue(abs_pex_path, class_path):
"""Resolves duplicate package suffix problems
When dynamically loading a pex file and a corresponding python class (bolt/spout/topology),
if the top level package in which to-be-loaded classes reside is named 'heron', the path conflicts
with this Heron ... | python | {
"resource": ""
} |
q253281 | import_and_get_class | validation | def import_and_get_class(path_to_pex, python_class_name):
"""Imports and load a class from a given pex file path and python class name
For example, if you want to get a class called `Sample` in
/some-path/sample.pex/heron/examples/src/python/sample.py,
``path_to_pex`` needs to be ``/some-path/sample.pex``, and... | python | {
"resource": ""
} |
q253282 | Builder.new_source | validation | def new_source(self, source):
"""Adds a new source to the computation DAG"""
source_streamlet = None
if callable(source):
source_streamlet = SupplierStreamlet(source)
elif isinstance(source, Generator):
source_streamlet = GeneratorStreamlet(source)
| python | {
"resource": ""
} |
q253283 | Builder.build | validation | def build(self, bldr):
"""Builds the topology and returns the builder"""
stage_names = sets.Set()
for source in self._sources:
source._build(bldr, stage_names)
for source in self._sources:
| python | {
"resource": ""
} |
q253284 | __replace | validation | def __replace(config, wildcards, config_file):
"""For each kvp in config, do wildcard substitution on the values"""
for config_key in config:
config_value = config[config_key]
original_value = config_value
if isinstance(config_value, str):
for token in wildcards:
if wildcards[token]:
... | python | {
"resource": ""
} |
q253285 | get_command_handlers | validation | def get_command_handlers():
'''
Create a map of command names and handlers
'''
return {
'activate': activate,
'config': hconfig,
'deactivate': deactivate,
'help': cli_help,
| python | {
"resource": ""
} |
q253286 | Metrics.setDefault | validation | def setDefault(self, constant, start, end):
""" set default time """
starttime = start / 60 * 60
if starttime < start:
starttime += 60
endtime = end / 60 * 60
while starttime <= endtime:
# STREAMCOMP-1559
# Second check is a work around, because the response from tmaster
# co... | python | {
"resource": ""
} |
q253287 | SlidingWindowBolt.process | validation | def process(self, tup):
"""Process a single tuple of input
We add the (time, tuple) pair into our current_tuples. And then look for expiring
elements
"""
| python | {
"resource": ""
} |
q253288 | SlidingWindowBolt.process_tick | validation | def process_tick(self, tup):
"""Called every slide_interval
"""
curtime = int(time.time())
window_info = WindowContext(curtime - self.window_duration, curtime)
tuple_batch = []
for (tup, tm) in | python | {
"resource": ""
} |
q253289 | TumblingWindowBolt.process_tick | validation | def process_tick(self, tup):
"""Called every window_duration
"""
curtime = int(time.time())
window_info = WindowContext(curtime | python | {
"resource": ""
} |
q253290 | RuntimeStateHandler.getStmgrsRegSummary | validation | def getStmgrsRegSummary(self, tmaster, callback=None):
"""
Get summary of stream managers registration summary
"""
if not tmaster or not tmaster.host or not tmaster.stats_port:
return
reg_request = tmaster_pb2.StmgrsRegistrationSummaryRequest()
request_str = reg_request.SerializeToString()... | python | {
"resource": ""
} |
q253291 | setup | validation | def setup(executor):
"""Set up log, process and signal handlers"""
# pylint: disable=unused-argument
def signal_handler(signal_to_handle, frame):
# We would do nothing here but just exit
# Just catch the SIGTERM and then cleanup(), registered with atexit, would invoke
Log.info('signal_handler invoked ... | python | {
"resource": ""
} |
q253292 | main | validation | def main():
"""Register exit handlers, initialize the executor and run it."""
# Since Heron on YARN runs as headless users, pex compiled
# binaries should be exploded into the container working
# directory. In order to do this, we need to set the
# PEX_ROOT shell environment before forking the processes
she... | python | {
"resource": ""
} |
q253293 | HeronExecutor._get_healthmgr_cmd | validation | def _get_healthmgr_cmd(self):
''' get the command to start the topology health manager processes '''
healthmgr_main_class = 'org.apache.heron.healthmgr.HealthManager'
healthmgr_cmd = [os.path.join(self.heron_java_home, 'bin/java'),
# We could not rely on the default -Xmx setting, which... | python | {
"resource": ""
} |
q253294 | HeronExecutor._get_tmaster_processes | validation | def _get_tmaster_processes(self):
''' get the command to start the tmaster processes '''
retval = {}
tmaster_cmd_lst = [
self.tmaster_binary,
'--topology_name=%s' % self.topology_name,
'--topology_id=%s' % self.topology_id,
'--zkhostportlist=%s' % self.state_manager_connectio... | python | {
"resource": ""
} |
q253295 | HeronExecutor._get_streaming_processes | validation | def _get_streaming_processes(self):
'''
Returns the processes to handle streams, including the stream-mgr and the user code containing
the stream logic of the topology
'''
retval = {}
instance_plans = self._get_instance_plans(self.packing_plan, self.shard)
instance_info = []
for instance... | python | {
"resource": ""
} |
q253296 | HeronExecutor._get_ckptmgr_process | validation | def _get_ckptmgr_process(self):
''' Get the command to start the checkpoint manager process'''
ckptmgr_main_class = 'org.apache.heron.ckptmgr.CheckpointManager'
ckptmgr_ram_mb = self.checkpoint_manager_ram / (1024 * 1024)
ckptmgr_cmd = [os.path.join(self.heron_java_home, "bin/java"),
... | python | {
"resource": ""
} |
q253297 | HeronExecutor._get_instance_plans | validation | def _get_instance_plans(self, packing_plan, container_id):
"""
For the given packing_plan, return the container plan with the given container_id. If protobufs
supported maps, we could just get the plan by id, but it doesn't so we have a collection of
containers to iterate over.
"""
this_containe... | python | {
"resource": ""
} |
q253298 | HeronExecutor._get_heron_support_processes | validation | def _get_heron_support_processes(self):
""" Get a map from all daemon services' name to the command to start them """
retval = {}
retval[self.heron_shell_ids[self.shard]] = Command([ | python | {
"resource": ""
} |
q253299 | HeronExecutor._wait_process_std_out_err | validation | def _wait_process_std_out_err(self, name, process):
''' Wait for the termination of a process and log its stdout & stderr | python | {
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.