repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1
value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1
value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
Cognexa/cxflow | cxflow/hooks/compute_stats.py | ComputeStats.after_epoch | def after_epoch(self, epoch_data: EpochData, **kwargs) -> None:
"""
Compute the specified aggregations and save them to the given epoch data.
:param epoch_data: epoch data to be processed
"""
self._save_stats(epoch_data)
super().after_epoch(epoch_data=epoch_data, **kwargs) | python | def after_epoch(self, epoch_data: EpochData, **kwargs) -> None:
"""
Compute the specified aggregations and save them to the given epoch data.
:param epoch_data: epoch data to be processed
"""
self._save_stats(epoch_data)
super().after_epoch(epoch_data=epoch_data, **kwargs) | [
"def",
"after_epoch",
"(",
"self",
",",
"epoch_data",
":",
"EpochData",
",",
"*",
"*",
"kwargs",
")",
"->",
"None",
":",
"self",
".",
"_save_stats",
"(",
"epoch_data",
")",
"super",
"(",
")",
".",
"after_epoch",
"(",
"epoch_data",
"=",
"epoch_data",
",",... | Compute the specified aggregations and save them to the given epoch data.
:param epoch_data: epoch data to be processed | [
"Compute",
"the",
"specified",
"aggregations",
"and",
"save",
"them",
"to",
"the",
"given",
"epoch",
"data",
"."
] | dd609e6b0bd854424a8f86781dd77801a13038f9 | https://github.com/Cognexa/cxflow/blob/dd609e6b0bd854424a8f86781dd77801a13038f9/cxflow/hooks/compute_stats.py#L106-L113 | train | 36,500 |
Cognexa/cxflow | cxflow/utils/training_trace.py | TrainingTrace.from_file | def from_file(filepath: str):
"""
Load training trace from the given ``filepath``.
:param filepath: training trace file path
:return: training trace
"""
trace = TrainingTrace()
trace._trace = load_config(filepath)
return trace | python | def from_file(filepath: str):
"""
Load training trace from the given ``filepath``.
:param filepath: training trace file path
:return: training trace
"""
trace = TrainingTrace()
trace._trace = load_config(filepath)
return trace | [
"def",
"from_file",
"(",
"filepath",
":",
"str",
")",
":",
"trace",
"=",
"TrainingTrace",
"(",
")",
"trace",
".",
"_trace",
"=",
"load_config",
"(",
"filepath",
")",
"return",
"trace"
] | Load training trace from the given ``filepath``.
:param filepath: training trace file path
:return: training trace | [
"Load",
"training",
"trace",
"from",
"the",
"given",
"filepath",
"."
] | dd609e6b0bd854424a8f86781dd77801a13038f9 | https://github.com/Cognexa/cxflow/blob/dd609e6b0bd854424a8f86781dd77801a13038f9/cxflow/utils/training_trace.py#L85-L94 | train | 36,501 |
Cognexa/cxflow | cxflow/hooks/check.py | Check.after_epoch | def after_epoch(self, epoch_id: int, epoch_data: EpochData):
"""
Check termination conditions.
:param epoch_id: number of the processed epoch
:param epoch_data: epoch data to be checked
:raise KeyError: if the stream of variable was not found in ``epoch_data``
:raise TypeError: if the monitored variable is not a scalar or scalar ``mean`` aggregation
:raise ValueError: if the specified number of epochs exceeded
:raise TrainingTerminated: if the monitor variable is above the required level
"""
if self._stream not in epoch_data:
raise KeyError('The hook could not determine whether the threshold was exceeded as the stream `{}`'
'was not found in the epoch data'.format(self._stream))
if self._variable not in epoch_data[self._stream]:
raise KeyError('The hook could not determine whether the threshold was exceeded as the variable `{}`'
'was not found in the epoch data stream `{}`'.format(self._variable, self._stream))
value = epoch_data[self._stream][self._variable]
if isinstance(value, dict) and 'mean' in value:
value = value['mean']
if not np.isscalar(value):
raise TypeError('The value to be checked has to be either a scalar or a dict with `mean` key. '
'Got `{}` instead.'.format(type(value).__name__))
if value > self._required_min_value:
raise TrainingTerminated('{} {} level matched (current {} is greater than required {}).'
.format(self._stream, self._variable, value, self._required_min_value))
elif epoch_id >= self._max_epoch:
raise ValueError('{} {} was only {} in epoch {}, but {} was required. Training failed.'
.format(self._stream, self._variable, value, epoch_id, self._required_min_value)) | python | def after_epoch(self, epoch_id: int, epoch_data: EpochData):
"""
Check termination conditions.
:param epoch_id: number of the processed epoch
:param epoch_data: epoch data to be checked
:raise KeyError: if the stream of variable was not found in ``epoch_data``
:raise TypeError: if the monitored variable is not a scalar or scalar ``mean`` aggregation
:raise ValueError: if the specified number of epochs exceeded
:raise TrainingTerminated: if the monitor variable is above the required level
"""
if self._stream not in epoch_data:
raise KeyError('The hook could not determine whether the threshold was exceeded as the stream `{}`'
'was not found in the epoch data'.format(self._stream))
if self._variable not in epoch_data[self._stream]:
raise KeyError('The hook could not determine whether the threshold was exceeded as the variable `{}`'
'was not found in the epoch data stream `{}`'.format(self._variable, self._stream))
value = epoch_data[self._stream][self._variable]
if isinstance(value, dict) and 'mean' in value:
value = value['mean']
if not np.isscalar(value):
raise TypeError('The value to be checked has to be either a scalar or a dict with `mean` key. '
'Got `{}` instead.'.format(type(value).__name__))
if value > self._required_min_value:
raise TrainingTerminated('{} {} level matched (current {} is greater than required {}).'
.format(self._stream, self._variable, value, self._required_min_value))
elif epoch_id >= self._max_epoch:
raise ValueError('{} {} was only {} in epoch {}, but {} was required. Training failed.'
.format(self._stream, self._variable, value, epoch_id, self._required_min_value)) | [
"def",
"after_epoch",
"(",
"self",
",",
"epoch_id",
":",
"int",
",",
"epoch_data",
":",
"EpochData",
")",
":",
"if",
"self",
".",
"_stream",
"not",
"in",
"epoch_data",
":",
"raise",
"KeyError",
"(",
"'The hook could not determine whether the threshold was exceeded a... | Check termination conditions.
:param epoch_id: number of the processed epoch
:param epoch_data: epoch data to be checked
:raise KeyError: if the stream of variable was not found in ``epoch_data``
:raise TypeError: if the monitored variable is not a scalar or scalar ``mean`` aggregation
:raise ValueError: if the specified number of epochs exceeded
:raise TrainingTerminated: if the monitor variable is above the required level | [
"Check",
"termination",
"conditions",
"."
] | dd609e6b0bd854424a8f86781dd77801a13038f9 | https://github.com/Cognexa/cxflow/blob/dd609e6b0bd854424a8f86781dd77801a13038f9/cxflow/hooks/check.py#L43-L76 | train | 36,502 |
Cognexa/cxflow | cxflow/cli/train.py | train | def train(config_path: str, cl_arguments: Iterable[str], output_root: str) -> None:
"""
Load config and start the training.
:param config_path: path to configuration file
:param cl_arguments: additional command line arguments which will update the configuration
:param output_root: output root in which the training directory will be created
"""
config = None
try:
config_path = find_config(config_path)
config = load_config(config_file=config_path, additional_args=cl_arguments)
validate_config(config)
logging.debug('\tLoaded config: %s', config)
except Exception as ex: # pylint: disable=broad-except
fallback('Loading config failed', ex)
run(config=config, output_root=output_root) | python | def train(config_path: str, cl_arguments: Iterable[str], output_root: str) -> None:
"""
Load config and start the training.
:param config_path: path to configuration file
:param cl_arguments: additional command line arguments which will update the configuration
:param output_root: output root in which the training directory will be created
"""
config = None
try:
config_path = find_config(config_path)
config = load_config(config_file=config_path, additional_args=cl_arguments)
validate_config(config)
logging.debug('\tLoaded config: %s', config)
except Exception as ex: # pylint: disable=broad-except
fallback('Loading config failed', ex)
run(config=config, output_root=output_root) | [
"def",
"train",
"(",
"config_path",
":",
"str",
",",
"cl_arguments",
":",
"Iterable",
"[",
"str",
"]",
",",
"output_root",
":",
"str",
")",
"->",
"None",
":",
"config",
"=",
"None",
"try",
":",
"config_path",
"=",
"find_config",
"(",
"config_path",
")",
... | Load config and start the training.
:param config_path: path to configuration file
:param cl_arguments: additional command line arguments which will update the configuration
:param output_root: output root in which the training directory will be created | [
"Load",
"config",
"and",
"start",
"the",
"training",
"."
] | dd609e6b0bd854424a8f86781dd77801a13038f9 | https://github.com/Cognexa/cxflow/blob/dd609e6b0bd854424a8f86781dd77801a13038f9/cxflow/cli/train.py#L11-L29 | train | 36,503 |
Cognexa/cxflow | cxflow/cli/eval.py | evaluate | def evaluate(model_path: str, stream_name: str, config_path: Optional[str], cl_arguments: Iterable[str],
output_root: str) -> None:
"""
Evaluate the given model on the specified data stream.
Configuration is updated by the respective predict.stream_name section, in particular:
- hooks section is entirely replaced
- model and dataset sections are updated
:param model_path: path to the model to be evaluated
:param stream_name: data stream name to be evaluated
:param config_path: path to the config to be used, if not specified infer the path from ``model_path``
:param cl_arguments: additional command line arguments which will update the configuration
:param output_root: output root in which the training directory will be created
"""
config = None
try:
model_dir = path.dirname(model_path) if not path.isdir(model_path) else model_path
config_path = find_config(model_dir if config_path is None else config_path)
config = load_config(config_file=config_path, additional_args=cl_arguments)
if stream_name == CXF_PREDICT_STREAM and stream_name in config: # old style ``cxflow predict ...``
logging.warning('Old style ``predict`` configuration section is deprecated and will not be supported, '
'use ``eval.predict`` section instead.')
config['eval'] = {'predict': config['predict']}
if 'eval' in config and stream_name in config['eval']:
update_section = config['eval'][stream_name]
for subsection in ['dataset', 'model', 'main_loop']:
if subsection in update_section:
config[subsection].update(update_section[subsection])
if 'hooks' in update_section:
config['hooks'] = update_section['hooks']
else:
logging.warning('Config does not contain `eval.%s.hooks` section. '
'No hook will be employed during the evaluation.', stream_name)
config['hooks'] = []
validate_config(config)
logging.debug('\tLoaded config: %s', config)
except Exception as ex: # pylint: disable=broad-except
fallback('Loading config failed', ex)
run(config=config, output_root=output_root, restore_from=model_path, eval=stream_name) | python | def evaluate(model_path: str, stream_name: str, config_path: Optional[str], cl_arguments: Iterable[str],
output_root: str) -> None:
"""
Evaluate the given model on the specified data stream.
Configuration is updated by the respective predict.stream_name section, in particular:
- hooks section is entirely replaced
- model and dataset sections are updated
:param model_path: path to the model to be evaluated
:param stream_name: data stream name to be evaluated
:param config_path: path to the config to be used, if not specified infer the path from ``model_path``
:param cl_arguments: additional command line arguments which will update the configuration
:param output_root: output root in which the training directory will be created
"""
config = None
try:
model_dir = path.dirname(model_path) if not path.isdir(model_path) else model_path
config_path = find_config(model_dir if config_path is None else config_path)
config = load_config(config_file=config_path, additional_args=cl_arguments)
if stream_name == CXF_PREDICT_STREAM and stream_name in config: # old style ``cxflow predict ...``
logging.warning('Old style ``predict`` configuration section is deprecated and will not be supported, '
'use ``eval.predict`` section instead.')
config['eval'] = {'predict': config['predict']}
if 'eval' in config and stream_name in config['eval']:
update_section = config['eval'][stream_name]
for subsection in ['dataset', 'model', 'main_loop']:
if subsection in update_section:
config[subsection].update(update_section[subsection])
if 'hooks' in update_section:
config['hooks'] = update_section['hooks']
else:
logging.warning('Config does not contain `eval.%s.hooks` section. '
'No hook will be employed during the evaluation.', stream_name)
config['hooks'] = []
validate_config(config)
logging.debug('\tLoaded config: %s', config)
except Exception as ex: # pylint: disable=broad-except
fallback('Loading config failed', ex)
run(config=config, output_root=output_root, restore_from=model_path, eval=stream_name) | [
"def",
"evaluate",
"(",
"model_path",
":",
"str",
",",
"stream_name",
":",
"str",
",",
"config_path",
":",
"Optional",
"[",
"str",
"]",
",",
"cl_arguments",
":",
"Iterable",
"[",
"str",
"]",
",",
"output_root",
":",
"str",
")",
"->",
"None",
":",
"conf... | Evaluate the given model on the specified data stream.
Configuration is updated by the respective predict.stream_name section, in particular:
- hooks section is entirely replaced
- model and dataset sections are updated
:param model_path: path to the model to be evaluated
:param stream_name: data stream name to be evaluated
:param config_path: path to the config to be used, if not specified infer the path from ``model_path``
:param cl_arguments: additional command line arguments which will update the configuration
:param output_root: output root in which the training directory will be created | [
"Evaluate",
"the",
"given",
"model",
"on",
"the",
"specified",
"data",
"stream",
"."
] | dd609e6b0bd854424a8f86781dd77801a13038f9 | https://github.com/Cognexa/cxflow/blob/dd609e6b0bd854424a8f86781dd77801a13038f9/cxflow/cli/eval.py#L12-L57 | train | 36,504 |
Cognexa/cxflow | cxflow/cli/eval.py | predict | def predict(config_path: str, restore_from: Optional[str], cl_arguments: Iterable[str], output_root: str) -> None:
"""
Run prediction from the specified config path.
If the config contains a ``predict`` section:
- override hooks with ``predict.hooks`` if present
- update dataset, model and main loop sections if the respective sections are present
:param config_path: path to the config file or the directory in which it is stored
:param restore_from: backend-specific path to the already trained model to be restored from.
If ``None`` is passed, it is inferred from the configuration file location as the directory
it is located in.
:param cl_arguments: additional command line arguments which will update the configuration
:param output_root: output root in which the training directory will be created
"""
config = None
try:
config_path = find_config(config_path)
restore_from = restore_from or path.dirname(config_path)
config = load_config(config_file=config_path, additional_args=cl_arguments)
if 'predict' in config:
for section in ['dataset', 'model', 'main_loop']:
if section in config['predict']:
config[section].update(config['predict'][section])
if 'hooks' in config['predict']:
config['hooks'] = config['predict']['hooks']
else:
logging.warning('Config does not contain `predict.hooks` section. '
'No hook will be employed during the prediction.')
config['hooks'] = []
validate_config(config)
logging.debug('\tLoaded config: %s', config)
except Exception as ex: # pylint: disable=broad-except
fallback('Loading config failed', ex)
run(config=config, output_root=output_root, restore_from=restore_from, eval='predict') | python | def predict(config_path: str, restore_from: Optional[str], cl_arguments: Iterable[str], output_root: str) -> None:
"""
Run prediction from the specified config path.
If the config contains a ``predict`` section:
- override hooks with ``predict.hooks`` if present
- update dataset, model and main loop sections if the respective sections are present
:param config_path: path to the config file or the directory in which it is stored
:param restore_from: backend-specific path to the already trained model to be restored from.
If ``None`` is passed, it is inferred from the configuration file location as the directory
it is located in.
:param cl_arguments: additional command line arguments which will update the configuration
:param output_root: output root in which the training directory will be created
"""
config = None
try:
config_path = find_config(config_path)
restore_from = restore_from or path.dirname(config_path)
config = load_config(config_file=config_path, additional_args=cl_arguments)
if 'predict' in config:
for section in ['dataset', 'model', 'main_loop']:
if section in config['predict']:
config[section].update(config['predict'][section])
if 'hooks' in config['predict']:
config['hooks'] = config['predict']['hooks']
else:
logging.warning('Config does not contain `predict.hooks` section. '
'No hook will be employed during the prediction.')
config['hooks'] = []
validate_config(config)
logging.debug('\tLoaded config: %s', config)
except Exception as ex: # pylint: disable=broad-except
fallback('Loading config failed', ex)
run(config=config, output_root=output_root, restore_from=restore_from, eval='predict') | [
"def",
"predict",
"(",
"config_path",
":",
"str",
",",
"restore_from",
":",
"Optional",
"[",
"str",
"]",
",",
"cl_arguments",
":",
"Iterable",
"[",
"str",
"]",
",",
"output_root",
":",
"str",
")",
"->",
"None",
":",
"config",
"=",
"None",
"try",
":",
... | Run prediction from the specified config path.
If the config contains a ``predict`` section:
- override hooks with ``predict.hooks`` if present
- update dataset, model and main loop sections if the respective sections are present
:param config_path: path to the config file or the directory in which it is stored
:param restore_from: backend-specific path to the already trained model to be restored from.
If ``None`` is passed, it is inferred from the configuration file location as the directory
it is located in.
:param cl_arguments: additional command line arguments which will update the configuration
:param output_root: output root in which the training directory will be created | [
"Run",
"prediction",
"from",
"the",
"specified",
"config",
"path",
"."
] | dd609e6b0bd854424a8f86781dd77801a13038f9 | https://github.com/Cognexa/cxflow/blob/dd609e6b0bd854424a8f86781dd77801a13038f9/cxflow/cli/eval.py#L60-L99 | train | 36,505 |
Cognexa/cxflow | cxflow/main_loop.py | MainLoop._create_epoch_data | def _create_epoch_data(self, streams: Optional[Iterable[str]]=None) -> EpochData:
"""Create empty epoch data double dict."""
if streams is None:
streams = [self._train_stream_name] + self._extra_streams
return OrderedDict([(stream_name, OrderedDict()) for stream_name in streams]) | python | def _create_epoch_data(self, streams: Optional[Iterable[str]]=None) -> EpochData:
"""Create empty epoch data double dict."""
if streams is None:
streams = [self._train_stream_name] + self._extra_streams
return OrderedDict([(stream_name, OrderedDict()) for stream_name in streams]) | [
"def",
"_create_epoch_data",
"(",
"self",
",",
"streams",
":",
"Optional",
"[",
"Iterable",
"[",
"str",
"]",
"]",
"=",
"None",
")",
"->",
"EpochData",
":",
"if",
"streams",
"is",
"None",
":",
"streams",
"=",
"[",
"self",
".",
"_train_stream_name",
"]",
... | Create empty epoch data double dict. | [
"Create",
"empty",
"epoch",
"data",
"double",
"dict",
"."
] | dd609e6b0bd854424a8f86781dd77801a13038f9 | https://github.com/Cognexa/cxflow/blob/dd609e6b0bd854424a8f86781dd77801a13038f9/cxflow/main_loop.py#L96-L100 | train | 36,506 |
Cognexa/cxflow | cxflow/main_loop.py | MainLoop._check_sources | def _check_sources(self, batch: Dict[str, object]) -> None:
"""
Check for unused and missing sources.
:param batch: batch to be checked
:raise ValueError: if a source is missing or unused and ``self._on_unused_sources`` is set to ``error``
"""
unused_sources = [source for source in batch.keys() if source not in self._model.input_names]
missing_sources = [source for source in self._model.input_names if source not in batch.keys()]
# check stream sources
if unused_sources:
if self._on_unused_sources == 'warn' and not self._extra_sources_warned:
logging.warning('Some sources provided by the stream do not match model placeholders. Set '
'`main_loop.on_unused_sources` to `ignore` in order to suppress this warning. '
'Extra sources: %s', unused_sources)
self._extra_sources_warned = True
elif self._on_unused_sources == 'error':
raise ValueError('Some sources provided by the stream do not match model placeholders. Set'
'`main_loop.on_unused_sources` to `warn` in order to suppress this error.\n'
'Extra sources: {}'.format(unused_sources))
if missing_sources:
raise ValueError('Stream does not provide all required sources. Missing sources: {}'
.format(missing_sources)) | python | def _check_sources(self, batch: Dict[str, object]) -> None:
"""
Check for unused and missing sources.
:param batch: batch to be checked
:raise ValueError: if a source is missing or unused and ``self._on_unused_sources`` is set to ``error``
"""
unused_sources = [source for source in batch.keys() if source not in self._model.input_names]
missing_sources = [source for source in self._model.input_names if source not in batch.keys()]
# check stream sources
if unused_sources:
if self._on_unused_sources == 'warn' and not self._extra_sources_warned:
logging.warning('Some sources provided by the stream do not match model placeholders. Set '
'`main_loop.on_unused_sources` to `ignore` in order to suppress this warning. '
'Extra sources: %s', unused_sources)
self._extra_sources_warned = True
elif self._on_unused_sources == 'error':
raise ValueError('Some sources provided by the stream do not match model placeholders. Set'
'`main_loop.on_unused_sources` to `warn` in order to suppress this error.\n'
'Extra sources: {}'.format(unused_sources))
if missing_sources:
raise ValueError('Stream does not provide all required sources. Missing sources: {}'
.format(missing_sources)) | [
"def",
"_check_sources",
"(",
"self",
",",
"batch",
":",
"Dict",
"[",
"str",
",",
"object",
"]",
")",
"->",
"None",
":",
"unused_sources",
"=",
"[",
"source",
"for",
"source",
"in",
"batch",
".",
"keys",
"(",
")",
"if",
"source",
"not",
"in",
"self",... | Check for unused and missing sources.
:param batch: batch to be checked
:raise ValueError: if a source is missing or unused and ``self._on_unused_sources`` is set to ``error`` | [
"Check",
"for",
"unused",
"and",
"missing",
"sources",
"."
] | dd609e6b0bd854424a8f86781dd77801a13038f9 | https://github.com/Cognexa/cxflow/blob/dd609e6b0bd854424a8f86781dd77801a13038f9/cxflow/main_loop.py#L102-L125 | train | 36,507 |
Cognexa/cxflow | cxflow/main_loop.py | MainLoop.train_by_stream | def train_by_stream(self, stream: StreamWrapper) -> None:
"""
Train the model with the given stream.
:param stream: stream to train with
"""
self._run_epoch(stream=stream, train=True) | python | def train_by_stream(self, stream: StreamWrapper) -> None:
"""
Train the model with the given stream.
:param stream: stream to train with
"""
self._run_epoch(stream=stream, train=True) | [
"def",
"train_by_stream",
"(",
"self",
",",
"stream",
":",
"StreamWrapper",
")",
"->",
"None",
":",
"self",
".",
"_run_epoch",
"(",
"stream",
"=",
"stream",
",",
"train",
"=",
"True",
")"
] | Train the model with the given stream.
:param stream: stream to train with | [
"Train",
"the",
"model",
"with",
"the",
"given",
"stream",
"."
] | dd609e6b0bd854424a8f86781dd77801a13038f9 | https://github.com/Cognexa/cxflow/blob/dd609e6b0bd854424a8f86781dd77801a13038f9/cxflow/main_loop.py#L182-L188 | train | 36,508 |
Cognexa/cxflow | cxflow/main_loop.py | MainLoop.evaluate_stream | def evaluate_stream(self, stream: StreamWrapper) -> None:
"""
Evaluate the given stream.
:param stream: stream to be evaluated
:param stream_name: stream name
"""
self._run_epoch(stream=stream, train=False) | python | def evaluate_stream(self, stream: StreamWrapper) -> None:
"""
Evaluate the given stream.
:param stream: stream to be evaluated
:param stream_name: stream name
"""
self._run_epoch(stream=stream, train=False) | [
"def",
"evaluate_stream",
"(",
"self",
",",
"stream",
":",
"StreamWrapper",
")",
"->",
"None",
":",
"self",
".",
"_run_epoch",
"(",
"stream",
"=",
"stream",
",",
"train",
"=",
"False",
")"
] | Evaluate the given stream.
:param stream: stream to be evaluated
:param stream_name: stream name | [
"Evaluate",
"the",
"given",
"stream",
"."
] | dd609e6b0bd854424a8f86781dd77801a13038f9 | https://github.com/Cognexa/cxflow/blob/dd609e6b0bd854424a8f86781dd77801a13038f9/cxflow/main_loop.py#L190-L197 | train | 36,509 |
Cognexa/cxflow | cxflow/main_loop.py | MainLoop._run_zeroth_epoch | def _run_zeroth_epoch(self, streams: Iterable[str]) -> None:
"""
Run zeroth epoch on the specified streams.
Calls
- :py:meth:`cxflow.hooks.AbstractHook.after_epoch`
:param streams: stream names to be evaluated
"""
for stream_name in streams:
with self.get_stream(stream_name) as stream:
self.evaluate_stream(stream)
epoch_data = self._create_epoch_data(streams)
for hook in self._hooks:
hook.after_epoch(epoch_id=0, epoch_data=epoch_data) | python | def _run_zeroth_epoch(self, streams: Iterable[str]) -> None:
"""
Run zeroth epoch on the specified streams.
Calls
- :py:meth:`cxflow.hooks.AbstractHook.after_epoch`
:param streams: stream names to be evaluated
"""
for stream_name in streams:
with self.get_stream(stream_name) as stream:
self.evaluate_stream(stream)
epoch_data = self._create_epoch_data(streams)
for hook in self._hooks:
hook.after_epoch(epoch_id=0, epoch_data=epoch_data) | [
"def",
"_run_zeroth_epoch",
"(",
"self",
",",
"streams",
":",
"Iterable",
"[",
"str",
"]",
")",
"->",
"None",
":",
"for",
"stream_name",
"in",
"streams",
":",
"with",
"self",
".",
"get_stream",
"(",
"stream_name",
")",
"as",
"stream",
":",
"self",
".",
... | Run zeroth epoch on the specified streams.
Calls
- :py:meth:`cxflow.hooks.AbstractHook.after_epoch`
:param streams: stream names to be evaluated | [
"Run",
"zeroth",
"epoch",
"on",
"the",
"specified",
"streams",
"."
] | dd609e6b0bd854424a8f86781dd77801a13038f9 | https://github.com/Cognexa/cxflow/blob/dd609e6b0bd854424a8f86781dd77801a13038f9/cxflow/main_loop.py#L222-L237 | train | 36,510 |
Cognexa/cxflow | cxflow/main_loop.py | MainLoop.run_training | def run_training(self, trace: Optional[TrainingTrace]=None) -> None:
"""
Run the main loop in the training mode.
Calls
- :py:meth:`cxflow.hooks.AbstractHook.after_epoch`
- :py:meth:`cxflow.hooks.AbstractHook.after_epoch_profile`
"""
for stream_name in [self._train_stream_name] + self._extra_streams:
self.get_stream(stream_name)
def training():
logging.debug('Training started')
self._epochs_done = 0
# Zeroth epoch: after_epoch
if not self._skip_zeroth_epoch:
logging.info('Evaluating 0th epoch')
self._run_zeroth_epoch([self._train_stream_name] + self._extra_streams)
logging.info('0th epoch done\n\n')
# Training loop: after_epoch, after_epoch_profile
while True:
epoch_id = self._epochs_done + 1
logging.info('Training epoch %s', epoch_id)
self._epoch_profile.clear()
epoch_data = self._create_epoch_data()
with self.get_stream(self._train_stream_name) as stream:
self.train_by_stream(stream)
for stream_name in self._extra_streams:
with self.get_stream(stream_name) as stream:
self.evaluate_stream(stream)
with Timer('after_epoch_hooks', self._epoch_profile):
for hook in self._hooks:
hook.after_epoch(epoch_id=epoch_id, epoch_data=epoch_data)
for hook in self._hooks:
hook.after_epoch_profile(epoch_id=epoch_id, profile=self._epoch_profile,
train_stream_name=self._train_stream_name,
extra_streams=self._extra_streams)
self._epochs_done = epoch_id
if trace is not None:
trace[TrainingTraceKeys.EPOCHS_DONE] = self._epochs_done
logging.info('Epoch %s done\n\n', epoch_id)
self._try_run(training) | python | def run_training(self, trace: Optional[TrainingTrace]=None) -> None:
"""
Run the main loop in the training mode.
Calls
- :py:meth:`cxflow.hooks.AbstractHook.after_epoch`
- :py:meth:`cxflow.hooks.AbstractHook.after_epoch_profile`
"""
for stream_name in [self._train_stream_name] + self._extra_streams:
self.get_stream(stream_name)
def training():
logging.debug('Training started')
self._epochs_done = 0
# Zeroth epoch: after_epoch
if not self._skip_zeroth_epoch:
logging.info('Evaluating 0th epoch')
self._run_zeroth_epoch([self._train_stream_name] + self._extra_streams)
logging.info('0th epoch done\n\n')
# Training loop: after_epoch, after_epoch_profile
while True:
epoch_id = self._epochs_done + 1
logging.info('Training epoch %s', epoch_id)
self._epoch_profile.clear()
epoch_data = self._create_epoch_data()
with self.get_stream(self._train_stream_name) as stream:
self.train_by_stream(stream)
for stream_name in self._extra_streams:
with self.get_stream(stream_name) as stream:
self.evaluate_stream(stream)
with Timer('after_epoch_hooks', self._epoch_profile):
for hook in self._hooks:
hook.after_epoch(epoch_id=epoch_id, epoch_data=epoch_data)
for hook in self._hooks:
hook.after_epoch_profile(epoch_id=epoch_id, profile=self._epoch_profile,
train_stream_name=self._train_stream_name,
extra_streams=self._extra_streams)
self._epochs_done = epoch_id
if trace is not None:
trace[TrainingTraceKeys.EPOCHS_DONE] = self._epochs_done
logging.info('Epoch %s done\n\n', epoch_id)
self._try_run(training) | [
"def",
"run_training",
"(",
"self",
",",
"trace",
":",
"Optional",
"[",
"TrainingTrace",
"]",
"=",
"None",
")",
"->",
"None",
":",
"for",
"stream_name",
"in",
"[",
"self",
".",
"_train_stream_name",
"]",
"+",
"self",
".",
"_extra_streams",
":",
"self",
"... | Run the main loop in the training mode.
Calls
- :py:meth:`cxflow.hooks.AbstractHook.after_epoch`
- :py:meth:`cxflow.hooks.AbstractHook.after_epoch_profile` | [
"Run",
"the",
"main",
"loop",
"in",
"the",
"training",
"mode",
"."
] | dd609e6b0bd854424a8f86781dd77801a13038f9 | https://github.com/Cognexa/cxflow/blob/dd609e6b0bd854424a8f86781dd77801a13038f9/cxflow/main_loop.py#L262-L310 | train | 36,511 |
Cognexa/cxflow | cxflow/main_loop.py | MainLoop.run_evaluation | def run_evaluation(self, stream_name: str) -> None:
"""
Run the main loop with the given stream in the prediction mode.
:param stream_name: name of the stream to be evaluated
"""
def prediction():
logging.info('Running prediction')
self._run_zeroth_epoch([stream_name])
logging.info('Prediction done\n\n')
self._try_run(prediction) | python | def run_evaluation(self, stream_name: str) -> None:
"""
Run the main loop with the given stream in the prediction mode.
:param stream_name: name of the stream to be evaluated
"""
def prediction():
logging.info('Running prediction')
self._run_zeroth_epoch([stream_name])
logging.info('Prediction done\n\n')
self._try_run(prediction) | [
"def",
"run_evaluation",
"(",
"self",
",",
"stream_name",
":",
"str",
")",
"->",
"None",
":",
"def",
"prediction",
"(",
")",
":",
"logging",
".",
"info",
"(",
"'Running prediction'",
")",
"self",
".",
"_run_zeroth_epoch",
"(",
"[",
"stream_name",
"]",
")",... | Run the main loop with the given stream in the prediction mode.
:param stream_name: name of the stream to be evaluated | [
"Run",
"the",
"main",
"loop",
"with",
"the",
"given",
"stream",
"in",
"the",
"prediction",
"mode",
"."
] | dd609e6b0bd854424a8f86781dd77801a13038f9 | https://github.com/Cognexa/cxflow/blob/dd609e6b0bd854424a8f86781dd77801a13038f9/cxflow/main_loop.py#L312-L322 | train | 36,512 |
Cognexa/cxflow | cxflow/models/ensemble.py | major_vote | def major_vote(all_votes: Iterable[Iterable[Hashable]]) -> Iterable[Hashable]:
"""
For the given iterable of object iterations, return an iterable of the most common object at each position of the
inner iterations.
E.g.: for [[1, 2], [1, 3], [2, 3]] the return value would be [1, 3] as 1 and 3 are the most common objects
at the first and second positions respectively.
:param all_votes: an iterable of object iterations
:return: the most common objects in the iterations (the major vote)
"""
return [Counter(votes).most_common()[0][0] for votes in zip(*all_votes)] | python | def major_vote(all_votes: Iterable[Iterable[Hashable]]) -> Iterable[Hashable]:
"""
For the given iterable of object iterations, return an iterable of the most common object at each position of the
inner iterations.
E.g.: for [[1, 2], [1, 3], [2, 3]] the return value would be [1, 3] as 1 and 3 are the most common objects
at the first and second positions respectively.
:param all_votes: an iterable of object iterations
:return: the most common objects in the iterations (the major vote)
"""
return [Counter(votes).most_common()[0][0] for votes in zip(*all_votes)] | [
"def",
"major_vote",
"(",
"all_votes",
":",
"Iterable",
"[",
"Iterable",
"[",
"Hashable",
"]",
"]",
")",
"->",
"Iterable",
"[",
"Hashable",
"]",
":",
"return",
"[",
"Counter",
"(",
"votes",
")",
".",
"most_common",
"(",
")",
"[",
"0",
"]",
"[",
"0",
... | For the given iterable of object iterations, return an iterable of the most common object at each position of the
inner iterations.
E.g.: for [[1, 2], [1, 3], [2, 3]] the return value would be [1, 3] as 1 and 3 are the most common objects
at the first and second positions respectively.
:param all_votes: an iterable of object iterations
:return: the most common objects in the iterations (the major vote) | [
"For",
"the",
"given",
"iterable",
"of",
"object",
"iterations",
"return",
"an",
"iterable",
"of",
"the",
"most",
"common",
"object",
"at",
"each",
"position",
"of",
"the",
"inner",
"iterations",
"."
] | dd609e6b0bd854424a8f86781dd77801a13038f9 | https://github.com/Cognexa/cxflow/blob/dd609e6b0bd854424a8f86781dd77801a13038f9/cxflow/models/ensemble.py#L17-L28 | train | 36,513 |
Cognexa/cxflow | cxflow/models/ensemble.py | Ensemble._load_models | def _load_models(self) -> None:
"""Maybe load all the models to be assembled together and save them to the ``self._models`` attribute."""
if self._models is None:
logging.info('Loading %d models', len(self._model_paths))
def load_model(model_path: str):
logging.debug('\tloading %s', model_path)
if path.isdir(model_path):
model_path = path.join(model_path, CXF_CONFIG_FILE)
config = load_config(model_path)
config['model']['inputs'] = self._inputs
config['model']['outputs'] = self._outputs
return create_model(config, output_dir=None, dataset=self._dataset,
restore_from=path.dirname(model_path))
self._models = list(map(load_model, self._model_paths)) | python | def _load_models(self) -> None:
"""Maybe load all the models to be assembled together and save them to the ``self._models`` attribute."""
if self._models is None:
logging.info('Loading %d models', len(self._model_paths))
def load_model(model_path: str):
logging.debug('\tloading %s', model_path)
if path.isdir(model_path):
model_path = path.join(model_path, CXF_CONFIG_FILE)
config = load_config(model_path)
config['model']['inputs'] = self._inputs
config['model']['outputs'] = self._outputs
return create_model(config, output_dir=None, dataset=self._dataset,
restore_from=path.dirname(model_path))
self._models = list(map(load_model, self._model_paths)) | [
"def",
"_load_models",
"(",
"self",
")",
"->",
"None",
":",
"if",
"self",
".",
"_models",
"is",
"None",
":",
"logging",
".",
"info",
"(",
"'Loading %d models'",
",",
"len",
"(",
"self",
".",
"_model_paths",
")",
")",
"def",
"load_model",
"(",
"model_path... | Maybe load all the models to be assembled together and save them to the ``self._models`` attribute. | [
"Maybe",
"load",
"all",
"the",
"models",
"to",
"be",
"assembled",
"together",
"and",
"save",
"them",
"to",
"the",
"self",
".",
"_models",
"attribute",
"."
] | dd609e6b0bd854424a8f86781dd77801a13038f9 | https://github.com/Cognexa/cxflow/blob/dd609e6b0bd854424a8f86781dd77801a13038f9/cxflow/models/ensemble.py#L113-L129 | train | 36,514 |
Cognexa/cxflow | cxflow/models/ensemble.py | Ensemble.run | def run(self, batch: Batch, train: bool=False, stream: StreamWrapper=None) -> Batch:
"""
Run feed-forward pass with the given batch using all the models, aggregate and return the results.
.. warning::
:py:class:`Ensemble` can not be trained.
:param batch: batch to be processed
:param train: ``True`` if this batch should be used for model update, ``False`` otherwise
:param stream: stream wrapper (useful for precise buffer management)
:return: aggregated results dict
:raise ValueError: if the ``train`` flag is set to ``True``
"""
if train:
raise ValueError('Ensemble model cannot be trained.')
self._load_models()
# run all the models
batch_outputs = [model.run(batch, False, stream) for model in self._models]
# aggregate the outputs
aggregated = {}
for output_name in self._outputs:
output_values = [batch_output[output_name] for batch_output in batch_outputs]
if self._aggregation == 'mean':
aggregated[output_name] = np.mean(output_values, axis=0)
elif self._aggregation == 'major_vote':
output_values_arr = np.array(output_values)
output = major_vote(output_values_arr.reshape((output_values_arr.shape[0], -1)))
aggregated[output_name] = np.array(output).reshape(output_values_arr[0].shape)
return aggregated | python | def run(self, batch: Batch, train: bool=False, stream: StreamWrapper=None) -> Batch:
"""
Run feed-forward pass with the given batch using all the models, aggregate and return the results.
.. warning::
:py:class:`Ensemble` can not be trained.
:param batch: batch to be processed
:param train: ``True`` if this batch should be used for model update, ``False`` otherwise
:param stream: stream wrapper (useful for precise buffer management)
:return: aggregated results dict
:raise ValueError: if the ``train`` flag is set to ``True``
"""
if train:
raise ValueError('Ensemble model cannot be trained.')
self._load_models()
# run all the models
batch_outputs = [model.run(batch, False, stream) for model in self._models]
# aggregate the outputs
aggregated = {}
for output_name in self._outputs:
output_values = [batch_output[output_name] for batch_output in batch_outputs]
if self._aggregation == 'mean':
aggregated[output_name] = np.mean(output_values, axis=0)
elif self._aggregation == 'major_vote':
output_values_arr = np.array(output_values)
output = major_vote(output_values_arr.reshape((output_values_arr.shape[0], -1)))
aggregated[output_name] = np.array(output).reshape(output_values_arr[0].shape)
return aggregated | [
"def",
"run",
"(",
"self",
",",
"batch",
":",
"Batch",
",",
"train",
":",
"bool",
"=",
"False",
",",
"stream",
":",
"StreamWrapper",
"=",
"None",
")",
"->",
"Batch",
":",
"if",
"train",
":",
"raise",
"ValueError",
"(",
"'Ensemble model cannot be trained.'"... | Run feed-forward pass with the given batch using all the models, aggregate and return the results.
.. warning::
:py:class:`Ensemble` can not be trained.
:param batch: batch to be processed
:param train: ``True`` if this batch should be used for model update, ``False`` otherwise
:param stream: stream wrapper (useful for precise buffer management)
:return: aggregated results dict
:raise ValueError: if the ``train`` flag is set to ``True`` | [
"Run",
"feed",
"-",
"forward",
"pass",
"with",
"the",
"given",
"batch",
"using",
"all",
"the",
"models",
"aggregate",
"and",
"return",
"the",
"results",
"."
] | dd609e6b0bd854424a8f86781dd77801a13038f9 | https://github.com/Cognexa/cxflow/blob/dd609e6b0bd854424a8f86781dd77801a13038f9/cxflow/models/ensemble.py#L141-L172 | train | 36,515 |
Cognexa/cxflow | cxflow/utils/reflection.py | get_attribute | def get_attribute(module_name: str, attribute_name: str):
"""
Get the specified module attribute. It most cases, it will be a class or function.
:param module_name: module name
:param attribute_name: attribute name
:return: module attribute
"""
assert isinstance(module_name, str)
assert isinstance(attribute_name, str)
_module = importlib.import_module(module_name)
return getattr(_module, attribute_name) | python | def get_attribute(module_name: str, attribute_name: str):
"""
Get the specified module attribute. It most cases, it will be a class or function.
:param module_name: module name
:param attribute_name: attribute name
:return: module attribute
"""
assert isinstance(module_name, str)
assert isinstance(attribute_name, str)
_module = importlib.import_module(module_name)
return getattr(_module, attribute_name) | [
"def",
"get_attribute",
"(",
"module_name",
":",
"str",
",",
"attribute_name",
":",
"str",
")",
":",
"assert",
"isinstance",
"(",
"module_name",
",",
"str",
")",
"assert",
"isinstance",
"(",
"attribute_name",
",",
"str",
")",
"_module",
"=",
"importlib",
"."... | Get the specified module attribute. It most cases, it will be a class or function.
:param module_name: module name
:param attribute_name: attribute name
:return: module attribute | [
"Get",
"the",
"specified",
"module",
"attribute",
".",
"It",
"most",
"cases",
"it",
"will",
"be",
"a",
"class",
"or",
"function",
"."
] | dd609e6b0bd854424a8f86781dd77801a13038f9 | https://github.com/Cognexa/cxflow/blob/dd609e6b0bd854424a8f86781dd77801a13038f9/cxflow/utils/reflection.py#L28-L40 | train | 36,516 |
Cognexa/cxflow | cxflow/utils/reflection.py | create_object | def create_object(module_name: str, class_name: str, args: Iterable=(), kwargs: Dict[str, Any]=_EMPTY_DICT):
"""
Create an object instance of the given class from the given module.
Args and kwargs are passed to the constructor.
This mimics the following code:
.. code-block:: python
from module import class
return class(*args, **kwargs)
:param module_name: module name
:param class_name: class name
:param args: args to be passed to the object constructor
:param kwargs: kwargs to be passed to the object constructor
:return: created object instance
"""
return get_attribute(module_name, class_name)(*args, **kwargs) | python | def create_object(module_name: str, class_name: str, args: Iterable=(), kwargs: Dict[str, Any]=_EMPTY_DICT):
"""
Create an object instance of the given class from the given module.
Args and kwargs are passed to the constructor.
This mimics the following code:
.. code-block:: python
from module import class
return class(*args, **kwargs)
:param module_name: module name
:param class_name: class name
:param args: args to be passed to the object constructor
:param kwargs: kwargs to be passed to the object constructor
:return: created object instance
"""
return get_attribute(module_name, class_name)(*args, **kwargs) | [
"def",
"create_object",
"(",
"module_name",
":",
"str",
",",
"class_name",
":",
"str",
",",
"args",
":",
"Iterable",
"=",
"(",
")",
",",
"kwargs",
":",
"Dict",
"[",
"str",
",",
"Any",
"]",
"=",
"_EMPTY_DICT",
")",
":",
"return",
"get_attribute",
"(",
... | Create an object instance of the given class from the given module.
Args and kwargs are passed to the constructor.
This mimics the following code:
.. code-block:: python
from module import class
return class(*args, **kwargs)
:param module_name: module name
:param class_name: class name
:param args: args to be passed to the object constructor
:param kwargs: kwargs to be passed to the object constructor
:return: created object instance | [
"Create",
"an",
"object",
"instance",
"of",
"the",
"given",
"class",
"from",
"the",
"given",
"module",
".",
"Args",
"and",
"kwargs",
"are",
"passed",
"to",
"the",
"constructor",
"."
] | dd609e6b0bd854424a8f86781dd77801a13038f9 | https://github.com/Cognexa/cxflow/blob/dd609e6b0bd854424a8f86781dd77801a13038f9/cxflow/utils/reflection.py#L43-L61 | train | 36,517 |
Cognexa/cxflow | cxflow/utils/reflection.py | list_submodules | def list_submodules(module_name: str) -> List[str]: # pylint: disable=invalid-sequence-index
"""
List full names of all the submodules in the given module.
:param module_name: name of the module of which the submodules will be listed
"""
_module = importlib.import_module(module_name)
return [module_name+'.'+submodule_name for _, submodule_name, _ in pkgutil.iter_modules(_module.__path__)] | python | def list_submodules(module_name: str) -> List[str]: # pylint: disable=invalid-sequence-index
"""
List full names of all the submodules in the given module.
:param module_name: name of the module of which the submodules will be listed
"""
_module = importlib.import_module(module_name)
return [module_name+'.'+submodule_name for _, submodule_name, _ in pkgutil.iter_modules(_module.__path__)] | [
"def",
"list_submodules",
"(",
"module_name",
":",
"str",
")",
"->",
"List",
"[",
"str",
"]",
":",
"# pylint: disable=invalid-sequence-index",
"_module",
"=",
"importlib",
".",
"import_module",
"(",
"module_name",
")",
"return",
"[",
"module_name",
"+",
"'.'",
"... | List full names of all the submodules in the given module.
:param module_name: name of the module of which the submodules will be listed | [
"List",
"full",
"names",
"of",
"all",
"the",
"submodules",
"in",
"the",
"given",
"module",
"."
] | dd609e6b0bd854424a8f86781dd77801a13038f9 | https://github.com/Cognexa/cxflow/blob/dd609e6b0bd854424a8f86781dd77801a13038f9/cxflow/utils/reflection.py#L64-L71 | train | 36,518 |
Cognexa/cxflow | cxflow/utils/reflection.py | find_class_module | def find_class_module(module_name: str, class_name: str) \
-> Tuple[List[str], List[Tuple[str, Exception]]]: # pylint: disable=invalid-sequence-index
"""
Find sub-modules of the given module that contain the given class.
Moreover, return a list of sub-modules that could not be imported as a list of (sub-module name, Exception) tuples.
:param module_name: name of the module to be searched
:param class_name: searched class name
:return: a tuple of sub-modules having the searched class and sub-modules that could not be searched
"""
matched_submodules = []
erroneous_submodules = []
for submodule_name in list_submodules(module_name):
try: # the sub-module to be included may be erroneous and we need to continue
submodule = importlib.import_module(submodule_name)
if hasattr(submodule, class_name):
matched_submodules.append(submodule_name)
except Exception as ex: # pylint: disable=broad-except
erroneous_submodules.append((submodule_name, ex))
return matched_submodules, erroneous_submodules | python | def find_class_module(module_name: str, class_name: str) \
-> Tuple[List[str], List[Tuple[str, Exception]]]: # pylint: disable=invalid-sequence-index
"""
Find sub-modules of the given module that contain the given class.
Moreover, return a list of sub-modules that could not be imported as a list of (sub-module name, Exception) tuples.
:param module_name: name of the module to be searched
:param class_name: searched class name
:return: a tuple of sub-modules having the searched class and sub-modules that could not be searched
"""
matched_submodules = []
erroneous_submodules = []
for submodule_name in list_submodules(module_name):
try: # the sub-module to be included may be erroneous and we need to continue
submodule = importlib.import_module(submodule_name)
if hasattr(submodule, class_name):
matched_submodules.append(submodule_name)
except Exception as ex: # pylint: disable=broad-except
erroneous_submodules.append((submodule_name, ex))
return matched_submodules, erroneous_submodules | [
"def",
"find_class_module",
"(",
"module_name",
":",
"str",
",",
"class_name",
":",
"str",
")",
"->",
"Tuple",
"[",
"List",
"[",
"str",
"]",
",",
"List",
"[",
"Tuple",
"[",
"str",
",",
"Exception",
"]",
"]",
"]",
":",
"# pylint: disable=invalid-sequence-in... | Find sub-modules of the given module that contain the given class.
Moreover, return a list of sub-modules that could not be imported as a list of (sub-module name, Exception) tuples.
:param module_name: name of the module to be searched
:param class_name: searched class name
:return: a tuple of sub-modules having the searched class and sub-modules that could not be searched | [
"Find",
"sub",
"-",
"modules",
"of",
"the",
"given",
"module",
"that",
"contain",
"the",
"given",
"class",
"."
] | dd609e6b0bd854424a8f86781dd77801a13038f9 | https://github.com/Cognexa/cxflow/blob/dd609e6b0bd854424a8f86781dd77801a13038f9/cxflow/utils/reflection.py#L74-L94 | train | 36,519 |
Cognexa/cxflow | cxflow/utils/reflection.py | get_class_module | def get_class_module(module_name: str, class_name: str) -> Optional[str]:
"""
Get a sub-module of the given module which has the given class.
This method wraps `utils.reflection.find_class_module method` with the following behavior:
- raise error when multiple sub-modules with different classes with the same name are found
- return None when no sub-module is found
- warn about non-searchable sub-modules
.. note::
This function logs!
:param module_name: module to be searched
:param class_name: searched class name
:return: sub-module with the searched class or None
"""
matched_modules, erroneous_modules = find_class_module(module_name, class_name)
for submodule, error in erroneous_modules:
logging.warning('Could not inspect sub-module `%s` due to `%s` '
'when searching for `%s` in sub-modules of `%s`.',
submodule, type(error).__name__, class_name, module_name)
if len(matched_modules) == 1:
return matched_modules[0]
if len(matched_modules) > 1:
# check if all the module attributes point to the same class
first_class = getattr(importlib.import_module(matched_modules[0]), class_name)
for matched_module in matched_modules:
another_class = getattr(importlib.import_module(matched_module), class_name)
if another_class is not first_class:
raise ValueError('Found more than one sub-module when searching for `{}` in sub-modules of `{}`. '
'Please specify the module explicitly. Found sub-modules: `{}`'
.format(class_name, module_name, matched_modules))
return matched_modules[0]
return None | python | def get_class_module(module_name: str, class_name: str) -> Optional[str]:
"""
Get a sub-module of the given module which has the given class.
This method wraps `utils.reflection.find_class_module method` with the following behavior:
- raise error when multiple sub-modules with different classes with the same name are found
- return None when no sub-module is found
- warn about non-searchable sub-modules
.. note::
This function logs!
:param module_name: module to be searched
:param class_name: searched class name
:return: sub-module with the searched class or None
"""
matched_modules, erroneous_modules = find_class_module(module_name, class_name)
for submodule, error in erroneous_modules:
logging.warning('Could not inspect sub-module `%s` due to `%s` '
'when searching for `%s` in sub-modules of `%s`.',
submodule, type(error).__name__, class_name, module_name)
if len(matched_modules) == 1:
return matched_modules[0]
if len(matched_modules) > 1:
# check if all the module attributes point to the same class
first_class = getattr(importlib.import_module(matched_modules[0]), class_name)
for matched_module in matched_modules:
another_class = getattr(importlib.import_module(matched_module), class_name)
if another_class is not first_class:
raise ValueError('Found more than one sub-module when searching for `{}` in sub-modules of `{}`. '
'Please specify the module explicitly. Found sub-modules: `{}`'
.format(class_name, module_name, matched_modules))
return matched_modules[0]
return None | [
"def",
"get_class_module",
"(",
"module_name",
":",
"str",
",",
"class_name",
":",
"str",
")",
"->",
"Optional",
"[",
"str",
"]",
":",
"matched_modules",
",",
"erroneous_modules",
"=",
"find_class_module",
"(",
"module_name",
",",
"class_name",
")",
"for",
"su... | Get a sub-module of the given module which has the given class.
This method wraps `utils.reflection.find_class_module method` with the following behavior:
- raise error when multiple sub-modules with different classes with the same name are found
- return None when no sub-module is found
- warn about non-searchable sub-modules
.. note::
This function logs!
:param module_name: module to be searched
:param class_name: searched class name
:return: sub-module with the searched class or None | [
"Get",
"a",
"sub",
"-",
"module",
"of",
"the",
"given",
"module",
"which",
"has",
"the",
"given",
"class",
"."
] | dd609e6b0bd854424a8f86781dd77801a13038f9 | https://github.com/Cognexa/cxflow/blob/dd609e6b0bd854424a8f86781dd77801a13038f9/cxflow/utils/reflection.py#L97-L133 | train | 36,520 |
Cognexa/cxflow | cxflow/datasets/stream_wrapper.py | StreamWrapper._get_stream | def _get_stream(self) -> Iterator:
"""Possibly create and return raw dataset stream iterator."""
if self._stream is None:
self._stream = iter(self._get_stream_fn())
return self._stream | python | def _get_stream(self) -> Iterator:
"""Possibly create and return raw dataset stream iterator."""
if self._stream is None:
self._stream = iter(self._get_stream_fn())
return self._stream | [
"def",
"_get_stream",
"(",
"self",
")",
"->",
"Iterator",
":",
"if",
"self",
".",
"_stream",
"is",
"None",
":",
"self",
".",
"_stream",
"=",
"iter",
"(",
"self",
".",
"_get_stream_fn",
"(",
")",
")",
"return",
"self",
".",
"_stream"
] | Possibly create and return raw dataset stream iterator. | [
"Possibly",
"create",
"and",
"return",
"raw",
"dataset",
"stream",
"iterator",
"."
] | dd609e6b0bd854424a8f86781dd77801a13038f9 | https://github.com/Cognexa/cxflow/blob/dd609e6b0bd854424a8f86781dd77801a13038f9/cxflow/datasets/stream_wrapper.py#L78-L82 | train | 36,521 |
Cognexa/cxflow | cxflow/datasets/stream_wrapper.py | StreamWrapper._enqueue_batches | def _enqueue_batches(self, stop_event: Event) -> None:
"""
Enqueue all the stream batches. If specified, stop after ``epoch_size`` batches.
.. note::
Signal the epoch end with ``None``.
Stop when:
- ``stop_event`` is risen
- stream ends and epoch size is not set
- specified number of batches is enqueued
.. note::
This is used only with ``buffer`` > 0.
:param stop_event: event signaling stop instruction
"""
while True:
self._stream = self._get_stream()
while True:
# Acquire the semaphore before processing the next batch
# but immediately release it so that other threads
# are not blocked when they decide to acquire it again.
with self._semaphore:
pass
# It always takes a short moment before the native call actually
# releases the GIL and we are free to compute. The following sleep
# is here to compensate for this short moment - we don't want to
# slow down the native call before the GIL is released.
time.sleep(CXF_BUFFER_SLEEP)
try:
batch = next(self._stream)
except StopIteration:
break
self._queue.put(batch)
self._batch_count += 1
if stop_event.is_set():
return
if self._epoch_limit_reached():
self._queue.put(None)
self._batch_count = 0
return
self._stream = None # yield a new iterator next time
if self._epoch_size <= 0: # for non-fixed size epochs
self._queue.put(None)
self._batch_count = 0
return | python | def _enqueue_batches(self, stop_event: Event) -> None:
"""
Enqueue all the stream batches. If specified, stop after ``epoch_size`` batches.
.. note::
Signal the epoch end with ``None``.
Stop when:
- ``stop_event`` is risen
- stream ends and epoch size is not set
- specified number of batches is enqueued
.. note::
This is used only with ``buffer`` > 0.
:param stop_event: event signaling stop instruction
"""
while True:
self._stream = self._get_stream()
while True:
# Acquire the semaphore before processing the next batch
# but immediately release it so that other threads
# are not blocked when they decide to acquire it again.
with self._semaphore:
pass
# It always takes a short moment before the native call actually
# releases the GIL and we are free to compute. The following sleep
# is here to compensate for this short moment - we don't want to
# slow down the native call before the GIL is released.
time.sleep(CXF_BUFFER_SLEEP)
try:
batch = next(self._stream)
except StopIteration:
break
self._queue.put(batch)
self._batch_count += 1
if stop_event.is_set():
return
if self._epoch_limit_reached():
self._queue.put(None)
self._batch_count = 0
return
self._stream = None # yield a new iterator next time
if self._epoch_size <= 0: # for non-fixed size epochs
self._queue.put(None)
self._batch_count = 0
return | [
"def",
"_enqueue_batches",
"(",
"self",
",",
"stop_event",
":",
"Event",
")",
"->",
"None",
":",
"while",
"True",
":",
"self",
".",
"_stream",
"=",
"self",
".",
"_get_stream",
"(",
")",
"while",
"True",
":",
"# Acquire the semaphore before processing the next ba... | Enqueue all the stream batches. If specified, stop after ``epoch_size`` batches.
.. note::
Signal the epoch end with ``None``.
Stop when:
- ``stop_event`` is risen
- stream ends and epoch size is not set
- specified number of batches is enqueued
.. note::
This is used only with ``buffer`` > 0.
:param stop_event: event signaling stop instruction | [
"Enqueue",
"all",
"the",
"stream",
"batches",
".",
"If",
"specified",
"stop",
"after",
"epoch_size",
"batches",
"."
] | dd609e6b0bd854424a8f86781dd77801a13038f9 | https://github.com/Cognexa/cxflow/blob/dd609e6b0bd854424a8f86781dd77801a13038f9/cxflow/datasets/stream_wrapper.py#L92-L138 | train | 36,522 |
Cognexa/cxflow | cxflow/datasets/stream_wrapper.py | StreamWrapper._dequeue_batch | def _dequeue_batch(self) -> Optional[Batch]:
"""
Return a single batch from queue or ``None`` signaling epoch end.
:raise ChildProcessError: if the enqueueing thread ended unexpectedly
"""
if self._enqueueing_thread is None:
raise ValueError('StreamWrapper `{}` with buffer of size `{}` was used outside with-resource environment.'
.format(self._name, self._buffer_size))
if not self._enqueueing_thread.is_alive() and self._queue.empty():
self._start_thread()
while True:
try:
batch = self._queue.get(timeout=2)
self._queue.task_done()
break
except Empty:
if not self._enqueueing_thread.is_alive():
try:
# the enqueueing thread may just finished properly so lets check the queue eagerly
batch = self._queue.get_nowait()
self._queue.task_done()
break
except Empty:
# so we failed to retrieve a batch and the enqueueing thread is dead
# there is no hope, something must went wrong
raise ChildProcessError('Enqueueing thread ended unexpectedly.')
return batch | python | def _dequeue_batch(self) -> Optional[Batch]:
"""
Return a single batch from queue or ``None`` signaling epoch end.
:raise ChildProcessError: if the enqueueing thread ended unexpectedly
"""
if self._enqueueing_thread is None:
raise ValueError('StreamWrapper `{}` with buffer of size `{}` was used outside with-resource environment.'
.format(self._name, self._buffer_size))
if not self._enqueueing_thread.is_alive() and self._queue.empty():
self._start_thread()
while True:
try:
batch = self._queue.get(timeout=2)
self._queue.task_done()
break
except Empty:
if not self._enqueueing_thread.is_alive():
try:
# the enqueueing thread may just finished properly so lets check the queue eagerly
batch = self._queue.get_nowait()
self._queue.task_done()
break
except Empty:
# so we failed to retrieve a batch and the enqueueing thread is dead
# there is no hope, something must went wrong
raise ChildProcessError('Enqueueing thread ended unexpectedly.')
return batch | [
"def",
"_dequeue_batch",
"(",
"self",
")",
"->",
"Optional",
"[",
"Batch",
"]",
":",
"if",
"self",
".",
"_enqueueing_thread",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'StreamWrapper `{}` with buffer of size `{}` was used outside with-resource environment.'",
".",
... | Return a single batch from queue or ``None`` signaling epoch end.
:raise ChildProcessError: if the enqueueing thread ended unexpectedly | [
"Return",
"a",
"single",
"batch",
"from",
"queue",
"or",
"None",
"signaling",
"epoch",
"end",
"."
] | dd609e6b0bd854424a8f86781dd77801a13038f9 | https://github.com/Cognexa/cxflow/blob/dd609e6b0bd854424a8f86781dd77801a13038f9/cxflow/datasets/stream_wrapper.py#L140-L167 | train | 36,523 |
Cognexa/cxflow | cxflow/datasets/stream_wrapper.py | StreamWrapper._next_batch | def _next_batch(self) -> Optional[Batch]:
"""
Return a single batch or ``None`` signaling epoch end.
.. note::
Signal the epoch end with ``None``.
Stop when:
- stream ends and epoch size is not set
- specified number of batches is returned
:return: a single batch or ``None`` signaling epoch end
"""
if self._epoch_limit_reached():
self._batch_count = 0
return None
try:
batch = next(self._get_stream())
self._batch_count += 1
return batch
except StopIteration:
self._stream = None # yield a new iterator next time
if self._epoch_size > 0: # underlying stream ended but our fixed size epoch did not
batch = next(self._get_stream()) # get another stream and return its 1st batch
self._batch_count += 1
return batch
else:
self._batch_count = 0
return None | python | def _next_batch(self) -> Optional[Batch]:
"""
Return a single batch or ``None`` signaling epoch end.
.. note::
Signal the epoch end with ``None``.
Stop when:
- stream ends and epoch size is not set
- specified number of batches is returned
:return: a single batch or ``None`` signaling epoch end
"""
if self._epoch_limit_reached():
self._batch_count = 0
return None
try:
batch = next(self._get_stream())
self._batch_count += 1
return batch
except StopIteration:
self._stream = None # yield a new iterator next time
if self._epoch_size > 0: # underlying stream ended but our fixed size epoch did not
batch = next(self._get_stream()) # get another stream and return its 1st batch
self._batch_count += 1
return batch
else:
self._batch_count = 0
return None | [
"def",
"_next_batch",
"(",
"self",
")",
"->",
"Optional",
"[",
"Batch",
"]",
":",
"if",
"self",
".",
"_epoch_limit_reached",
"(",
")",
":",
"self",
".",
"_batch_count",
"=",
"0",
"return",
"None",
"try",
":",
"batch",
"=",
"next",
"(",
"self",
".",
"... | Return a single batch or ``None`` signaling epoch end.
.. note::
Signal the epoch end with ``None``.
Stop when:
- stream ends and epoch size is not set
- specified number of batches is returned
:return: a single batch or ``None`` signaling epoch end | [
"Return",
"a",
"single",
"batch",
"or",
"None",
"signaling",
"epoch",
"end",
"."
] | dd609e6b0bd854424a8f86781dd77801a13038f9 | https://github.com/Cognexa/cxflow/blob/dd609e6b0bd854424a8f86781dd77801a13038f9/cxflow/datasets/stream_wrapper.py#L169-L197 | train | 36,524 |
Cognexa/cxflow | cxflow/datasets/stream_wrapper.py | StreamWrapper._start_thread | def _start_thread(self):
"""Start an enqueueing thread."""
self._stopping_event = Event()
self._enqueueing_thread = Thread(target=self._enqueue_batches, args=(self._stopping_event,))
self._enqueueing_thread.start() | python | def _start_thread(self):
"""Start an enqueueing thread."""
self._stopping_event = Event()
self._enqueueing_thread = Thread(target=self._enqueue_batches, args=(self._stopping_event,))
self._enqueueing_thread.start() | [
"def",
"_start_thread",
"(",
"self",
")",
":",
"self",
".",
"_stopping_event",
"=",
"Event",
"(",
")",
"self",
".",
"_enqueueing_thread",
"=",
"Thread",
"(",
"target",
"=",
"self",
".",
"_enqueue_batches",
",",
"args",
"=",
"(",
"self",
".",
"_stopping_eve... | Start an enqueueing thread. | [
"Start",
"an",
"enqueueing",
"thread",
"."
] | dd609e6b0bd854424a8f86781dd77801a13038f9 | https://github.com/Cognexa/cxflow/blob/dd609e6b0bd854424a8f86781dd77801a13038f9/cxflow/datasets/stream_wrapper.py#L199-L203 | train | 36,525 |
Cognexa/cxflow | cxflow/datasets/stream_wrapper.py | StreamWrapper._stop_thread | def _stop_thread(self):
"""Stop the enqueueing thread. Keep the queue content and stream state."""
self._stopping_event.set()
queue_content = []
try: # give the enqueueing thread chance to put a batch to the queue and check the stopping event
while True:
queue_content.append(self._queue.get_nowait())
except Empty:
pass
self._enqueueing_thread.join()
try:
queue_content.append(self._queue.get_nowait()) # collect the very last item
except Empty:
pass
self._queue = Queue(max(len(queue_content), self._buffer_size)) # queue content may be bigger than queue size
for batch in queue_content:
self._queue.put(batch) | python | def _stop_thread(self):
"""Stop the enqueueing thread. Keep the queue content and stream state."""
self._stopping_event.set()
queue_content = []
try: # give the enqueueing thread chance to put a batch to the queue and check the stopping event
while True:
queue_content.append(self._queue.get_nowait())
except Empty:
pass
self._enqueueing_thread.join()
try:
queue_content.append(self._queue.get_nowait()) # collect the very last item
except Empty:
pass
self._queue = Queue(max(len(queue_content), self._buffer_size)) # queue content may be bigger than queue size
for batch in queue_content:
self._queue.put(batch) | [
"def",
"_stop_thread",
"(",
"self",
")",
":",
"self",
".",
"_stopping_event",
".",
"set",
"(",
")",
"queue_content",
"=",
"[",
"]",
"try",
":",
"# give the enqueueing thread chance to put a batch to the queue and check the stopping event",
"while",
"True",
":",
"queue_c... | Stop the enqueueing thread. Keep the queue content and stream state. | [
"Stop",
"the",
"enqueueing",
"thread",
".",
"Keep",
"the",
"queue",
"content",
"and",
"stream",
"state",
"."
] | dd609e6b0bd854424a8f86781dd77801a13038f9 | https://github.com/Cognexa/cxflow/blob/dd609e6b0bd854424a8f86781dd77801a13038f9/cxflow/datasets/stream_wrapper.py#L205-L221 | train | 36,526 |
Cognexa/cxflow | cxflow/hooks/save.py | SaveEvery._after_n_epoch | def _after_n_epoch(self, epoch_id: int, **_) -> None:
"""
Save the model every ``n_epochs`` epoch.
:param epoch_id: number of the processed epoch
"""
SaveEvery.save_model(model=self._model, name_suffix=str(epoch_id), on_failure=self._on_save_failure) | python | def _after_n_epoch(self, epoch_id: int, **_) -> None:
"""
Save the model every ``n_epochs`` epoch.
:param epoch_id: number of the processed epoch
"""
SaveEvery.save_model(model=self._model, name_suffix=str(epoch_id), on_failure=self._on_save_failure) | [
"def",
"_after_n_epoch",
"(",
"self",
",",
"epoch_id",
":",
"int",
",",
"*",
"*",
"_",
")",
"->",
"None",
":",
"SaveEvery",
".",
"save_model",
"(",
"model",
"=",
"self",
".",
"_model",
",",
"name_suffix",
"=",
"str",
"(",
"epoch_id",
")",
",",
"on_fa... | Save the model every ``n_epochs`` epoch.
:param epoch_id: number of the processed epoch | [
"Save",
"the",
"model",
"every",
"n_epochs",
"epoch",
"."
] | dd609e6b0bd854424a8f86781dd77801a13038f9 | https://github.com/Cognexa/cxflow/blob/dd609e6b0bd854424a8f86781dd77801a13038f9/cxflow/hooks/save.py#L47-L53 | train | 36,527 |
Cognexa/cxflow | cxflow/hooks/save.py | SaveEvery.save_model | def save_model(model: AbstractModel, name_suffix: str, on_failure: str) -> None:
"""
Save the given model with the given name_suffix. On failure, take the specified action.
:param model: the model to be saved
:param name_suffix: name to be used for saving
:param on_failure: action to be taken on failure; one of :py:attr:`SAVE_FAILURE_ACTIONS`
:raise IOError: on save failure with ``on_failure`` set to ``error``
"""
try:
logging.debug('Saving the model')
save_path = model.save(name_suffix)
logging.info('Model saved to: %s', save_path)
except Exception as ex: # pylint: disable=broad-except
if on_failure == 'error':
raise IOError('Failed to save the model.') from ex
elif on_failure == 'warn':
logging.warning('Failed to save the model.') | python | def save_model(model: AbstractModel, name_suffix: str, on_failure: str) -> None:
"""
Save the given model with the given name_suffix. On failure, take the specified action.
:param model: the model to be saved
:param name_suffix: name to be used for saving
:param on_failure: action to be taken on failure; one of :py:attr:`SAVE_FAILURE_ACTIONS`
:raise IOError: on save failure with ``on_failure`` set to ``error``
"""
try:
logging.debug('Saving the model')
save_path = model.save(name_suffix)
logging.info('Model saved to: %s', save_path)
except Exception as ex: # pylint: disable=broad-except
if on_failure == 'error':
raise IOError('Failed to save the model.') from ex
elif on_failure == 'warn':
logging.warning('Failed to save the model.') | [
"def",
"save_model",
"(",
"model",
":",
"AbstractModel",
",",
"name_suffix",
":",
"str",
",",
"on_failure",
":",
"str",
")",
"->",
"None",
":",
"try",
":",
"logging",
".",
"debug",
"(",
"'Saving the model'",
")",
"save_path",
"=",
"model",
".",
"save",
"... | Save the given model with the given name_suffix. On failure, take the specified action.
:param model: the model to be saved
:param name_suffix: name to be used for saving
:param on_failure: action to be taken on failure; one of :py:attr:`SAVE_FAILURE_ACTIONS`
:raise IOError: on save failure with ``on_failure`` set to ``error`` | [
"Save",
"the",
"given",
"model",
"with",
"the",
"given",
"name_suffix",
".",
"On",
"failure",
"take",
"the",
"specified",
"action",
"."
] | dd609e6b0bd854424a8f86781dd77801a13038f9 | https://github.com/Cognexa/cxflow/blob/dd609e6b0bd854424a8f86781dd77801a13038f9/cxflow/hooks/save.py#L56-L73 | train | 36,528 |
Cognexa/cxflow | cxflow/hooks/save.py | SaveBest._get_value | def _get_value(self, epoch_data: EpochData) -> float:
"""
Retrieve the value of the monitored variable from the given epoch data.
:param epoch_data: epoch data which determine whether the model will be saved or not
:raise KeyError: if any of the specified stream, variable or aggregation is not present in the ``epoch_data``
:raise TypeError: if the variable value is not a dict when aggregation is specified
:raise ValueError: if the variable value is not a scalar
"""
if self._stream_name not in epoch_data:
raise KeyError('Stream `{}` was not found in the epoch data.\nAvailable streams are `{}`.'
.format(self._stream_name, epoch_data.keys()))
stream_data = epoch_data[self._stream_name]
if self._variable not in stream_data:
raise KeyError('Variable `{}` for stream `{}` was not found in the epoch data. '
'Available variables for stream `{}` are `{}`.'
.format(self._variable, self._stream_name, self._stream_name, stream_data.keys()))
value = stream_data[self._variable]
if self._aggregation:
if not isinstance(value, dict):
raise TypeError('Variable `{}` is expected to be a dict when aggregation is specified. '
'Got `{}` instead.'.format(self._variable, type(value).__name__))
if self._aggregation not in value:
raise KeyError('Specified aggregation `{}` was not found in the variable `{}`. '
'Available aggregations: `{}`.'.format(self._aggregation, self._variable, value.keys()))
value = value[self._aggregation]
if not np.isscalar(value):
raise ValueError('Variable `{}` value is not a scalar.'.format(value))
return value | python | def _get_value(self, epoch_data: EpochData) -> float:
"""
Retrieve the value of the monitored variable from the given epoch data.
:param epoch_data: epoch data which determine whether the model will be saved or not
:raise KeyError: if any of the specified stream, variable or aggregation is not present in the ``epoch_data``
:raise TypeError: if the variable value is not a dict when aggregation is specified
:raise ValueError: if the variable value is not a scalar
"""
if self._stream_name not in epoch_data:
raise KeyError('Stream `{}` was not found in the epoch data.\nAvailable streams are `{}`.'
.format(self._stream_name, epoch_data.keys()))
stream_data = epoch_data[self._stream_name]
if self._variable not in stream_data:
raise KeyError('Variable `{}` for stream `{}` was not found in the epoch data. '
'Available variables for stream `{}` are `{}`.'
.format(self._variable, self._stream_name, self._stream_name, stream_data.keys()))
value = stream_data[self._variable]
if self._aggregation:
if not isinstance(value, dict):
raise TypeError('Variable `{}` is expected to be a dict when aggregation is specified. '
'Got `{}` instead.'.format(self._variable, type(value).__name__))
if self._aggregation not in value:
raise KeyError('Specified aggregation `{}` was not found in the variable `{}`. '
'Available aggregations: `{}`.'.format(self._aggregation, self._variable, value.keys()))
value = value[self._aggregation]
if not np.isscalar(value):
raise ValueError('Variable `{}` value is not a scalar.'.format(value))
return value | [
"def",
"_get_value",
"(",
"self",
",",
"epoch_data",
":",
"EpochData",
")",
"->",
"float",
":",
"if",
"self",
".",
"_stream_name",
"not",
"in",
"epoch_data",
":",
"raise",
"KeyError",
"(",
"'Stream `{}` was not found in the epoch data.\\nAvailable streams are `{}`.'",
... | Retrieve the value of the monitored variable from the given epoch data.
:param epoch_data: epoch data which determine whether the model will be saved or not
:raise KeyError: if any of the specified stream, variable or aggregation is not present in the ``epoch_data``
:raise TypeError: if the variable value is not a dict when aggregation is specified
:raise ValueError: if the variable value is not a scalar | [
"Retrieve",
"the",
"value",
"of",
"the",
"monitored",
"variable",
"from",
"the",
"given",
"epoch",
"data",
"."
] | dd609e6b0bd854424a8f86781dd77801a13038f9 | https://github.com/Cognexa/cxflow/blob/dd609e6b0bd854424a8f86781dd77801a13038f9/cxflow/hooks/save.py#L129-L160 | train | 36,529 |
Cognexa/cxflow | cxflow/hooks/save.py | SaveBest._is_value_better | def _is_value_better(self, new_value: float) -> bool:
"""
Test if the new value is better than the best so far.
:param new_value: current value of the objective function
"""
if self._best_value is None:
return True
if self._condition == 'min':
return new_value < self._best_value
if self._condition == 'max':
return new_value > self._best_value | python | def _is_value_better(self, new_value: float) -> bool:
"""
Test if the new value is better than the best so far.
:param new_value: current value of the objective function
"""
if self._best_value is None:
return True
if self._condition == 'min':
return new_value < self._best_value
if self._condition == 'max':
return new_value > self._best_value | [
"def",
"_is_value_better",
"(",
"self",
",",
"new_value",
":",
"float",
")",
"->",
"bool",
":",
"if",
"self",
".",
"_best_value",
"is",
"None",
":",
"return",
"True",
"if",
"self",
".",
"_condition",
"==",
"'min'",
":",
"return",
"new_value",
"<",
"self"... | Test if the new value is better than the best so far.
:param new_value: current value of the objective function | [
"Test",
"if",
"the",
"new",
"value",
"is",
"better",
"than",
"the",
"best",
"so",
"far",
"."
] | dd609e6b0bd854424a8f86781dd77801a13038f9 | https://github.com/Cognexa/cxflow/blob/dd609e6b0bd854424a8f86781dd77801a13038f9/cxflow/hooks/save.py#L162-L173 | train | 36,530 |
Cognexa/cxflow | cxflow/hooks/save.py | SaveBest.after_epoch | def after_epoch(self, epoch_data: EpochData, **_) -> None:
"""
Save the model if the new value of the monitored variable is better than the best value so far.
:param epoch_data: epoch data to be processed
"""
new_value = self._get_value(epoch_data)
if self._is_value_better(new_value):
self._best_value = new_value
SaveEvery.save_model(model=self._model, name_suffix=self._OUTPUT_NAME, on_failure=self._on_save_failure) | python | def after_epoch(self, epoch_data: EpochData, **_) -> None:
"""
Save the model if the new value of the monitored variable is better than the best value so far.
:param epoch_data: epoch data to be processed
"""
new_value = self._get_value(epoch_data)
if self._is_value_better(new_value):
self._best_value = new_value
SaveEvery.save_model(model=self._model, name_suffix=self._OUTPUT_NAME, on_failure=self._on_save_failure) | [
"def",
"after_epoch",
"(",
"self",
",",
"epoch_data",
":",
"EpochData",
",",
"*",
"*",
"_",
")",
"->",
"None",
":",
"new_value",
"=",
"self",
".",
"_get_value",
"(",
"epoch_data",
")",
"if",
"self",
".",
"_is_value_better",
"(",
"new_value",
")",
":",
... | Save the model if the new value of the monitored variable is better than the best value so far.
:param epoch_data: epoch data to be processed | [
"Save",
"the",
"model",
"if",
"the",
"new",
"value",
"of",
"the",
"monitored",
"variable",
"is",
"better",
"than",
"the",
"best",
"value",
"so",
"far",
"."
] | dd609e6b0bd854424a8f86781dd77801a13038f9 | https://github.com/Cognexa/cxflow/blob/dd609e6b0bd854424a8f86781dd77801a13038f9/cxflow/hooks/save.py#L175-L185 | train | 36,531 |
Cognexa/cxflow | cxflow/hooks/show_progress.py | print_progress_bar | def print_progress_bar(done: int, total: int, prefix: str = '', suffix: str = '') -> None:
"""
Print a progressbar with the given prefix and suffix, without newline at the end.
param done: current step in computation
param total: total count of steps in computation
param prefix: info text displayed before the progress bar
param suffix: info text displayed after the progress bar
"""
percent = '{0:.1f}'.format(100 * (done / float(total)))
base_len = shutil.get_terminal_size().columns - 7 - len(prefix) - len(suffix)
base_len = min([base_len, 50])
min_length = base_len - 1 - len('{}/{}={}'.format(total, total, '100.0'))
length = base_len - len('{}/{}={}'.format(done, total, percent))
if min_length > 0:
filled_len = int(min_length * done // total)
bar = '='*filled_len + '-'*(min_length - filled_len)
spacing = ' '*(length - min_length)
print('\r{}: |{}|{}{}/{}={}% {}'.format(prefix, bar, spacing, done, total, percent, suffix), end='\r')
else:
short_progress = '\r{}: {}/{}'.format(prefix, done, total)
if len(short_progress) <= shutil.get_terminal_size().columns:
print(short_progress, end='\r')
else:
print(['-', '\\', '|', '/'][done % 4], end='\r') | python | def print_progress_bar(done: int, total: int, prefix: str = '', suffix: str = '') -> None:
"""
Print a progressbar with the given prefix and suffix, without newline at the end.
param done: current step in computation
param total: total count of steps in computation
param prefix: info text displayed before the progress bar
param suffix: info text displayed after the progress bar
"""
percent = '{0:.1f}'.format(100 * (done / float(total)))
base_len = shutil.get_terminal_size().columns - 7 - len(prefix) - len(suffix)
base_len = min([base_len, 50])
min_length = base_len - 1 - len('{}/{}={}'.format(total, total, '100.0'))
length = base_len - len('{}/{}={}'.format(done, total, percent))
if min_length > 0:
filled_len = int(min_length * done // total)
bar = '='*filled_len + '-'*(min_length - filled_len)
spacing = ' '*(length - min_length)
print('\r{}: |{}|{}{}/{}={}% {}'.format(prefix, bar, spacing, done, total, percent, suffix), end='\r')
else:
short_progress = '\r{}: {}/{}'.format(prefix, done, total)
if len(short_progress) <= shutil.get_terminal_size().columns:
print(short_progress, end='\r')
else:
print(['-', '\\', '|', '/'][done % 4], end='\r') | [
"def",
"print_progress_bar",
"(",
"done",
":",
"int",
",",
"total",
":",
"int",
",",
"prefix",
":",
"str",
"=",
"''",
",",
"suffix",
":",
"str",
"=",
"''",
")",
"->",
"None",
":",
"percent",
"=",
"'{0:.1f}'",
".",
"format",
"(",
"100",
"*",
"(",
... | Print a progressbar with the given prefix and suffix, without newline at the end.
param done: current step in computation
param total: total count of steps in computation
param prefix: info text displayed before the progress bar
param suffix: info text displayed after the progress bar | [
"Print",
"a",
"progressbar",
"with",
"the",
"given",
"prefix",
"and",
"suffix",
"without",
"newline",
"at",
"the",
"end",
"."
] | dd609e6b0bd854424a8f86781dd77801a13038f9 | https://github.com/Cognexa/cxflow/blob/dd609e6b0bd854424a8f86781dd77801a13038f9/cxflow/hooks/show_progress.py#L14-L39 | train | 36,532 |
Cognexa/cxflow | cxflow/hooks/show_progress.py | ShowProgress.after_epoch | def after_epoch(self, **_) -> None:
"""
Reset progress counters. Save ``total_batch_count`` after the 1st epoch.
"""
if not self._total_batch_count_saved:
self._total_batch_count = self._current_batch_count.copy()
self._total_batch_count_saved = True
self._current_batch_count.clear()
self._current_stream_start = None
self._current_stream_name = None
erase_line() | python | def after_epoch(self, **_) -> None:
"""
Reset progress counters. Save ``total_batch_count`` after the 1st epoch.
"""
if not self._total_batch_count_saved:
self._total_batch_count = self._current_batch_count.copy()
self._total_batch_count_saved = True
self._current_batch_count.clear()
self._current_stream_start = None
self._current_stream_name = None
erase_line() | [
"def",
"after_epoch",
"(",
"self",
",",
"*",
"*",
"_",
")",
"->",
"None",
":",
"if",
"not",
"self",
".",
"_total_batch_count_saved",
":",
"self",
".",
"_total_batch_count",
"=",
"self",
".",
"_current_batch_count",
".",
"copy",
"(",
")",
"self",
".",
"_t... | Reset progress counters. Save ``total_batch_count`` after the 1st epoch. | [
"Reset",
"progress",
"counters",
".",
"Save",
"total_batch_count",
"after",
"the",
"1st",
"epoch",
"."
] | dd609e6b0bd854424a8f86781dd77801a13038f9 | https://github.com/Cognexa/cxflow/blob/dd609e6b0bd854424a8f86781dd77801a13038f9/cxflow/hooks/show_progress.py#L134-L144 | train | 36,533 |
Cognexa/cxflow | cxflow/hooks/log_profile.py | LogProfile.after_epoch_profile | def after_epoch_profile(self, epoch_id, profile: TimeProfile, train_stream_name: str, extra_streams: Iterable[str]) -> None:
"""
Summarize and log the given epoch profile.
The profile is expected to contain at least:
- ``read_data_train``, ``eval_batch_train`` and ``after_batch_hooks_train`` entries produced by the train
stream (if train stream name is `train`)
- ``after_epoch_hooks`` entry
:param profile: epoch timings profile
:param extra_streams: enumeration of additional stream names
"""
read_data_total = 0
eval_total = 0
train_total = sum(profile.get('eval_batch_{}'.format(train_stream_name), []))
hooks_total = sum(profile.get('after_epoch_hooks', []))
for stream_name in chain(extra_streams, [train_stream_name]):
read_data_total += sum(profile.get('read_batch_' + stream_name, []))
hooks_total += sum(profile.get('after_batch_hooks_' + stream_name, []))
if stream_name != train_stream_name:
eval_total += sum(profile.get('eval_batch_' + stream_name, []))
logging.info('\tT read data:\t%f', read_data_total)
logging.info('\tT train:\t%f', train_total)
logging.info('\tT eval:\t%f', eval_total)
logging.info('\tT hooks:\t%f', hooks_total) | python | def after_epoch_profile(self, epoch_id, profile: TimeProfile, train_stream_name: str, extra_streams: Iterable[str]) -> None:
"""
Summarize and log the given epoch profile.
The profile is expected to contain at least:
- ``read_data_train``, ``eval_batch_train`` and ``after_batch_hooks_train`` entries produced by the train
stream (if train stream name is `train`)
- ``after_epoch_hooks`` entry
:param profile: epoch timings profile
:param extra_streams: enumeration of additional stream names
"""
read_data_total = 0
eval_total = 0
train_total = sum(profile.get('eval_batch_{}'.format(train_stream_name), []))
hooks_total = sum(profile.get('after_epoch_hooks', []))
for stream_name in chain(extra_streams, [train_stream_name]):
read_data_total += sum(profile.get('read_batch_' + stream_name, []))
hooks_total += sum(profile.get('after_batch_hooks_' + stream_name, []))
if stream_name != train_stream_name:
eval_total += sum(profile.get('eval_batch_' + stream_name, []))
logging.info('\tT read data:\t%f', read_data_total)
logging.info('\tT train:\t%f', train_total)
logging.info('\tT eval:\t%f', eval_total)
logging.info('\tT hooks:\t%f', hooks_total) | [
"def",
"after_epoch_profile",
"(",
"self",
",",
"epoch_id",
",",
"profile",
":",
"TimeProfile",
",",
"train_stream_name",
":",
"str",
",",
"extra_streams",
":",
"Iterable",
"[",
"str",
"]",
")",
"->",
"None",
":",
"read_data_total",
"=",
"0",
"eval_total",
"... | Summarize and log the given epoch profile.
The profile is expected to contain at least:
- ``read_data_train``, ``eval_batch_train`` and ``after_batch_hooks_train`` entries produced by the train
stream (if train stream name is `train`)
- ``after_epoch_hooks`` entry
:param profile: epoch timings profile
:param extra_streams: enumeration of additional stream names | [
"Summarize",
"and",
"log",
"the",
"given",
"epoch",
"profile",
"."
] | dd609e6b0bd854424a8f86781dd77801a13038f9 | https://github.com/Cognexa/cxflow/blob/dd609e6b0bd854424a8f86781dd77801a13038f9/cxflow/hooks/log_profile.py#L27-L54 | train | 36,534 |
Cognexa/cxflow | cxflow/utils/confusion_matrix.py | confusion_matrix | def confusion_matrix(expected: np.ndarray, predicted: np.ndarray, num_classes: int) -> np.ndarray:
"""
Calculate and return confusion matrix for the predicted and expected labels
:param expected: array of expected classes (integers) with shape `[num_of_data]`
:param predicted: array of predicted classes (integers) with shape `[num_of_data]`
:param num_classes: number of classification classes
:return: confusion matrix (cm) with absolute values
"""
assert np.issubclass_(expected.dtype.type, np.integer), " Classes' indices must be integers"
assert np.issubclass_(predicted.dtype.type, np.integer), " Classes' indices must be integers"
assert expected.shape == predicted.shape, "Predicted and expected data must be the same length"
assert num_classes > np.max([predicted, expected]), \
"Number of classes must be at least the number of indices in predicted/expected data"
assert np.min([predicted, expected]) >= 0, " Classes' indices must be positive integers"
cm_abs = np.zeros((num_classes, num_classes), dtype=np.int32)
for pred, exp in zip(predicted, expected):
cm_abs[exp, pred] += 1
return cm_abs | python | def confusion_matrix(expected: np.ndarray, predicted: np.ndarray, num_classes: int) -> np.ndarray:
"""
Calculate and return confusion matrix for the predicted and expected labels
:param expected: array of expected classes (integers) with shape `[num_of_data]`
:param predicted: array of predicted classes (integers) with shape `[num_of_data]`
:param num_classes: number of classification classes
:return: confusion matrix (cm) with absolute values
"""
assert np.issubclass_(expected.dtype.type, np.integer), " Classes' indices must be integers"
assert np.issubclass_(predicted.dtype.type, np.integer), " Classes' indices must be integers"
assert expected.shape == predicted.shape, "Predicted and expected data must be the same length"
assert num_classes > np.max([predicted, expected]), \
"Number of classes must be at least the number of indices in predicted/expected data"
assert np.min([predicted, expected]) >= 0, " Classes' indices must be positive integers"
cm_abs = np.zeros((num_classes, num_classes), dtype=np.int32)
for pred, exp in zip(predicted, expected):
cm_abs[exp, pred] += 1
return cm_abs | [
"def",
"confusion_matrix",
"(",
"expected",
":",
"np",
".",
"ndarray",
",",
"predicted",
":",
"np",
".",
"ndarray",
",",
"num_classes",
":",
"int",
")",
"->",
"np",
".",
"ndarray",
":",
"assert",
"np",
".",
"issubclass_",
"(",
"expected",
".",
"dtype",
... | Calculate and return confusion matrix for the predicted and expected labels
:param expected: array of expected classes (integers) with shape `[num_of_data]`
:param predicted: array of predicted classes (integers) with shape `[num_of_data]`
:param num_classes: number of classification classes
:return: confusion matrix (cm) with absolute values | [
"Calculate",
"and",
"return",
"confusion",
"matrix",
"for",
"the",
"predicted",
"and",
"expected",
"labels"
] | dd609e6b0bd854424a8f86781dd77801a13038f9 | https://github.com/Cognexa/cxflow/blob/dd609e6b0bd854424a8f86781dd77801a13038f9/cxflow/utils/confusion_matrix.py#L4-L22 | train | 36,535 |
Cognexa/cxflow | cxflow/cli/grid_search.py | _build_grid_search_commands | def _build_grid_search_commands(script: str, params: typing.Iterable[str]) -> typing.Iterable[typing.List[str]]:
"""
Build all grid search parameter configurations.
:param script: String of command prefix, e.g. ``cxflow train -v -o log``.
:param params: Iterable collection of strings in standard **cxflow** param form, e.g. ``'numerical_param=[1, 2]'``
or ``'text_param=["hello", "cio"]'``.
"""
param_space = OrderedDict()
for arg in params:
assert '=' in arg
name = arg[:arg.index('=')]
options = arg[arg.index('=') + 1:]
options = ast.literal_eval(options)
assert isinstance(options, list), options
param_space[name] = options
param_names = param_space.keys()
commands = []
for values in itertools.product(*[param_space[name] for name in param_names]):
command = str(script).split()
for name, value in zip(param_names, values):
command.append(str(name) + '="' + str(value) + '"')
commands.append(command)
return commands | python | def _build_grid_search_commands(script: str, params: typing.Iterable[str]) -> typing.Iterable[typing.List[str]]:
"""
Build all grid search parameter configurations.
:param script: String of command prefix, e.g. ``cxflow train -v -o log``.
:param params: Iterable collection of strings in standard **cxflow** param form, e.g. ``'numerical_param=[1, 2]'``
or ``'text_param=["hello", "cio"]'``.
"""
param_space = OrderedDict()
for arg in params:
assert '=' in arg
name = arg[:arg.index('=')]
options = arg[arg.index('=') + 1:]
options = ast.literal_eval(options)
assert isinstance(options, list), options
param_space[name] = options
param_names = param_space.keys()
commands = []
for values in itertools.product(*[param_space[name] for name in param_names]):
command = str(script).split()
for name, value in zip(param_names, values):
command.append(str(name) + '="' + str(value) + '"')
commands.append(command)
return commands | [
"def",
"_build_grid_search_commands",
"(",
"script",
":",
"str",
",",
"params",
":",
"typing",
".",
"Iterable",
"[",
"str",
"]",
")",
"->",
"typing",
".",
"Iterable",
"[",
"typing",
".",
"List",
"[",
"str",
"]",
"]",
":",
"param_space",
"=",
"OrderedDict... | Build all grid search parameter configurations.
:param script: String of command prefix, e.g. ``cxflow train -v -o log``.
:param params: Iterable collection of strings in standard **cxflow** param form, e.g. ``'numerical_param=[1, 2]'``
or ``'text_param=["hello", "cio"]'``. | [
"Build",
"all",
"grid",
"search",
"parameter",
"configurations",
"."
] | dd609e6b0bd854424a8f86781dd77801a13038f9 | https://github.com/Cognexa/cxflow/blob/dd609e6b0bd854424a8f86781dd77801a13038f9/cxflow/cli/grid_search.py#L13-L41 | train | 36,536 |
Cognexa/cxflow | cxflow/cli/grid_search.py | grid_search | def grid_search(script: str, params: typing.Iterable[str], dry_run: bool=False) -> None:
"""
Build all grid search parameter configurations and optionally run them.
:param script: String of command prefix, e.g. ``cxflow train -v -o log``.
:param params: Iterable collection of strings in standard **cxflow** param form, e.g. ``'numerical_param=[1, 2]'``
or ``'text_param=["hello", "cio"]'``.
:param dry_run: If set to ``True``, the built commands will only be printed instead of executed.
"""
commands = _build_grid_search_commands(script=script, params=params)
if dry_run:
logging.warning('Dry run')
for command in commands:
logging.info(command)
else:
for command in commands:
try:
completed_process = subprocess.run(command)
logging.info('Command `%s` completed with exit code %d', command, completed_process.returncode)
except Exception as _: # pylint: disable=broad-except
logging.error('Command `%s` failed.', command) | python | def grid_search(script: str, params: typing.Iterable[str], dry_run: bool=False) -> None:
"""
Build all grid search parameter configurations and optionally run them.
:param script: String of command prefix, e.g. ``cxflow train -v -o log``.
:param params: Iterable collection of strings in standard **cxflow** param form, e.g. ``'numerical_param=[1, 2]'``
or ``'text_param=["hello", "cio"]'``.
:param dry_run: If set to ``True``, the built commands will only be printed instead of executed.
"""
commands = _build_grid_search_commands(script=script, params=params)
if dry_run:
logging.warning('Dry run')
for command in commands:
logging.info(command)
else:
for command in commands:
try:
completed_process = subprocess.run(command)
logging.info('Command `%s` completed with exit code %d', command, completed_process.returncode)
except Exception as _: # pylint: disable=broad-except
logging.error('Command `%s` failed.', command) | [
"def",
"grid_search",
"(",
"script",
":",
"str",
",",
"params",
":",
"typing",
".",
"Iterable",
"[",
"str",
"]",
",",
"dry_run",
":",
"bool",
"=",
"False",
")",
"->",
"None",
":",
"commands",
"=",
"_build_grid_search_commands",
"(",
"script",
"=",
"scrip... | Build all grid search parameter configurations and optionally run them.
:param script: String of command prefix, e.g. ``cxflow train -v -o log``.
:param params: Iterable collection of strings in standard **cxflow** param form, e.g. ``'numerical_param=[1, 2]'``
or ``'text_param=["hello", "cio"]'``.
:param dry_run: If set to ``True``, the built commands will only be printed instead of executed. | [
"Build",
"all",
"grid",
"search",
"parameter",
"configurations",
"and",
"optionally",
"run",
"them",
"."
] | dd609e6b0bd854424a8f86781dd77801a13038f9 | https://github.com/Cognexa/cxflow/blob/dd609e6b0bd854424a8f86781dd77801a13038f9/cxflow/cli/grid_search.py#L44-L66 | train | 36,537 |
Cognexa/cxflow | cxflow/datasets/base_dataset.py | BaseDataset.stream_info | def stream_info(self) -> None:
"""Check and report source names, dtypes and shapes of all the streams available."""
stream_names = [stream_name for stream_name in dir(self)
if 'stream' in stream_name and stream_name != 'stream_info']
logging.info('Found %s stream candidates: %s', len(stream_names), stream_names)
for stream_name in stream_names:
try:
stream_fn = getattr(self, stream_name)
logging.info(stream_name)
batch = next(iter(stream_fn()))
rows = []
for key, value in batch.items():
try:
value_arr = np.array(value)
row = [key, value_arr.dtype, value_arr.shape]
if value_arr.dtype.kind in 'bui': # boolean, unsigned, integer
row.append('{} - {}'.format(value_arr.min(), value_arr.max()))
elif value_arr.dtype.kind is 'f':
row.append('{0:.2f} - {1:.2f}'.format(value_arr.min(), value_arr.max()))
except ValueError: # np broadcasting failed (ragged array)
value_arr = None
row = [key, '{}'.format(type(value[0]).__name__), '({},)'.format(len(list(value)))]
if value_arr is None or \
(value_arr.ndim > 0 and value_arr.shape[1:] != np.array(value_arr[0]).shape):
logging.warning('*** stream source `%s` appears to be ragged (non-rectangular) ***', key)
rows.append(row)
for line in tabulate.tabulate(rows, headers=['name', 'dtype', 'shape', 'range'],
tablefmt='grid').split('\n'):
logging.info(line)
except Exception:
logging.warning('Exception was raised during checking stream `%s`, '
'(stack trace is displayed only with --verbose flag)', stream_name)
logging.debug(traceback.format_exc()) | python | def stream_info(self) -> None:
"""Check and report source names, dtypes and shapes of all the streams available."""
stream_names = [stream_name for stream_name in dir(self)
if 'stream' in stream_name and stream_name != 'stream_info']
logging.info('Found %s stream candidates: %s', len(stream_names), stream_names)
for stream_name in stream_names:
try:
stream_fn = getattr(self, stream_name)
logging.info(stream_name)
batch = next(iter(stream_fn()))
rows = []
for key, value in batch.items():
try:
value_arr = np.array(value)
row = [key, value_arr.dtype, value_arr.shape]
if value_arr.dtype.kind in 'bui': # boolean, unsigned, integer
row.append('{} - {}'.format(value_arr.min(), value_arr.max()))
elif value_arr.dtype.kind is 'f':
row.append('{0:.2f} - {1:.2f}'.format(value_arr.min(), value_arr.max()))
except ValueError: # np broadcasting failed (ragged array)
value_arr = None
row = [key, '{}'.format(type(value[0]).__name__), '({},)'.format(len(list(value)))]
if value_arr is None or \
(value_arr.ndim > 0 and value_arr.shape[1:] != np.array(value_arr[0]).shape):
logging.warning('*** stream source `%s` appears to be ragged (non-rectangular) ***', key)
rows.append(row)
for line in tabulate.tabulate(rows, headers=['name', 'dtype', 'shape', 'range'],
tablefmt='grid').split('\n'):
logging.info(line)
except Exception:
logging.warning('Exception was raised during checking stream `%s`, '
'(stack trace is displayed only with --verbose flag)', stream_name)
logging.debug(traceback.format_exc()) | [
"def",
"stream_info",
"(",
"self",
")",
"->",
"None",
":",
"stream_names",
"=",
"[",
"stream_name",
"for",
"stream_name",
"in",
"dir",
"(",
"self",
")",
"if",
"'stream'",
"in",
"stream_name",
"and",
"stream_name",
"!=",
"'stream_info'",
"]",
"logging",
".",
... | Check and report source names, dtypes and shapes of all the streams available. | [
"Check",
"and",
"report",
"source",
"names",
"dtypes",
"and",
"shapes",
"of",
"all",
"the",
"streams",
"available",
"."
] | dd609e6b0bd854424a8f86781dd77801a13038f9 | https://github.com/Cognexa/cxflow/blob/dd609e6b0bd854424a8f86781dd77801a13038f9/cxflow/datasets/base_dataset.py#L52-L86 | train | 36,538 |
Cognexa/cxflow | cxflow/cli/util.py | find_config | def find_config(config_path: str) -> str:
"""
Derive configuration file path from the given path and check its existence.
The given path is expected to be either
1. path to the file
2. path to a dir, in such case the path is joined with ``CXF_CONFIG_FILE``
:param config_path: path to the configuration file or its parent directory
:return: validated configuration file path
"""
if path.isdir(config_path): # dir specified instead of config file
config_path = path.join(config_path, CXF_CONFIG_FILE)
assert path.exists(config_path), '`{}` does not exist'.format(config_path)
return config_path | python | def find_config(config_path: str) -> str:
"""
Derive configuration file path from the given path and check its existence.
The given path is expected to be either
1. path to the file
2. path to a dir, in such case the path is joined with ``CXF_CONFIG_FILE``
:param config_path: path to the configuration file or its parent directory
:return: validated configuration file path
"""
if path.isdir(config_path): # dir specified instead of config file
config_path = path.join(config_path, CXF_CONFIG_FILE)
assert path.exists(config_path), '`{}` does not exist'.format(config_path)
return config_path | [
"def",
"find_config",
"(",
"config_path",
":",
"str",
")",
"->",
"str",
":",
"if",
"path",
".",
"isdir",
"(",
"config_path",
")",
":",
"# dir specified instead of config file",
"config_path",
"=",
"path",
".",
"join",
"(",
"config_path",
",",
"CXF_CONFIG_FILE",
... | Derive configuration file path from the given path and check its existence.
The given path is expected to be either
1. path to the file
2. path to a dir, in such case the path is joined with ``CXF_CONFIG_FILE``
:param config_path: path to the configuration file or its parent directory
:return: validated configuration file path | [
"Derive",
"configuration",
"file",
"path",
"from",
"the",
"given",
"path",
"and",
"check",
"its",
"existence",
"."
] | dd609e6b0bd854424a8f86781dd77801a13038f9 | https://github.com/Cognexa/cxflow/blob/dd609e6b0bd854424a8f86781dd77801a13038f9/cxflow/cli/util.py#L8-L23 | train | 36,539 |
Cognexa/cxflow | cxflow/cli/util.py | fallback | def fallback(message: str, ex: Exception) -> None:
"""
Fallback procedure when a cli command fails.
:param message: message to be logged
:param ex: Exception which caused the failure
"""
logging.error('%s', message)
logging.exception('%s', ex)
sys.exit(1) | python | def fallback(message: str, ex: Exception) -> None:
"""
Fallback procedure when a cli command fails.
:param message: message to be logged
:param ex: Exception which caused the failure
"""
logging.error('%s', message)
logging.exception('%s', ex)
sys.exit(1) | [
"def",
"fallback",
"(",
"message",
":",
"str",
",",
"ex",
":",
"Exception",
")",
"->",
"None",
":",
"logging",
".",
"error",
"(",
"'%s'",
",",
"message",
")",
"logging",
".",
"exception",
"(",
"'%s'",
",",
"ex",
")",
"sys",
".",
"exit",
"(",
"1",
... | Fallback procedure when a cli command fails.
:param message: message to be logged
:param ex: Exception which caused the failure | [
"Fallback",
"procedure",
"when",
"a",
"cli",
"command",
"fails",
"."
] | dd609e6b0bd854424a8f86781dd77801a13038f9 | https://github.com/Cognexa/cxflow/blob/dd609e6b0bd854424a8f86781dd77801a13038f9/cxflow/cli/util.py#L40-L49 | train | 36,540 |
Cognexa/cxflow | cxflow/datasets/downloadable_dataset.py | DownloadableDataset._configure_dataset | def _configure_dataset(self, data_root: str=None, download_urls: Iterable[str]=None, **kwargs) -> None:
"""
Save the passed values and use them as a default property implementation.
:param data_root: directory to which the files will be downloaded
:param download_urls: list of URLs to be downloaded
"""
self._data_root = data_root
self._download_urls = download_urls | python | def _configure_dataset(self, data_root: str=None, download_urls: Iterable[str]=None, **kwargs) -> None:
"""
Save the passed values and use them as a default property implementation.
:param data_root: directory to which the files will be downloaded
:param download_urls: list of URLs to be downloaded
"""
self._data_root = data_root
self._download_urls = download_urls | [
"def",
"_configure_dataset",
"(",
"self",
",",
"data_root",
":",
"str",
"=",
"None",
",",
"download_urls",
":",
"Iterable",
"[",
"str",
"]",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
"->",
"None",
":",
"self",
".",
"_data_root",
"=",
"data_root",
"sel... | Save the passed values and use them as a default property implementation.
:param data_root: directory to which the files will be downloaded
:param download_urls: list of URLs to be downloaded | [
"Save",
"the",
"passed",
"values",
"and",
"use",
"them",
"as",
"a",
"default",
"property",
"implementation",
"."
] | dd609e6b0bd854424a8f86781dd77801a13038f9 | https://github.com/Cognexa/cxflow/blob/dd609e6b0bd854424a8f86781dd77801a13038f9/cxflow/datasets/downloadable_dataset.py#L23-L31 | train | 36,541 |
Cognexa/cxflow | cxflow/cli/resume.py | resume | def resume(config_path: str, restore_from: Optional[str], cl_arguments: Iterable[str], output_root: str) -> None:
"""
Load config from the directory specified and start the training.
:param config_path: path to the config file or the directory in which it is stored
:param restore_from: backend-specific path to the already trained model to be restored from.
If ``None`` is passed, it is inferred from the configuration file location as the directory
it is located in.
:param cl_arguments: additional command line arguments which will update the configuration
:param output_root: output root in which the training directory will be created
"""
config = None
try:
config_path = find_config(config_path)
restore_from = restore_from or path.dirname(config_path)
config = load_config(config_file=config_path, additional_args=cl_arguments)
validate_config(config)
logging.debug('\tLoaded config: %s', config)
except Exception as ex: # pylint: disable=broad-except
fallback('Loading config failed', ex)
run(config=config, output_root=output_root, restore_from=restore_from) | python | def resume(config_path: str, restore_from: Optional[str], cl_arguments: Iterable[str], output_root: str) -> None:
"""
Load config from the directory specified and start the training.
:param config_path: path to the config file or the directory in which it is stored
:param restore_from: backend-specific path to the already trained model to be restored from.
If ``None`` is passed, it is inferred from the configuration file location as the directory
it is located in.
:param cl_arguments: additional command line arguments which will update the configuration
:param output_root: output root in which the training directory will be created
"""
config = None
try:
config_path = find_config(config_path)
restore_from = restore_from or path.dirname(config_path)
config = load_config(config_file=config_path, additional_args=cl_arguments)
validate_config(config)
logging.debug('\tLoaded config: %s', config)
except Exception as ex: # pylint: disable=broad-except
fallback('Loading config failed', ex)
run(config=config, output_root=output_root, restore_from=restore_from) | [
"def",
"resume",
"(",
"config_path",
":",
"str",
",",
"restore_from",
":",
"Optional",
"[",
"str",
"]",
",",
"cl_arguments",
":",
"Iterable",
"[",
"str",
"]",
",",
"output_root",
":",
"str",
")",
"->",
"None",
":",
"config",
"=",
"None",
"try",
":",
... | Load config from the directory specified and start the training.
:param config_path: path to the config file or the directory in which it is stored
:param restore_from: backend-specific path to the already trained model to be restored from.
If ``None`` is passed, it is inferred from the configuration file location as the directory
it is located in.
:param cl_arguments: additional command line arguments which will update the configuration
:param output_root: output root in which the training directory will be created | [
"Load",
"config",
"from",
"the",
"directory",
"specified",
"and",
"start",
"the",
"training",
"."
] | dd609e6b0bd854424a8f86781dd77801a13038f9 | https://github.com/Cognexa/cxflow/blob/dd609e6b0bd854424a8f86781dd77801a13038f9/cxflow/cli/resume.py#L11-L35 | train | 36,542 |
Cognexa/cxflow | cxflow/hooks/abstract_hook.py | AbstractHook.after_epoch_profile | def after_epoch_profile(self, epoch_id: int, profile: TimeProfile, train_stream_name: str, extra_streams: Iterable[str]) -> None:
"""
After epoch profile event.
This event provides opportunity to process time profile of the finished epoch.
:param epoch_id: finished epoch id
:param profile: dictionary of lists of event timings that were measured during the epoch
:param extra_streams: enumeration of additional stream names
"""
pass | python | def after_epoch_profile(self, epoch_id: int, profile: TimeProfile, train_stream_name: str, extra_streams: Iterable[str]) -> None:
"""
After epoch profile event.
This event provides opportunity to process time profile of the finished epoch.
:param epoch_id: finished epoch id
:param profile: dictionary of lists of event timings that were measured during the epoch
:param extra_streams: enumeration of additional stream names
"""
pass | [
"def",
"after_epoch_profile",
"(",
"self",
",",
"epoch_id",
":",
"int",
",",
"profile",
":",
"TimeProfile",
",",
"train_stream_name",
":",
"str",
",",
"extra_streams",
":",
"Iterable",
"[",
"str",
"]",
")",
"->",
"None",
":",
"pass"
] | After epoch profile event.
This event provides opportunity to process time profile of the finished epoch.
:param epoch_id: finished epoch id
:param profile: dictionary of lists of event timings that were measured during the epoch
:param extra_streams: enumeration of additional stream names | [
"After",
"epoch",
"profile",
"event",
"."
] | dd609e6b0bd854424a8f86781dd77801a13038f9 | https://github.com/Cognexa/cxflow/blob/dd609e6b0bd854424a8f86781dd77801a13038f9/cxflow/hooks/abstract_hook.py#L84-L94 | train | 36,543 |
Cognexa/cxflow | cxflow/utils/yaml.py | load_yaml | def load_yaml(yaml_file: str) -> Any:
"""
Load YAML from file.
:param yaml_file: path to YAML file
:return: content of the YAML as dict/list
"""
with open(yaml_file, 'r') as file:
return ruamel.yaml.load(file, ruamel.yaml.RoundTripLoader) | python | def load_yaml(yaml_file: str) -> Any:
"""
Load YAML from file.
:param yaml_file: path to YAML file
:return: content of the YAML as dict/list
"""
with open(yaml_file, 'r') as file:
return ruamel.yaml.load(file, ruamel.yaml.RoundTripLoader) | [
"def",
"load_yaml",
"(",
"yaml_file",
":",
"str",
")",
"->",
"Any",
":",
"with",
"open",
"(",
"yaml_file",
",",
"'r'",
")",
"as",
"file",
":",
"return",
"ruamel",
".",
"yaml",
".",
"load",
"(",
"file",
",",
"ruamel",
".",
"yaml",
".",
"RoundTripLoade... | Load YAML from file.
:param yaml_file: path to YAML file
:return: content of the YAML as dict/list | [
"Load",
"YAML",
"from",
"file",
"."
] | dd609e6b0bd854424a8f86781dd77801a13038f9 | https://github.com/Cognexa/cxflow/blob/dd609e6b0bd854424a8f86781dd77801a13038f9/cxflow/utils/yaml.py#L11-L19 | train | 36,544 |
Cognexa/cxflow | cxflow/utils/yaml.py | yaml_to_file | def yaml_to_file(data: Mapping, output_dir: str, name: str) -> str:
"""
Save the given object to the given path in YAML.
:param data: dict/list to be dumped
:param output_dir: target output directory
:param name: target filename
:return: target path
"""
dumped_config_f = path.join(output_dir, name)
with open(dumped_config_f, 'w') as file:
yaml.dump(data, file, Dumper=ruamel.yaml.RoundTripDumper)
return dumped_config_f | python | def yaml_to_file(data: Mapping, output_dir: str, name: str) -> str:
"""
Save the given object to the given path in YAML.
:param data: dict/list to be dumped
:param output_dir: target output directory
:param name: target filename
:return: target path
"""
dumped_config_f = path.join(output_dir, name)
with open(dumped_config_f, 'w') as file:
yaml.dump(data, file, Dumper=ruamel.yaml.RoundTripDumper)
return dumped_config_f | [
"def",
"yaml_to_file",
"(",
"data",
":",
"Mapping",
",",
"output_dir",
":",
"str",
",",
"name",
":",
"str",
")",
"->",
"str",
":",
"dumped_config_f",
"=",
"path",
".",
"join",
"(",
"output_dir",
",",
"name",
")",
"with",
"open",
"(",
"dumped_config_f",
... | Save the given object to the given path in YAML.
:param data: dict/list to be dumped
:param output_dir: target output directory
:param name: target filename
:return: target path | [
"Save",
"the",
"given",
"object",
"to",
"the",
"given",
"path",
"in",
"YAML",
"."
] | dd609e6b0bd854424a8f86781dd77801a13038f9 | https://github.com/Cognexa/cxflow/blob/dd609e6b0bd854424a8f86781dd77801a13038f9/cxflow/utils/yaml.py#L22-L34 | train | 36,545 |
Cognexa/cxflow | cxflow/utils/yaml.py | yaml_to_str | def yaml_to_str(data: Mapping) -> str:
"""
Return the given given config as YAML str.
:param data: configuration dict
:return: given configuration as yaml str
"""
return yaml.dump(data, Dumper=ruamel.yaml.RoundTripDumper) | python | def yaml_to_str(data: Mapping) -> str:
"""
Return the given given config as YAML str.
:param data: configuration dict
:return: given configuration as yaml str
"""
return yaml.dump(data, Dumper=ruamel.yaml.RoundTripDumper) | [
"def",
"yaml_to_str",
"(",
"data",
":",
"Mapping",
")",
"->",
"str",
":",
"return",
"yaml",
".",
"dump",
"(",
"data",
",",
"Dumper",
"=",
"ruamel",
".",
"yaml",
".",
"RoundTripDumper",
")"
] | Return the given given config as YAML str.
:param data: configuration dict
:return: given configuration as yaml str | [
"Return",
"the",
"given",
"given",
"config",
"as",
"YAML",
"str",
"."
] | dd609e6b0bd854424a8f86781dd77801a13038f9 | https://github.com/Cognexa/cxflow/blob/dd609e6b0bd854424a8f86781dd77801a13038f9/cxflow/utils/yaml.py#L37-L44 | train | 36,546 |
Cognexa/cxflow | cxflow/utils/yaml.py | reload | def reload(data: Any) -> Any:
"""
Dump and load yaml data.
This is useful to avoid many anchor parsing bugs. When you edit a yaml config, reload it to make sure
the changes are propagated to anchor expansions.
:param data: data to be reloaded
:return: reloaded data
"""
return yaml.load(yaml.dump(data, Dumper=ruamel.yaml.RoundTripDumper), Loader=ruamel.yaml.RoundTripLoader) | python | def reload(data: Any) -> Any:
"""
Dump and load yaml data.
This is useful to avoid many anchor parsing bugs. When you edit a yaml config, reload it to make sure
the changes are propagated to anchor expansions.
:param data: data to be reloaded
:return: reloaded data
"""
return yaml.load(yaml.dump(data, Dumper=ruamel.yaml.RoundTripDumper), Loader=ruamel.yaml.RoundTripLoader) | [
"def",
"reload",
"(",
"data",
":",
"Any",
")",
"->",
"Any",
":",
"return",
"yaml",
".",
"load",
"(",
"yaml",
".",
"dump",
"(",
"data",
",",
"Dumper",
"=",
"ruamel",
".",
"yaml",
".",
"RoundTripDumper",
")",
",",
"Loader",
"=",
"ruamel",
".",
"yaml"... | Dump and load yaml data.
This is useful to avoid many anchor parsing bugs. When you edit a yaml config, reload it to make sure
the changes are propagated to anchor expansions.
:param data: data to be reloaded
:return: reloaded data | [
"Dump",
"and",
"load",
"yaml",
"data",
".",
"This",
"is",
"useful",
"to",
"avoid",
"many",
"anchor",
"parsing",
"bugs",
".",
"When",
"you",
"edit",
"a",
"yaml",
"config",
"reload",
"it",
"to",
"make",
"sure",
"the",
"changes",
"are",
"propagated",
"to",
... | dd609e6b0bd854424a8f86781dd77801a13038f9 | https://github.com/Cognexa/cxflow/blob/dd609e6b0bd854424a8f86781dd77801a13038f9/cxflow/utils/yaml.py#L58-L67 | train | 36,547 |
Cognexa/cxflow | cxflow/hooks/stop_on_nan.py | StopOnNaN._is_nan | def _is_nan(self, variable: str, data) -> bool:
"""
Recursively search passed data and find NaNs.
:param variable: name of variable to be checked
:param data: data object (dict, list, scalar)
:return: `True` if there is a NaN value in the data; `False` otherwise.
:raise ValueError: if the variable value is of unsupported type and ``on_unknown_type`` is set to ``error``
"""
if isinstance(data, np.ndarray) or isinstance(data, list):
return any(np.isnan(data)) or (self._stop_on_inf and any(np.isinf(data)))
elif np.isscalar(data):
return np.isnan(data) or (self._stop_on_inf and np.isinf(data))
elif isinstance(data, dict):
return any([self._is_nan(key, value) for key, value in data.items()])
else:
message = 'Variable `{}` of type `{}` can not be checked for NaNs.'.format(variable, type(data))
if self._on_unkown_type == 'warn':
logging.warning(message)
elif self._on_unkown_type == 'error':
raise ValueError(message)
return False | python | def _is_nan(self, variable: str, data) -> bool:
"""
Recursively search passed data and find NaNs.
:param variable: name of variable to be checked
:param data: data object (dict, list, scalar)
:return: `True` if there is a NaN value in the data; `False` otherwise.
:raise ValueError: if the variable value is of unsupported type and ``on_unknown_type`` is set to ``error``
"""
if isinstance(data, np.ndarray) or isinstance(data, list):
return any(np.isnan(data)) or (self._stop_on_inf and any(np.isinf(data)))
elif np.isscalar(data):
return np.isnan(data) or (self._stop_on_inf and np.isinf(data))
elif isinstance(data, dict):
return any([self._is_nan(key, value) for key, value in data.items()])
else:
message = 'Variable `{}` of type `{}` can not be checked for NaNs.'.format(variable, type(data))
if self._on_unkown_type == 'warn':
logging.warning(message)
elif self._on_unkown_type == 'error':
raise ValueError(message)
return False | [
"def",
"_is_nan",
"(",
"self",
",",
"variable",
":",
"str",
",",
"data",
")",
"->",
"bool",
":",
"if",
"isinstance",
"(",
"data",
",",
"np",
".",
"ndarray",
")",
"or",
"isinstance",
"(",
"data",
",",
"list",
")",
":",
"return",
"any",
"(",
"np",
... | Recursively search passed data and find NaNs.
:param variable: name of variable to be checked
:param data: data object (dict, list, scalar)
:return: `True` if there is a NaN value in the data; `False` otherwise.
:raise ValueError: if the variable value is of unsupported type and ``on_unknown_type`` is set to ``error`` | [
"Recursively",
"search",
"passed",
"data",
"and",
"find",
"NaNs",
"."
] | dd609e6b0bd854424a8f86781dd77801a13038f9 | https://github.com/Cognexa/cxflow/blob/dd609e6b0bd854424a8f86781dd77801a13038f9/cxflow/hooks/stop_on_nan.py#L60-L81 | train | 36,548 |
Cognexa/cxflow | cxflow/hooks/stop_on_nan.py | StopOnNaN._check_nan | def _check_nan(self, epoch_data: EpochData) -> None:
"""
Raise an exception when some of the monitored data is NaN.
:param epoch_data: epoch data checked
:raise KeyError: if the specified variable is not found in the stream
:raise ValueError: if the variable value is of unsupported type and ``self._on_unknown_type`` is set to ``error``
"""
for stream_name in epoch_data.keys():
stream_data = epoch_data[stream_name]
variables = self._variables if self._variables is not None else stream_data.keys()
for variable in variables:
if variable not in stream_data:
raise KeyError('Variable `{}` to be nan-checked was not found in the batch data for stream `{}`. '
'Available variables are `{}`.'.format(variable, stream_name, stream_data.keys()))
value = stream_data[variable]
if self._is_nan(variable, value):
raise TrainingTerminated('Variable `{}` is NaN.'.format(variable)) | python | def _check_nan(self, epoch_data: EpochData) -> None:
"""
Raise an exception when some of the monitored data is NaN.
:param epoch_data: epoch data checked
:raise KeyError: if the specified variable is not found in the stream
:raise ValueError: if the variable value is of unsupported type and ``self._on_unknown_type`` is set to ``error``
"""
for stream_name in epoch_data.keys():
stream_data = epoch_data[stream_name]
variables = self._variables if self._variables is not None else stream_data.keys()
for variable in variables:
if variable not in stream_data:
raise KeyError('Variable `{}` to be nan-checked was not found in the batch data for stream `{}`. '
'Available variables are `{}`.'.format(variable, stream_name, stream_data.keys()))
value = stream_data[variable]
if self._is_nan(variable, value):
raise TrainingTerminated('Variable `{}` is NaN.'.format(variable)) | [
"def",
"_check_nan",
"(",
"self",
",",
"epoch_data",
":",
"EpochData",
")",
"->",
"None",
":",
"for",
"stream_name",
"in",
"epoch_data",
".",
"keys",
"(",
")",
":",
"stream_data",
"=",
"epoch_data",
"[",
"stream_name",
"]",
"variables",
"=",
"self",
".",
... | Raise an exception when some of the monitored data is NaN.
:param epoch_data: epoch data checked
:raise KeyError: if the specified variable is not found in the stream
:raise ValueError: if the variable value is of unsupported type and ``self._on_unknown_type`` is set to ``error`` | [
"Raise",
"an",
"exception",
"when",
"some",
"of",
"the",
"monitored",
"data",
"is",
"NaN",
"."
] | dd609e6b0bd854424a8f86781dd77801a13038f9 | https://github.com/Cognexa/cxflow/blob/dd609e6b0bd854424a8f86781dd77801a13038f9/cxflow/hooks/stop_on_nan.py#L83-L101 | train | 36,549 |
Cognexa/cxflow | cxflow/hooks/stop_on_nan.py | StopOnNaN.after_epoch | def after_epoch(self, epoch_data: EpochData, **kwargs) -> None:
"""
If initialized to check after each epoch, stop the training once the epoch data contains a monitored
variable equal to NaN.
:param epoch_data: epoch data to be checked
"""
if self._after_epoch:
self._check_nan(epoch_data) | python | def after_epoch(self, epoch_data: EpochData, **kwargs) -> None:
"""
If initialized to check after each epoch, stop the training once the epoch data contains a monitored
variable equal to NaN.
:param epoch_data: epoch data to be checked
"""
if self._after_epoch:
self._check_nan(epoch_data) | [
"def",
"after_epoch",
"(",
"self",
",",
"epoch_data",
":",
"EpochData",
",",
"*",
"*",
"kwargs",
")",
"->",
"None",
":",
"if",
"self",
".",
"_after_epoch",
":",
"self",
".",
"_check_nan",
"(",
"epoch_data",
")"
] | If initialized to check after each epoch, stop the training once the epoch data contains a monitored
variable equal to NaN.
:param epoch_data: epoch data to be checked | [
"If",
"initialized",
"to",
"check",
"after",
"each",
"epoch",
"stop",
"the",
"training",
"once",
"the",
"epoch",
"data",
"contains",
"a",
"monitored",
"variable",
"equal",
"to",
"NaN",
"."
] | dd609e6b0bd854424a8f86781dd77801a13038f9 | https://github.com/Cognexa/cxflow/blob/dd609e6b0bd854424a8f86781dd77801a13038f9/cxflow/hooks/stop_on_nan.py#L103-L112 | train | 36,550 |
Cognexa/cxflow | cxflow/hooks/stop_on_nan.py | StopOnNaN.after_batch | def after_batch(self, stream_name: str, batch_data) -> None:
"""
If initialized to check after each batch, stop the training once the batch data contains a monitored
variable equal to NaN.
:param stream_name: name of the stream to be checked
:param batch_data: batch data to be checked
"""
if self._after_batch:
self._check_nan({stream_name: batch_data}) | python | def after_batch(self, stream_name: str, batch_data) -> None:
"""
If initialized to check after each batch, stop the training once the batch data contains a monitored
variable equal to NaN.
:param stream_name: name of the stream to be checked
:param batch_data: batch data to be checked
"""
if self._after_batch:
self._check_nan({stream_name: batch_data}) | [
"def",
"after_batch",
"(",
"self",
",",
"stream_name",
":",
"str",
",",
"batch_data",
")",
"->",
"None",
":",
"if",
"self",
".",
"_after_batch",
":",
"self",
".",
"_check_nan",
"(",
"{",
"stream_name",
":",
"batch_data",
"}",
")"
] | If initialized to check after each batch, stop the training once the batch data contains a monitored
variable equal to NaN.
:param stream_name: name of the stream to be checked
:param batch_data: batch data to be checked | [
"If",
"initialized",
"to",
"check",
"after",
"each",
"batch",
"stop",
"the",
"training",
"once",
"the",
"batch",
"data",
"contains",
"a",
"monitored",
"variable",
"equal",
"to",
"NaN",
"."
] | dd609e6b0bd854424a8f86781dd77801a13038f9 | https://github.com/Cognexa/cxflow/blob/dd609e6b0bd854424a8f86781dd77801a13038f9/cxflow/hooks/stop_on_nan.py#L114-L124 | train | 36,551 |
Cognexa/cxflow | cxflow/hooks/every_n_epoch.py | EveryNEpoch.after_epoch | def after_epoch(self, epoch_id: int, **kwargs) -> None:
"""
Call ``_after_n_epoch`` method every ``n_epochs`` epoch.
:param epoch_id: number of the processed epoch
"""
if epoch_id % self._n_epochs == 0:
self._after_n_epoch(epoch_id=epoch_id, **kwargs) | python | def after_epoch(self, epoch_id: int, **kwargs) -> None:
"""
Call ``_after_n_epoch`` method every ``n_epochs`` epoch.
:param epoch_id: number of the processed epoch
"""
if epoch_id % self._n_epochs == 0:
self._after_n_epoch(epoch_id=epoch_id, **kwargs) | [
"def",
"after_epoch",
"(",
"self",
",",
"epoch_id",
":",
"int",
",",
"*",
"*",
"kwargs",
")",
"->",
"None",
":",
"if",
"epoch_id",
"%",
"self",
".",
"_n_epochs",
"==",
"0",
":",
"self",
".",
"_after_n_epoch",
"(",
"epoch_id",
"=",
"epoch_id",
",",
"*... | Call ``_after_n_epoch`` method every ``n_epochs`` epoch.
:param epoch_id: number of the processed epoch | [
"Call",
"_after_n_epoch",
"method",
"every",
"n_epochs",
"epoch",
"."
] | dd609e6b0bd854424a8f86781dd77801a13038f9 | https://github.com/Cognexa/cxflow/blob/dd609e6b0bd854424a8f86781dd77801a13038f9/cxflow/hooks/every_n_epoch.py#L33-L40 | train | 36,552 |
Cognexa/cxflow | cxflow/cli/ls.py | humanize_filesize | def humanize_filesize(filesize: int) -> Tuple[str, str]:
"""Return human readable pair of size and unit from the given filesize in bytes."""
for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']:
if filesize < 1024.0:
return '{:3.1f}'.format(filesize), unit+'B'
filesize /= 1024.0 | python | def humanize_filesize(filesize: int) -> Tuple[str, str]:
"""Return human readable pair of size and unit from the given filesize in bytes."""
for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']:
if filesize < 1024.0:
return '{:3.1f}'.format(filesize), unit+'B'
filesize /= 1024.0 | [
"def",
"humanize_filesize",
"(",
"filesize",
":",
"int",
")",
"->",
"Tuple",
"[",
"str",
",",
"str",
"]",
":",
"for",
"unit",
"in",
"[",
"''",
",",
"'K'",
",",
"'M'",
",",
"'G'",
",",
"'T'",
",",
"'P'",
",",
"'E'",
",",
"'Z'",
"]",
":",
"if",
... | Return human readable pair of size and unit from the given filesize in bytes. | [
"Return",
"human",
"readable",
"pair",
"of",
"size",
"and",
"unit",
"from",
"the",
"given",
"filesize",
"in",
"bytes",
"."
] | dd609e6b0bd854424a8f86781dd77801a13038f9 | https://github.com/Cognexa/cxflow/blob/dd609e6b0bd854424a8f86781dd77801a13038f9/cxflow/cli/ls.py#L35-L40 | train | 36,553 |
Cognexa/cxflow | cxflow/cli/ls.py | is_train_dir | def is_train_dir(dir_: str) -> bool:
"""Test if the given dir contains training artifacts."""
return path.exists(path.join(dir_, CXF_CONFIG_FILE)) and \
path.exists(path.join(dir_, CXF_TRACE_FILE)) and \
path.exists(path.join(dir_, CXF_LOG_FILE)) | python | def is_train_dir(dir_: str) -> bool:
"""Test if the given dir contains training artifacts."""
return path.exists(path.join(dir_, CXF_CONFIG_FILE)) and \
path.exists(path.join(dir_, CXF_TRACE_FILE)) and \
path.exists(path.join(dir_, CXF_LOG_FILE)) | [
"def",
"is_train_dir",
"(",
"dir_",
":",
"str",
")",
"->",
"bool",
":",
"return",
"path",
".",
"exists",
"(",
"path",
".",
"join",
"(",
"dir_",
",",
"CXF_CONFIG_FILE",
")",
")",
"and",
"path",
".",
"exists",
"(",
"path",
".",
"join",
"(",
"dir_",
"... | Test if the given dir contains training artifacts. | [
"Test",
"if",
"the",
"given",
"dir",
"contains",
"training",
"artifacts",
"."
] | dd609e6b0bd854424a8f86781dd77801a13038f9 | https://github.com/Cognexa/cxflow/blob/dd609e6b0bd854424a8f86781dd77801a13038f9/cxflow/cli/ls.py#L43-L47 | train | 36,554 |
Cognexa/cxflow | cxflow/cli/ls.py | _print_trainings_long | def _print_trainings_long(trainings: Iterable[Tuple[str, dict, TrainingTrace]]) -> None:
"""
Print a plain table with the details of the given trainings.
:param trainings: iterable of tuples (train_dir, configuration dict, trace)
"""
long_table = []
for train_dir, config, trace in trainings:
start_datetime, end_datetime = trace[TrainingTraceKeys.TRAIN_BEGIN], trace[TrainingTraceKeys.TRAIN_END]
if start_datetime:
age = format_timedelta(datetime.now() - start_datetime) + ' ago'
if end_datetime:
duration = format_timedelta(end_datetime - start_datetime)
else:
duration = CXF_NA_STR
else:
age = CXF_NA_STR
duration = CXF_NA_STR
epochs_done = trace[TrainingTraceKeys.EPOCHS_DONE] if trace[TrainingTraceKeys.EPOCHS_DONE] else 0
long_table.append([path.basename(train_dir)] +
list(map(lambda fq_name: fq_name.split('.')[-1], get_classes(config))) +
[age, duration, epochs_done])
print(tabulate(long_table, tablefmt='plain')) | python | def _print_trainings_long(trainings: Iterable[Tuple[str, dict, TrainingTrace]]) -> None:
"""
Print a plain table with the details of the given trainings.
:param trainings: iterable of tuples (train_dir, configuration dict, trace)
"""
long_table = []
for train_dir, config, trace in trainings:
start_datetime, end_datetime = trace[TrainingTraceKeys.TRAIN_BEGIN], trace[TrainingTraceKeys.TRAIN_END]
if start_datetime:
age = format_timedelta(datetime.now() - start_datetime) + ' ago'
if end_datetime:
duration = format_timedelta(end_datetime - start_datetime)
else:
duration = CXF_NA_STR
else:
age = CXF_NA_STR
duration = CXF_NA_STR
epochs_done = trace[TrainingTraceKeys.EPOCHS_DONE] if trace[TrainingTraceKeys.EPOCHS_DONE] else 0
long_table.append([path.basename(train_dir)] +
list(map(lambda fq_name: fq_name.split('.')[-1], get_classes(config))) +
[age, duration, epochs_done])
print(tabulate(long_table, tablefmt='plain')) | [
"def",
"_print_trainings_long",
"(",
"trainings",
":",
"Iterable",
"[",
"Tuple",
"[",
"str",
",",
"dict",
",",
"TrainingTrace",
"]",
"]",
")",
"->",
"None",
":",
"long_table",
"=",
"[",
"]",
"for",
"train_dir",
",",
"config",
",",
"trace",
"in",
"trainin... | Print a plain table with the details of the given trainings.
:param trainings: iterable of tuples (train_dir, configuration dict, trace) | [
"Print",
"a",
"plain",
"table",
"with",
"the",
"details",
"of",
"the",
"given",
"trainings",
"."
] | dd609e6b0bd854424a8f86781dd77801a13038f9 | https://github.com/Cognexa/cxflow/blob/dd609e6b0bd854424a8f86781dd77801a13038f9/cxflow/cli/ls.py#L89-L114 | train | 36,555 |
def _ls_print_listing(dir_: str, recursive: bool, all_: bool, long: bool) -> List[Tuple[str, dict, TrainingTrace]]:
    """
    Print names of the train dirs contained in the given dir.

    :param dir_: dir to be listed
    :param recursive: walk recursively in sub-directories, stop at train dirs (--recursive option)
    :param all_: include train dirs with no epochs done (--all option)
    :param long: list more details including model name, model and dataset classes,
                 age, duration and epochs done (--long option)
    :return: list of found training tuples (train_dir, configuration dict, trace)
    """
    found = []
    for root_dir, train_dirs in walk_train_dirs(dir_):
        if train_dirs:
            if recursive:
                print(root_dir + ':')
            # load the config and the training trace for every train dir under this root
            trainings = []
            for dir_name in train_dirs:
                train_dir = os.path.join(root_dir, dir_name)
                trainings.append((train_dir,
                                  load_config(path.join(train_dir, CXF_CONFIG_FILE), []),
                                  TrainingTrace.from_file(path.join(train_dir, CXF_TRACE_FILE))))
            if not all_:
                # hide trainings without a single finished epoch
                trainings = [training for training in trainings
                             if training[2][TrainingTraceKeys.EPOCHS_DONE]]
            if long:
                print('total {}'.format(len(trainings)))
                _print_trainings_long(trainings)
            else:
                for train_dir, _, _ in trainings:
                    print(path.basename(train_dir))
            found.extend(trainings)
            if recursive:
                print()
        if not recursive:
            break
    return found
"""
Print names of the train dirs contained in the given dir.
:param dir_: dir to be listed
:param recursive: walk recursively in sub-directories, stop at train dirs (--recursive option)
:param all_: include train dirs with no epochs done (--all option)
:param long: list more details including model name, model and dataset classes,
age, duration and epochs done (--long option)
:return: list of found training tuples (train_dir, configuration dict, trace)
"""
all_trainings = []
for root_dir, train_dirs in walk_train_dirs(dir_):
if train_dirs:
if recursive:
print(root_dir + ':')
trainings = [(train_dir,
load_config(path.join(train_dir, CXF_CONFIG_FILE), []),
TrainingTrace.from_file(path.join(train_dir, CXF_TRACE_FILE)))
for train_dir
in [os.path.join(root_dir, train_dir) for train_dir in train_dirs]]
if not all_:
trainings = [train_dir for train_dir in trainings if train_dir[2][TrainingTraceKeys.EPOCHS_DONE]]
if long:
print('total {}'.format(len(trainings)))
_print_trainings_long(trainings)
else:
for train_dir, _, _ in trainings:
print(path.basename(train_dir))
all_trainings.extend(trainings)
if recursive:
print()
if not recursive:
break
return all_trainings | [
"def",
"_ls_print_listing",
"(",
"dir_",
":",
"str",
",",
"recursive",
":",
"bool",
",",
"all_",
":",
"bool",
",",
"long",
":",
"bool",
")",
"->",
"List",
"[",
"Tuple",
"[",
"str",
",",
"dict",
",",
"TrainingTrace",
"]",
"]",
":",
"all_trainings",
"=... | Print names of the train dirs contained in the given dir.
:param dir_: dir to be listed
:param recursive: walk recursively in sub-directories, stop at train dirs (--recursive option)
:param all_: include train dirs with no epochs done (--all option)
:param long: list more details including model name, model and dataset classes,
age, duration and epochs done (--long option)
:return: list of found training tuples (train_dir, configuration dict, trace) | [
"Print",
"names",
"of",
"the",
"train",
"dirs",
"contained",
"in",
"the",
"given",
"dir",
"."
] | dd609e6b0bd854424a8f86781dd77801a13038f9 | https://github.com/Cognexa/cxflow/blob/dd609e6b0bd854424a8f86781dd77801a13038f9/cxflow/cli/ls.py#L117-L152 | train | 36,556 |
def _ls_print_summary(all_trainings: List[Tuple[str, dict, TrainingTrace]]) -> None:
    """
    Print trainings summary.

    In particular print tables summarizing the number of trainings with
    - particular model names
    - particular combinations of models and datasets

    :param all_trainings: a list of training tuples (train_dir, configuration dict, trace)
    """
    by_name = defaultdict(int)
    by_classes = defaultdict(int)
    for _, config, _ in all_trainings:
        by_name[get_model_name(config)] += 1
        by_classes[get_classes(config)] += 1

    print_boxed('summary')
    print()
    print(tabulate([[name, count] for name, count in by_name.items()],
                   headers=['model.name', 'count'], tablefmt='grid'))
    print()
    print(tabulate([[model_class, dataset_class, count]
                    for (model_class, dataset_class), count in by_classes.items()],
                   headers=['model.class', 'dataset.class', 'count'], tablefmt='grid'))
    print()
"""
Print trainings summary.
In particular print tables summarizing the number of trainings with
- particular model names
- particular combinations of models and datasets
:param all_trainings: a list of training tuples (train_dir, configuration dict, trace)
"""
counts_by_name = defaultdict(int)
counts_by_classes = defaultdict(int)
for _, config, _ in all_trainings:
counts_by_name[get_model_name(config)] += 1
counts_by_classes[get_classes(config)] += 1
print_boxed('summary')
print()
counts_table = [[name, count] for name, count in counts_by_name.items()]
print(tabulate(counts_table, headers=['model.name', 'count'], tablefmt='grid'))
print()
counts_table = [[classes[0], classes[1], count] for classes, count in counts_by_classes.items()]
print(tabulate(counts_table, headers=['model.class', 'dataset.class', 'count'], tablefmt='grid'))
print() | [
"def",
"_ls_print_summary",
"(",
"all_trainings",
":",
"List",
"[",
"Tuple",
"[",
"str",
",",
"dict",
",",
"TrainingTrace",
"]",
"]",
")",
"->",
"None",
":",
"counts_by_name",
"=",
"defaultdict",
"(",
"int",
")",
"counts_by_classes",
"=",
"defaultdict",
"(",... | Print trainings summary.
In particular print tables summarizing the number of trainings with
- particular model names
- particular combinations of models and datasets
:param all_trainings: a list of training tuples (train_dir, configuration dict, trace) | [
"Print",
"trainings",
"summary",
".",
"In",
"particular",
"print",
"tables",
"summarizing",
"the",
"number",
"of",
"trainings",
"with",
"-",
"particular",
"model",
"names",
"-",
"particular",
"combinations",
"of",
"models",
"and",
"datasets"
] | dd609e6b0bd854424a8f86781dd77801a13038f9 | https://github.com/Cognexa/cxflow/blob/dd609e6b0bd854424a8f86781dd77801a13038f9/cxflow/cli/ls.py#L155-L179 | train | 36,557 |
def list_train_dirs(dir_: str, recursive: bool, all_: bool, long: bool, verbose: bool) -> None:
    """
    List training dirs contained in the given dir with options and outputs similar to the regular `ls` command.

    The function is accessible through cxflow CLI `cxflow ls`.

    :param dir_: dir to be listed
    :param recursive: walk recursively in sub-directories, stop at train dirs (--recursive option)
    :param all_: include train dirs with no epochs done (--all option)
    :param long: list more details including model name, model and dataset class,
                 age, duration and epochs done (--long option)
    :param verbose: print more verbose output with list of additional artifacts and training config,
                    applicable only when a single train dir is listed (--verbose option)
    """
    long = long or verbose  # --verbose implies --long
    if dir_ == CXF_DEFAULT_LOG_DIR and not path.exists(CXF_DEFAULT_LOG_DIR):
        print('The default log directory `{}` does not exist.\n'
              'Consider specifying the directory to be listed as an argument.'.format(CXF_DEFAULT_LOG_DIR))
        quit(1)
    if not path.exists(dir_):
        print('Specified dir `{}` does not exist'.format(dir_))
        quit(1)
    trainings = _ls_print_listing(dir_, recursive, all_, long)
    # print the aggregate summary only when multiple trainings were listed
    if long and len(trainings) > 1:
        if not recursive:
            print()
        _ls_print_summary(trainings)
    # verbose details make sense only for a single listed training
    if verbose and len(trainings) == 1:
        if not recursive:
            print()
        _ls_print_verbose(trainings[0])
"""
List training dirs contained in the given dir with options and outputs similar to the regular `ls` command.
The function is accessible through cxflow CLI `cxflow ls`.
:param dir_: dir to be listed
:param recursive: walk recursively in sub-directories, stop at train dirs (--recursive option)
:param all_: include train dirs with no epochs done (--all option)
:param long: list more details including model name, odel and dataset class,
age, duration and epochs done (--long option)
:param verbose: print more verbose output with list of additional artifacts and training config,
applicable only when a single train dir is listed (--verbose option)
"""
if verbose:
long = True
if dir_ == CXF_DEFAULT_LOG_DIR and not path.exists(CXF_DEFAULT_LOG_DIR):
print('The default log directory `{}` does not exist.\n'
'Consider specifying the directory to be listed as an argument.'.format(CXF_DEFAULT_LOG_DIR))
quit(1)
if not path.exists(dir_):
print('Specified dir `{}` does not exist'.format(dir_))
quit(1)
all_trainings = _ls_print_listing(dir_, recursive, all_, long)
if long and len(all_trainings) > 1:
if not recursive:
print()
_ls_print_summary(all_trainings)
if verbose and len(all_trainings) == 1:
if not recursive:
print()
_ls_print_verbose(all_trainings[0]) | [
"def",
"list_train_dirs",
"(",
"dir_",
":",
"str",
",",
"recursive",
":",
"bool",
",",
"all_",
":",
"bool",
",",
"long",
":",
"bool",
",",
"verbose",
":",
"bool",
")",
"->",
"None",
":",
"if",
"verbose",
":",
"long",
"=",
"True",
"if",
"dir_",
"=="... | List training dirs contained in the given dir with options and outputs similar to the regular `ls` command.
The function is accessible through cxflow CLI `cxflow ls`.
:param dir_: dir to be listed
:param recursive: walk recursively in sub-directories, stop at train dirs (--recursive option)
:param all_: include train dirs with no epochs done (--all option)
:param long: list more details including model name, odel and dataset class,
age, duration and epochs done (--long option)
:param verbose: print more verbose output with list of additional artifacts and training config,
applicable only when a single train dir is listed (--verbose option) | [
"List",
"training",
"dirs",
"contained",
"in",
"the",
"given",
"dir",
"with",
"options",
"and",
"outputs",
"similar",
"to",
"the",
"regular",
"ls",
"command",
".",
"The",
"function",
"is",
"accessible",
"through",
"cxflow",
"CLI",
"cxflow",
"ls",
"."
] | dd609e6b0bd854424a8f86781dd77801a13038f9 | https://github.com/Cognexa/cxflow/blob/dd609e6b0bd854424a8f86781dd77801a13038f9/cxflow/cli/ls.py#L203-L238 | train | 36,558 |
def after_batch(self, stream_name: str, batch_data: Batch):
    """
    Extend the accumulated variables with the given batch data.

    :param stream_name: stream name; e.g. ``train`` or any other...
    :param batch_data: batch data = stream sources + model outputs
    :raise KeyError: if the variables to be aggregated are missing
    :raise TypeError: if the variable value is not iterable (e.g. it is only a scalar)
    """
    for variable in self._variables:
        # guard clauses: missing or scalar variables are hard errors
        if variable not in batch_data:
            raise KeyError('Variable `{}` to be accumulated was not found in the batch data. '
                           'Available variables are `{}`.'.format(variable, batch_data.keys()))
        value = batch_data[variable]
        if not hasattr(value, '__iter__'):
            raise TypeError('Variable `{}` to be accumulated is not iterable.'.format(variable))
        self._accumulator[stream_name][variable] += list(value)
"""
Extend the accumulated variables with the given batch data.
:param stream_name: stream name; e.g. ``train`` or any other...
:param batch_data: batch data = stream sources + model outputs
:raise KeyError: if the variables to be aggregated are missing
:raise TypeError: if the variable value is not iterable (e.g. it is only a scalar)
"""
for variable in self._variables:
if variable in batch_data:
value = batch_data[variable]
if not hasattr(value, '__iter__'):
raise TypeError('Variable `{}` to be accumulated is not iterable.'.format(variable))
self._accumulator[stream_name][variable] += list(value)
else:
raise KeyError('Variable `{}` to be accumulated was not found in the batch data. '
'Available variables are `{}`.'.format(variable, batch_data.keys())) | [
"def",
"after_batch",
"(",
"self",
",",
"stream_name",
":",
"str",
",",
"batch_data",
":",
"Batch",
")",
":",
"for",
"variable",
"in",
"self",
".",
"_variables",
":",
"if",
"variable",
"in",
"batch_data",
":",
"value",
"=",
"batch_data",
"[",
"variable",
... | Extend the accumulated variables with the given batch data.
:param stream_name: stream name; e.g. ``train`` or any other...
:param batch_data: batch data = stream sources + model outputs
:raise KeyError: if the variables to be aggregated are missing
:raise TypeError: if the variable value is not iterable (e.g. it is only a scalar) | [
"Extend",
"the",
"accumulated",
"variables",
"with",
"the",
"given",
"batch",
"data",
"."
] | dd609e6b0bd854424a8f86781dd77801a13038f9 | https://github.com/Cognexa/cxflow/blob/dd609e6b0bd854424a8f86781dd77801a13038f9/cxflow/hooks/accumulate_variables.py#L40-L57 | train | 36,559 |
def invoke_dataset_method(config_path: str, method_name: str, output_root: str, cl_arguments: Iterable[str]) -> None:
    """
    Create the specified dataset and invoke its specified method.

    Each stage (config loading, dataset creation, method lookup, method invocation)
    is guarded separately so that ``fallback`` reports a precise failure reason.

    :param config_path: path to the config file or the directory in which it is stored
    :param method_name: name of the method to be invoked on the specified dataset
    :param output_root: output root in which the training directory will be created
                        (kept for CLI interface compatibility; not used by this command)
    :param cl_arguments: additional command line arguments which will update the configuration
    """
    # fix: the original also pre-initialized an `output_dir` local which was never used
    config = dataset = method = None

    try:
        config_path = find_config(config_path)
        config = load_config(config_file=config_path, additional_args=cl_arguments)
        assert 'dataset' in config, '`dataset` section not present in the config'
        logging.debug('\tLoaded config: %s', config)
    except Exception as ex:  # pylint: disable=broad-except
        fallback('Loading config failed', ex)

    try:
        dataset = create_dataset(config)
    except Exception as ex:  # pylint: disable=broad-except
        fallback('Creating dataset failed', ex)

    try:
        method = getattr(dataset, method_name)
    except AttributeError as ex:
        fallback('Method `{}` not found in the dataset'.format(method_name), ex)

    try:
        method()
    except Exception as ex:  # pylint: disable=broad-except
        fallback('Exception occurred during method `{}` invocation'.format(method_name), ex)
"""
Create the specified dataset and invoke its specified method.
:param config_path: path to the config file or the directory in which it is stored
:param method_name: name of the method to be invoked on the specified dataset
:param cl_arguments: additional command line arguments which will update the configuration
:param output_root: output root in which the training directory will be created
"""
config = dataset = method = output_dir = None
try:
config_path = find_config(config_path)
config = load_config(config_file=config_path, additional_args=cl_arguments)
assert 'dataset' in config, '`dataset` section not present in the config'
logging.debug('\tLoaded config: %s', config)
except Exception as ex: # pylint: disable=broad-except
fallback('Loading config failed', ex)
try:
dataset = create_dataset(config)
except Exception as ex: # pylint: disable=broad-except
fallback('Creating dataset failed', ex)
try:
method = getattr(dataset, method_name)
except AttributeError as ex:
fallback('Method `{}` not found in the dataset'.format(method_name), ex)
try:
method()
except Exception as ex: # pylint: disable=broad-except
fallback('Exception occurred during method `{}` invocation'.format(method_name), ex) | [
"def",
"invoke_dataset_method",
"(",
"config_path",
":",
"str",
",",
"method_name",
":",
"str",
",",
"output_root",
":",
"str",
",",
"cl_arguments",
":",
"Iterable",
"[",
"str",
"]",
")",
"->",
"None",
":",
"config",
"=",
"dataset",
"=",
"method",
"=",
"... | Create the specified dataset and invoke its specified method.
:param config_path: path to the config file or the directory in which it is stored
:param method_name: name of the method to be invoked on the specified dataset
:param cl_arguments: additional command line arguments which will update the configuration
:param output_root: output root in which the training directory will be created | [
"Create",
"the",
"specified",
"dataset",
"and",
"invoke",
"its",
"specified",
"method",
"."
] | dd609e6b0bd854424a8f86781dd77801a13038f9 | https://github.com/Cognexa/cxflow/blob/dd609e6b0bd854424a8f86781dd77801a13038f9/cxflow/cli/dataset.py#L11-L44 | train | 36,560 |
Cognexa/cxflow | cxflow/utils/misc.py | CaughtInterrupts._signal_handler | def _signal_handler(self, *_) -> None:
"""
On the first signal, increase the ``self._num_signals`` counter.
Call ``sys.exit`` on any subsequent signal.
"""
if self._num_signals == 0:
logging.warning('Interrupt signal caught - training will be terminated')
logging.warning('Another interrupt signal will terminate the program immediately')
self._num_signals += 1
else:
logging.error('Another interrupt signal caught - terminating program immediately')
sys.exit(2) | python | def _signal_handler(self, *_) -> None:
"""
On the first signal, increase the ``self._num_signals`` counter.
Call ``sys.exit`` on any subsequent signal.
"""
if self._num_signals == 0:
logging.warning('Interrupt signal caught - training will be terminated')
logging.warning('Another interrupt signal will terminate the program immediately')
self._num_signals += 1
else:
logging.error('Another interrupt signal caught - terminating program immediately')
sys.exit(2) | [
"def",
"_signal_handler",
"(",
"self",
",",
"*",
"_",
")",
"->",
"None",
":",
"if",
"self",
".",
"_num_signals",
"==",
"0",
":",
"logging",
".",
"warning",
"(",
"'Interrupt signal caught - training will be terminated'",
")",
"logging",
".",
"warning",
"(",
"'A... | On the first signal, increase the ``self._num_signals`` counter.
Call ``sys.exit`` on any subsequent signal. | [
"On",
"the",
"first",
"signal",
"increase",
"the",
"self",
".",
"_num_signals",
"counter",
".",
"Call",
"sys",
".",
"exit",
"on",
"any",
"subsequent",
"signal",
"."
] | dd609e6b0bd854424a8f86781dd77801a13038f9 | https://github.com/Cognexa/cxflow/blob/dd609e6b0bd854424a8f86781dd77801a13038f9/cxflow/utils/misc.py#L95-L106 | train | 36,561 |
def create_output_dir(config: dict, output_root: str, default_model_name: str='Unnamed') -> str:
    """
    Create output_dir under the given ``output_root`` and

    - dump the given config to YAML file under this dir
    - register a file logger logging to a file under this dir

    :param config: config to be dumped
    :param output_root: dir wherein output_dir shall be created
    :param default_model_name: name to be used when `model.name` is not found in the config
    :return: path to the created output_dir
    """
    logging.info('Creating output dir')

    if 'name' in config['model']:
        model_name = config['model']['name']
    else:
        model_name = default_model_name
        logging.warning('\tmodel.name not found in config, defaulting to: %s', model_name)

    if not os.path.exists(output_root):
        logging.info('\tOutput root folder "%s" does not exist and will be created', output_root)
        os.makedirs(output_root)

    # improbable yet possible name conflicts may occur - retry until a fresh dir is created
    while True:
        dir_name = '{}_{}_{}'.format(datetime.now().strftime('%Y-%m-%d-%H-%M-%S'), model_name, get_random_name())
        output_dir = path.join(output_root, dir_name)
        try:
            os.mkdir(output_dir)
        except OSError as ex:
            if ex.errno != errno.EEXIST:
                raise ex
        else:
            break
    logging.info('\tOutput dir: %s', output_dir)

    # attach a file handler so that subsequent logs are mirrored into the output dir
    file_handler = logging.FileHandler(path.join(output_dir, CXF_LOG_FILE))
    file_handler.setFormatter(logging.Formatter(CXF_LOG_FORMAT, datefmt=CXF_LOG_DATE_FORMAT))
    logging.getLogger().addHandler(file_handler)
    return output_dir
"""
Create output_dir under the given ``output_root`` and
- dump the given config to YAML file under this dir
- register a file logger logging to a file under this dir
:param config: config to be dumped
:param output_root: dir wherein output_dir shall be created
:param default_model_name: name to be used when `model.name` is not found in the config
:return: path to the created output_dir
"""
logging.info('Creating output dir')
# create output dir
model_name = default_model_name
if 'name' not in config['model']:
logging.warning('\tmodel.name not found in config, defaulting to: %s', model_name)
else:
model_name = config['model']['name']
if not os.path.exists(output_root):
logging.info('\tOutput root folder "%s" does not exist and will be created', output_root)
os.makedirs(output_root)
# keep trying to create new output dir until it succeeds
# this is neccessary due to improbable yet possible output dir name conflicts
while True:
try:
output_dir = path.join(output_root, '{}_{}_{}'.format(datetime.now().strftime('%Y-%m-%d-%H-%M-%S'),
model_name, get_random_name()))
os.mkdir(output_dir)
break
except OSError as ex:
if ex.errno != errno.EEXIST:
raise ex
logging.info('\tOutput dir: %s', output_dir)
# create file logger
file_handler = logging.FileHandler(path.join(output_dir, CXF_LOG_FILE))
file_handler.setFormatter(logging.Formatter(CXF_LOG_FORMAT, datefmt=CXF_LOG_DATE_FORMAT))
logging.getLogger().addHandler(file_handler)
return output_dir | [
"def",
"create_output_dir",
"(",
"config",
":",
"dict",
",",
"output_root",
":",
"str",
",",
"default_model_name",
":",
"str",
"=",
"'Unnamed'",
")",
"->",
"str",
":",
"logging",
".",
"info",
"(",
"'Creating output dir'",
")",
"# create output dir",
"model_name"... | Create output_dir under the given ``output_root`` and
- dump the given config to YAML file under this dir
- register a file logger logging to a file under this dir
:param config: config to be dumped
:param output_root: dir wherein output_dir shall be created
:param default_model_name: name to be used when `model.name` is not found in the config
:return: path to the created output_dir | [
"Create",
"output_dir",
"under",
"the",
"given",
"output_root",
"and",
"-",
"dump",
"the",
"given",
"config",
"to",
"YAML",
"file",
"under",
"this",
"dir",
"-",
"register",
"a",
"file",
"logger",
"logging",
"to",
"a",
"file",
"under",
"this",
"dir"
] | dd609e6b0bd854424a8f86781dd77801a13038f9 | https://github.com/Cognexa/cxflow/blob/dd609e6b0bd854424a8f86781dd77801a13038f9/cxflow/cli/common.py#L21-L63 | train | 36,562 |
def create_dataset(config: dict, output_dir: Optional[str]=None) -> AbstractDataset:
    """
    Create a dataset object according to the given config.

    Dataset config section and the `output_dir` are passed to the constructor in a single YAML-encoded string.

    :param config: config dict with dataset config
    :param output_dir: path to the training output dir or None
    :return: dataset object
    """
    logging.info('Creating dataset')

    simple_config = make_simple(config)['dataset']
    assert 'class' in simple_config, '`dataset.class` not present in the config'
    module_name, class_name = parse_fully_qualified_name(simple_config['class'])
    if 'output_dir' in simple_config:
        raise ValueError('The `output_dir` key is reserved and can not be used in dataset configuration.')

    # pass the dataset section (plus the reserved `output_dir`) as a single YAML string
    constructor_config = {'output_dir': output_dir, **config['dataset']}
    del constructor_config['class']
    dataset = create_object(module_name, class_name, args=(yaml_to_str(constructor_config),))
    logging.info('\t%s created', type(dataset).__name__)
    return dataset
return dataset | python | def create_dataset(config: dict, output_dir: Optional[str]=None) -> AbstractDataset:
"""
Create a dataset object according to the given config.
Dataset config section and the `output_dir` are passed to the constructor in a single YAML-encoded string.
:param config: config dict with dataset config
:param output_dir: path to the training output dir or None
:return: dataset object
"""
logging.info('Creating dataset')
dataset_config = make_simple(config)['dataset']
assert 'class' in dataset_config, '`dataset.class` not present in the config'
dataset_module, dataset_class = parse_fully_qualified_name(dataset_config['class'])
if 'output_dir' in dataset_config:
raise ValueError('The `output_dir` key is reserved and can not be used in dataset configuration.')
dataset_config = {'output_dir': output_dir, **config['dataset']}
del dataset_config['class']
dataset = create_object(dataset_module, dataset_class, args=(yaml_to_str(dataset_config),))
logging.info('\t%s created', type(dataset).__name__)
return dataset | [
"def",
"create_dataset",
"(",
"config",
":",
"dict",
",",
"output_dir",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
")",
"->",
"AbstractDataset",
":",
"logging",
".",
"info",
"(",
"'Creating dataset'",
")",
"dataset_config",
"=",
"make_simple",
"(",
"conf... | Create a dataset object according to the given config.
Dataset config section and the `output_dir` are passed to the constructor in a single YAML-encoded string.
:param config: config dict with dataset config
:param output_dir: path to the training output dir or None
:return: dataset object | [
"Create",
"a",
"dataset",
"object",
"according",
"to",
"the",
"given",
"config",
"."
] | dd609e6b0bd854424a8f86781dd77801a13038f9 | https://github.com/Cognexa/cxflow/blob/dd609e6b0bd854424a8f86781dd77801a13038f9/cxflow/cli/common.py#L66-L90 | train | 36,563 |
def create_model(config: dict, output_dir: Optional[str], dataset: AbstractDataset,
                 restore_from: Optional[str]=None) -> AbstractModel:
    """
    Create a model object either from scratch of from the checkpoint in ``resume_dir``.

    Cxflow allows the following scenarios

    1. Create model: leave ``restore_from=None`` and specify ``class``;
    2. Restore model: specify ``restore_from`` which is a backend-specific path to (a directory with) the saved model.

    :param config: config dict with model config
    :param output_dir: path to the training output dir
    :param dataset: dataset object implementing the :py:class:`cxflow.datasets.AbstractDataset` concept
    :param restore_from: from whence the model should be restored (backend-specific information)
    :return: model object
    :raise ImportError: if neither the configured class nor the ``restore_fallback`` can be created
    """
    logging.info('Creating a model')
    model_config = config['model']
    # workaround for ruamel.yaml expansion bug; see #222
    model_config = dict(model_config.items())

    assert 'class' in model_config, '`model.class` not present in the config'
    model_module, model_class = parse_fully_qualified_name(model_config['class'])

    # create model kwargs (without `class` and `name`)
    model_kwargs = {'dataset': dataset, 'log_dir': output_dir, 'restore_from': restore_from, **model_config}
    del model_kwargs['class']
    if 'name' in model_kwargs:
        del model_kwargs['name']

    try:
        model = create_object(model_module, model_class, kwargs=model_kwargs)
    except (ImportError, AttributeError) as ex:
        if restore_from is None:  # training case - no fallback makes sense, fail immediately
            raise ImportError('Cannot create model from the specified model module `{}` and class `{}`.'.format(
                model_module, model_class)) from ex
        else:  # restore cases (resume, predict) - the original class may be unavailable, try the fallback
            logging.warning('Cannot create model from the specified model class `%s`.', model_config['class'])
            assert 'restore_fallback' in model_config, '`model.restore_fallback` not present in the config'
            logging.info('Trying to restore with fallback `%s` instead.', model_config['restore_fallback'])
            try:  # try fallback class
                fallback_module, fallback_class = parse_fully_qualified_name(model_config['restore_fallback'])
                model = create_object(fallback_module, fallback_class, kwargs=model_kwargs)
            except (ImportError, AttributeError) as ex:  # if fallback module/class specified but it fails
                raise ImportError('Cannot create model from the specified restore_fallback `{}`.'.format(
                    model_config['restore_fallback'],)) from ex

    logging.info('\t%s created', type(model).__name__)
    return model
restore_from: Optional[str]=None) -> AbstractModel:
"""
Create a model object either from scratch of from the checkpoint in ``resume_dir``.
Cxflow allows the following scenarios
1. Create model: leave ``restore_from=None`` and specify ``class``;
2. Restore model: specify ``restore_from`` which is a backend-specific path to (a directory with) the saved model.
:param config: config dict with model config
:param output_dir: path to the training output dir
:param dataset: dataset object implementing the :py:class:`cxflow.datasets.AbstractDataset` concept
:param restore_from: from whence the model should be restored (backend-specific information)
:return: model object
"""
logging.info('Creating a model')
model_config = config['model']
# workaround for ruamel.yaml expansion bug; see #222
model_config = dict(model_config.items())
assert 'class' in model_config, '`model.class` not present in the config'
model_module, model_class = parse_fully_qualified_name(model_config['class'])
# create model kwargs (without `class` and `name`)
model_kwargs = {'dataset': dataset, 'log_dir': output_dir, 'restore_from': restore_from, **model_config}
del model_kwargs['class']
if 'name' in model_kwargs:
del model_kwargs['name']
try:
model = create_object(model_module, model_class, kwargs=model_kwargs)
except (ImportError, AttributeError) as ex:
if restore_from is None: # training case
raise ImportError('Cannot create model from the specified model module `{}` and class `{}`.'.format(
model_module, model_class)) from ex
else: # restore cases (resume, predict)
logging.warning('Cannot create model from the specified model class `%s`.', model_config['class'])
assert 'restore_fallback' in model_config, '`model.restore_fallback` not present in the config'
logging.info('Trying to restore with fallback `%s` instead.', model_config['restore_fallback'])
try: # try fallback class
fallback_module, fallback_class = parse_fully_qualified_name(model_config['restore_fallback'])
model = create_object(fallback_module, fallback_class, kwargs=model_kwargs)
except (ImportError, AttributeError) as ex: # if fallback module/class specified but it fails
raise ImportError('Cannot create model from the specified restore_fallback `{}`.'.format(
model_config['restore_fallback'],)) from ex
logging.info('\t%s created', type(model).__name__)
return model | [
"def",
"create_model",
"(",
"config",
":",
"dict",
",",
"output_dir",
":",
"Optional",
"[",
"str",
"]",
",",
"dataset",
":",
"AbstractDataset",
",",
"restore_from",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
")",
"->",
"AbstractModel",
":",
"logging",
... | Create a model object either from scratch of from the checkpoint in ``resume_dir``.
Cxflow allows the following scenarios
1. Create model: leave ``restore_from=None`` and specify ``class``;
2. Restore model: specify ``restore_from`` which is a backend-specific path to (a directory with) the saved model.
:param config: config dict with model config
:param output_dir: path to the training output dir
:param dataset: dataset object implementing the :py:class:`cxflow.datasets.AbstractDataset` concept
:param restore_from: from whence the model should be restored (backend-specific information)
:return: model object | [
"Create",
"a",
"model",
"object",
"either",
"from",
"scratch",
"of",
"from",
"the",
"checkpoint",
"in",
"resume_dir",
"."
] | dd609e6b0bd854424a8f86781dd77801a13038f9 | https://github.com/Cognexa/cxflow/blob/dd609e6b0bd854424a8f86781dd77801a13038f9/cxflow/cli/common.py#L93-L146 | train | 36,564 |
def _prune_subdirs(dir_: str) -> None:
    """
    Delete all subdirs in training log dirs.

    :param dir_: dir with training log dirs
    """
    for entry in listdir(dir_):
        logdir = path.join(dir_, entry)
        if not is_train_dir(logdir):
            continue
        # remove every sub-directory of the train dir, keeping its regular files
        for name in listdir(logdir):
            subdir = path.join(logdir, name)
            if path.isdir(subdir):
                _safe_rmtree(subdir)
"""
Delete all subdirs in training log dirs.
:param dir_: dir with training log dirs
"""
for logdir in [path.join(dir_, f) for f in listdir(dir_) if is_train_dir(path.join(dir_, f))]:
for subdir in [path.join(logdir, f) for f in listdir(logdir) if path.isdir(path.join(logdir, f))]:
_safe_rmtree(subdir) | [
"def",
"_prune_subdirs",
"(",
"dir_",
":",
"str",
")",
"->",
"None",
":",
"for",
"logdir",
"in",
"[",
"path",
".",
"join",
"(",
"dir_",
",",
"f",
")",
"for",
"f",
"in",
"listdir",
"(",
"dir_",
")",
"if",
"is_train_dir",
"(",
"path",
".",
"join",
... | Delete all subdirs in training log dirs.
:param dir_: dir with training log dirs | [
"Delete",
"all",
"subdirs",
"in",
"training",
"log",
"dirs",
"."
] | dd609e6b0bd854424a8f86781dd77801a13038f9 | https://github.com/Cognexa/cxflow/blob/dd609e6b0bd854424a8f86781dd77801a13038f9/cxflow/cli/prune.py#L23-L31 | train | 36,565 |
Cognexa/cxflow | cxflow/cli/prune.py | _prune | def _prune(dir_: str, epochs: int) -> None:
"""
Delete all training dirs with incomplete training artifacts or with less than specified epochs done.
:param dir_: dir with training log dirs
:param epochs: minimum number of finished epochs to keep the training logs
:return: number of log dirs pruned
"""
for logdir in [path.join(dir_, f) for f in listdir(dir_) if path.isdir(path.join(dir_, f))]:
if not is_train_dir(logdir):
_safe_rmtree(logdir)
else:
trace_path = path.join(logdir, CXF_TRACE_FILE)
try:
epochs_done = TrainingTrace.from_file(trace_path)[TrainingTraceKeys.EPOCHS_DONE]
except (KeyError, TypeError):
epochs_done = 0
if not epochs_done or epochs_done < epochs:
_safe_rmtree(logdir) | python | def _prune(dir_: str, epochs: int) -> None:
"""
Delete all training dirs with incomplete training artifacts or with less than specified epochs done.
:param dir_: dir with training log dirs
:param epochs: minimum number of finished epochs to keep the training logs
:return: number of log dirs pruned
"""
for logdir in [path.join(dir_, f) for f in listdir(dir_) if path.isdir(path.join(dir_, f))]:
if not is_train_dir(logdir):
_safe_rmtree(logdir)
else:
trace_path = path.join(logdir, CXF_TRACE_FILE)
try:
epochs_done = TrainingTrace.from_file(trace_path)[TrainingTraceKeys.EPOCHS_DONE]
except (KeyError, TypeError):
epochs_done = 0
if not epochs_done or epochs_done < epochs:
_safe_rmtree(logdir) | [
"def",
"_prune",
"(",
"dir_",
":",
"str",
",",
"epochs",
":",
"int",
")",
"->",
"None",
":",
"for",
"logdir",
"in",
"[",
"path",
".",
"join",
"(",
"dir_",
",",
"f",
")",
"for",
"f",
"in",
"listdir",
"(",
"dir_",
")",
"if",
"path",
".",
"isdir",... | Delete all training dirs with incomplete training artifacts or with less than specified epochs done.
:param dir_: dir with training log dirs
:param epochs: minimum number of finished epochs to keep the training logs
:return: number of log dirs pruned | [
"Delete",
"all",
"training",
"dirs",
"with",
"incomplete",
"training",
"artifacts",
"or",
"with",
"less",
"than",
"specified",
"epochs",
"done",
"."
] | dd609e6b0bd854424a8f86781dd77801a13038f9 | https://github.com/Cognexa/cxflow/blob/dd609e6b0bd854424a8f86781dd77801a13038f9/cxflow/cli/prune.py#L34-L52 | train | 36,566 |
Cognexa/cxflow | cxflow/cli/prune.py | prune_train_dirs | def prune_train_dirs(dir_: str, epochs: int, subdirs: bool) -> None:
"""
Prune training log dirs contained in the given dir. The function is accessible through cxflow CLI `cxflow prune`.
:param dir_: dir to be pruned
:param epochs: minimum number of finished epochs to keep the training logs
:param subdirs: delete subdirs in training log dirs
"""
if dir_ == CXF_DEFAULT_LOG_DIR and not path.exists(CXF_DEFAULT_LOG_DIR):
print('The default log directory `{}` does not exist.\n'
'Consider specifying the directory to be listed as an argument.'.format(CXF_DEFAULT_LOG_DIR))
quit(1)
if not path.exists(dir_):
print('Specified dir `{}` does not exist'.format(dir_))
quit(1)
_prune(dir_, epochs)
if subdirs:
_prune_subdirs(dir_) | python | def prune_train_dirs(dir_: str, epochs: int, subdirs: bool) -> None:
"""
Prune training log dirs contained in the given dir. The function is accessible through cxflow CLI `cxflow prune`.
:param dir_: dir to be pruned
:param epochs: minimum number of finished epochs to keep the training logs
:param subdirs: delete subdirs in training log dirs
"""
if dir_ == CXF_DEFAULT_LOG_DIR and not path.exists(CXF_DEFAULT_LOG_DIR):
print('The default log directory `{}` does not exist.\n'
'Consider specifying the directory to be listed as an argument.'.format(CXF_DEFAULT_LOG_DIR))
quit(1)
if not path.exists(dir_):
print('Specified dir `{}` does not exist'.format(dir_))
quit(1)
_prune(dir_, epochs)
if subdirs:
_prune_subdirs(dir_) | [
"def",
"prune_train_dirs",
"(",
"dir_",
":",
"str",
",",
"epochs",
":",
"int",
",",
"subdirs",
":",
"bool",
")",
"->",
"None",
":",
"if",
"dir_",
"==",
"CXF_DEFAULT_LOG_DIR",
"and",
"not",
"path",
".",
"exists",
"(",
"CXF_DEFAULT_LOG_DIR",
")",
":",
"pri... | Prune training log dirs contained in the given dir. The function is accessible through cxflow CLI `cxflow prune`.
:param dir_: dir to be pruned
:param epochs: minimum number of finished epochs to keep the training logs
:param subdirs: delete subdirs in training log dirs | [
"Prune",
"training",
"log",
"dirs",
"contained",
"in",
"the",
"given",
"dir",
".",
"The",
"function",
"is",
"accessible",
"through",
"cxflow",
"CLI",
"cxflow",
"prune",
"."
] | dd609e6b0bd854424a8f86781dd77801a13038f9 | https://github.com/Cognexa/cxflow/blob/dd609e6b0bd854424a8f86781dd77801a13038f9/cxflow/cli/prune.py#L55-L75 | train | 36,567 |
Cognexa/cxflow | cxflow/models/sequence.py | Sequence.output_names | def output_names(self) -> Iterable[str]:
"""List of model output names."""
self._load_models()
return chain.from_iterable(map(lambda m: m.output_names, self._models)) | python | def output_names(self) -> Iterable[str]:
"""List of model output names."""
self._load_models()
return chain.from_iterable(map(lambda m: m.output_names, self._models)) | [
"def",
"output_names",
"(",
"self",
")",
"->",
"Iterable",
"[",
"str",
"]",
":",
"self",
".",
"_load_models",
"(",
")",
"return",
"chain",
".",
"from_iterable",
"(",
"map",
"(",
"lambda",
"m",
":",
"m",
".",
"output_names",
",",
"self",
".",
"_models",... | List of model output names. | [
"List",
"of",
"model",
"output",
"names",
"."
] | dd609e6b0bd854424a8f86781dd77801a13038f9 | https://github.com/Cognexa/cxflow/blob/dd609e6b0bd854424a8f86781dd77801a13038f9/cxflow/models/sequence.py#L89-L92 | train | 36,568 |
Cognexa/cxflow | cxflow/models/sequence.py | Sequence.run | def run(self, batch: Batch, train: bool=False, stream: StreamWrapper=None) -> Batch:
"""
Run all the models in-order and return accumulated outputs.
N-th model is fed with the original inputs and outputs of all the models that were run before it.
.. warning::
:py:class:`Sequence` model can not be trained.
:param batch: batch to be processed
:param train: ``True`` if this batch should be used for model update, ``False`` otherwise
:param stream: stream wrapper (useful for precise buffer management)
:return: accumulated model outputs
:raise ValueError: if the ``train`` flag is set to ``True``
"""
if train:
raise ValueError('Ensemble model cannot be trained.')
self._load_models()
# run all the models in-order
current_batch = dict(copy.deepcopy(batch))
for model in self._models:
current_batch.update(model.run(current_batch, False, None))
return {key: current_batch[key] for key in self.output_names} | python | def run(self, batch: Batch, train: bool=False, stream: StreamWrapper=None) -> Batch:
"""
Run all the models in-order and return accumulated outputs.
N-th model is fed with the original inputs and outputs of all the models that were run before it.
.. warning::
:py:class:`Sequence` model can not be trained.
:param batch: batch to be processed
:param train: ``True`` if this batch should be used for model update, ``False`` otherwise
:param stream: stream wrapper (useful for precise buffer management)
:return: accumulated model outputs
:raise ValueError: if the ``train`` flag is set to ``True``
"""
if train:
raise ValueError('Ensemble model cannot be trained.')
self._load_models()
# run all the models in-order
current_batch = dict(copy.deepcopy(batch))
for model in self._models:
current_batch.update(model.run(current_batch, False, None))
return {key: current_batch[key] for key in self.output_names} | [
"def",
"run",
"(",
"self",
",",
"batch",
":",
"Batch",
",",
"train",
":",
"bool",
"=",
"False",
",",
"stream",
":",
"StreamWrapper",
"=",
"None",
")",
"->",
"Batch",
":",
"if",
"train",
":",
"raise",
"ValueError",
"(",
"'Ensemble model cannot be trained.'"... | Run all the models in-order and return accumulated outputs.
N-th model is fed with the original inputs and outputs of all the models that were run before it.
.. warning::
:py:class:`Sequence` model can not be trained.
:param batch: batch to be processed
:param train: ``True`` if this batch should be used for model update, ``False`` otherwise
:param stream: stream wrapper (useful for precise buffer management)
:return: accumulated model outputs
:raise ValueError: if the ``train`` flag is set to ``True`` | [
"Run",
"all",
"the",
"models",
"in",
"-",
"order",
"and",
"return",
"accumulated",
"outputs",
"."
] | dd609e6b0bd854424a8f86781dd77801a13038f9 | https://github.com/Cognexa/cxflow/blob/dd609e6b0bd854424a8f86781dd77801a13038f9/cxflow/models/sequence.py#L94-L118 | train | 36,569 |
Cognexa/cxflow | cxflow/hooks/log_variables.py | LogVariables._log_variables | def _log_variables(self, epoch_data: EpochData):
"""
Log variables from the epoch data.
.. warning::
At the moment, only scalars and dicts of scalars are properly formatted and logged.
Other value types are ignored by default.
One may set ``on_unknown_type`` to ``str`` in order to log all the variables anyways.
:param epoch_data: epoch data to be logged
:raise KeyError: if the specified variable is not found in the stream
:raise TypeError: if the variable value is of unsupported type and ``self._on_unknown_type`` is set to ``error``
"""
for stream_name in epoch_data.keys():
stream_data = epoch_data[stream_name]
variables = self._variables if self._variables is not None else stream_data.keys()
for variable in variables:
if variable not in stream_data:
raise KeyError('Variable `{}` to be logged was not found in the batch data for stream `{}`. '
'Available variables are `{}`.'.format(variable, stream_name, stream_data.keys()))
value = stream_data[variable]
if np.isscalar(value):
logging.info('\t%s %s: %f', stream_name, variable, value)
elif isinstance(value, dict):
keys = list(value.keys())
if len(keys) == 1:
logging.info('\t%s %s %s: %f', stream_name, variable, keys[0], value[keys[0]])
else:
logging.info('\t%s %s:', stream_name, variable)
for key, val in value.items():
logging.info('\t\t%s: %f', key, val)
else:
if self._on_unknown_type == 'error':
raise TypeError('Variable type `{}` can not be logged. Variable name: `{}`.'
.format(type(value).__name__, variable))
elif self._on_unknown_type == 'warn':
logging.warning('Variable type `%s` can not be logged. Variable name: `%s`.',
type(value).__name__, variable)
elif self._on_unknown_type == 'str':
logging.info('\t%s %s: %s', stream_name, variable, value) | python | def _log_variables(self, epoch_data: EpochData):
"""
Log variables from the epoch data.
.. warning::
At the moment, only scalars and dicts of scalars are properly formatted and logged.
Other value types are ignored by default.
One may set ``on_unknown_type`` to ``str`` in order to log all the variables anyways.
:param epoch_data: epoch data to be logged
:raise KeyError: if the specified variable is not found in the stream
:raise TypeError: if the variable value is of unsupported type and ``self._on_unknown_type`` is set to ``error``
"""
for stream_name in epoch_data.keys():
stream_data = epoch_data[stream_name]
variables = self._variables if self._variables is not None else stream_data.keys()
for variable in variables:
if variable not in stream_data:
raise KeyError('Variable `{}` to be logged was not found in the batch data for stream `{}`. '
'Available variables are `{}`.'.format(variable, stream_name, stream_data.keys()))
value = stream_data[variable]
if np.isscalar(value):
logging.info('\t%s %s: %f', stream_name, variable, value)
elif isinstance(value, dict):
keys = list(value.keys())
if len(keys) == 1:
logging.info('\t%s %s %s: %f', stream_name, variable, keys[0], value[keys[0]])
else:
logging.info('\t%s %s:', stream_name, variable)
for key, val in value.items():
logging.info('\t\t%s: %f', key, val)
else:
if self._on_unknown_type == 'error':
raise TypeError('Variable type `{}` can not be logged. Variable name: `{}`.'
.format(type(value).__name__, variable))
elif self._on_unknown_type == 'warn':
logging.warning('Variable type `%s` can not be logged. Variable name: `%s`.',
type(value).__name__, variable)
elif self._on_unknown_type == 'str':
logging.info('\t%s %s: %s', stream_name, variable, value) | [
"def",
"_log_variables",
"(",
"self",
",",
"epoch_data",
":",
"EpochData",
")",
":",
"for",
"stream_name",
"in",
"epoch_data",
".",
"keys",
"(",
")",
":",
"stream_data",
"=",
"epoch_data",
"[",
"stream_name",
"]",
"variables",
"=",
"self",
".",
"_variables",... | Log variables from the epoch data.
.. warning::
At the moment, only scalars and dicts of scalars are properly formatted and logged.
Other value types are ignored by default.
One may set ``on_unknown_type`` to ``str`` in order to log all the variables anyways.
:param epoch_data: epoch data to be logged
:raise KeyError: if the specified variable is not found in the stream
:raise TypeError: if the variable value is of unsupported type and ``self._on_unknown_type`` is set to ``error`` | [
"Log",
"variables",
"from",
"the",
"epoch",
"data",
"."
] | dd609e6b0bd854424a8f86781dd77801a13038f9 | https://github.com/Cognexa/cxflow/blob/dd609e6b0bd854424a8f86781dd77801a13038f9/cxflow/hooks/log_variables.py#L56-L96 | train | 36,570 |
hall-lab/svtyper | svtyper/parsers.py | SplitRead.get_reference_end_from_cigar | def get_reference_end_from_cigar(reference_start, cigar):
'''
This returns the coordinate just past the last aligned base.
This matches the behavior of pysam's reference_end method
'''
reference_end = reference_start
# iterate through cigartuple
for i in xrange(len(cigar)):
k, n = cigar[i]
if k in (0,2,3,7,8): # M, D, N, =, X
reference_end += n
return reference_end | python | def get_reference_end_from_cigar(reference_start, cigar):
'''
This returns the coordinate just past the last aligned base.
This matches the behavior of pysam's reference_end method
'''
reference_end = reference_start
# iterate through cigartuple
for i in xrange(len(cigar)):
k, n = cigar[i]
if k in (0,2,3,7,8): # M, D, N, =, X
reference_end += n
return reference_end | [
"def",
"get_reference_end_from_cigar",
"(",
"reference_start",
",",
"cigar",
")",
":",
"reference_end",
"=",
"reference_start",
"# iterate through cigartuple",
"for",
"i",
"in",
"xrange",
"(",
"len",
"(",
"cigar",
")",
")",
":",
"k",
",",
"n",
"=",
"cigar",
"[... | This returns the coordinate just past the last aligned base.
This matches the behavior of pysam's reference_end method | [
"This",
"returns",
"the",
"coordinate",
"just",
"past",
"the",
"last",
"aligned",
"base",
".",
"This",
"matches",
"the",
"behavior",
"of",
"pysam",
"s",
"reference_end",
"method"
] | 5fc30763fd3025793ee712a563de800c010f6bea | https://github.com/hall-lab/svtyper/blob/5fc30763fd3025793ee712a563de800c010f6bea/svtyper/parsers.py#L1089-L1101 | train | 36,571 |
hall-lab/svtyper | svtyper/parsers.py | SplitRead.set_order_by_clip | def set_order_by_clip(self, a, b):
'''
Determine which SplitPiece is the leftmost based
on the side of the longest clipping operation
'''
if self.is_left_clip(a.cigar):
self.query_left = b
self.query_right = a
else:
self.query_left = a
self.query_right = b | python | def set_order_by_clip(self, a, b):
'''
Determine which SplitPiece is the leftmost based
on the side of the longest clipping operation
'''
if self.is_left_clip(a.cigar):
self.query_left = b
self.query_right = a
else:
self.query_left = a
self.query_right = b | [
"def",
"set_order_by_clip",
"(",
"self",
",",
"a",
",",
"b",
")",
":",
"if",
"self",
".",
"is_left_clip",
"(",
"a",
".",
"cigar",
")",
":",
"self",
".",
"query_left",
"=",
"b",
"self",
".",
"query_right",
"=",
"a",
"else",
":",
"self",
".",
"query_... | Determine which SplitPiece is the leftmost based
on the side of the longest clipping operation | [
"Determine",
"which",
"SplitPiece",
"is",
"the",
"leftmost",
"based",
"on",
"the",
"side",
"of",
"the",
"longest",
"clipping",
"operation"
] | 5fc30763fd3025793ee712a563de800c010f6bea | https://github.com/hall-lab/svtyper/blob/5fc30763fd3025793ee712a563de800c010f6bea/svtyper/parsers.py#L1230-L1240 | train | 36,572 |
uber/tchannel-python | tchannel/tornado/request.py | Request._is_streaming_request | def _is_streaming_request(self):
"""check request is stream request or not"""
arg2 = self.argstreams[1]
arg3 = self.argstreams[2]
return not (isinstance(arg2, InMemStream) and
isinstance(arg3, InMemStream) and
((arg2.auto_close and arg3.auto_close) or (
arg2.state == StreamState.completed and
arg3.state == StreamState.completed))) | python | def _is_streaming_request(self):
"""check request is stream request or not"""
arg2 = self.argstreams[1]
arg3 = self.argstreams[2]
return not (isinstance(arg2, InMemStream) and
isinstance(arg3, InMemStream) and
((arg2.auto_close and arg3.auto_close) or (
arg2.state == StreamState.completed and
arg3.state == StreamState.completed))) | [
"def",
"_is_streaming_request",
"(",
"self",
")",
":",
"arg2",
"=",
"self",
".",
"argstreams",
"[",
"1",
"]",
"arg3",
"=",
"self",
".",
"argstreams",
"[",
"2",
"]",
"return",
"not",
"(",
"isinstance",
"(",
"arg2",
",",
"InMemStream",
")",
"and",
"isins... | check request is stream request or not | [
"check",
"request",
"is",
"stream",
"request",
"or",
"not"
] | ee08cce6234f24fd2373774988186dd374306c43 | https://github.com/uber/tchannel-python/blob/ee08cce6234f24fd2373774988186dd374306c43/tchannel/tornado/request.py#L155-L163 | train | 36,573 |
uber/tchannel-python | tchannel/tornado/request.py | Request.should_retry_on_error | def should_retry_on_error(self, error):
"""rules for retry
:param error:
ProtocolException that returns from Server
"""
if self.is_streaming_request:
# not retry for streaming request
return False
retry_flag = self.headers.get('re', retry.DEFAULT)
if retry_flag == retry.NEVER:
return False
if isinstance(error, StreamClosedError):
return True
if error.code in [ErrorCode.bad_request, ErrorCode.cancelled,
ErrorCode.unhealthy]:
return False
elif error.code in [ErrorCode.busy, ErrorCode.declined]:
return True
elif error.code is ErrorCode.timeout:
return retry_flag is not retry.CONNECTION_ERROR
elif error.code in [ErrorCode.network_error,
ErrorCode.fatal,
ErrorCode.unexpected]:
return retry_flag is not retry.TIMEOUT
else:
return False | python | def should_retry_on_error(self, error):
"""rules for retry
:param error:
ProtocolException that returns from Server
"""
if self.is_streaming_request:
# not retry for streaming request
return False
retry_flag = self.headers.get('re', retry.DEFAULT)
if retry_flag == retry.NEVER:
return False
if isinstance(error, StreamClosedError):
return True
if error.code in [ErrorCode.bad_request, ErrorCode.cancelled,
ErrorCode.unhealthy]:
return False
elif error.code in [ErrorCode.busy, ErrorCode.declined]:
return True
elif error.code is ErrorCode.timeout:
return retry_flag is not retry.CONNECTION_ERROR
elif error.code in [ErrorCode.network_error,
ErrorCode.fatal,
ErrorCode.unexpected]:
return retry_flag is not retry.TIMEOUT
else:
return False | [
"def",
"should_retry_on_error",
"(",
"self",
",",
"error",
")",
":",
"if",
"self",
".",
"is_streaming_request",
":",
"# not retry for streaming request",
"return",
"False",
"retry_flag",
"=",
"self",
".",
"headers",
".",
"get",
"(",
"'re'",
",",
"retry",
".",
... | rules for retry
:param error:
ProtocolException that returns from Server | [
"rules",
"for",
"retry"
] | ee08cce6234f24fd2373774988186dd374306c43 | https://github.com/uber/tchannel-python/blob/ee08cce6234f24fd2373774988186dd374306c43/tchannel/tornado/request.py#L165-L196 | train | 36,574 |
uber/tchannel-python | tchannel/sync/thrift.py | client_for | def client_for(service, service_module, thrift_service_name=None):
"""Build a synchronous client class for the given Thrift service.
The generated class accepts a TChannelSyncClient and an optional
hostport as initialization arguments.
Given ``CommentService`` defined in ``comment.thrift`` and registered
with Hyperbahn under the name "comment", here's how this might be used:
.. code-block:: python
from tchannel.sync import TChannelSyncClient
from tchannel.sync.thrift import client_for
from comment import CommentService
CommentServiceClient = client_for('comment', CommentService)
tchannel_sync = TChannelSyncClient('my-service')
comment_client = CommentServiceClient(tchannel_sync)
future = comment_client.postComment(
articleId,
CommentService.Comment("hi")
)
result = future.result()
:param service:
Name of the Hyperbahn service being called.
:param service_module:
The Thrift-generated module for that service. This usually has
the same name as definied for the service in the IDL.
:param thrift_service_name:
If the Thrift service has a different name than its module, use
this parameter to specify it.
:returns:
An Thrift-like class, ready to be instantiated and used
with TChannelSyncClient.
"""
assert service_module, 'service_module is required'
service = service or '' # may be blank for non-hyperbahn use cases
if not thrift_service_name:
thrift_service_name = service_module.__name__.rsplit('.', 1)[-1]
method_names = get_service_methods(service_module.Iface)
def init(
self,
tchannel,
hostport=None,
trace=False,
protocol_headers=None,
):
self.async_thrift = self.__async_client_class__(
tchannel=tchannel,
hostport=hostport,
trace=trace,
protocol_headers=protocol_headers,
)
self.threadloop = tchannel._threadloop
init.__name__ = '__init__'
methods = {
'__init__': init,
'__async_client_class__': async_client_for(
service=service,
service_module=service_module,
thrift_service_name=thrift_service_name,
)
}
methods.update({
method_name: generate_method(method_name)
for method_name in method_names
})
return type(thrift_service_name + 'Client', (object,), methods) | python | def client_for(service, service_module, thrift_service_name=None):
"""Build a synchronous client class for the given Thrift service.
The generated class accepts a TChannelSyncClient and an optional
hostport as initialization arguments.
Given ``CommentService`` defined in ``comment.thrift`` and registered
with Hyperbahn under the name "comment", here's how this might be used:
.. code-block:: python
from tchannel.sync import TChannelSyncClient
from tchannel.sync.thrift import client_for
from comment import CommentService
CommentServiceClient = client_for('comment', CommentService)
tchannel_sync = TChannelSyncClient('my-service')
comment_client = CommentServiceClient(tchannel_sync)
future = comment_client.postComment(
articleId,
CommentService.Comment("hi")
)
result = future.result()
:param service:
Name of the Hyperbahn service being called.
:param service_module:
The Thrift-generated module for that service. This usually has
the same name as definied for the service in the IDL.
:param thrift_service_name:
If the Thrift service has a different name than its module, use
this parameter to specify it.
:returns:
An Thrift-like class, ready to be instantiated and used
with TChannelSyncClient.
"""
assert service_module, 'service_module is required'
service = service or '' # may be blank for non-hyperbahn use cases
if not thrift_service_name:
thrift_service_name = service_module.__name__.rsplit('.', 1)[-1]
method_names = get_service_methods(service_module.Iface)
def init(
self,
tchannel,
hostport=None,
trace=False,
protocol_headers=None,
):
self.async_thrift = self.__async_client_class__(
tchannel=tchannel,
hostport=hostport,
trace=trace,
protocol_headers=protocol_headers,
)
self.threadloop = tchannel._threadloop
init.__name__ = '__init__'
methods = {
'__init__': init,
'__async_client_class__': async_client_for(
service=service,
service_module=service_module,
thrift_service_name=thrift_service_name,
)
}
methods.update({
method_name: generate_method(method_name)
for method_name in method_names
})
return type(thrift_service_name + 'Client', (object,), methods) | [
"def",
"client_for",
"(",
"service",
",",
"service_module",
",",
"thrift_service_name",
"=",
"None",
")",
":",
"assert",
"service_module",
",",
"'service_module is required'",
"service",
"=",
"service",
"or",
"''",
"# may be blank for non-hyperbahn use cases",
"if",
"no... | Build a synchronous client class for the given Thrift service.
The generated class accepts a TChannelSyncClient and an optional
hostport as initialization arguments.
Given ``CommentService`` defined in ``comment.thrift`` and registered
with Hyperbahn under the name "comment", here's how this might be used:
.. code-block:: python
from tchannel.sync import TChannelSyncClient
from tchannel.sync.thrift import client_for
from comment import CommentService
CommentServiceClient = client_for('comment', CommentService)
tchannel_sync = TChannelSyncClient('my-service')
comment_client = CommentServiceClient(tchannel_sync)
future = comment_client.postComment(
articleId,
CommentService.Comment("hi")
)
result = future.result()
:param service:
Name of the Hyperbahn service being called.
:param service_module:
The Thrift-generated module for that service. This usually has
the same name as definied for the service in the IDL.
:param thrift_service_name:
If the Thrift service has a different name than its module, use
this parameter to specify it.
:returns:
An Thrift-like class, ready to be instantiated and used
with TChannelSyncClient. | [
"Build",
"a",
"synchronous",
"client",
"class",
"for",
"the",
"given",
"Thrift",
"service",
"."
] | ee08cce6234f24fd2373774988186dd374306c43 | https://github.com/uber/tchannel-python/blob/ee08cce6234f24fd2373774988186dd374306c43/tchannel/sync/thrift.py#L27-L106 | train | 36,575 |
uber/tchannel-python | tchannel/sync/thrift.py | generate_method | def generate_method(method_name):
"""Generate a method for a given Thrift service.
Uses the provided TChannelSyncClient's threadloop in order
to convert RPC calls to concurrent.futures
:param method_name: Method being called.
:return: A method that invokes the RPC using TChannelSyncClient
"""
def call(self, *args, **kwargs):
"""Forward RPC call to TChannelSyncClient
:return concurrent.futures.Future:
"""
if not self.threadloop.is_ready():
self.threadloop.start()
return self.threadloop.submit(
getattr(self.async_thrift, method_name), *args, **kwargs
)
return call | python | def generate_method(method_name):
"""Generate a method for a given Thrift service.
Uses the provided TChannelSyncClient's threadloop in order
to convert RPC calls to concurrent.futures
:param method_name: Method being called.
:return: A method that invokes the RPC using TChannelSyncClient
"""
def call(self, *args, **kwargs):
"""Forward RPC call to TChannelSyncClient
:return concurrent.futures.Future:
"""
if not self.threadloop.is_ready():
self.threadloop.start()
return self.threadloop.submit(
getattr(self.async_thrift, method_name), *args, **kwargs
)
return call | [
"def",
"generate_method",
"(",
"method_name",
")",
":",
"def",
"call",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"\"\"\"Forward RPC call to TChannelSyncClient\n\n :return concurrent.futures.Future:\n \"\"\"",
"if",
"not",
"self",
".... | Generate a method for a given Thrift service.
Uses the provided TChannelSyncClient's threadloop in order
to convert RPC calls to concurrent.futures
:param method_name: Method being called.
:return: A method that invokes the RPC using TChannelSyncClient | [
"Generate",
"a",
"method",
"for",
"a",
"given",
"Thrift",
"service",
"."
] | ee08cce6234f24fd2373774988186dd374306c43 | https://github.com/uber/tchannel-python/blob/ee08cce6234f24fd2373774988186dd374306c43/tchannel/sync/thrift.py#L109-L131 | train | 36,576 |
uber/tchannel-python | tchannel/tornado/stream.py | read_full | def read_full(stream):
"""Read the full contents of the given stream into memory.
:return:
A future containing the complete stream contents.
"""
assert stream, "stream is required"
chunks = []
chunk = yield stream.read()
while chunk:
chunks.append(chunk)
chunk = yield stream.read()
raise tornado.gen.Return(b''.join(chunks)) | python | def read_full(stream):
"""Read the full contents of the given stream into memory.
:return:
A future containing the complete stream contents.
"""
assert stream, "stream is required"
chunks = []
chunk = yield stream.read()
while chunk:
chunks.append(chunk)
chunk = yield stream.read()
raise tornado.gen.Return(b''.join(chunks)) | [
"def",
"read_full",
"(",
"stream",
")",
":",
"assert",
"stream",
",",
"\"stream is required\"",
"chunks",
"=",
"[",
"]",
"chunk",
"=",
"yield",
"stream",
".",
"read",
"(",
")",
"while",
"chunk",
":",
"chunks",
".",
"append",
"(",
"chunk",
")",
"chunk",
... | Read the full contents of the given stream into memory.
:return:
A future containing the complete stream contents. | [
"Read",
"the",
"full",
"contents",
"of",
"the",
"given",
"stream",
"into",
"memory",
"."
] | ee08cce6234f24fd2373774988186dd374306c43 | https://github.com/uber/tchannel-python/blob/ee08cce6234f24fd2373774988186dd374306c43/tchannel/tornado/stream.py#L37-L52 | train | 36,577 |
uber/tchannel-python | tchannel/tornado/stream.py | maybe_stream | def maybe_stream(s):
"""Ensure that the given argument is a stream."""
if isinstance(s, Stream):
return s
if s is None:
stream = InMemStream()
stream.close() # we don't intend to write anything
return stream
if isinstance(s, unicode):
s = s.encode('utf-8')
if isinstance(s, bytearray):
s = bytes(s)
if isinstance(s, bytes):
stream = InMemStream(s)
stream.close() # we don't intend to write anything
return stream
# s may still conform to the Stream interface. Yay duck typing.
return s | python | def maybe_stream(s):
"""Ensure that the given argument is a stream."""
if isinstance(s, Stream):
return s
if s is None:
stream = InMemStream()
stream.close() # we don't intend to write anything
return stream
if isinstance(s, unicode):
s = s.encode('utf-8')
if isinstance(s, bytearray):
s = bytes(s)
if isinstance(s, bytes):
stream = InMemStream(s)
stream.close() # we don't intend to write anything
return stream
# s may still conform to the Stream interface. Yay duck typing.
return s | [
"def",
"maybe_stream",
"(",
"s",
")",
":",
"if",
"isinstance",
"(",
"s",
",",
"Stream",
")",
":",
"return",
"s",
"if",
"s",
"is",
"None",
":",
"stream",
"=",
"InMemStream",
"(",
")",
"stream",
".",
"close",
"(",
")",
"# we don't intend to write anything"... | Ensure that the given argument is a stream. | [
"Ensure",
"that",
"the",
"given",
"argument",
"is",
"a",
"stream",
"."
] | ee08cce6234f24fd2373774988186dd374306c43 | https://github.com/uber/tchannel-python/blob/ee08cce6234f24fd2373774988186dd374306c43/tchannel/tornado/stream.py#L259-L280 | train | 36,578 |
uber/tchannel-python | tchannel/tornado/message_factory.py | build_raw_error_message | def build_raw_error_message(protocol_exception):
"""build protocol level error message based on Error object"""
message = ErrorMessage(
id=protocol_exception.id,
code=protocol_exception.code,
tracing=protocol_exception.tracing,
description=protocol_exception.description,
)
return message | python | def build_raw_error_message(protocol_exception):
"""build protocol level error message based on Error object"""
message = ErrorMessage(
id=protocol_exception.id,
code=protocol_exception.code,
tracing=protocol_exception.tracing,
description=protocol_exception.description,
)
return message | [
"def",
"build_raw_error_message",
"(",
"protocol_exception",
")",
":",
"message",
"=",
"ErrorMessage",
"(",
"id",
"=",
"protocol_exception",
".",
"id",
",",
"code",
"=",
"protocol_exception",
".",
"code",
",",
"tracing",
"=",
"protocol_exception",
".",
"tracing",
... | build protocol level error message based on Error object | [
"build",
"protocol",
"level",
"error",
"message",
"based",
"on",
"Error",
"object"
] | ee08cce6234f24fd2373774988186dd374306c43 | https://github.com/uber/tchannel-python/blob/ee08cce6234f24fd2373774988186dd374306c43/tchannel/tornado/message_factory.py#L49-L58 | train | 36,579 |
uber/tchannel-python | tchannel/tornado/message_factory.py | MessageFactory.build_raw_request_message | def build_raw_request_message(self, request, args, is_completed=False):
"""build protocol level message based on request and args.
request object contains meta information about outgoing request.
args are the currently chunk data from argstreams
is_completed tells the flags of the message
:param request: Request
:param args: array of arg streams
:param is_completed: message flags
:return: CallRequestMessage/CallRequestContinueMessage
"""
request.flags = FlagsType.none if is_completed else FlagsType.fragment
# TODO decide what need to pass from request
if request.state == StreamState.init:
message = CallRequestMessage(
flags=request.flags,
ttl=request.ttl * 1000,
tracing=request.tracing,
service=request.service,
headers=request.headers,
checksum=request.checksum,
args=args
)
request.state = (StreamState.completed if is_completed
else StreamState.streaming)
elif request.state == StreamState.streaming:
message = CallRequestContinueMessage(
flags=request.flags,
checksum=request.checksum,
args=args
)
request.state = (StreamState.completed if is_completed
else StreamState.streaming)
message.id = request.id
return message | python | def build_raw_request_message(self, request, args, is_completed=False):
"""build protocol level message based on request and args.
request object contains meta information about outgoing request.
args are the currently chunk data from argstreams
is_completed tells the flags of the message
:param request: Request
:param args: array of arg streams
:param is_completed: message flags
:return: CallRequestMessage/CallRequestContinueMessage
"""
request.flags = FlagsType.none if is_completed else FlagsType.fragment
# TODO decide what need to pass from request
if request.state == StreamState.init:
message = CallRequestMessage(
flags=request.flags,
ttl=request.ttl * 1000,
tracing=request.tracing,
service=request.service,
headers=request.headers,
checksum=request.checksum,
args=args
)
request.state = (StreamState.completed if is_completed
else StreamState.streaming)
elif request.state == StreamState.streaming:
message = CallRequestContinueMessage(
flags=request.flags,
checksum=request.checksum,
args=args
)
request.state = (StreamState.completed if is_completed
else StreamState.streaming)
message.id = request.id
return message | [
"def",
"build_raw_request_message",
"(",
"self",
",",
"request",
",",
"args",
",",
"is_completed",
"=",
"False",
")",
":",
"request",
".",
"flags",
"=",
"FlagsType",
".",
"none",
"if",
"is_completed",
"else",
"FlagsType",
".",
"fragment",
"# TODO decide what nee... | build protocol level message based on request and args.
request object contains meta information about outgoing request.
args are the currently chunk data from argstreams
is_completed tells the flags of the message
:param request: Request
:param args: array of arg streams
:param is_completed: message flags
:return: CallRequestMessage/CallRequestContinueMessage | [
"build",
"protocol",
"level",
"message",
"based",
"on",
"request",
"and",
"args",
"."
] | ee08cce6234f24fd2373774988186dd374306c43 | https://github.com/uber/tchannel-python/blob/ee08cce6234f24fd2373774988186dd374306c43/tchannel/tornado/message_factory.py#L76-L113 | train | 36,580 |
uber/tchannel-python | tchannel/tornado/message_factory.py | MessageFactory.build_raw_response_message | def build_raw_response_message(self, response, args, is_completed=False):
"""build protocol level message based on response and args.
response object contains meta information about outgoing response.
args are the currently chunk data from argstreams
is_completed tells the flags of the message
:param response: Response
:param args: array of arg streams
:param is_completed: message flags
:return: CallResponseMessage/CallResponseContinueMessage
"""
response.flags = FlagsType.none if is_completed else FlagsType.fragment
# TODO decide what need to pass from request
if response.state == StreamState.init:
message = CallResponseMessage(
flags=response.flags,
code=response.code,
tracing=response.tracing,
headers=response.headers,
checksum=response.checksum,
args=args
)
response.state = (StreamState.completed if is_completed
else StreamState.streaming)
elif response.state == StreamState.streaming:
message = CallResponseContinueMessage(
flags=response.flags,
checksum=response.checksum,
args=args
)
response.state = (StreamState.completed if is_completed
else StreamState.streaming)
message.id = response.id
return message | python | def build_raw_response_message(self, response, args, is_completed=False):
"""build protocol level message based on response and args.
response object contains meta information about outgoing response.
args are the currently chunk data from argstreams
is_completed tells the flags of the message
:param response: Response
:param args: array of arg streams
:param is_completed: message flags
:return: CallResponseMessage/CallResponseContinueMessage
"""
response.flags = FlagsType.none if is_completed else FlagsType.fragment
# TODO decide what need to pass from request
if response.state == StreamState.init:
message = CallResponseMessage(
flags=response.flags,
code=response.code,
tracing=response.tracing,
headers=response.headers,
checksum=response.checksum,
args=args
)
response.state = (StreamState.completed if is_completed
else StreamState.streaming)
elif response.state == StreamState.streaming:
message = CallResponseContinueMessage(
flags=response.flags,
checksum=response.checksum,
args=args
)
response.state = (StreamState.completed if is_completed
else StreamState.streaming)
message.id = response.id
return message | [
"def",
"build_raw_response_message",
"(",
"self",
",",
"response",
",",
"args",
",",
"is_completed",
"=",
"False",
")",
":",
"response",
".",
"flags",
"=",
"FlagsType",
".",
"none",
"if",
"is_completed",
"else",
"FlagsType",
".",
"fragment",
"# TODO decide what ... | build protocol level message based on response and args.
response object contains meta information about outgoing response.
args are the currently chunk data from argstreams
is_completed tells the flags of the message
:param response: Response
:param args: array of arg streams
:param is_completed: message flags
:return: CallResponseMessage/CallResponseContinueMessage | [
"build",
"protocol",
"level",
"message",
"based",
"on",
"response",
"and",
"args",
"."
] | ee08cce6234f24fd2373774988186dd374306c43 | https://github.com/uber/tchannel-python/blob/ee08cce6234f24fd2373774988186dd374306c43/tchannel/tornado/message_factory.py#L115-L151 | train | 36,581 |
uber/tchannel-python | tchannel/tornado/message_factory.py | MessageFactory.build_request | def build_request(self, message):
"""Build inbound request object from protocol level message info.
It is allowed to take incompleted CallRequestMessage. Therefore the
created request may not contain whole three arguments.
:param message: CallRequestMessage
:return: request object
"""
args = self.prepare_args(message)
# TODO decide what to pass to Request from message
req = Request(
flags=message.flags,
ttl=message.ttl / 1000.0,
tracing=message.tracing,
service=message.service,
headers=message.headers,
checksum=message.checksum,
argstreams=args,
id=message.id,
)
return req | python | def build_request(self, message):
"""Build inbound request object from protocol level message info.
It is allowed to take incompleted CallRequestMessage. Therefore the
created request may not contain whole three arguments.
:param message: CallRequestMessage
:return: request object
"""
args = self.prepare_args(message)
# TODO decide what to pass to Request from message
req = Request(
flags=message.flags,
ttl=message.ttl / 1000.0,
tracing=message.tracing,
service=message.service,
headers=message.headers,
checksum=message.checksum,
argstreams=args,
id=message.id,
)
return req | [
"def",
"build_request",
"(",
"self",
",",
"message",
")",
":",
"args",
"=",
"self",
".",
"prepare_args",
"(",
"message",
")",
"# TODO decide what to pass to Request from message",
"req",
"=",
"Request",
"(",
"flags",
"=",
"message",
".",
"flags",
",",
"ttl",
"... | Build inbound request object from protocol level message info.
It is allowed to take incompleted CallRequestMessage. Therefore the
created request may not contain whole three arguments.
:param message: CallRequestMessage
:return: request object | [
"Build",
"inbound",
"request",
"object",
"from",
"protocol",
"level",
"message",
"info",
"."
] | ee08cce6234f24fd2373774988186dd374306c43 | https://github.com/uber/tchannel-python/blob/ee08cce6234f24fd2373774988186dd374306c43/tchannel/tornado/message_factory.py#L172-L195 | train | 36,582 |
uber/tchannel-python | tchannel/tornado/message_factory.py | MessageFactory.build_response | def build_response(self, message):
"""Build response object from protocol level message info
It is allowed to take incompleted CallResponseMessage. Therefore the
created request may not contain whole three arguments.
:param message: CallResponseMessage
:return: response object
"""
args = self.prepare_args(message)
# TODO decide what to pass to Response from message
res = Response(
flags=message.flags,
code=message.code,
headers=message.headers,
checksum=message.checksum,
argstreams=args,
id=message.id,
)
return res | python | def build_response(self, message):
"""Build response object from protocol level message info
It is allowed to take incompleted CallResponseMessage. Therefore the
created request may not contain whole three arguments.
:param message: CallResponseMessage
:return: response object
"""
args = self.prepare_args(message)
# TODO decide what to pass to Response from message
res = Response(
flags=message.flags,
code=message.code,
headers=message.headers,
checksum=message.checksum,
argstreams=args,
id=message.id,
)
return res | [
"def",
"build_response",
"(",
"self",
",",
"message",
")",
":",
"args",
"=",
"self",
".",
"prepare_args",
"(",
"message",
")",
"# TODO decide what to pass to Response from message",
"res",
"=",
"Response",
"(",
"flags",
"=",
"message",
".",
"flags",
",",
"code",... | Build response object from protocol level message info
It is allowed to take incompleted CallResponseMessage. Therefore the
created request may not contain whole three arguments.
:param message: CallResponseMessage
:return: response object | [
"Build",
"response",
"object",
"from",
"protocol",
"level",
"message",
"info"
] | ee08cce6234f24fd2373774988186dd374306c43 | https://github.com/uber/tchannel-python/blob/ee08cce6234f24fd2373774988186dd374306c43/tchannel/tornado/message_factory.py#L197-L218 | train | 36,583 |
uber/tchannel-python | tchannel/tornado/message_factory.py | MessageFactory.build | def build(self, message):
"""buffer all the streaming messages based on the
message id. Reconstruct all fragments together.
:param message:
incoming message
:return: next complete message or None if streaming
is not done
"""
context = None
if message.message_type in [Types.CALL_REQ,
Types.CALL_RES]:
self.verify_message(message)
context = self.build_context(message)
# streaming message
if message.flags == common.FlagsType.fragment:
self.message_buffer[message.id] = context
# find the incompleted stream
num = 0
for i, arg in enumerate(context.argstreams):
if arg.state != StreamState.completed:
num = i
break
self.close_argstream(context, num)
return context
elif message.message_type in [Types.CALL_REQ_CONTINUE,
Types.CALL_RES_CONTINUE]:
context = self.message_buffer.get(message.id)
if context is None:
# missing call msg before continue msg
raise FatalProtocolError(
"missing call message after receiving continue message",
message.id,
)
# find the incompleted stream
dst = 0
for i, arg in enumerate(context.argstreams):
if arg.state != StreamState.completed:
dst = i
break
try:
self.verify_message(message)
except InvalidChecksumError as e:
context.argstreams[dst].set_exception(e)
raise
src = 0
while src < len(message.args):
context.argstreams[dst].write(message.args[src])
dst += 1
src += 1
if message.flags != FlagsType.fragment:
# get last fragment. mark it as completed
assert (len(context.argstreams) ==
CallContinueMessage.max_args_num)
self.message_buffer.pop(message.id, None)
context.flags = FlagsType.none
self.close_argstream(context, dst - 1)
return None
elif message.message_type == Types.ERROR:
context = self.message_buffer.pop(message.id, None)
if context is None:
log.info('Unconsumed error %s', message)
return None
else:
error = TChannelError.from_code(
message.code,
description=message.description,
tracing=context.tracing,
)
context.set_exception(error)
return error
else:
return message | python | def build(self, message):
"""buffer all the streaming messages based on the
message id. Reconstruct all fragments together.
:param message:
incoming message
:return: next complete message or None if streaming
is not done
"""
context = None
if message.message_type in [Types.CALL_REQ,
Types.CALL_RES]:
self.verify_message(message)
context = self.build_context(message)
# streaming message
if message.flags == common.FlagsType.fragment:
self.message_buffer[message.id] = context
# find the incompleted stream
num = 0
for i, arg in enumerate(context.argstreams):
if arg.state != StreamState.completed:
num = i
break
self.close_argstream(context, num)
return context
elif message.message_type in [Types.CALL_REQ_CONTINUE,
Types.CALL_RES_CONTINUE]:
context = self.message_buffer.get(message.id)
if context is None:
# missing call msg before continue msg
raise FatalProtocolError(
"missing call message after receiving continue message",
message.id,
)
# find the incompleted stream
dst = 0
for i, arg in enumerate(context.argstreams):
if arg.state != StreamState.completed:
dst = i
break
try:
self.verify_message(message)
except InvalidChecksumError as e:
context.argstreams[dst].set_exception(e)
raise
src = 0
while src < len(message.args):
context.argstreams[dst].write(message.args[src])
dst += 1
src += 1
if message.flags != FlagsType.fragment:
# get last fragment. mark it as completed
assert (len(context.argstreams) ==
CallContinueMessage.max_args_num)
self.message_buffer.pop(message.id, None)
context.flags = FlagsType.none
self.close_argstream(context, dst - 1)
return None
elif message.message_type == Types.ERROR:
context = self.message_buffer.pop(message.id, None)
if context is None:
log.info('Unconsumed error %s', message)
return None
else:
error = TChannelError.from_code(
message.code,
description=message.description,
tracing=context.tracing,
)
context.set_exception(error)
return error
else:
return message | [
"def",
"build",
"(",
"self",
",",
"message",
")",
":",
"context",
"=",
"None",
"if",
"message",
".",
"message_type",
"in",
"[",
"Types",
".",
"CALL_REQ",
",",
"Types",
".",
"CALL_RES",
"]",
":",
"self",
".",
"verify_message",
"(",
"message",
")",
"cont... | buffer all the streaming messages based on the
message id. Reconstruct all fragments together.
:param message:
incoming message
:return: next complete message or None if streaming
is not done | [
"buffer",
"all",
"the",
"streaming",
"messages",
"based",
"on",
"the",
"message",
"id",
".",
"Reconstruct",
"all",
"fragments",
"together",
"."
] | ee08cce6234f24fd2373774988186dd374306c43 | https://github.com/uber/tchannel-python/blob/ee08cce6234f24fd2373774988186dd374306c43/tchannel/tornado/message_factory.py#L226-L309 | train | 36,584 |
uber/tchannel-python | tchannel/tornado/message_factory.py | MessageFactory.fragment | def fragment(self, message):
"""Fragment message based on max payload size
note: if the message doesn't need to fragment,
it will return a list which only contains original
message itself.
:param message: raw message
:return: list of messages whose sizes <= max
payload size
"""
if message.message_type in [Types.CALL_RES,
Types.CALL_REQ,
Types.CALL_REQ_CONTINUE,
Types.CALL_RES_CONTINUE]:
rw = RW[message.message_type]
payload_space = (common.MAX_PAYLOAD_SIZE -
rw.length_no_args(message))
# split a call/request message into an array
# with a call/request message and {0~n} continue
# message
fragment_msg = message.fragment(payload_space)
self.generate_checksum(message)
yield message
while fragment_msg is not None:
message = fragment_msg
rw = RW[message.message_type]
payload_space = (common.MAX_PAYLOAD_SIZE -
rw.length_no_args(message))
fragment_msg = message.fragment(payload_space)
self.generate_checksum(message)
yield message
else:
yield message | python | def fragment(self, message):
"""Fragment message based on max payload size
note: if the message doesn't need to fragment,
it will return a list which only contains original
message itself.
:param message: raw message
:return: list of messages whose sizes <= max
payload size
"""
if message.message_type in [Types.CALL_RES,
Types.CALL_REQ,
Types.CALL_REQ_CONTINUE,
Types.CALL_RES_CONTINUE]:
rw = RW[message.message_type]
payload_space = (common.MAX_PAYLOAD_SIZE -
rw.length_no_args(message))
# split a call/request message into an array
# with a call/request message and {0~n} continue
# message
fragment_msg = message.fragment(payload_space)
self.generate_checksum(message)
yield message
while fragment_msg is not None:
message = fragment_msg
rw = RW[message.message_type]
payload_space = (common.MAX_PAYLOAD_SIZE -
rw.length_no_args(message))
fragment_msg = message.fragment(payload_space)
self.generate_checksum(message)
yield message
else:
yield message | [
"def",
"fragment",
"(",
"self",
",",
"message",
")",
":",
"if",
"message",
".",
"message_type",
"in",
"[",
"Types",
".",
"CALL_RES",
",",
"Types",
".",
"CALL_REQ",
",",
"Types",
".",
"CALL_REQ_CONTINUE",
",",
"Types",
".",
"CALL_RES_CONTINUE",
"]",
":",
... | Fragment message based on max payload size
note: if the message doesn't need to fragment,
it will return a list which only contains original
message itself.
:param message: raw message
:return: list of messages whose sizes <= max
payload size | [
"Fragment",
"message",
"based",
"on",
"max",
"payload",
"size"
] | ee08cce6234f24fd2373774988186dd374306c43 | https://github.com/uber/tchannel-python/blob/ee08cce6234f24fd2373774988186dd374306c43/tchannel/tornado/message_factory.py#L311-L345 | train | 36,585 |
uber/tchannel-python | tchannel/tornado/message_factory.py | MessageFactory.verify_message | def verify_message(self, message):
"""Verify the checksum of the message."""
if verify_checksum(
message,
self.in_checksum.get(message.id, 0),
):
self.in_checksum[message.id] = message.checksum[1]
if message.flags == FlagsType.none:
self.in_checksum.pop(message.id)
else:
self.in_checksum.pop(message.id, None)
raise InvalidChecksumError(
description="Checksum does not match!",
id=message.id,
) | python | def verify_message(self, message):
"""Verify the checksum of the message."""
if verify_checksum(
message,
self.in_checksum.get(message.id, 0),
):
self.in_checksum[message.id] = message.checksum[1]
if message.flags == FlagsType.none:
self.in_checksum.pop(message.id)
else:
self.in_checksum.pop(message.id, None)
raise InvalidChecksumError(
description="Checksum does not match!",
id=message.id,
) | [
"def",
"verify_message",
"(",
"self",
",",
"message",
")",
":",
"if",
"verify_checksum",
"(",
"message",
",",
"self",
".",
"in_checksum",
".",
"get",
"(",
"message",
".",
"id",
",",
"0",
")",
",",
")",
":",
"self",
".",
"in_checksum",
"[",
"message",
... | Verify the checksum of the message. | [
"Verify",
"the",
"checksum",
"of",
"the",
"message",
"."
] | ee08cce6234f24fd2373774988186dd374306c43 | https://github.com/uber/tchannel-python/blob/ee08cce6234f24fd2373774988186dd374306c43/tchannel/tornado/message_factory.py#L359-L374 | train | 36,586 |
uber/tchannel-python | tchannel/rw.py | chain | def chain(*rws):
"""Build a ReadWriter from the given list of ReadWriters.
.. code-block:: python
chain(
number(1),
number(8),
len_prefixed_string(number(2)),
) # == n1:1 n2:8 s~2
Reads/writes from the given ReadWriters in-order. Returns lists of values
in the same order as the ReadWriters.
:param rws:
One or more ReadWriters
"""
assert rws is not None
if len(rws) == 1 and isinstance(rws[0], list):
# In case someone does chain([l0, l1, ...])
rws = rws[0]
return ChainReadWriter(rws) | python | def chain(*rws):
"""Build a ReadWriter from the given list of ReadWriters.
.. code-block:: python
chain(
number(1),
number(8),
len_prefixed_string(number(2)),
) # == n1:1 n2:8 s~2
Reads/writes from the given ReadWriters in-order. Returns lists of values
in the same order as the ReadWriters.
:param rws:
One or more ReadWriters
"""
assert rws is not None
if len(rws) == 1 and isinstance(rws[0], list):
# In case someone does chain([l0, l1, ...])
rws = rws[0]
return ChainReadWriter(rws) | [
"def",
"chain",
"(",
"*",
"rws",
")",
":",
"assert",
"rws",
"is",
"not",
"None",
"if",
"len",
"(",
"rws",
")",
"==",
"1",
"and",
"isinstance",
"(",
"rws",
"[",
"0",
"]",
",",
"list",
")",
":",
"# In case someone does chain([l0, l1, ...])",
"rws",
"=",
... | Build a ReadWriter from the given list of ReadWriters.
.. code-block:: python
chain(
number(1),
number(8),
len_prefixed_string(number(2)),
) # == n1:1 n2:8 s~2
Reads/writes from the given ReadWriters in-order. Returns lists of values
in the same order as the ReadWriters.
:param rws:
One or more ReadWriters | [
"Build",
"a",
"ReadWriter",
"from",
"the",
"given",
"list",
"of",
"ReadWriters",
"."
] | ee08cce6234f24fd2373774988186dd374306c43 | https://github.com/uber/tchannel-python/blob/ee08cce6234f24fd2373774988186dd374306c43/tchannel/rw.py#L82-L103 | train | 36,587 |
uber/tchannel-python | tchannel/rw.py | ReadWriter.take | def take(self, stream, num):
"""Read the given number of bytes from the stream.
:param stream:
stream to read from
:param num:
number of bytes to read
:raises ReadError:
if the stream did not yield the exact number of bytes expected
"""
s = stream.read(num)
slen = len(s)
if slen != num:
raise ReadError(
"Expected %d bytes but got %d bytes." % (num, slen)
)
return s | python | def take(self, stream, num):
"""Read the given number of bytes from the stream.
:param stream:
stream to read from
:param num:
number of bytes to read
:raises ReadError:
if the stream did not yield the exact number of bytes expected
"""
s = stream.read(num)
slen = len(s)
if slen != num:
raise ReadError(
"Expected %d bytes but got %d bytes." % (num, slen)
)
return s | [
"def",
"take",
"(",
"self",
",",
"stream",
",",
"num",
")",
":",
"s",
"=",
"stream",
".",
"read",
"(",
"num",
")",
"slen",
"=",
"len",
"(",
"s",
")",
"if",
"slen",
"!=",
"num",
":",
"raise",
"ReadError",
"(",
"\"Expected %d bytes but got %d bytes.\"",
... | Read the given number of bytes from the stream.
:param stream:
stream to read from
:param num:
number of bytes to read
:raises ReadError:
if the stream did not yield the exact number of bytes expected | [
"Read",
"the",
"given",
"number",
"of",
"bytes",
"from",
"the",
"stream",
"."
] | ee08cce6234f24fd2373774988186dd374306c43 | https://github.com/uber/tchannel-python/blob/ee08cce6234f24fd2373774988186dd374306c43/tchannel/rw.py#L266-L282 | train | 36,588 |
uber/tchannel-python | tchannel/thrift/reflection.py | get_service_methods | def get_service_methods(iface):
"""Get a list of methods defined in the interface for a Thrift service.
:param iface:
The Thrift-generated Iface class defining the interface for the
service.
:returns:
A set containing names of the methods defined for the service.
"""
methods = inspect.getmembers(iface, predicate=inspect.ismethod)
return set(
name for (name, method) in methods if not name.startswith('__')
) | python | def get_service_methods(iface):
"""Get a list of methods defined in the interface for a Thrift service.
:param iface:
The Thrift-generated Iface class defining the interface for the
service.
:returns:
A set containing names of the methods defined for the service.
"""
methods = inspect.getmembers(iface, predicate=inspect.ismethod)
return set(
name for (name, method) in methods if not name.startswith('__')
) | [
"def",
"get_service_methods",
"(",
"iface",
")",
":",
"methods",
"=",
"inspect",
".",
"getmembers",
"(",
"iface",
",",
"predicate",
"=",
"inspect",
".",
"ismethod",
")",
"return",
"set",
"(",
"name",
"for",
"(",
"name",
",",
"method",
")",
"in",
"methods... | Get a list of methods defined in the interface for a Thrift service.
:param iface:
The Thrift-generated Iface class defining the interface for the
service.
:returns:
A set containing names of the methods defined for the service. | [
"Get",
"a",
"list",
"of",
"methods",
"defined",
"in",
"the",
"interface",
"for",
"a",
"Thrift",
"service",
"."
] | ee08cce6234f24fd2373774988186dd374306c43 | https://github.com/uber/tchannel-python/blob/ee08cce6234f24fd2373774988186dd374306c43/tchannel/thrift/reflection.py#L27-L40 | train | 36,589 |
uber/tchannel-python | tchannel/deprecate.py | deprecate | def deprecate(message):
"""Loudly prints warning."""
warnings.simplefilter('default')
warnings.warn(message, category=DeprecationWarning)
warnings.resetwarnings() | python | def deprecate(message):
"""Loudly prints warning."""
warnings.simplefilter('default')
warnings.warn(message, category=DeprecationWarning)
warnings.resetwarnings() | [
"def",
"deprecate",
"(",
"message",
")",
":",
"warnings",
".",
"simplefilter",
"(",
"'default'",
")",
"warnings",
".",
"warn",
"(",
"message",
",",
"category",
"=",
"DeprecationWarning",
")",
"warnings",
".",
"resetwarnings",
"(",
")"
] | Loudly prints warning. | [
"Loudly",
"prints",
"warning",
"."
] | ee08cce6234f24fd2373774988186dd374306c43 | https://github.com/uber/tchannel-python/blob/ee08cce6234f24fd2373774988186dd374306c43/tchannel/deprecate.py#L29-L33 | train | 36,590 |
uber/tchannel-python | tchannel/deprecate.py | deprecated | def deprecated(message):
"""Warn every time a fn is called."""
def decorator(fn):
@functools.wraps(fn)
def new_fn(*args, **kwargs):
deprecate(message)
return fn(*args, **kwargs)
return new_fn
return decorator | python | def deprecated(message):
"""Warn every time a fn is called."""
def decorator(fn):
@functools.wraps(fn)
def new_fn(*args, **kwargs):
deprecate(message)
return fn(*args, **kwargs)
return new_fn
return decorator | [
"def",
"deprecated",
"(",
"message",
")",
":",
"def",
"decorator",
"(",
"fn",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"fn",
")",
"def",
"new_fn",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"deprecate",
"(",
"message",
")",
"return"... | Warn every time a fn is called. | [
"Warn",
"every",
"time",
"a",
"fn",
"is",
"called",
"."
] | ee08cce6234f24fd2373774988186dd374306c43 | https://github.com/uber/tchannel-python/blob/ee08cce6234f24fd2373774988186dd374306c43/tchannel/deprecate.py#L36-L44 | train | 36,591 |
uber/tchannel-python | tchannel/thrift/rw.py | load | def load(path, service=None, hostport=None, module_name=None):
"""Loads the Thrift file at the specified path.
The file is compiled in-memory and a Python module containing the result
is returned. It may be used with ``TChannel.thrift``. For example,
.. code-block:: python
from tchannel import TChannel, thrift
# Load our server's interface definition.
donuts = thrift.load(path='donuts.thrift')
# We need to specify a service name or hostport because this is a
# downstream service we'll be calling.
coffee = thrift.load(path='coffee.thrift', service='coffee')
tchannel = TChannel('donuts')
@tchannel.thrift.register(donuts.DonutsService)
@tornado.gen.coroutine
def submitOrder(request):
args = request.body
if args.coffee:
yield tchannel.thrift(
coffee.CoffeeService.order(args.coffee)
)
# ...
The returned module contains, one top-level type for each struct, enum,
union, exeption, and service defined in the Thrift file. For each service,
the corresponding class contains a classmethod for each function defined
in that service that accepts the arguments for that function and returns a
``ThriftRequest`` capable of being sent via ``TChannel.thrift``.
For more information on what gets generated by ``load``, see `thriftrw
<http://thriftrw.readthedocs.org/en/latest/>`_.
Note that the ``path`` accepted by ``load`` must be either an absolute
path or a path relative to the *the current directory*. If you need to
refer to Thrift files relative to the Python module in which ``load`` was
called, use the ``__file__`` magic variable.
.. code-block:: python
# Given,
#
# foo/
# myservice.thrift
# bar/
# x.py
#
# Inside foo/bar/x.py,
path = os.path.join(
os.path.dirname(__file__), '../myservice.thrift'
)
The returned value is a valid Python module. You can install the module by
adding it to the ``sys.modules`` dictionary. This will allow importing
items from this module directly. You can use the ``__name__`` magic
variable to make the generated module a submodule of the current module.
For example,
.. code-block:: python
# foo/bar.py
import sys
from tchannel import thrift
donuts = = thrift.load('donuts.thrift')
sys.modules[__name__ + '.donuts'] = donuts
This installs the module generated for ``donuts.thrift`` as the module
``foo.bar.donuts``. Callers can then import items from that module
directly. For example,
.. code-block:: python
# foo/baz.py
from foo.bar.donuts import DonutsService, Order
def baz(tchannel):
return tchannel.thrift(
DonutsService.submitOrder(Order(..))
)
:param str service:
Name of the service that the Thrift file represents. This name will be
used to route requests through Hyperbahn.
:param str path:
Path to the Thrift file. If this is a relative path, it must be
relative to the current directory.
:param str hostport:
Clients can use this to specify the hostport at which the service can
be found. If omitted, TChannel will route the requests through known
peers. This value is ignored by servers.
:param str module_name:
Name used for the generated Python module. Defaults to the name of the
Thrift file.
"""
# TODO replace with more specific exceptions
# assert service, 'service is required'
# assert path, 'path is required'
# Backwards compatibility for callers passing in service name as first arg.
if not path.endswith('.thrift'):
service, path = path, service
module = thriftrw.load(path=path, name=module_name)
return TChannelThriftModule(service, module, hostport) | python | def load(path, service=None, hostport=None, module_name=None):
"""Loads the Thrift file at the specified path.
The file is compiled in-memory and a Python module containing the result
is returned. It may be used with ``TChannel.thrift``. For example,
.. code-block:: python
from tchannel import TChannel, thrift
# Load our server's interface definition.
donuts = thrift.load(path='donuts.thrift')
# We need to specify a service name or hostport because this is a
# downstream service we'll be calling.
coffee = thrift.load(path='coffee.thrift', service='coffee')
tchannel = TChannel('donuts')
@tchannel.thrift.register(donuts.DonutsService)
@tornado.gen.coroutine
def submitOrder(request):
args = request.body
if args.coffee:
yield tchannel.thrift(
coffee.CoffeeService.order(args.coffee)
)
# ...
The returned module contains, one top-level type for each struct, enum,
union, exeption, and service defined in the Thrift file. For each service,
the corresponding class contains a classmethod for each function defined
in that service that accepts the arguments for that function and returns a
``ThriftRequest`` capable of being sent via ``TChannel.thrift``.
For more information on what gets generated by ``load``, see `thriftrw
<http://thriftrw.readthedocs.org/en/latest/>`_.
Note that the ``path`` accepted by ``load`` must be either an absolute
path or a path relative to the *the current directory*. If you need to
refer to Thrift files relative to the Python module in which ``load`` was
called, use the ``__file__`` magic variable.
.. code-block:: python
# Given,
#
# foo/
# myservice.thrift
# bar/
# x.py
#
# Inside foo/bar/x.py,
path = os.path.join(
os.path.dirname(__file__), '../myservice.thrift'
)
The returned value is a valid Python module. You can install the module by
adding it to the ``sys.modules`` dictionary. This will allow importing
items from this module directly. You can use the ``__name__`` magic
variable to make the generated module a submodule of the current module.
For example,
.. code-block:: python
# foo/bar.py
import sys
from tchannel import thrift
donuts = = thrift.load('donuts.thrift')
sys.modules[__name__ + '.donuts'] = donuts
This installs the module generated for ``donuts.thrift`` as the module
``foo.bar.donuts``. Callers can then import items from that module
directly. For example,
.. code-block:: python
# foo/baz.py
from foo.bar.donuts import DonutsService, Order
def baz(tchannel):
return tchannel.thrift(
DonutsService.submitOrder(Order(..))
)
:param str service:
Name of the service that the Thrift file represents. This name will be
used to route requests through Hyperbahn.
:param str path:
Path to the Thrift file. If this is a relative path, it must be
relative to the current directory.
:param str hostport:
Clients can use this to specify the hostport at which the service can
be found. If omitted, TChannel will route the requests through known
peers. This value is ignored by servers.
:param str module_name:
Name used for the generated Python module. Defaults to the name of the
Thrift file.
"""
# TODO replace with more specific exceptions
# assert service, 'service is required'
# assert path, 'path is required'
# Backwards compatibility for callers passing in service name as first arg.
if not path.endswith('.thrift'):
service, path = path, service
module = thriftrw.load(path=path, name=module_name)
return TChannelThriftModule(service, module, hostport) | [
"def",
"load",
"(",
"path",
",",
"service",
"=",
"None",
",",
"hostport",
"=",
"None",
",",
"module_name",
"=",
"None",
")",
":",
"# TODO replace with more specific exceptions",
"# assert service, 'service is required'",
"# assert path, 'path is required'",
"# Backwards com... | Loads the Thrift file at the specified path.
The file is compiled in-memory and a Python module containing the result
is returned. It may be used with ``TChannel.thrift``. For example,
.. code-block:: python
from tchannel import TChannel, thrift
# Load our server's interface definition.
donuts = thrift.load(path='donuts.thrift')
# We need to specify a service name or hostport because this is a
# downstream service we'll be calling.
coffee = thrift.load(path='coffee.thrift', service='coffee')
tchannel = TChannel('donuts')
@tchannel.thrift.register(donuts.DonutsService)
@tornado.gen.coroutine
def submitOrder(request):
args = request.body
if args.coffee:
yield tchannel.thrift(
coffee.CoffeeService.order(args.coffee)
)
# ...
The returned module contains, one top-level type for each struct, enum,
union, exeption, and service defined in the Thrift file. For each service,
the corresponding class contains a classmethod for each function defined
in that service that accepts the arguments for that function and returns a
``ThriftRequest`` capable of being sent via ``TChannel.thrift``.
For more information on what gets generated by ``load``, see `thriftrw
<http://thriftrw.readthedocs.org/en/latest/>`_.
Note that the ``path`` accepted by ``load`` must be either an absolute
path or a path relative to the *the current directory*. If you need to
refer to Thrift files relative to the Python module in which ``load`` was
called, use the ``__file__`` magic variable.
.. code-block:: python
# Given,
#
# foo/
# myservice.thrift
# bar/
# x.py
#
# Inside foo/bar/x.py,
path = os.path.join(
os.path.dirname(__file__), '../myservice.thrift'
)
The returned value is a valid Python module. You can install the module by
adding it to the ``sys.modules`` dictionary. This will allow importing
items from this module directly. You can use the ``__name__`` magic
variable to make the generated module a submodule of the current module.
For example,
.. code-block:: python
# foo/bar.py
import sys
from tchannel import thrift
donuts = = thrift.load('donuts.thrift')
sys.modules[__name__ + '.donuts'] = donuts
This installs the module generated for ``donuts.thrift`` as the module
``foo.bar.donuts``. Callers can then import items from that module
directly. For example,
.. code-block:: python
# foo/baz.py
from foo.bar.donuts import DonutsService, Order
def baz(tchannel):
return tchannel.thrift(
DonutsService.submitOrder(Order(..))
)
:param str service:
Name of the service that the Thrift file represents. This name will be
used to route requests through Hyperbahn.
:param str path:
Path to the Thrift file. If this is a relative path, it must be
relative to the current directory.
:param str hostport:
Clients can use this to specify the hostport at which the service can
be found. If omitted, TChannel will route the requests through known
peers. This value is ignored by servers.
:param str module_name:
Name used for the generated Python module. Defaults to the name of the
Thrift file. | [
"Loads",
"the",
"Thrift",
"file",
"at",
"the",
"specified",
"path",
"."
] | ee08cce6234f24fd2373774988186dd374306c43 | https://github.com/uber/tchannel-python/blob/ee08cce6234f24fd2373774988186dd374306c43/tchannel/thrift/rw.py#L40-L154 | train | 36,592 |
uber/tchannel-python | tchannel/net.py | interface_ip | def interface_ip(interface):
"""Determine the IP assigned to us by the given network interface."""
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
return socket.inet_ntoa(
fcntl.ioctl(
sock.fileno(), 0x8915, struct.pack('256s', interface[:15])
)[20:24]
) | python | def interface_ip(interface):
"""Determine the IP assigned to us by the given network interface."""
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
return socket.inet_ntoa(
fcntl.ioctl(
sock.fileno(), 0x8915, struct.pack('256s', interface[:15])
)[20:24]
) | [
"def",
"interface_ip",
"(",
"interface",
")",
":",
"sock",
"=",
"socket",
".",
"socket",
"(",
"socket",
".",
"AF_INET",
",",
"socket",
".",
"SOCK_DGRAM",
")",
"return",
"socket",
".",
"inet_ntoa",
"(",
"fcntl",
".",
"ioctl",
"(",
"sock",
".",
"fileno",
... | Determine the IP assigned to us by the given network interface. | [
"Determine",
"the",
"IP",
"assigned",
"to",
"us",
"by",
"the",
"given",
"network",
"interface",
"."
] | ee08cce6234f24fd2373774988186dd374306c43 | https://github.com/uber/tchannel-python/blob/ee08cce6234f24fd2373774988186dd374306c43/tchannel/net.py#L31-L38 | train | 36,593 |
uber/tchannel-python | tchannel/thrift/client.py | client_for | def client_for(service, service_module, thrift_service_name=None):
"""Build a client class for the given Thrift service.
The generated class accepts a TChannel and an optional hostport as
initialization arguments.
Given ``CommentService`` defined in ``comment.thrift`` and registered with
Hyperbahn under the name "comment", here's how this may be used:
.. code-block:: python
from comment import CommentService
CommentServiceClient = client_for("comment", CommentService)
@gen.coroutine
def post_comment(articleId, msg, hostport=None):
client = CommentServiceClient(tchannel, hostport)
yield client.postComment(articleId, CommentService.Comment(msg))
:param service:
Name of the Hyperbahn service being called. This is the name with
which the service registered with Hyperbahn.
:param service_module:
The Thrift-generated module for that service. This usually has the
same name as defined for the service in the IDL.
:param thrift_service_name:
If the Thrift service has a different name than its module, use this
parameter to specify it.
:returns:
An object with the same interface as the service that uses the given
TChannel to call the service.
"""
assert service_module, 'service_module is required'
service = service or '' # may be blank for non-hyperbahn use cases
if not thrift_service_name:
thrift_service_name = service_module.__name__.rsplit('.', 1)[-1]
method_names = get_service_methods(service_module.Iface)
def new(cls, tchannel, hostport=None, trace=False, protocol_headers=None):
"""
:param tchannel:
TChannel through which the requests will be sent.
:param hostport:
Address of the machine to which the requests will be sent, or None
if the TChannel will do peer selection on a per-request basis.
:param trace:
Whether tracing is enabled.
:param protocol_headers:
Protocol-level headers to send with the request.
"""
protocol_headers = protocol_headers or {}
protocol_headers['as'] = 'thrift'
return _ClientBase.__new__(
cls, tchannel, hostport, service, trace, protocol_headers
)
new.__name__ = '__new__'
methods = {'__new__': new}
for method_name in method_names:
methods[method_name] = generate_method(
service_module, thrift_service_name, method_name
)
return type(thrift_service_name + 'Client', (_ClientBase,), methods) | python | def client_for(service, service_module, thrift_service_name=None):
"""Build a client class for the given Thrift service.
The generated class accepts a TChannel and an optional hostport as
initialization arguments.
Given ``CommentService`` defined in ``comment.thrift`` and registered with
Hyperbahn under the name "comment", here's how this may be used:
.. code-block:: python
from comment import CommentService
CommentServiceClient = client_for("comment", CommentService)
@gen.coroutine
def post_comment(articleId, msg, hostport=None):
client = CommentServiceClient(tchannel, hostport)
yield client.postComment(articleId, CommentService.Comment(msg))
:param service:
Name of the Hyperbahn service being called. This is the name with
which the service registered with Hyperbahn.
:param service_module:
The Thrift-generated module for that service. This usually has the
same name as defined for the service in the IDL.
:param thrift_service_name:
If the Thrift service has a different name than its module, use this
parameter to specify it.
:returns:
An object with the same interface as the service that uses the given
TChannel to call the service.
"""
assert service_module, 'service_module is required'
service = service or '' # may be blank for non-hyperbahn use cases
if not thrift_service_name:
thrift_service_name = service_module.__name__.rsplit('.', 1)[-1]
method_names = get_service_methods(service_module.Iface)
def new(cls, tchannel, hostport=None, trace=False, protocol_headers=None):
"""
:param tchannel:
TChannel through which the requests will be sent.
:param hostport:
Address of the machine to which the requests will be sent, or None
if the TChannel will do peer selection on a per-request basis.
:param trace:
Whether tracing is enabled.
:param protocol_headers:
Protocol-level headers to send with the request.
"""
protocol_headers = protocol_headers or {}
protocol_headers['as'] = 'thrift'
return _ClientBase.__new__(
cls, tchannel, hostport, service, trace, protocol_headers
)
new.__name__ = '__new__'
methods = {'__new__': new}
for method_name in method_names:
methods[method_name] = generate_method(
service_module, thrift_service_name, method_name
)
return type(thrift_service_name + 'Client', (_ClientBase,), methods) | [
"def",
"client_for",
"(",
"service",
",",
"service_module",
",",
"thrift_service_name",
"=",
"None",
")",
":",
"assert",
"service_module",
",",
"'service_module is required'",
"service",
"=",
"service",
"or",
"''",
"# may be blank for non-hyperbahn use cases",
"if",
"no... | Build a client class for the given Thrift service.
The generated class accepts a TChannel and an optional hostport as
initialization arguments.
Given ``CommentService`` defined in ``comment.thrift`` and registered with
Hyperbahn under the name "comment", here's how this may be used:
.. code-block:: python
from comment import CommentService
CommentServiceClient = client_for("comment", CommentService)
@gen.coroutine
def post_comment(articleId, msg, hostport=None):
client = CommentServiceClient(tchannel, hostport)
yield client.postComment(articleId, CommentService.Comment(msg))
:param service:
Name of the Hyperbahn service being called. This is the name with
which the service registered with Hyperbahn.
:param service_module:
The Thrift-generated module for that service. This usually has the
same name as defined for the service in the IDL.
:param thrift_service_name:
If the Thrift service has a different name than its module, use this
parameter to specify it.
:returns:
An object with the same interface as the service that uses the given
TChannel to call the service. | [
"Build",
"a",
"client",
"class",
"for",
"the",
"given",
"Thrift",
"service",
"."
] | ee08cce6234f24fd2373774988186dd374306c43 | https://github.com/uber/tchannel-python/blob/ee08cce6234f24fd2373774988186dd374306c43/tchannel/thrift/client.py#L48-L114 | train | 36,594 |
uber/tchannel-python | tchannel/thrift/client.py | generate_method | def generate_method(service_module, service_name, method_name):
"""Generate a method for the given Thrift service.
:param service_module:
Thrift-generated service module
:param service_name:
Name of the Thrift service
:param method_name:
Method being called
"""
assert service_module
assert service_name
assert method_name
args_type = getattr(service_module, method_name + '_args')
result_type = getattr(service_module, method_name + '_result', None)
serializer = ThriftSerializer(result_type)
# oneway not currently supported
# TODO - write test for this
if result_type is None:
def not_supported(self, *args, **kwags):
raise OneWayNotSupportedError(
'TChannel+Thrift does not currently support oneway procedues'
)
return not_supported
result_spec = result_type.thrift_spec
# result_spec is a tuple of tuples in the form:
#
# (fieldId, fieldType, fieldName, ...)
#
# Where "..." is other information we don't care about right now.
#
# result_spec will be empty if there is no return value or exception for
# the method.
#
# Its first element, with field ID 0, contains the spec for the return
# value. It is None if the result type is void but the method may still
# throw exceptions.
#
# Elements after the first one are specs for the exceptions.
endpoint = '%s::%s' % (service_name, method_name)
@gen.coroutine
def send(self, *args, **kwargs):
params = inspect.getcallargs(
getattr(service_module.Iface, method_name), self, *args, **kwargs
)
params.pop('self') # self is already known
# $methodName_args is the implicit struct containing the various
# method parameters.
call_args = args_type()
for name, value in params.items():
setattr(call_args, name, value)
tracer = tracing.ClientTracer(channel=self.tchannel)
span, headers = tracer.start_span(
service=service_name, endpoint=method_name, headers={}
)
body = serializer.serialize_body(call_args)
header = serializer.serialize_header(headers)
# Glue for old API.
if hasattr(self.tchannel, 'request'):
tracing.apply_trace_flag(span, self.trace, True)
with span:
response = yield self.tchannel.request(
hostport=self.hostport, service=self.service
).send(
arg1=endpoint,
arg2=header,
arg3=body, # body
headers=self.protocol_headers,
)
body = yield response.get_body()
else:
with span:
response = yield self.tchannel.call(
scheme=schemes.THRIFT,
service=self.service,
arg1=endpoint,
arg2=header,
arg3=body,
hostport=self.hostport,
trace=self.trace,
tracing_span=span
# TODO: Need to handle these!
# headers=self.protocol_headers,
)
body = response.body
call_result = serializer.deserialize_body(body)
if not result_spec:
# void return type and no exceptions allowed
raise gen.Return(None)
for exc_spec in result_spec[1:]:
# May have failed with an exception
exc = getattr(call_result, exc_spec[2])
if exc is not None:
raise exc
if result_spec[0]:
# Non-void return type. Return the result.
success = getattr(call_result, result_spec[0][2])
if success is not None:
raise gen.Return(success)
else:
# No return type specified and no exceptions raised.
raise gen.Return(None)
# Expected a result but nothing was present in the object. Something
# went wrong.
from thrift import Thrift
raise Thrift.TApplicationException(
Thrift.TApplicationException.MISSING_RESULT,
'%s failed: did not receive a result as expected' % method_name
)
# TODO: We should probably throw a custom exception instead.
send.__name__ = method_name
return send | python | def generate_method(service_module, service_name, method_name):
"""Generate a method for the given Thrift service.
:param service_module:
Thrift-generated service module
:param service_name:
Name of the Thrift service
:param method_name:
Method being called
"""
assert service_module
assert service_name
assert method_name
args_type = getattr(service_module, method_name + '_args')
result_type = getattr(service_module, method_name + '_result', None)
serializer = ThriftSerializer(result_type)
# oneway not currently supported
# TODO - write test for this
if result_type is None:
def not_supported(self, *args, **kwags):
raise OneWayNotSupportedError(
'TChannel+Thrift does not currently support oneway procedues'
)
return not_supported
result_spec = result_type.thrift_spec
# result_spec is a tuple of tuples in the form:
#
# (fieldId, fieldType, fieldName, ...)
#
# Where "..." is other information we don't care about right now.
#
# result_spec will be empty if there is no return value or exception for
# the method.
#
# Its first element, with field ID 0, contains the spec for the return
# value. It is None if the result type is void but the method may still
# throw exceptions.
#
# Elements after the first one are specs for the exceptions.
endpoint = '%s::%s' % (service_name, method_name)
@gen.coroutine
def send(self, *args, **kwargs):
params = inspect.getcallargs(
getattr(service_module.Iface, method_name), self, *args, **kwargs
)
params.pop('self') # self is already known
# $methodName_args is the implicit struct containing the various
# method parameters.
call_args = args_type()
for name, value in params.items():
setattr(call_args, name, value)
tracer = tracing.ClientTracer(channel=self.tchannel)
span, headers = tracer.start_span(
service=service_name, endpoint=method_name, headers={}
)
body = serializer.serialize_body(call_args)
header = serializer.serialize_header(headers)
# Glue for old API.
if hasattr(self.tchannel, 'request'):
tracing.apply_trace_flag(span, self.trace, True)
with span:
response = yield self.tchannel.request(
hostport=self.hostport, service=self.service
).send(
arg1=endpoint,
arg2=header,
arg3=body, # body
headers=self.protocol_headers,
)
body = yield response.get_body()
else:
with span:
response = yield self.tchannel.call(
scheme=schemes.THRIFT,
service=self.service,
arg1=endpoint,
arg2=header,
arg3=body,
hostport=self.hostport,
trace=self.trace,
tracing_span=span
# TODO: Need to handle these!
# headers=self.protocol_headers,
)
body = response.body
call_result = serializer.deserialize_body(body)
if not result_spec:
# void return type and no exceptions allowed
raise gen.Return(None)
for exc_spec in result_spec[1:]:
# May have failed with an exception
exc = getattr(call_result, exc_spec[2])
if exc is not None:
raise exc
if result_spec[0]:
# Non-void return type. Return the result.
success = getattr(call_result, result_spec[0][2])
if success is not None:
raise gen.Return(success)
else:
# No return type specified and no exceptions raised.
raise gen.Return(None)
# Expected a result but nothing was present in the object. Something
# went wrong.
from thrift import Thrift
raise Thrift.TApplicationException(
Thrift.TApplicationException.MISSING_RESULT,
'%s failed: did not receive a result as expected' % method_name
)
# TODO: We should probably throw a custom exception instead.
send.__name__ = method_name
return send | [
"def",
"generate_method",
"(",
"service_module",
",",
"service_name",
",",
"method_name",
")",
":",
"assert",
"service_module",
"assert",
"service_name",
"assert",
"method_name",
"args_type",
"=",
"getattr",
"(",
"service_module",
",",
"method_name",
"+",
"'_args'",
... | Generate a method for the given Thrift service.
:param service_module:
Thrift-generated service module
:param service_name:
Name of the Thrift service
:param method_name:
Method being called | [
"Generate",
"a",
"method",
"for",
"the",
"given",
"Thrift",
"service",
"."
] | ee08cce6234f24fd2373774988186dd374306c43 | https://github.com/uber/tchannel-python/blob/ee08cce6234f24fd2373774988186dd374306c43/tchannel/thrift/client.py#L117-L242 | train | 36,595 |
uber/tchannel-python | tchannel/tornado/peer.py | Peer.connect | def connect(self):
"""Get a connection to this peer.
If an connection to the peer already exists (either incoming or
outgoing), that's returned. Otherwise, a new outgoing connection to
this peer is created.
:return:
A future containing a connection to this host.
"""
# Prefer incoming connections over outgoing connections.
if self.connections:
# First value is an incoming connection
future = gen.Future()
future.set_result(self.connections[0])
return future
if self._connecting:
# If we're in the process of connecting to the peer, just wait
# and re-use that connection.
return self._connecting
conn_future = self._connecting = self.connection_class.outgoing(
hostport=self.hostport,
process_name=self.tchannel.process_name,
serve_hostport=self.tchannel.hostport,
handler=self.tchannel.receive_call,
tchannel=self.tchannel,
)
def on_connect(_):
if not conn_future.exception():
# We don't actually need to handle the exception. That's on
# the caller.
connection = conn_future.result()
self.register_outgoing_conn(connection)
self._connecting = None
conn_future.add_done_callback(on_connect)
return conn_future | python | def connect(self):
"""Get a connection to this peer.
If an connection to the peer already exists (either incoming or
outgoing), that's returned. Otherwise, a new outgoing connection to
this peer is created.
:return:
A future containing a connection to this host.
"""
# Prefer incoming connections over outgoing connections.
if self.connections:
# First value is an incoming connection
future = gen.Future()
future.set_result(self.connections[0])
return future
if self._connecting:
# If we're in the process of connecting to the peer, just wait
# and re-use that connection.
return self._connecting
conn_future = self._connecting = self.connection_class.outgoing(
hostport=self.hostport,
process_name=self.tchannel.process_name,
serve_hostport=self.tchannel.hostport,
handler=self.tchannel.receive_call,
tchannel=self.tchannel,
)
def on_connect(_):
if not conn_future.exception():
# We don't actually need to handle the exception. That's on
# the caller.
connection = conn_future.result()
self.register_outgoing_conn(connection)
self._connecting = None
conn_future.add_done_callback(on_connect)
return conn_future | [
"def",
"connect",
"(",
"self",
")",
":",
"# Prefer incoming connections over outgoing connections.",
"if",
"self",
".",
"connections",
":",
"# First value is an incoming connection",
"future",
"=",
"gen",
".",
"Future",
"(",
")",
"future",
".",
"set_result",
"(",
"sel... | Get a connection to this peer.
If an connection to the peer already exists (either incoming or
outgoing), that's returned. Otherwise, a new outgoing connection to
this peer is created.
:return:
A future containing a connection to this host. | [
"Get",
"a",
"connection",
"to",
"this",
"peer",
"."
] | ee08cce6234f24fd2373774988186dd374306c43 | https://github.com/uber/tchannel-python/blob/ee08cce6234f24fd2373774988186dd374306c43/tchannel/tornado/peer.py#L135-L174 | train | 36,596 |
uber/tchannel-python | tchannel/tornado/peer.py | Peer.register_outgoing_conn | def register_outgoing_conn(self, conn):
"""Add outgoing connection into the heap."""
assert conn, "conn is required"
conn.set_outbound_pending_change_callback(self._on_conn_change)
self.connections.append(conn)
self._set_on_close_cb(conn)
self._on_conn_change() | python | def register_outgoing_conn(self, conn):
"""Add outgoing connection into the heap."""
assert conn, "conn is required"
conn.set_outbound_pending_change_callback(self._on_conn_change)
self.connections.append(conn)
self._set_on_close_cb(conn)
self._on_conn_change() | [
"def",
"register_outgoing_conn",
"(",
"self",
",",
"conn",
")",
":",
"assert",
"conn",
",",
"\"conn is required\"",
"conn",
".",
"set_outbound_pending_change_callback",
"(",
"self",
".",
"_on_conn_change",
")",
"self",
".",
"connections",
".",
"append",
"(",
"conn... | Add outgoing connection into the heap. | [
"Add",
"outgoing",
"connection",
"into",
"the",
"heap",
"."
] | ee08cce6234f24fd2373774988186dd374306c43 | https://github.com/uber/tchannel-python/blob/ee08cce6234f24fd2373774988186dd374306c43/tchannel/tornado/peer.py#L188-L194 | train | 36,597 |
uber/tchannel-python | tchannel/tornado/peer.py | Peer.register_incoming_conn | def register_incoming_conn(self, conn):
"""Add incoming connection into the heap."""
assert conn, "conn is required"
conn.set_outbound_pending_change_callback(self._on_conn_change)
self.connections.appendleft(conn)
self._set_on_close_cb(conn)
self._on_conn_change() | python | def register_incoming_conn(self, conn):
"""Add incoming connection into the heap."""
assert conn, "conn is required"
conn.set_outbound_pending_change_callback(self._on_conn_change)
self.connections.appendleft(conn)
self._set_on_close_cb(conn)
self._on_conn_change() | [
"def",
"register_incoming_conn",
"(",
"self",
",",
"conn",
")",
":",
"assert",
"conn",
",",
"\"conn is required\"",
"conn",
".",
"set_outbound_pending_change_callback",
"(",
"self",
".",
"_on_conn_change",
")",
"self",
".",
"connections",
".",
"appendleft",
"(",
"... | Add incoming connection into the heap. | [
"Add",
"incoming",
"connection",
"into",
"the",
"heap",
"."
] | ee08cce6234f24fd2373774988186dd374306c43 | https://github.com/uber/tchannel-python/blob/ee08cce6234f24fd2373774988186dd374306c43/tchannel/tornado/peer.py#L196-L202 | train | 36,598 |
uber/tchannel-python | tchannel/tornado/peer.py | Peer.outgoing_connections | def outgoing_connections(self):
"""Returns a list of all outgoing connections for this peer."""
# Outgoing connections are on the right
return list(
dropwhile(lambda c: c.direction != OUTGOING, self.connections)
) | python | def outgoing_connections(self):
"""Returns a list of all outgoing connections for this peer."""
# Outgoing connections are on the right
return list(
dropwhile(lambda c: c.direction != OUTGOING, self.connections)
) | [
"def",
"outgoing_connections",
"(",
"self",
")",
":",
"# Outgoing connections are on the right",
"return",
"list",
"(",
"dropwhile",
"(",
"lambda",
"c",
":",
"c",
".",
"direction",
"!=",
"OUTGOING",
",",
"self",
".",
"connections",
")",
")"
] | Returns a list of all outgoing connections for this peer. | [
"Returns",
"a",
"list",
"of",
"all",
"outgoing",
"connections",
"for",
"this",
"peer",
"."
] | ee08cce6234f24fd2373774988186dd374306c43 | https://github.com/uber/tchannel-python/blob/ee08cce6234f24fd2373774988186dd374306c43/tchannel/tornado/peer.py#L215-L221 | train | 36,599 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.