| code | docstring | source |
|---|---|---|
def _dequeue_return_value(self, tensors):
if self._names:
return {n: tensors[i] for i, n in enumerate(self._names)}
elif len(tensors) == 1:
return tensors[0]
else:
return tensors | Return the value to return from a dequeue op.
If the queue has names, return a dictionary with the
names as keys. Otherwise return either a single tensor
or a list of tensors depending on the length of `tensors`.
Args:
tensors: List of tensors from the dequeue op.
Returns:
A single tensor, a list of tensors, or a d... | github-repos |
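A minimal standalone sketch of the same dispatch, using plain Python lists in place of TensorFlow tensors (names and values here are hypothetical):

```python
def dequeue_return_value(tensors, names=None):
    # dict when the queue has names, bare element for a single tensor,
    # otherwise the list itself -- mirroring the method above.
    if names:
        return {n: tensors[i] for i, n in enumerate(names)}
    elif len(tensors) == 1:
        return tensors[0]
    return tensors

assert dequeue_return_value([1, 2], names=['a', 'b']) == {'a': 1, 'b': 2}
assert dequeue_return_value([7]) == 7
assert dequeue_return_value([1, 2, 3]) == [1, 2, 3]
```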
def CheckDependencies(self, verbose_output=True):
print('Checking availability and versions of dependencies.')
check_result = True
for module_name, dependency in sorted(self.dependencies.items()):
if module_name == 'sqlite3':
result, status_message = self._CheckSQLite3()
else:
... | Checks the availability of the dependencies.
Args:
verbose_output (Optional[bool]): True if output should be verbose.
Returns:
bool: True if the dependencies are available, False otherwise. | juraj-google-style |
def __init__(self, name_context, spec, counter_factory, state_sampler):
assert isinstance(name_context, common.NameContext)
self.name_context = name_context
self.spec = spec
self.counter_factory = counter_factory
self.execution_context = None
self.consumers = collections.defaultdict(list)
se... | Initializes a worker operation instance.
Args:
name_context: A NameContext instance, with the name information for this
operation.
spec: An operation_specs.Worker* instance.
counter_factory: The CounterFactory to use for our counters.
state_sampler: The StateSampler for the current operation. | github-repos |
def __init__(self, residual_restriction=None, process_continuation=None, future_output_watermark=None):
self.residual_restriction = residual_restriction
self.process_continuation = process_continuation
self.future_output_watermark = future_output_watermark | Returned as a result of an `invoke_process_element()` invocation.
Args:
residual_restriction: a restriction for the unprocessed part of the
element.
process_continuation: a `ProcessContinuation` if one was returned as the
last element of the SDF `process()` invocation.
future_output_watermark: output watermark of the r... | github-repos |
def _dict_to_tensor(self, x, k):
return array_ops_stack.stack([x[i] for i in range(k)]) | Convert a dictionary to a tensor.
Args:
x: A dictionary of length k.
k: Dimension of x.
Returns:
A tensor with the same dimension. | github-repos |
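The same conversion sketched with NumPy in place of `array_ops_stack` (a standalone approximation, not the TensorFlow implementation):

```python
import numpy as np

def dict_to_tensor(x, k):
    # Stack the k dict entries ordered by their integer keys 0..k-1.
    return np.stack([x[i] for i in range(k)])

t = dict_to_tensor({0: np.array([1.0, 2.0]), 1: np.array([3.0, 4.0])}, k=2)
assert t.shape == (2, 2)
```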
def log_(
message: str,
logger: logging.Logger,
level: int = logging.INFO,
extra: Optional[Dict] = None,
trim: bool = False,
) -> None:
if extra is None:
extra = {}
if message:
message = message.replace("\n", "").replace(" ", " ").replace("{ ", "{")
if trim:
... | Log a request or response
Args:
message: JSON-RPC request or response string.
logger:
level: Log level.
extra: More details to include in the log entry.
trim: Abbreviate log messages. | juraj-google-style |
def untar_to_directory(tarfile: str, directory: str, verbose: bool=False, gzipped: bool=False, skip_if_dir_exists: bool=True, run_func: Callable[([List[str]], Any)]=None, chdir_via_python: bool=True) -> None:
if (skip_if_dir_exists and os.path.isdir(directory)):
log.info('Skipping extraction of {} as direct... | Unpacks a TAR file into a specified directory.
Args:
tarfile: filename of the ``.tar`` file
directory: destination directory
verbose: be verbose?
gzipped: is the ``.tar`` also gzipped, e.g. a ``.tar.gz`` file?
skip_if_dir_exists: don't do anything if the destination directory
exists?
run_func: function to use to call... | codesearchnet |
def _get_new_node_defs(self):
node_def_bytes = self.node_file.read()
node_defs = []
cur_pos = 0
while cur_pos < len(node_def_bytes):
size_bytes = node_def_bytes[cur_pos:cur_pos + 8]
size, = struct.unpack('<Q', size_bytes)
cur_pos += 8
node_def = node_def_pb2.NodeDef()
... | Gets new NodeDefs written by the NodeFileWriter.
Returns:
A list of new NodeDefs in the file written by NodeDefWriter since the last
time this method was called. | github-repos |
def __init__(self, s3_conn, es_client):
self.s3_conn = s3_conn
self.es_client = es_client | Base class for Elasticsearch indexers
Subclasses implement the index setting definition and transformation of data.
The base class handles index management and bulk indexing with ES.
Args:
s3_conn - a boto s3 connection
es_client - an Elasticsearch indices client | juraj-google-style |
def _wait_for_any_job(provider, job_ids, poll_interval):
if (not job_ids):
return
while True:
tasks = provider.lookup_job_tasks({'*'}, job_ids=job_ids)
running_jobs = set()
failed_jobs = set()
for t in tasks:
status = t.get_field('task-status')
job... | Waits until any of the listed jobs is not running.
In particular, if any of the jobs sees one of its tasks fail,
we count the whole job as failing (but do not terminate the remaining
tasks ourselves).
Args:
provider: job service provider
job_ids: a list of job IDs (string) to wait for
poll_interval: integer seconds t... | codesearchnet |
def poll_stack(self):
logging.info('polling stack status, POLL_INTERVAL={}'.format(POLL_INTERVAL))
time.sleep(POLL_INTERVAL)
completed_states = ['CREATE_COMPLETE', 'UPDATE_COMPLETE', 'DELETE_COMPLETE']
stack_name = self._config.get('environment', {}).get('stack_name', None)
while True:
try:
... | Spin in a loop until the CloudFormation process either fails or succeeds
Args:
None
Returns:
Good or bad; True or False | codesearchnet |
def ops_used_by_graph_def(graph_def):
name_to_function = {}
for fun in graph_def.library.function:
name_to_function[fun.signature.name] = fun
used_ops = set()
functions_to_process = []
def mark_op_as_used(op):
if op not in used_ops and op in name_to_function:
functions_t... | Collect the list of ops used by a graph.
Does not validate that the ops are all registered.
Args:
graph_def: A `GraphDef` proto, as from `graph.as_graph_def()`.
Returns:
A list of strings, each naming an op used by the graph. | github-repos |
def Lookup(self, name):
if not self._name2item:
self._InitCache()
return self._name2item[name] | Convenience function: Look up a given name in the global namespace.
Tries to find a constant, function or class by this name.
Args:
name: Name to look up.
Returns:
A Constant, Function or Class.
Raises:
KeyError: if this identifier doesn't exist. | github-repos |
def get_field_to_observations_map(generator, query_for_tag=''):
def increment(stat, event, tag=''):
assert (stat in TRACKED_FIELDS)
field_to_obs[stat].append(Observation(step=event.step, wall_time=event.wall_time, tag=tag)._asdict())
field_to_obs = dict([(t, []) for t in TRACKED_FIELDS])
fo... | Return a field to `Observations` dict for the event generator.
Args:
generator: A generator over event protos.
query_for_tag: A string that, if specified, only creates observations for
events with this tag name.
Returns:
A dict mapping keys in `TRACKED_FIELDS` to an `Observation` list. | codesearchnet |
def _count_objs(self, obj, path=None, **kwargs):
sub_val = None
if isinstance(obj, dict):
for key, value in obj.items():
if isinstance(value, (list, dict)):
kwargs = self._count_objs(value,
se... | Cycles through the object and adds in count values
Args:
-----
obj: the object to parse
path: the current path
kwargs:
-------
current: a dictionary of counts for current call
sub_val: the value to use for subtotal aggregation | juraj-google-style |
def _evolve(self, state, qargs=None):
if qargs is not None:
return SuperOp(self)._evolve(state, qargs)
state = self._format_state(state)
if state.shape[0] != self._input_dim:
raise QiskitError(
"QuantumChannel input dimension is... | Evolve a quantum state by the QuantumChannel.
Args:
state (QuantumState): The input statevector or density matrix.
qargs (list): a list of QuantumState subsystem positions to apply
the operator on.
Returns:
QuantumState: the output quantum state.
Raises:
QiskitError: if the operator dimension does not match the
spec... | juraj-google-style |
def _handle_azure_exception():
try:
(yield)
except _AzureHttpError as exception:
if (exception.status_code in _ERROR_CODES):
raise _ERROR_CODES[exception.status_code](str(exception))
raise | Handles Azure exceptions and converts them to IO exceptions
Raises:
OSError subclasses: IO error. | codesearchnet |
def _run_using_default_session(operation, feed_dict, graph, session=None) -> None:
if session is None:
session = stack.get_default_session()
if session is None:
raise ValueError('Cannot execute operation using `run()`: No default session is registered. Use `with sess.as_default():` or pa... | Uses the default session to run "operation".
Args:
operation: The Operation to be run.
feed_dict: A dictionary that maps Tensor objects (or tensor names) to lists,
numpy ndarrays, TensorProtos, or strings.
graph: The graph in which "operation" is defined.
session: (Optional) A different session to use to run "operatio... | github-repos |
def FilterItems(self, filterFn, key=None):
with self._mutex:
if key:
if key in self._buckets:
return self._buckets[key].FilterItems(filterFn)
else:
return 0
else:
return sum(bucket.FilterItems(filterFn)
for bucket in self._buckets.value... | Filter items within a Reservoir, using a filtering function.
Args:
filterFn: A function that returns True for the items to be kept.
key: An optional bucket key to filter. If not specified, will filter
all buckets.
Returns:
The number of items removed. | juraj-google-style |
def Sample(self, tasks_status):
sample_time = time.time()
sample = '{0:f}\t{1:d}\t{2:d}\t{3:d}\t{4:d}\t{5:d}\n'.format(
sample_time, tasks_status.number_of_queued_tasks,
tasks_status.number_of_tasks_processing,
tasks_status.number_of_tasks_pending_merge,
tasks_status.number_... | Takes a sample of the status of queued tasks for profiling.
Args:
tasks_status (TasksStatus): status information about tasks. | juraj-google-style |
def make_qq_plot(kev, obs, mdl, unit, key_text):
import omega as om
kev = np.asarray(kev)
obs = np.asarray(obs)
mdl = np.asarray(mdl)
c_obs = np.cumsum(obs)
c_mdl = np.cumsum(mdl)
mx = max(c_obs[-1], c_mdl[-1])
p = om.RectPlot()
p.addXY([0, mx], [0, mx], '1:1')
p.addXY(c_md... | Make a quantile-quantile plot comparing events and a model.
*kev*
A 1D, sorted array of event energy bins measured in keV.
*obs*
A 1D array giving the number or rate of events in each bin.
*mdl*
A 1D array giving the modeled number or rate of events in each bin.
*unit*
Text describing the unit in which *obs* and *mdl*... | codesearchnet |
def parse_unique_urlencoded(content):
urlencoded_params = urllib.parse.parse_qs(content)
params = {}
for (key, value) in six.iteritems(urlencoded_params):
if (len(value) != 1):
msg = ('URL-encoded content contains a repeated value:%s -> %s' % (key, ', '.join(value)))
raise Va... | Parses unique key-value parameters from urlencoded content.
Args:
content: string, URL-encoded key-value pairs.
Returns:
dict, The key-value pairs from ``content``.
Raises:
ValueError: if one of the keys is repeated. | codesearchnet |
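A quick check of the stdlib primitive the helper builds on (`six.iteritems` reduces to `dict.items()` on Python 3); the flattening and the repeated-key error follow from the loop above:

```python
import urllib.parse

params = urllib.parse.parse_qs('code=abc&state=xyz')
assert params == {'code': ['abc'], 'state': ['xyz']}
# parse_unique_urlencoded flattens this to {'code': 'abc', 'state': 'xyz'}
# and raises ValueError for repeated keys such as 'a=1&a=2'.
```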
def batch_workflow_status(self, batch_workflow_id):
self.logger.debug('Get status of batch workflow: ' + batch_workflow_id)
url = '%(base_url)s/batch_workflows/%(batch_id)s' % {
'base_url': self.base_url, 'batch_id': batch_workflow_id
}
r = self.gbdx_connection.get(u... | Checks GBDX batch workflow status.
Args:
batch_workflow_id (str): Batch workflow id.
Returns:
Batch Workflow status (str). | juraj-google-style |
def get_user_groups(self, user):
self.project_service.set_auth(self._token_project)
return self.project_service.get_user_groups(user) | Get user's group memberships.
Args:
user (string): User name.
Returns:
(list): User's groups.
Raises:
requests.HTTPError on failure. | juraj-google-style |
def __getattr__(self, name):
self._conn.send((self._ACCESS, name))
return self._receive() | Request an attribute from the environment.
Note that this involves communication with the external process, so it can
be slow.
Args:
name: Attribute to access.
Returns:
Value of the attribute. | juraj-google-style |
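A minimal sketch of the same remote-attribute pattern using `multiprocessing.Pipe`; all names here are hypothetical, not the original environment API:

```python
import multiprocessing as mp

_ACCESS = 'access'

def _worker(conn, target):
    # Serve attribute requests from the parent process.
    while True:
        kind, name = conn.recv()
        if kind == _ACCESS:
            conn.send(getattr(target, name))

class Proxy:
    def __init__(self, conn):
        self._conn = conn

    def __getattr__(self, name):
        # Round-trip to the external process, so this can be slow.
        self._conn.send((_ACCESS, name))
        return self._conn.recv()

if __name__ == '__main__':
    parent, child = mp.Pipe()
    mp.Process(target=_worker, args=(child, complex(3, 4)), daemon=True).start()
    assert Proxy(parent).real == 3.0
```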
def get_mnemonics(self, mnemonics, uwis=None, alias=None):
uwis = (uwis or self.uwis)
wells = [w for w in self.__list if (w.uwi in uwis)]
all_wells = []
for w in wells:
this_well = [w.get_mnemonic(m, alias=alias) for m in mnemonics]
all_wells.append(this_well)
return all_wells | Looks at all the wells in turn and returns the highest thing
in the alias table.
Args:
mnemonics (list)
alias (dict)
Returns:
list. A list of lists. | codesearchnet |
def checkDeterminism(self, dataset_fn, expect_determinism, expected_elements):
if expect_determinism:
dataset = dataset_fn(100)
actual = self.getDatasetOutput(dataset)
self.assertAllEqual(expected_elements, actual)
return
for delay_ms in [10, 100, 1000, 20000, 100000]:
da... | Tests whether a dataset produces its elements deterministically.
`dataset_fn` takes a delay_ms argument, which tells it how long to delay
production of the first dataset element. This gives us a way to trigger
out-of-order production of dataset elements.
Args:
dataset_fn: A function taking a delay_ms argument.
expect... | github-repos |
def as_object(obj):
LOGGER.debug('as_object(%s)', obj)
if isinstance(obj, datetime.date):
return as_date(obj)
elif hasattr(obj, '__dict__'):
out = {k: obj.__dict__[k] for k in obj.__dict__ if (not k.startswith('_'))}
for (k, v) in ((p, getattr(obj, p)) for (p, _) in inspect.getmember... | Return a JSON serializable type for ``obj``.
Args:
obj (:py:class:`object`): the object to be serialized.
Raises:
:py:class:`AttributeError`:
when ``obj`` is not a Python object.
Returns:
(dict): JSON serializable type for the given object. | codesearchnet |
def compareBulk(self, retina_name, body):
resourcePath = '/compare/bulk'
method = 'POST'
queryParams = {}
headerParams = {'Accept': 'Application/json', 'Content-Type': 'application/json'}
postData = None
queryParams['retina_name'] = retina_name
postDat... | Bulk compare
Args:
retina_name, str: The retina name (required)
body, ExpressionOperation: Bulk comparison of elements 2 by 2 (required)
Returns: Array[Metric] | juraj-google-style |
def add_report(self, specification_name, report):
self._reports[specification_name] = report
self._total = self._total + report.testsRun
self._failures = self._failures + len(report.failures)
self._errors = self._errors + len(report.errors)
self._success = self._total -... | Adds a given report with the given specification_name as key
to the reports list and computes the number of successes, failures
and errors
Args:
specification_name: string representing the specification (with ".spec")
report: The | juraj-google-style |
def _extend_before(self, other):
other_num_lines = other.num_lines()
self._lines = other.lines + self._lines
new_font_attr_segs = {}
for line_index in self.font_attr_segs:
new_font_attr_segs[other_num_lines + line_index] = self.font_attr_segs[line_index]
new_font_attr_segs.update(other.font_... | Add another RichTextLines object to the front.
Args:
other: (RichTextLines) The other object to add to the front to this
object. | github-repos |
def join(self, timeout_s=None):
if (not self.thread):
return False
self.thread.join(timeout_s)
return self.running | Joins, blocking until the interval ends or the timeout is reached.
Args:
timeout_s: The time in seconds to wait, defaults to forever.
Returns:
True if the interval is still running and we reached the timeout. | codesearchnet |
def add_to_dumper(dumper: Type, classes: List[Type]) -> None:
if not isinstance(classes, list):
classes = [classes]
for class_ in classes:
if issubclass(class_, enum.Enum):
dumper.add_representer(class_, EnumRepresenter(class_))
elif issubclass(class_, str) or issubcla... | Register user-defined classes with the Dumper.
This enables the Dumper to write objects of your classes to a \
YAML file. Note that all the arguments are types, not instances!
Args:
dumper: Your dumper class(!), derived from yatiml.Dumper
classes: One or more classes to add. | juraj-google-style |
def _forward_backward_log(state_trans_log_probs, initial_state_log_probs, final_state_log_probs, observed_log_probs, sequence_length):
if state_trans_log_probs.shape.ndims == 2:
perm = [1, 0]
elif state_trans_log_probs.shape.ndims == 3:
perm = [0, 2, 1]
else:
raise ValueError(f'Rank ... | Forward-backward algorithm computed in log domain.
Args:
state_trans_log_probs: tensor of shape [states, states] or if different
transition matrix per batch [batch_size, states, states]
initial_state_log_probs: tensor of shape [batch_size, states]
final_state_log_probs: tensor of shape [batch_size, states]
observed_lo... | github-repos |
def get_blocks(self, block_structure=None):
if (block_structure is None):
block_structure = self.block_structure
try:
return self._get_blocks(block_structure)
except IncompatibleBlockStructures as e:
raise e | For a reducible circuit, get a sequence of subblocks that when
concatenated again yield the original circuit. The block structure
given has to be compatible with the circuits actual block structure,
i.e. it can only be more coarse-grained.
Args:
block_structure (tuple): The block structure according to which the
subb... | codesearchnet |
def quantile_for_list_of_values(self, **kwargs):
if self._is_transposed:
kwargs['axis'] = (kwargs.get('axis', 0) ^ 1)
return self.transpose().quantile_for_list_of_values(**kwargs)
axis = kwargs.get('axis', 0)
q = kwargs.get('q')
numeric_only = kwargs.get('numeric_only', True)
assert ... | Returns Manager containing quantiles along an axis for numeric columns.
Returns:
DataManager containing quantiles of original DataManager along an axis. | codesearchnet |
def find_dependencies(self, dataset_keys, **dfilter):
unknown_datasets = set()
for key in dataset_keys.copy():
(n, unknowns) = self._find_dependencies(key, **dfilter)
dataset_keys.discard(key)
if (n is not None):
dataset_keys.add(n.name)
if unknowns:
unkno... | Create the dependency tree.
Args:
dataset_keys (iterable): Strings or DatasetIDs to find dependencies for
**dfilter (dict): Additional filter parameters. See
`satpy.readers.get_key` for more details.
Returns:
(Node, set): Root node of the dependency tree and a set of unknown datasets | codesearchnet |
def get(self) -> Union[(Event, None)]:
message = self._queue.get_message()
if (message and (message['type'] == 'message')):
event_id = DB.get_event(self._pub_key, self._processed_key)
event_data_str = DB.get_hash_value(self._data_key, event_id)
event_dict = ast.literal_eval(event_data_st... | Get the latest event from the queue.
Call this method to query the queue for the latest event.
If no event has been published None is returned.
Returns:
Event or None | codesearchnet |
def GetTSKFileByPathSpec(self, path_spec):
inode = getattr(path_spec, 'inode', None)
location = getattr(path_spec, 'location', None)
if inode is not None:
tsk_file = self._tsk_file_system.open_meta(inode=inode)
elif location is not None:
tsk_file = self._tsk_file_system.open(... | Retrieves the SleuthKit file object for a path specification.
Args:
path_spec (PathSpec): path specification.
Returns:
pytsk3.File: TSK file.
Raises:
PathSpecError: if the path specification is missing inode and location. | juraj-google-style |
def pdb_downloader_and_metadata(self, outdir=None, pdb_file_type=None, force_rerun=False):
if (not outdir):
outdir = self.structure_dir
if (not outdir):
raise ValueError('Output directory must be specified')
if (not pdb_file_type):
pdb_file_type = self.pdb_file_type
if (s... | Download ALL mapped experimental structures to the protein structures directory.
Args:
outdir (str): Path to output directory, if protein structures directory not set or other output directory is
desired
pdb_file_type (str): Type of PDB file to download, if not already set or other format is desired
force_rerun (bool)... | codesearchnet |
def confirmdir(self, target_directory):
try:
directory = self.resolve(target_directory)
except IOError as exc:
self.raise_os_error(exc.errno, target_directory)
if not directory.st_mode & S_IFDIR:
if self.is_windows_fs and IS_PY2:
error... | Test that the target is actually a directory, raising OSError
if not.
Args:
target_directory: Path to the target directory within the fake
filesystem.
Returns:
The FakeDirectory object corresponding to target_directory.
Raises:
OSError: if the target is not a directory. | juraj-google-style |
def args_to_dict(args):
arguments = dict()
for arg in args.split(','):
(key, value) = arg.split('=')
arguments[key] = value
return arguments | Convert command line arguments in a comma separated string to a dictionary
Args:
args (str): Command line arguments
Returns:
DictUpperBound[str,str]: Dictionary of arguments | codesearchnet |
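Given the definition above, its expected behavior on a hypothetical argument string:

```python
assert args_to_dict('name=web,env=prod') == {'name': 'web', 'env': 'prod'}
# Each comma-separated item must contain exactly one '=';
# anything else makes the tuple unpacking raise ValueError.
```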
def _compile_expression(self,
expr: Expression,
scope: Dict[str, TensorFluent],
batch_size: Optional[int] = None,
noise: Optional[List[tf.Tensor]] = None) -> TensorFluent:
etype2compiler = {
... | Compile the expression `expr` into a TensorFluent
in the given `scope` with optional batch size.
Args:
expr (:obj:`rddl2tf.expr.Expression`): A RDDL expression.
scope (Dict[str, :obj:`rddl2tf.fluent.TensorFluent`]): A fluent scope.
batch_size (Optional[size]): The batch size.
Returns:
:obj:`rddl2tf.fluent.TensorFluen... | juraj-google-style |
def returns_collection(self) -> bool:
return self.cardinality == Cardinality.COLLECTION or self.cardinality == Cardinality.CHILD_OF_COLLECTION | Indicates if the data type will evaluate to a collection.
Returns:
True in the following circumstances
- The data type represents an element with cardinality greater than one.
- The data type represents an element with a cardinality less than or
equal to one, but that element is a child of a collection and will
evalua... | github-repos |
def generate_entry_label(entry):
if isinstance(entry, MultiEntry):
return ' + '.join([latexify_ion(e.name) for e in entry.entry_list])
else:
return latexify_ion(latexify(entry.name)) | Generates a label for the pourbaix plotter
Args:
entry (PourbaixEntry or MultiEntry): entry to get a label for | codesearchnet |
def level_cond_prior(prior_dist, z, latent, hparams, state):
latent_dist_encoder = hparams.get('latent_dist_encoder', None)
latent_skip = hparams.get('latent_skip', False)
if (latent_dist_encoder == 'pointwise'):
last_latent = latent
merge_std = hparams.level_scale
latent_shape = com... | Returns a conditional prior for each level.
Args:
prior_dist: Distribution conditioned on the previous levels.
z: Tensor, output of the previous levels.
latent: Tensor or a list of tensors to condition the latent_distribution.
hparams: next_frame_glow hparams.
state: Current LSTM state. Used only if hparams.latent_dis... | codesearchnet |
def credits(self, **kwargs):
path = self._get_series_id_season_number_episode_number_path('credits')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response | Get the TV episode credits by combination of season and episode number.
Returns:
A dict respresentation of the JSON returned from the API. | codesearchnet |
def save_summaries(frames, keys, selected_summaries, batch_dir, batch_name):
if (not frames):
logger.info('Could not save summaries - no summaries to save!')
logger.info('You have no frames - aborting')
return None
if (not keys):
logger.info('Could not save summaries - no summaries to sa... | Writes the summaries to csv-files
Args:
frames: list of ``cellpy`` summary DataFrames
keys: list of indexes (typically run-names) for the different runs
selected_summaries: list defining which summary data to save
batch_dir: directory to save to
batch_name: the batch name (will be used for making the file-name(s))
Re... | codesearchnet |
def DeleteRecords(cls, ids, token):
with data_store.DB.GetMutationPool() as mutation_pool:
mutation_pool.QueueDeleteRecords(ids) | Delete records identified by ids.
Args:
ids: A list of ids provided by ClaimRecords.
token: The database access token to delete with.
Raises:
LockError: If the queue is not locked. | codesearchnet |
def __str__(self):
text = super(Baken, self).__format__('dms')
if self._locator:
text = '%s (%s)' % (self._locator, text)
return text | Pretty printed location string.
Args:
mode (str): Coordinate formatting system to use
Returns:
str: Human readable string representation of ``Baken`` object | juraj-google-style |
def __init__(self, user_pipeline: beam.Pipeline, pcolls: Optional[Set[beam.pvalue.PCollection]]=None):
assert not pcolls or all((pcoll.pipeline is user_pipeline for pcoll in pcolls)), 'All %s need to belong to %s' % (pcolls, user_pipeline)
self._user_pipeline = user_pipeline
self._pcolls = pcolls
self._... | Initializes a pipeline for augmenting interactive flavor.
Args:
user_pipeline: a beam.Pipeline instance defined by the user.
pcolls: cacheable pcolls to be computed/retrieved. If the set is
empty, all intermediate pcolls assigned to variables are applicable. | github-repos |
def start(self, extra_args='', tag=''):
if self.started:
return
utils.create_dir(self.log_path)
if tag:
tag = tag + ','
out_file_name = 'IPerfServer,{},{}{}.log'.format(self.port, tag, len(self.log_files))
full_out_path = os.path.join(self.log_path, out_file_name)
cmd = '%s %s > ... | Starts iperf server on specified port.
Args:
extra_args: A string representing extra arguments to start iperf
server with.
tag: Appended to log file name to identify logs from different
iperf runs. | github-repos |
def _bits_in_condition(self, cond):
all_bits = []
if (cond is not None):
all_bits.extend([(cond[0], j) for j in range(self.cregs[cond[0].name].size)])
return all_bits | Return a list of bits in the given condition.
Args:
cond (tuple or None): optional condition (ClassicalRegister, int)
Returns:
list[(ClassicalRegister, idx)]: list of bits | codesearchnet |
def heightmap_count_cells(hm: np.ndarray, mi: float, ma: float) -> int:
return int(lib.TCOD_heightmap_count_cells(_heightmap_cdata(hm), mi, ma)) | Return the number of map cells which value is between ``mi`` and ``ma``.
Args:
hm (numpy.ndarray): A numpy.ndarray formatted for heightmap functions.
mi (float): The lower bound.
ma (float): The upper bound.
Returns:
int: The count of values which fall between ``mi`` and ``ma``.
.. deprecated:: 8.1
Can be replaced b... | juraj-google-style |
def value_loss_given_predictions(value_prediction, rewards, reward_mask, gamma=0.99):
(B, T) = rewards.shape
assert ((B, T) == reward_mask.shape)
assert ((B, (T + 1), 1) == value_prediction.shape)
value_prediction = np.squeeze(value_prediction, axis=2)
value_prediction = value_prediction[:, :-1]... | Computes the value loss given the prediction of the value function.
Args:
value_prediction: np.ndarray of shape (B, T+1, 1)
rewards: np.ndarray of shape (B, T) of rewards.
reward_mask: np.ndarray of shape (B, T), the mask over rewards.
gamma: float, discount factor.
Returns:
The average L2 value loss, averaged over i... | codesearchnet |
def _checkResponseWriteData(payload, writedata):
_checkString(payload, minlength=4, description='payload')
_checkString(writedata, minlength=2, maxlength=2, description='writedata')
BYTERANGE_FOR_WRITEDATA = slice(2, 4)
receivedWritedata = payload[BYTERANGE_FOR_WRITEDATA]
if receivedWritedat... | Check that the write data as given in the response is correct.
The bytes 2 and 3 (zero based counting) in the payload holds the write data.
Args:
* payload (string): The payload
* writedata (string): The data to write, length should be 2 bytes.
Raises:
TypeError, ValueError | juraj-google-style |
def __gt__(self, other):
if other.__class__ is not self.__class__:
return NotImplemented
return not self <= other | Test if self is greater than an object of the same class.
Args:
other: The object to compare against.
Returns:
True if self is greater than other; else False.
Raises:
TypeError: Raised if the objects are not of the same class. | juraj-google-style |
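A standalone illustration of deriving `>` from `<=` with the same class guard (the `Version` class is hypothetical):

```python
class Version:
    def __init__(self, n):
        self.n = n

    def __le__(self, other):
        if other.__class__ is not self.__class__:
            return NotImplemented
        return self.n <= other.n

    def __gt__(self, other):
        # Same pattern as above: delegate to __le__ and negate.
        if other.__class__ is not self.__class__:
            return NotImplemented
        return not self <= other

assert Version(2) > Version(1)
assert not (Version(1) > Version(1))
```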
def menu(title, options, cancel_label="Cancel", flag_allow_empty=False, flag_cancel=True, ch='.'):
num_options, flag_ok = len(options), 0
option = None
min_allowed = 0 if flag_cancel else 1
while True:
print("")
for line in format_box(title, ch):
print(" "+line)
for i, s in enumera... | Text menu.
Arguments:
title -- menu title, to appear at the top
options -- sequence of strings
cancel_label='Cancel' -- label to show at last "zero" option
flag_allow_empty=0 -- Whether to allow empty option
flag_cancel=True -- whether there is a "0 - Cancel" option
ch="." -- character to use to draw frame around titl... | juraj-google-style |
def category(msg):
if ((common.typecode(msg) < 1) or (common.typecode(msg) > 4)):
raise RuntimeError(('%s: Not an identification message' % msg))
msgbin = common.hex2bin(msg)
return common.bin2int(msgbin[5:8]) | Aircraft category number
Args:
msg (string): 28 bytes hexadecimal message string
Returns:
int: category number | codesearchnet |
def _iterdump(self, file_name, headers=None):
if headers is None:
headers = ["Discharge_Capacity", "Charge_Capacity"]
step_txt = self.headers_normal['step_index_txt']
point_txt = self.headers_normal['data_point_txt']
cycle_txt = self.headers_normal['cycle_index_txt'... | Function for dumping values from a file.
Should only be used by developers.
Args:
file_name: name of the file
headers: list of headers to pick
default:
["Discharge_Capacity", "Charge_Capacity"]
Returns: pandas.DataFrame | juraj-google-style |
def installed_capabilities(image=None):
if (salt.utils.versions.version_cmp(__grains__['osversion'], '10') == (- 1)):
raise NotImplementedError('`installed_capabilities` is not available on this version of Windows: {0}'.format(__grains__['osversion']))
return _get_components('Capability Identity', 'Capa... | List the capabilities installed on the system
Args:
image (Optional[str]): The path to the root directory of an offline
Windows image. If `None` is passed, the running operating system is
targeted. Default is None.
Raises:
NotImplementedError: For all versions of Windows that are not Windows 10
and later. Server edit... | codesearchnet |
def get_existing_pipelines(self):
url = '{0}/applications/{1}/pipelineConfigs'.format(API_URL, self.app_name)
resp = requests.get(url, verify=GATE_CA_BUNDLE, cert=GATE_CLIENT_CERT)
assert resp.ok, 'Failed to lookup pipelines for {0}: {1}'.format(self.app_name, resp.text)
return resp.json() | Get existing pipeline configs for specific application.
Returns:
str: Pipeline config json | codesearchnet |
def get_lowest_values(self, count):
count = int(count)
assert (count <= len(self._values)), 'count must be <= to Data Collection len. {} > {}.'.format(count, len(self._values))
assert (count > 0), 'count must be greater than 0. Got {}.'.format(count)
lowest_values = sorted(self._values)[0:count]
low... | Get a list of the x lowest values of the Data Collection and their indices.
This is useful for situations where one needs to know the times of
the year when the smallest values of a data collection occur.
Args:
count: Integer representing the number of lowest values to account for.
Returns:
highest_values: The n... | codesearchnet |
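The selection above reduces to a sort-and-slice; a standalone check with hypothetical values:

```python
values = [7.0, 1.0, 5.0, 3.0]
count = 2
# Matches sorted(self._values)[0:count] in the method above.
assert sorted(values)[0:count] == [1.0, 3.0]
```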
def GetRawDevice(path):
path = CanonicalPathToLocalPath(path)
try:
path = win32file.GetLongPathName(path)
except pywintypes.error:
pass
try:
mount_point = win32file.GetVolumePathName(path)
except pywintypes.error as details:
logging.info("path not found. %s", details)
raise IOError(... | Resolves the raw device that contains the path.
Args:
path: A path to examine.
Returns:
A pathspec to read the raw device as well as the modified path to read
within the raw device. This is usually the path without the mount point.
Raises:
IOError: if the path does not exist or some unexpected behaviour occurs. | juraj-google-style |
def convert_frame_change(self, shift, instruction):
command_dict = {
'name': 'fc',
't0': shift+instruction.start_time,
'ch': instruction.channels[0].name,
'phase': instruction.command.phase
}
return self._qobj_model(**command_dict) | Return converted `FrameChangeInstruction`.
Args:
shift(int): Offset time.
instruction (FrameChangeInstruction): frame change instruction.
Returns:
dict: Dictionary of required parameters. | juraj-google-style |
def potential_purviews(self, direction, mechanism, purviews=False):
if purviews is False:
purviews = self.network.potential_purviews(direction, mechanism)
purviews = [purview for purview in purviews
if set(purview).issubset(self.node_indices)... | Return all purviews that could belong to the |MIC|/|MIE|.
Filters out trivially-reducible purviews.
Args:
direction (Direction): |CAUSE| or |EFFECT|.
mechanism (tuple[int]): The mechanism of interest.
Keyword Args:
purviews (tuple[int]): Optional subset of purviews of interest. | juraj-google-style |
def _make_pr_entry(self, step, wall_time, data_array, thresholds):
true_positives = [int(v) for v in data_array[metadata.TRUE_POSITIVES_INDEX]]
false_positives = [int(v) for v in data_array[metadata.FALSE_POSITIVES_INDEX]]
tp_index = metadata.TRUE_POSITIVES_INDEX
fp_index = metadata.FALSE_POSITIVES_INDE... | Creates an entry for PR curve data. Each entry corresponds to 1 step.
Args:
step: The step.
wall_time: The wall time.
data_array: A numpy array of PR curve data stored in the summary format.
thresholds: An array of floating point thresholds.
Returns:
A PR curve entry. | codesearchnet |
def get_metadata(self):
if (self._metadata is None):
self._metadata = self._source.get_metadata(self._handle)
return self._metadata | Returns the associated metadata info for this template version
Returns:
dict: Metadata for this version | codesearchnet |
def load_values(self, dictionary, as_defaults=False, flat=False):
if flat:
separator = self.settings.str_path_separator
flat_dictionary = dictionary
dictionary = collections.OrderedDict()
for k, v in flat_dictionary.items():
k... | Import config values from a dictionary.
When ``as_defaults`` is set to ``True``, the values
imported will be set as defaults. This can be used to
declare the sections and items of configuration.
Values of sections and items in ``dictionary`` can be
dictionaries as well as instances of :class:`.Item` and
:class:`.Confi... | juraj-google-style |
def filter_list(lst, takeout, case_sensitive=True):
takeout = force_list(takeout)
if (not case_sensitive):
lst = [x.lower() for x in lst]
takeout = [y.lower() for y in takeout]
return [x for x in lst if (x not in takeout)] | Return a modified list removing items specified.
Args:
lst: Original list of values
takeout: Object or objects to remove from lst
case_sensitive: if the search should be case sensitive
Returns:
list: Filtered list of values | codesearchnet |
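A standalone rendering of the same logic (`force_list` replaced by a plain wrap for the sketch); note the case-insensitive branch returns the lowercased values:

```python
def filter_list(lst, takeout, case_sensitive=True):
    takeout = takeout if isinstance(takeout, list) else [takeout]
    if not case_sensitive:
        lst = [x.lower() for x in lst]
        takeout = [y.lower() for y in takeout]
    return [x for x in lst if x not in takeout]

assert filter_list(['a', 'B', 'c'], 'b') == ['a', 'B', 'c']  # no exact match
assert filter_list(['a', 'B', 'c'], 'b', case_sensitive=False) == ['a', 'c']
```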
def add(self, key, value, expire=0, noreply=None):
if (noreply is None):
noreply = self.default_noreply
return self._store_cmd(b'add', {key: value}, expire, noreply)[key] | The memcached "add" command.
Args:
key: str, see class docs for details.
value: str, see class docs for details.
expire: optional int, number of seconds until the item is expired
from the cache, or zero for no expiry (the default).
noreply: optional bool, True to not wait for the reply (defaults to
self.default_norepl... | codesearchnet |
def max_pool(x, pool_size, strides, padding):
x = tf_np.asarray(x)
return tf_np.asarray(nn_ops.pool(input=x, window_shape=pool_size, pooling_type='MAX', strides=strides, padding=padding)) | Performs an N-D max pooling.
Args:
x: ndarray of rank N+2, of shape `[batch_size] + input_spatial_shape +
[num_channels]`. Pooling happens over the spatial dimensions only.
pool_size: sequence of N ints.
strides: sequence of N ints.
padding: a string, the padding algorithm. Must be "SAME" or "VALID".
Returns:
An (N+2... | github-repos |
def pprint(self, initials_only=False):
last_name = self.last
suffixes = ((', ' + self.suffix) if self.suffix else '')
if (initials_only and (last_name != u'')):
first_names = self.first_initials
else:
first_names = self.first
return u'{} {}{}'.format(first_names, last_name, suffixes)... | Pretty print the name.
Args:
initials_only (bool): ``True`` if we want the first names to be displayed with
only the initial followed by a dot. ``False`` otherwise.
Examples:
>>> ParsedName('Lieber, Stanley Martin').pprint()
u'Stanley Martin Lieber'
>>> ParsedName('Lieber, Stanley Martin').pprint(initials_only=True)
... | codesearchnet |
def valueWritePreprocessor(valueString, replaceParamsFile=None):
if (type(valueString) is bool):
log.warning('Only numerical variable types can be handled by the valueWritePreprocessor function.')
return valueString
variableString = valueString
if (replaceParamsFile is not None):
if (... | Look up variable name in replace param file for the negative id given and return it.
Args:
valueString (str): String representing the value to be preprocessed.
replaceParamsFile (gsshapy.orm.ReplaceParamFile, optional): Instance of the replace param file. Required if
replacement variables are included in the project.
... | codesearchnet |
def all_max(tensors):
return _apply_all_reduce('max', tensors) | Returns a list of tensors with the all-reduce max across `tensors`.
The computation is done with an all-reduce operation, so if only some of the
returned tensors are evaluated then the computation will hang.
Args:
tensors: The input tensors across which to reduce; must be assigned
to GPU devices.
Returns:
List of te... | github-repos |
def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
if already_has_special_tokens:
return super().get_special_tokens_mask(token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
if token_ids_1 is not None:
return [1] + [0... | Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `prepare_for_model` or `encode_plus` methods.
Args:
token_ids_0 (`List[int]`):
List of IDs.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pai... | github-repos |
def add_file(profile, branch, file_path, file_contents, is_executable=False, commit_message=None):
branch_sha = get_branch_sha(profile, branch)
tree = get_files_in_branch(profile, branch_sha)
new_tree = add_file_to_tree(tree, file_path, file_contents, is_executable)
data = trees.create_tree(profile, new... | Add a file to a branch.
Args:
profile
A profile generated from ``simplygithub.authentication.profile``.
Such profiles tell this module (i) the ``repo`` to connect to,
and (ii) the ``token`` to connect with.
branch
The name of a branch.
file_path
The path of the new file in the tree.
file_contents
The (UTF-8 encode... | codesearchnet |
def _image_url(array, fmt='png', mode='data', quality=90, domain=None):
supported_modes = ('data',)
if (mode not in supported_modes):
message = "Unsupported mode '%s', should be one of '%s'."
raise ValueError(message % (mode, supported_modes))
image_data = serialize_array(array, fmt=fmt, quality=q... | Create a data URL representing an image from a PIL.Image.
Args:
image: a numpy array
mode: presently only supports "data" for data URL
Returns:
URL representing image | codesearchnet |
def _embedding_lookup_for_sparse_tensor(self, inp: sparse_tensor.SparseTensor, weight: Optional[sparse_tensor.SparseTensor], table: tf_variables.Variable, feature: tpu_embedding_v2_utils.FeatureConfig) -> tensor.Tensor:
def sparse_to_dense_computation(inp, weight):
if weight is None:
weight = s... | Embedding lookup for sparse tensor based on its feature config.
Args:
inp: a single SparseTensor input.
weight: None or SparseTensor which has the same shape of the input.
table: a table variable.
feature: a feature config.
Returns:
Embedding lookup result. | github-repos |
def reduce_sum(self, x):
return self.reduce(lambda y: math_ops.reduce_sum(y, axis=0), x) | Performs a sum reduction on `x` across pfor iterations.
Note that this currently may not work inside a control flow construct.
Args:
x: an unvectorized Tensor.
Returns:
A Tensor that has same rank as `x`. The value is the sum of the values
of `x` across the pfor iterations. | github-repos |
def get_anyres_image_grid_shape(image_size, grid_pinpoints, patch_size):
if not isinstance(grid_pinpoints, list):
raise TypeError('grid_pinpoints should be a list of tuples or lists')
if not isinstance(image_size, (list, tuple)):
if not isinstance(image_size, (torch.Tensor, np.ndarray)):
... | Calculate the shape of the image patch grid after the preprocessing for images of any resolution.
Args:
image_size (`tuple`):
The size of the input image in the format (width, height).
grid_pinpoints (`List`):
A list containing possible resolutions. Each item in the list should be a tuple or list
of the form `(height,... | github-repos |
def get_sns_topic_arn(topic_name, account, region):
if ((topic_name.count(':') == 5) and topic_name.startswith('arn:aws:sns:')):
return topic_name
session = boto3.Session(profile_name=account, region_name=region)
sns_client = session.client('sns')
topics = sns_client.list_topics()['Topics']
... | Get SNS topic ARN.
Args:
topic_name (str): Name of the topic to lookup.
account (str): Environment, e.g. dev
region (str): Region name, e.g. us-east-1
Returns:
str: ARN for requested topic name | codesearchnet |
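The early return accepts anything already shaped like an ARN; a quick check of that predicate with a hypothetical ARN (the full lookup path requires boto3 credentials):

```python
arn = 'arn:aws:sns:us-east-1:123456789012:alerts'
# Same guard as above: five colons and the SNS prefix short-circuit the lookup.
assert arn.count(':') == 5 and arn.startswith('arn:aws:sns:')
```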
def convert_to_torch_compatible(cls, x):
return x | Convert a tensor to something that the Torch backend can consume.
This can be a Torch tensor, NumPy array or any other type of tensor that
`keras.backend.torch.core.convert_to_tensor()` can consume.
Only called after slicing using `__getitem__`.
Used to densify sparse tensors and ragged tensors.
Args:
x: the tensor t... | github-repos |
def get_block_header(self, block_hash, id=None, endpoint=None):
return self._call_endpoint(GET_BLOCK_HEADER, params=[block_hash, 1], id=id, endpoint=endpoint) | Get the corresponding block header information according to the specified script hash.
Args:
block_hash: (str) the block scripthash (e.g. 'a5508c9b6ed0fc09a531a62bc0b3efcb6b8a9250abaf72ab8e9591294c1f6957')
id: (int, optional) id to use for response tracking
endpoint: (RPCEndpoint, optional) endpoint to specify to use
... | juraj-google-style |
def rationalize(flt: float, denominators: Set[int]=None) -> Fraction:
if (denominators is None):
denominators = _DENOMINATORS
frac = Fraction.from_float(flt).limit_denominator()
if (frac.denominator not in denominators):
raise ValueError('Cannot rationalize')
return frac | Convert a floating point number to a Fraction with a small
denominator.
Args:
flt: A floating point number
denominators: Collection of standard denominators. Default is
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 16, 32, 64, 128, 256, 512,
1024, 2048, 4096, 8192
Raises:
ValueError: If cannot rationalize float | codesearchnet |
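A standalone check of the underlying stdlib step (the default denominator set listed above includes 4, so 0.75 rationalizes cleanly):

```python
from fractions import Fraction

frac = Fraction.from_float(0.75).limit_denominator()
assert frac == Fraction(3, 4)
# rationalize(0.75) returns Fraction(3, 4); a float whose reduced
# denominator falls outside the allowed set raises ValueError.
```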
def deploy_ray_axis_func(axis, func, num_splits, kwargs, *partitions):
table = concat_arrow_table_partitions(axis, partitions)
try:
result = func(table, **kwargs)
except Exception:
result = pyarrow.Table.from_pandas(func(table.to_pandas(), **kwargs))
return split_arrow_table_result(... | Deploy a function along a full axis in Ray.
Args:
axis: The axis to perform the function along.
func: The function to perform.
num_splits: The number of splits to return
(see `split_result_of_axis_func_pandas`)
kwargs: A dictionary of keyword arguments.
partitions: All partitions that make up the full axis (row or col... | juraj-google-style |
def save_headers(cls, filename: str, response: HTTPResponse):
new_filename = (filename + '-new')
with open(new_filename, 'wb') as new_file:
new_file.write(response.header())
with wpull.util.reset_file_offset(response.body):
response.body.seek(0)
shutil.copyfileobj(response.body, new_fi... | Prepend the HTTP response header to the file.
Args:
filename: The path of the file
response: Response | codesearchnet |
def new_random_wallet(cls, user_entropy=None, network=BitcoinMainNet):
seed = str(urandom(64))
seed += str(int((time.time() * (10 ** 6))))
if user_entropy:
user_entropy = str(user_entropy)
seed += user_entropy
return cls.from_master_secret(seed, network=network) | Generate a new wallet using a randomly generated 512 bit seed.
Args:
user_entropy: Optional user-supplied entropy which is combined
with the random seed, to help counteract compromised
PRNGs.
You are encouraged to add an optional `user_entropy` string to protect
against a compromised CSPRNG. This will be com... | codesearchnet |
def wait_for_batches(self, batch_ids, timeout=None):
self._batch_tracker.watch_statuses(self, batch_ids)
timeout = (timeout or DEFAULT_TIMEOUT)
start_time = time()
with self._wait_condition:
while True:
if (self._statuses is not None):
return _format_batch_statuses(se... | Locks until a list of batch ids is committed to the block chain
or a timeout is exceeded. Returns the statuses of those batches.
Args:
batch_ids (list of str): The ids of the batches to wait for
timeout(int): Maximum time in seconds to wait for
Returns:
list of BatchStatus: BatchStatuses to send back to client | codesearchnet |
def get_unit(self, name):
return Unit(client=self, data=self._single_request('Units.Get', unitName=name)) | Retrieve a specific unit from the fleet cluster by name
Args:
name (str): If specified, only this unit name is returned
Returns:
Unit: The unit identified by ``name`` in the fleet cluster
Raises:
fleet.v1.errors.APIError: Fleet returned a response code >= 400 | juraj-google-style |
def resolve_prefix_path(cls, start_path=None):
if not start_path or start_path == 'auto':
start_path = os.path.curdir
cur_path = start_path
LOGGER.debug('Checking if %s is a prefix', os.path.abspath(cur_path))
if cls.is_prefix(cur_path):
return os.path.a... | Look for an existing prefix in the given path, in a path/.lago dir, or
in a .lago dir under any of it's parent directories
Args:
start_path (str): path to start the search from, if None passed, it
will use the current dir
Returns:
str: path to the found prefix
Raises:
RuntimeError: if no prefix was found | juraj-google-style |
def initial_sql(self, value):
self._initial_sql = value
if (value is None):
try:
del self._connectionXML.attrib['one-time-sql']
except KeyError:
pass
else:
self._connectionXML.set('one-time-sql', value) | Set the connection's initial_sql property.
Args:
value: New initial_sql value. String.
Returns:
Nothing. | codesearchnet |
def ExistsWithType(self, urn, aff4_type=None, follow_symlinks=True, age=NEWEST_TIME, token=None):
if (not aff4_type):
raise ValueError("aff4_type can't be None")
try:
self.Open(urn, aff4_type=aff4_type, follow_symlinks=follow_symlinks, age=age, token=token)
return True
except Instant... | Checks if an object with a given URN and type exists in the datastore.
Args:
urn: The urn to check.
aff4_type: Expected object type.
follow_symlinks: If object opened is a symlink, follow it.
age: The age policy used to check this object. Should be either
NEWEST_TIME or a time range given as a tuple (start, end) in
mi... | codesearchnet |
def GenerateCostReport(metagraph, per_node_report=False, verbose=False, cluster=None):
if cluster is None:
cluster = gcluster.Cluster(disable_detailed_stats=False)
return tf_wrap.GenerateCostReport(metagraph.SerializeToString(), per_node_report, verbose, cluster.tf_cluster) | Analyze the cost of each TensorFlow op and node in the provided metagraph.
Args:
metagraph: A TensorFlow MetaGraphDef.
per_node_report: by default the report contains stats aggregated on a per op
type basis, setting per_node_report to True adds results for each
individual node to the report.
verbose: Prints out the en... | github-repos |
def attach_template(self, _template, _key, **unbound_var_values):
if (_key in unbound_var_values):
raise ValueError(('%s specified twice.' % _key))
unbound_var_values[_key] = self
return _DeferredLayer(self.bookkeeper, _template.as_layer().construct, [], unbound_var_values, scope=self._scope, defaul... | Attaches the template to this layer, supplying this layer as the value for _key.
Note: names were chosen to avoid conflicts.
Args:
_template: The template to construct.
_key: The key that this layer should replace.
**unbound_var_values: The values for the unbound_vars.
Returns:
A new layer with operation applied.
Raises:
Val... | codesearchnet |
def fetch(self, plan_id, data={}, **kwargs):
return super(Plan, self).fetch(plan_id, data, **kwargs) | Fetch Plan for given Id
Args:
plan_id : Id for which Plan object has to be retrieved
Returns:
Plan dict for given plan Id
def get_expectations_config(self, discard_failed_expectations=True, discard_result_format_kwargs=True, discard_include_configs_kwargs=True, discard_catch_exceptions_kwargs=True, suppress_warnings=False):
config = dict(self._expectations_config)
config = copy.deepcopy(config)
expectations = config['expectati... | Returns _expectation_config as a JSON object, and perform some cleaning along the way.
Args:
discard_failed_expectations (boolean): \
Only include expectations with success_on_last_run=True in the exported config. Defaults to `True`.
discard_result_format_kwargs (boolean): \
In returned expectation objects, suppress ... | codesearchnet |
def recent_all_projects(self, limit=30, offset=0):
method = 'GET'
url = '/recent-builds?circle-token={token}&limit={limit}&offset={offset}'.format(token=self.client.api_token, limit=limit, offset=offset)
json_data = self.client.request(method, url)
return json_data | Return information about recent builds across all projects.
Args:
limit (int): Number of builds to return, max=100, default=30.
offset (int): Builds returned from this point, default=0.
Returns:
A list of dictionaries. | codesearchnet |
def watch_printer(watch, value):
print('({: 8} s) {}: {}'.format(value.raw_time, watch, value.value)) | Print a watched value.
Args:
watch (DataStream): The stream that was watched
value (IOTileReading): The value that was seen
def __init__(self, policies=None, database_path=None):
self._logger = logging.getLogger('kmip.server.engine')
self._cryptography_engine = engine.CryptographyEngine()
self.database_path = 'sqlite:
if not database_path:
self.database_path = 'sqlite:
self._da... | Create a KmipEngine.
Args:
policy_path (string): The path to the filesystem directory
containing PyKMIP server operation policy JSON files.
Optional, defaults to None.
database_path (string): The path to the SQLite database file
used to store all server data. Optional, defaults to None.
If none, database path defaults... | juraj-google-style |