code stringlengths 20 4.93k | docstring stringlengths 33 1.27k | source stringclasses 3
values |
|---|---|---|
def __init__(self, hunt_obj, runner_args=None, token=None):
self.token = token or hunt_obj.token
self.queue_manager = queue_manager.QueueManager(token=self.token)
self.outbound_lock = threading.Lock()
self.hunt_obj = hunt_obj
if runner_args is not None:
self.runner_args = runner_a... | Constructor for the Hunt Runner.
Args:
hunt_obj: The hunt object this runner will run states for.
runner_args: A HuntRunnerArgs() instance containing initial values. If not
specified, we use the runner_args from the hunt_obj.
token: An instance of access_control.ACLToken security token. | juraj-google-style |
def compute(i, tas):
elems_value_batchable = [ta.read(i) for ta in elems_batchable_ta]
elems_value_flat = _elems_value_batchable_to_flat(elems_value_batchable, elems_flat_signature)
elems_value = elems_unflatten(elems_value_flat)
ag_ctx = autograph_ctx.control_status_ctx()
autographed_fn = autograph... | The loop body of map_fn.
Args:
i: the loop counter
tas: the flat TensorArray accumulator list
Returns:
(i + 1, tas): the updated counter + updated TensorArrays
Raises:
TypeError: if fn_output_signature and result_value structure don't match
ValueType: if fn_output_signature and result_value lengths don't match | github-repos |
def loads(text):
if text.startswith('CCSDS_OEM_VERS'):
func = _read_oem
elif text.startswith('CCSDS_OPM_VERS'):
func = _read_opm
else:
raise ValueError('Unknown CCSDS type')
return func(text) | Read CCSDS from a string, and provide the beyond class corresponding;
Orbit or list of Orbit if it's an OPM, Ephem if it's an OEM.
Args:
text (str):
Return:
Orbit or Ephem
Raise:
ValueError: when the text is not a recognizable CCSDS format | codesearchnet |
def __init__(self, fraction):
self.fraction = fraction
super().__init__('Fraction should be in (0,1] (received {})'
.format(fraction)) | Initialization of instances:
Args:
fraction (float): the invalid fraction.
Attributes:
fraction (float): the invalid fraction. | juraj-google-style |
def prompt_for_test_start(
message='Enter a DUT ID in order to start the test.', timeout_s=60*60*24,
validator=lambda sn: sn, cli_color=''):
@PhaseOptions(timeout_s=timeout_s)
@plugs.plug(prompts=UserInput)
def trigger_phase(test, prompts):
dut_id = prompts.prompt(
message, text_input... | Returns an OpenHTF phase for use as a prompt-based start trigger.
Args:
message: The message to display to the user.
timeout_s: Seconds to wait before raising a PromptUnansweredError.
validator: Function used to validate or modify the serial number.
cli_color: An ANSI color code, or the empty string. | juraj-google-style |
def _Replacement(node):
value = node.id
if value in ('True', 'False', 'None'):
return node
return _StrNode(value) | Returns a node to use in place of the supplied node in the AST.
Args:
node: A node of type Name. Could be a variable, or builtin constant.
Returns:
A node to use in place of the supplied Node. Either the same node, or a
String node whose value matches the Name node's id. | github-repos |
def FromStream(cls, stream):
if stream.system:
specifier = DataStreamSelector.MatchSystemOnly
else:
specifier = DataStreamSelector.MatchUserOnly
return DataStreamSelector(stream.stream_type, stream.stream_id, specifier) | Create a DataStreamSelector from a DataStream.
Args:
stream (DataStream): The data stream that we want to convert. | codesearchnet |
def deepcopy_dict(data):
try:
return copy.deepcopy(data)
except TypeError:
copied_data = {}
for key, value in data.items():
if isinstance(value, dict):
copied_data[key] = deepcopy_dict(value)
else:
try:
copi... | deepcopy dict data, ignore file object (_io.BufferedReader)
Args:
data (dict): dict data structure
{
'a': 1,
'b': [2, 4],
'c': lambda x: x+1,
'd': open('LICENSE'),
'f': {
'f1': {'a1': 2},
'f2': io.open('LICENSE', 'rb'),
}
}
Returns:
dict: deep copied dict data, with file object unchanged. | juraj-google-style |
def flatten(inputs, name=None, data_format='channels_last'):
warnings.warn('`tf.layers.flatten` is deprecated and will be removed in a future version. Please use `tf.keras.layers.Flatten` instead.')
layer = Flatten(name=name, data_format=data_format)
return layer.apply(inputs) | Flattens an input tensor while preserving the batch axis (axis 0).
Args:
inputs: Tensor input.
name: The name of the layer (string).
data_format: A string, one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, heigh... | github-repos |
def validate_word(self, word):
while word:
match = self.seg_regex.match(word)
if match:
word = word[len(match.group(0)):]
else:
return False
return True | Returns True if `word` consists exhaustively of valid IPA segments
Args:
word (unicode): input word as Unicode IPA string
Returns:
bool: True if `word` can be divided exhaustively into IPA segments
that exist in the database | codesearchnet |
def mark_flags_as_mutual_exclusive(flag_names, required=False, flag_values=FLAGS):
def validate_mutual_exclusion(flags_dict):
flag_count = sum((1 for val in flags_dict.values() if (val is not None)))
if ((flag_count == 1) or ((not required) and (flag_count == 0))):
return True
m... | Ensures that only one flag among flag_names is set.
Args:
flag_names: [str], a list of the flag names to be checked.
required: Boolean, if set, exactly one of the flags must be set.
Otherwise, it is also valid for none of the flags to be set.
flag_values: An optional FlagValues instance to validate against. | codesearchnet |
def get_name(node):
if isinstance(node, gast.Name):
return node.id
elif isinstance(node, (gast.Subscript, gast.Attribute)):
return get_name(node.value)
else:
raise TypeError | Get the name of a variable.
Args:
node: A `Name`, `Subscript` or `Attribute` node.
Returns:
The name of the variable e.g. `'x'` for `x`, `x.i` and `x[i]`. | juraj-google-style |
def _create_complete_graph(node_ids):
g = nx.Graph()
g.add_nodes_from(node_ids)
for (i, j) in combinations(node_ids, 2):
g.add_edge(i, j)
return g | Create a complete graph from the list of node ids.
Args:
node_ids: a list of node ids
Returns:
An undirected graph (as a networkx.Graph) | codesearchnet |
def program(self, *, vertex_shader, fragment_shader=None, geometry_shader=None,
tess_control_shader=None, tess_evaluation_shader=None, varyings=()) -> 'Program':
if type(varyings) is str:
varyings = (varyings,)
varyings = tuple(varyings)
res = Program.__ne... | Create a :py:class:`Program` object.
Only linked programs will be returned.
A single shader in the `shaders` parameter is also accepted.
The varyings are only used when a transform program is created.
Args:
shaders (list): A list of :py:class:`Shader` objects.
varyings (list): A list of varying names.
Returns:
:py:... | juraj-google-style |
def pb(name, data, bucket_count=None, display_name=None, description=None):
import tensorflow.compat.v1 as tf
if (bucket_count is None):
bucket_count = summary_v2.DEFAULT_BUCKET_COUNT
data = np.array(data).flatten().astype(float)
if (data.size == 0):
buckets = np.array([]).reshape((0, 3)... | Create a legacy histogram summary protobuf.
Arguments:
name: A unique name for the generated summary, including any desired
name scopes.
data: A `np.array` or array-like form of any shape. Must have type
castable to `float`.
bucket_count: Optional positive `int`. The output will have this
many buckets, except in two e... | codesearchnet |
def _validate(self):
if self.tuple_shapes is not None:
for policy, shape in zip(self._sharding_policies, self._tuple_shapes):
_ = policy.get_sharded_shape(shape) | Checks that the configuration is self-consistent.
Raises:
ValueError: if the shapes and sharding policies don't match. | github-repos |
def cut_video(in_file, out_file, start=None, end=None, vcodec=None, acodec=None, log_level='info', print_cmd=False, **kwargs):
options = {'log_level': log_level}
if (vcodec is None):
options['vcodec'] = 'copy'
if (acodec is None):
options['acodec'] = 'copy'
if start:
options['ss'... | Cut a clip from a video.
Args:
in_file (str): Input video filename.
out_file (str): Output video filename.
start (None or float): Start time (in seconds).
end (None or float): End time (in seconds).
vcodec (None or str): Output video codec, None for unchanged.
acodec (None or str): Output audio codec, None for unchang... | codesearchnet |
def GetBalance(self, asset_id, watch_only=0):
total = Fixed8(0)
if type(asset_id) is NEP5Token.NEP5Token:
return self.GetTokenBalance(asset_id, watch_only)
for coin in self.GetCoins():
if coin.Output.AssetId == asset_id:
if coin.State & CoinStat... | Get the balance of a specific token by its asset id.
Args:
asset_id (NEP5Token|TransactionOutput): an instance of type neo.Wallets.NEP5Token or neo.Core.TX.Transaction.TransactionOutput to get the balance from.
watch_only (bool): True, to limit to watch only wallets.
Returns:
Fixed8: total balance. | juraj-google-style |
def _WriteCacheFile(self, cache_filename, scopes):
creds = {'scopes': sorted(list(scopes)),
'svc_acct_name': self.__service_account_name}
creds_str = json.dumps(creds)
cache_file = _MultiProcessCacheFile(cache_filename)
try:
cache_file.Locke... | Writes the credential metadata to the cache file.
This does not save the credentials themselves (CredentialStore class
optionally handles that after this class is initialized).
Args:
cache_filename: Cache filename to check.
scopes: Scopes for the desired credentials. | juraj-google-style |
def BuildAdGroupCriterionOperations(adgroup_operations, number_of_keywords=1):
criterion_operations = [
{
'xsi_type': 'AdGroupCriterionOperation',
'operand': {
'xsi_type': 'BiddableAdGroupCriterion',
'adGro... | Builds the operations adding a Keyword Criterion to each AdGroup.
Args:
adgroup_operations: a list containing the operations that will add AdGroups.
number_of_keywords: an int defining the number of Keywords to be created.
Returns:
a list containing the operations that will create a new Keyword Criterion
associated w... | juraj-google-style |
def __init__(self, AssetId=None, Value=None, script_hash=None):
super(TransactionOutput, self).__init__()
self.AssetId = AssetId
self.Value = Value
self.ScriptHash = script_hash | Create an instance.
Args:
AssetId (UInt256):
Value (Fixed8):
script_hash (UInt160): | juraj-google-style |
def GetDecrypter(cls, encryption_method, **kwargs):
encryption_method = encryption_method.lower()
decrypter = cls._decrypters.get(encryption_method, None)
if not decrypter:
return None
return decrypter(**kwargs) | Retrieves the decrypter object for a specific encryption method.
Args:
encryption_method (str): encryption method identifier.
kwargs (dict): keyword arguments depending on the decrypter.
Returns:
Decrypter: decrypter or None if the encryption method does not exists.
Raises:
CredentialError: if the necessary credenti... | juraj-google-style |
def list_worker_processes(apppool):
ps_cmd = ['Get-ChildItem', "'IIS:\\AppPools\\{0}\\WorkerProcesses'".format(apppool)]
cmd_ret = _srvmgr(cmd=ps_cmd, return_json=True)
try:
items = salt.utils.json.loads(cmd_ret['stdout'], strict=False)
except ValueError:
raise CommandExecutionError('Una... | Returns a list of worker processes that correspond to the passed
application pool.
.. versionadded:: 2017.7.0
Args:
apppool (str): The application pool to query
Returns:
dict: A dictionary of worker processes with their process IDs
CLI Example:
.. code-block:: bash
salt '*' win_iis.list_worker_processes 'My App P... | codesearchnet |
def get_min_max_value(self) -> tuple[float, float]:
if self._num_bins > 512:
logging.warning('num_bins=%d is too large. The HISTOGRAM_MSE_BRUTEFORCE method tests all histogram mid value pairs, so it may take a long time.', self._num_bins)
mse_min = (float('inf'), float('inf'), float('inf'))
for left... | Finds the optimal quant_min and quant_max by testing all possible cases.
It guarantees optimal quant_min and quant_max for the representative
dataset, but not for the test dataset.
Returns:
(min_value, max_value): Min and max calculated using
HistogramMseBruteforce. | github-repos |
def prepare_axes(axes, title, size, cmap=None):
if (axes is None):
return None
axes.set_xlim([0, size[1]])
axes.set_ylim([size[0], 0])
axes.set_aspect('equal')
axes.axis('off')
if isinstance(cmap, str):
title = '{} (cmap: {})'.format(title, cmap)
axes.set_title(title)
axe... | Prepares an axes object for clean plotting.
Removes x and y axes labels and ticks, sets the aspect ratio to be
equal, uses the size to determine the drawing area and fills the image
with random colors as visual feedback.
Creates an AxesImage to be shown inside the axes object and sets the
needed properties.
Args:
ax... | codesearchnet |
def inject_params(self, params):
for arg, value in params.items():
cli_arg = '--{}'.format(arg)
if cli_arg in sys.argv:
self.tcex.log.debug('skipping existing arg: {}'.format(cli_arg))
continue
... | Inject params into sys.argv from secureParams API, AOT, or user provided.
Args:
params (dict): A dictionary containing all parameters that need to be injected as args. | juraj-google-style |
def patch_masks(patches: dict) -> None:
for patch in patches:
patch_mask(patch) | Wraps patch mask function for list of patches.
Modifies in place. Executes patch_mask for multiple patches.
Args:
patches: A list of patch objects to annotate. | github-repos |
def _as_log_entry(self, name, now):
d = {u'http_response_code': self.response_code, u'timestamp': time.mktime(now.timetuple())}
severity = _SEVERITY.INFO
if (self.response_code >= 400):
severity = _SEVERITY.ERROR
d[u'error_cause'] = self.error_cause.name
if (self.request_size > 0):
... | Makes a `LogEntry` from this instance for the given log_name.
Args:
rules (:class:`ReportingRules`): determines what labels, metrics and
logs to include in the report request.
now (:class:`datetime.DateTime`): the current time
Return:
a ``LogEntry`` generated from this instance with the given name
and timestamp
Rais... | codesearchnet |
def _ParseLastRunTime(self, parser_mediator, fixed_length_section):
systemtime_struct = fixed_length_section.last_run_time
system_time_tuple = (systemtime_struct.year, systemtime_struct.month, systemtime_struct.weekday, systemtime_struct.day_of_month, systemtime_struct.hours, systemtime_struct.minutes, systemti... | Parses the last run time from a fixed-length data section.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
fixed_length_section (job_fixed_length_data_section): a Windows
Scheduled Task job fixed-length data section.
Returns:
dfdatetime.Da... | codesearchnet |
def _get_default_configurable_parameter_values(fn, whitelist, blacklist):
arg_vals = _ARG_DEFAULTS_CACHE.get(fn)
if arg_vals is not None:
return arg_vals.copy()
arg_spec = _get_cached_arg_spec(fn)
if arg_spec.defaults:
default_kwarg_names = arg_spec.args[-len(arg_spec.defaults):]
arg_vals = d... | Retrieve all default values for configurable parameters of a function.
Any parameters included in the supplied blacklist, or not included in the
supplied whitelist, are excluded.
Args:
fn: The function whose parameter values should be retrieved.
whitelist: The whitelist (or `None`) associated with the function.
black... | juraj-google-style |
def _load_json_module():
first_import_error = None
for module_name in ['json', 'simplejson']:
try:
module = __import__(module_name, {}, {}, 'json')
if (not hasattr(module, 'JSONEncoder')):
message = ('json library "%s" is not compatible with ProtoRPC' % module_nam... | Try to load a valid json module.
There are more than one json modules that might be installed. They are
mostly compatible with one another but some versions may be different.
This function attempts to load various json modules in a preferred order.
It does a basic check to guess if a loaded version of json is compati... | codesearchnet |
def get_logging_metric_hook(benchmark_log_dir=None, tensors_to_log=None, every_n_secs=600, **kwargs):
if (benchmark_log_dir is None):
raise ValueError('metric_log_dir should be provided to use metric logger')
if (tensors_to_log is None):
tensors_to_log = _TENSORS_TO_LOG
return metric_hook.Lo... | Function to get LoggingMetricHook.
Args:
benchmark_log_dir: `string`, directory path to save the metric log.
tensors_to_log: List of tensor names or dictionary mapping labels to tensor
names. If not set, log _TENSORS_TO_LOG by default.
every_n_secs: `int`, the frequency for logging the metric. Default to every
10 mins... | codesearchnet |
def is_special_unitary(matrix: np.ndarray, *, rtol: float=1e-05, atol: float=1e-08) -> bool:
return (is_unitary(matrix, rtol=rtol, atol=atol) and ((matrix.shape[0] == 0) or np.allclose(np.linalg.det(matrix), 1, rtol=rtol, atol=atol))) | Determines if a matrix is approximately unitary with unit determinant.
A matrix is special-unitary if it is square and its adjoint is its inverse
and its determinant is one.
Args:
matrix: The matrix to check.
rtol: The per-matrix-entry relative tolerance on equality.
atol: The per-matrix-entry absolute tolerance on e... | codesearchnet |
def raw_state(self):
try:
return self._get_domain().state()
except libvirt.libvirtError as e:
raise vm_plugin.LagoFailedToGetVMStateError(str(e)) | Return the state of the domain in Libvirt's terms
Retruns:
tuple of ints: The state and its reason
Raises:
:exc:`~lago.plugins.vm.LagoVMDoesNotExistError`:
If the VM of this provider doesn't exist.
:exc:`~lago.plugins.vm.LagoFailedToGetVMStateError:
If the VM exist, but the query returned an error. | codesearchnet |
def get_sig(ir, name):
sig = '{}({})'
argss = convert_arguments(ir.arguments)
return [sig.format(name, ','.join(args)) for args in argss] | Return a list of potential signature
It is a list, as Constant variables can be converted to int256
Args:
ir (slithIR.operation)
Returns:
list(str) | juraj-google-style |
def LockScanNode(self, path_spec):
scan_node = self._scan_nodes.get(path_spec, None)
if not scan_node:
raise KeyError('Scan node does not exist.')
self._locked_scan_nodes[path_spec] = scan_node | Marks a scan node as locked.
Args:
path_spec (PathSpec): path specification.
Raises:
KeyError: if the scan node does not exists. | juraj-google-style |
def save_feature_list(self, obj, set_id, feature_list_id):
save(obj, self.features_dir + 'X_{}_{}.pickle'.format(set_id, feature_list_id)) | Pickle the specified feature list to a file.
Example: `save_feature_list(project, X_tfidf_train, 'train', 'tfidf')`.
Args:
obj: The object to pickle (e.g., a numpy array or a Pandas dataframe)
project: An instance of pygoose project.
set_id: The id of the subset (e.g., 'train' or 'test')
feature_list_id: The name for ... | juraj-google-style |
def transpose(self, name=None, activate_final=None):
if name is None:
name = self.module_name + "_transpose"
if activate_final is None:
activate_final = self.activate_final
output_sizes = [lambda l=layer: l.input_shape[1] for layer in self._layers]
output_sizes.reverse()
return MLP(... | Returns transposed `MLP`.
Args:
name: Optional string specifying the name of the transposed module. The
default name is constructed by appending "_transpose"
to `self.module_name`.
activate_final: Optional boolean determining if the activation and batch
normalization, if turned on, are applied to the final layer.
Ret... | juraj-google-style |
def _force_edge_active_move(self, state: _STATE) -> _STATE:
(seqs, edges) = state
unused_edges = edges.copy()
for seq in seqs:
for i in range(1, len(seq)):
unused_edges.remove(self._normalize_edge((seq[(i - 1)], seq[i])))
edge = self._choose_random_edge(unused_edges)
if (not edge... | Move which forces a random edge to appear on some sequence.
This move chooses random edge from the edges which do not belong to any
sequence and modifies state in such a way, that this chosen edge
appears on some sequence of the search state.
Args:
state: Search state, not mutated.
Returns:
New search state with one... | codesearchnet |
def _feedback(self, dna: DNA, reward: Union[float, Tuple[float]]) -> None: | Actual feedback method which should be implemented by the child class.
The default implementation is no-op.
Args:
dna: a DNA object.
reward: reward for the DNA. It is a float if `self.multi_objective`
returns False, otherwise it's a tuple of floats. | github-repos |
def _get_name_filter(package, context='decorate', reparse=False):
global name_filters
pkey = (package, context)
if ((pkey in name_filters) and (not reparse)):
return name_filters[pkey]
from acorn.config import settings
spack = settings(package)
sections = {'decorate': ['tracking', 'acorn... | Makes sure that the name filters for the specified package have been
loaded.
Args:
package (str): name of the package that this method belongs to.
context (str): one of ['decorate', 'time', 'analyze']; specifies which
section of the configuration settings to check. | codesearchnet |
def _process_policy_eval_results(to_eval, eval_results, active_episodes, active_envs, off_policy_actions, policies, clip_actions):
actions_to_send = defaultdict(dict)
for env_id in active_envs:
actions_to_send[env_id] = {}
for (policy_id, eval_data) in to_eval.items():
rnn_in_cols = _to_colu... | Process the output of policy neural network evaluation.
Records policy evaluation results into the given episode objects and
returns replies to send back to agents in the env.
Returns:
actions_to_send: nested dict of env id -> agent id -> agent replies. | codesearchnet |
def AddAttribute(self, attribute, value=None, age=None):
if ('w' not in self.mode):
raise IOError(('Writing attribute %s to read only object.' % attribute))
if (value is None):
value = attribute
attribute = value.attribute_instance
if ((self.mode != 'w') and attribute.lock_protected ... | Add an additional attribute to this object.
If value is None, attribute is expected to be already initialized with a
value. For example:
fd.AddAttribute(fd.Schema.CONTAINS("some data"))
Args:
attribute: The attribute name or an RDFValue derived from the attribute.
value: The value the attribute will be set to.
age: ... | codesearchnet |
def _stream_output(process):
exit_code = None
while (exit_code is None):
stdout = process.stdout.readline().decode('utf-8')
sys.stdout.write(stdout)
exit_code = process.poll()
if (exit_code != 0):
raise RuntimeError(('Process exited with code: %s' % exit_code))
return exi... | Stream the output of a process to stdout
This function takes an existing process that will be polled for output. Only stdout
will be polled and sent to sys.stdout.
Args:
process(subprocess.Popen): a process that has been started with
stdout=PIPE and stderr=STDOUT
Returns (int): process exit code | codesearchnet |
def getParameter(self, name):
return lock_and_call(
lambda: Parameter(self._impl.getParameter(name)),
self._lock
) | Get the parameter with the corresponding name.
Args:
name: Name of the parameter to be found.
Raises:
TypeError: if the specified parameter does not exist. | juraj-google-style |
def json(self) -> dict:
content = {}
if self.text:
content['text'] = self.text
content['controls'] = [control.json() for control in self.content]
self.control_json['content'] = content
return self.control_json | Returns json compatible state of the ButtonsFrame instance.
Returns json compatible state of the ButtonsFrame instance including
all nested buttons.
Returns:
control_json: Json representation of ButtonsFrame state. | codesearchnet |
def should_stop(self):
if self._check_stop():
return True
if self._sess:
return self._wrapped_is_stoppable and self._sess.should_stop()
return True | Return true if this session should not be used anymore.
Always return True if the session was closed.
Returns:
True if the session should stop, False otherwise. | github-repos |
def get_creation_date_tags(url, domain, as_dicts=False):
creation_date_tags = [
mementoweb_api_tags(url),
get_whois_tags(domain),
]
creation_date_tags = sorted(
sum(creation_date_tags, []),
key=lambda x: x.date
)
if not as_dicts:
return creation_date_ta... | Put together all data sources in this module and return it's output.
Args:
url (str): URL of the web. With relative paths and so on.
domain (str): Just the domain of the web.
as_dicts (bool, default False): Convert output to dictionaries
compatible with :class:`.SourceString`?
Returns:
list: Sorted list of :class:`Ti... | juraj-google-style |
def annotate_test_file(self, test_file: Iterator[str]) -> Iterator[str]:
transformed_tests, run_directives = self.for_each_test_case(test_file, self.annotate_test_case, num_outputs=2)
return itertools.chain([_BANNER_COMMENT_LINE], run_directives, ['\n'], transformed_tests) | Inserts FileCheck directives above each test case in an HLO test file.
Args:
test_file: An iterator over the lines of an HLO test file.
Returns:
An iterator over the lines of the transformed HLO test file. Each test
case is preceded by FileCheck directives describing the expected output
of the optimizer on that test ... | github-repos |
def add_op(state, op_func, *args, **kwargs):
frameinfo = get_caller_frameinfo()
kwargs['frameinfo'] = frameinfo
for host in state.inventory:
op_func(state, host, *args, **kwargs) | Prepare & add an operation to ``pyinfra.state`` by executing it on all hosts.
Args:
state (``pyinfra.api.State`` obj): the deploy state to add the operation
to op_func (function): the operation function from one of the modules,
ie ``server.user``
args/kwargs: passed to the operation function | codesearchnet |
def make_sharded_variable_creator(hosts: List[Text]) -> Callable[..., TPUEmbeddingVariable]:
def sharded_variable_creator(next_creator: Callable[..., tf_variables.Variable], *args, **kwargs):
kwargs['skip_mirrored_creator'] = True
num_hosts = len(hosts)
name, shape, dtype, unwrappe... | Makes a sharded variable creator given a list of hosts.
Args:
hosts: a list of tensorflow devices on which to shard the tensors.
Returns:
A variable creator function. | github-repos |
def _read_protocol_line(self):
self._server_start_stdout = []
while True:
line = self._proc.stdout.readline().decode('utf-8')
if not line:
raise errors.ServerStartError(self._device, 'Unexpected EOF when waiting for server to start.')
line = line.strip()
if line.start... | Reads the next line of instrumentation output relevant to snippets.
This method will skip over lines that don't start with 'SNIPPET ' or
'INSTRUMENTATION_RESULT:'.
Returns:
A string for the next line of snippet-related instrumentation output,
stripped.
Raises:
errors.ServerStartError: If EOF is reached without any p... | github-repos |
async def get_movie(self, id_):
url = self.url_builder(
'movie/{movie_id}',
dict(movie_id=id_),
url_params=OrderedDict(append_to_response='credits'),
)
data = await self.get_data(url)
if data is None:
return
return Movie.fr... | Retrieve movie data by ID.
Arguments:
id_ (:py:class:`int`): The movie's TMDb ID.
Returns:
:py:class:`~.Movie`: The requested movie. | juraj-google-style |
def __init__(self, header, values, datetimes):
assert isinstance(header, Header), \
'header must be a Ladybug Header object. Got {}'.format(type(header))
assert isinstance(datetimes, Iterable) \
and not isinstance(datetimes, (str, dict, bytes, bytearray)), \
... | Initialize base collection.
Args:
header: A Ladybug Header object.
values: A list of values.
datetimes: A list of Ladybug DateTime objects that aligns with
the list of values. | juraj-google-style |
def get_qa_logit_layer(self) -> nn.Module:
if hasattr(self, 'answer_head'):
return self.answer_head.logit_fc[-1] | Returns the linear layer that produces question answering logits
Returns:
`nn.Module`: A torch module mapping the question answering prediction hidden states. `None`: A NoneType
object if Lxmert does not have the visual answering head. | github-repos |
def from_dict(cls, parameters):
instance = cls()
instance.fitted = parameters['fitted']
instance.constant_value = parameters['constant_value']
if instance.fitted and instance.constant_value is None:
instance.model = scipy.stats.truncnorm(parameters['a'], parameters[... | Set attributes with provided values.
Args:
parameters(dict): Dictionary containing instance parameters.
Returns:
Truncnorm: Instance populated with given parameters. | juraj-google-style |
def make_single_array(ds, batch_size=8*1024):
if isinstance(ds.output_types, tuple) or isinstance(ds.output_shapes, tuple):
raise ValueError('Dataset must have a single type and shape')
nshapes = len(ds.output_shapes)
if nshapes > 0:
raise ValueError('Dataset must be comprised of scalar... | Create a single numpy array from a dataset.
The dataset must have only one dimension, that is,
the length of its `output_shapes` and `output_types`
is 1, and its output shape must be `[]`, that is,
every tensor in the dataset must be a scalar.
Args:
ds: a TF Dataset.
batch_size: how many elements to read per pass
... | juraj-google-style |
def __init__(self, bytes_per_pack=0, timeout_seconds=None):
pass | Creates a CollectiveHints.
Args:
bytes_per_pack: a non-negative integer. Breaks collective operations into
packs of certain size. If it's zero, the value is determined
automatically. This only applies to all-reduce with
`MultiWorkerMirroredStrategy` currently.
timeout_seconds: a float or None, timeout in seconds. If n... | github-repos |
def status(self, job_ids):
logging.debug("Checking status of : {0}".format(job_ids))
for job_id in self.resources:
poll_code = self.resources[job_id]['proc'].poll()
if self.resources[job_id]['status'] in ['COMPLETED', 'FAILED']:
continue
if ... | Get the status of a list of jobs identified by their ids.
Args:
- job_ids (List of ids) : List of identifiers for the jobs
Returns:
- List of status codes. | juraj-google-style |
def validate_probability(p: float, p_str: str) -> float:
if p < 0:
raise ValueError('{} was less than 0.'.format(p_str))
elif p > 1:
raise ValueError('{} was greater than 1.'.format(p_str))
return p | Validates that a probability is between 0 and 1 inclusively.
Args:
p: The value to validate.
p_str: What to call the probability in error messages.
Returns:
The probability p if the probability if valid.
Raises:
ValueError if the probability is invalid. | juraj-google-style |
def __init__(self, mediator=None):
super(WindowsVolumeScanner, self).__init__(mediator=mediator)
self._file_system = None
self._path_resolver = None
self._windows_directory = None | Initializes a Windows volume scanner.
Args:
mediator (VolumeScannerMediator): a volume scanner mediator. | juraj-google-style |
def broadcast_shapes(shape1, shape2):
shape1 = list(shape1)
shape2 = list(shape2)
origin_shape1 = shape1
origin_shape2 = shape2
if len(shape1) > len(shape2):
shape2 = [1] * (len(shape1) - len(shape2)) + shape2
if len(shape1) < len(shape2):
shape1 = [1] * (len(shape2) - len(shape1... | Broadcast input shapes to a unified shape.
Convert to list for mutability.
Args:
shape1: A tuple or list of integers.
shape2: A tuple or list of integers.
Returns:
output_shape (list of integers or `None`): The broadcasted shape.
Example:
>>> broadcast_shapes((5, 3), (1, 3))
[5, 3] | github-repos |
def probabilistic_collocation(order, dist, subset=0.1):
(abscissas, weights) = chaospy.quad.collection.golub_welsch(order, dist)
likelihood = dist.pdf(abscissas)
alpha = numpy.random.random(len(weights))
alpha = (likelihood > ((alpha * subset) * numpy.max(likelihood)))
abscissas = abscissas.T[alpha]... | Probabilistic collocation method.
Args:
order (int, numpy.ndarray) : Quadrature order along each axis.
dist (Dist) : Distribution to generate samples from.
subset (float) : Rate of which to removed samples. | codesearchnet |
def ProcessGlobalSuppresions(lines):
for line in lines:
if _SEARCH_C_FILE.search(line):
for category in _DEFAULT_C_SUPPRESSED_CATEGORIES:
_global_error_suppressions[category] = True
if _SEARCH_KERNEL_FILE.search(line):
for category in _DEFAULT_KERNEL_SUPPRESSE... | Updates the list of global error suppressions.
Parses any lint directives in the file that have global effect.
Args:
lines: An array of strings, each representing a line of the file, with the
last element being empty if the file is terminated with a newline. | codesearchnet |
def _try_refresh_access_token(self) -> None:
    """Attempts to get a new access token using the refresh token, if needed.

    If the access token is missing or expired and this instance has a stored
    refresh token, the refresh token is used to obtain a new access token.
    On success, this instance is modified in place with the new token and
    its absolute expiration time.

    Returns:
        None.
    """
    if self.refresh_token:
        if ((not self.access_token) or self._is_access_token_expired()):
            # The helper returns (token, expiration); the expiration is a
            # relative duration (see the addition below), so convert it to
            # an absolute timestamp based on the current time.
            (self.access_token, self.access_expiration) = self._get_access_from_refresh()
            self.access_expiration = (time.time() + self.access_expiration)
If the access token is expired and this instance has a stored refresh token,
then the refresh token is used in the API call to get a new access token. If
successful, this instance is modified in-place with that new access token.
Args:
None
Returns:
No... | codesearchnet |
def Match(self, artifact=None, os_name=None, cpe=None, label=None):
    """Test if host data should trigger a check.

    Args:
        artifact: An artifact name.
        os_name: An OS string.
        cpe: A CPE string.
        label: A label string.

    Returns:
        A list of conditions that match.
    """
    matched = []
    for condition in self.conditions:
        if condition.Match(artifact, os_name, cpe, label):
            matched.append(condition)
    return matched
Args:
artifact: An artifact name.
os_name: An OS string.
cpe: A CPE string.
label: A label string.
Returns:
A list of conditions that match. | juraj-google-style |
def _build_hash_string(self):
if ((self.site_name in SITE_LIST) or self.hash_string):
if (self.username and self.password):
try:
hash_string = self.hash_string.format(self.password)
except TypeError:
raise PybooruError("Pybooru can't add 'password' to ... | Function for build password hash string.
Raises:
PybooruError: When the hash string isn't provided.
PybooruError: When the username or password isn't provided.
PybooruError: When Pybooru can't add the password to the hash string.
def load_validation_plugin(name=None):
if (not name):
return BaseValidationRules
plugin = None
for entry_point in iter_entry_points('bigchaindb.validation', name):
plugin = entry_point.load()
if (not plugin):
raise ResolutionError('No plugin found in group `bigchaindb.validation`... | Find and load the chosen validation plugin.
Args:
name (string): the name of the entry_point, as advertised in the
setup.py of the providing package.
Returns:
an uninstantiated subclass of ``bigchaindb.validation.AbstractValidationRules`` | codesearchnet |
def VerifyStructure(self, parser_mediator, line):
self._last_month = 0
self._year_use = parser_mediator.GetEstimatedYear()
try:
structure = self.SECURITYD_LINE.parseString(line)
except pyparsing.ParseException:
logger.debug('Not a MacOS securityd log file')
return False
time_... | Verify that this file is a securityd log file.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
line (str): line from a text file.
Returns:
bool: True if the line is in the expected format, False if not. | codesearchnet |
def get_palette(num_colors=256):
pallete = [0]*(num_colors*3)
for j in range(0, num_colors):
lab = j
pallete[j*3+0] = 0
pallete[j*3+1] = 0
pallete[j*3+2] = 0
i = 0
while (lab > 0):
pallete[j*3+0] |= (((lab >> 0) & 1) << (7-i))
pallete[... | generates the colormap for visualizing the segmentation mask
Args:
num_colors (int): the number of colors to generate in the output palette
Returns:
list: the generated color palette as a flat list of per-channel values.
def check_requirements_file(req_file, skip_packages):
    """Return the outdated requirements found in a requirements file.

    Args:
        req_file (str): Filename of the requirements file.
        skip_packages (list): Package names to ignore, or None to check all.

    Returns:
        The outdated requirements, with falsy check results filtered out.
    """
    requirements = read_requirements(req_file)
    if skip_packages is not None:
        # Set lookup keeps the filter O(1) per requirement.
        ignored = set(skip_packages)
        requirements = [req for req in requirements if req.name not in ignored]
    results = [check_req(req) for req in requirements]
    return filter(None, results)
Args:
req_file (str): Filename of requirements file
skip_packages (list): List of package names to ignore. | codesearchnet |
def _get_filename_from_url(url):
parse = urlparse(url)
return os.path.basename(parse.path) | Return a filename from a URL
Args:
url (str): URL to extract filename from
Returns:
(str): Filename in URL | juraj-google-style |
def _get_dominant_angle(lines, domination_type=MEDIAN):
    """Picks the dominant angle of a set of lines.

    Args:
        lines: iterable of (x1, y1, x2, y2) tuples that define lines.
        domination_type: either MEDIAN or MEAN.

    Returns:
        Dominant angle value in radians.

    Raises:
        ValueError: on unknown domination_type.
    """
    if domination_type not in (MEDIAN, MEAN):
        raise ValueError(('Unknown domination type provided: %s' % domination_type))
    angle_fn = _get_median_angle if domination_type == MEDIAN else _get_mean_angle
    return angle_fn(lines)
Args:
lines: iterable of (x1, y1, x2, y2) tuples that define lines.
domination_type: either MEDIAN or MEAN.
Returns:
Dominant angle value in radians.
Raises:
ValueError: on unknown domination_type. | codesearchnet |
def _reduce_output(self, outputs, seq_lengths):
batch_size = outputs.shape[0]
reduced = []
for i in range(batch_size):
if (self.lstm_reduction == 'mean'):
reduced.append(outputs[(i, :seq_lengths[i], :)].mean(dim=0))
elif (self.lstm_reduction == 'max'):
reduced.append(... | Reduces the output of an LSTM step
Args:
outputs: (torch.FloatTensor) the hidden state outputs from the
lstm, with shape [batch_size, max_seq_length, hidden_size] | codesearchnet |
def get(self, key, default=None, cast=True):
tablename, _, key = key.rpartition(':')
if tablename and tablename not in self.fields.name.split('+'):
raise ItsdbError('column requested from wrong table: {}'
.format(tablename))
try:
inde... | Return the field data given by field name *key*.
Args:
key: the field name of the data to return
default: the value to return if *key* is not in the row | juraj-google-style |
def UpdateIncludeState(filename, include_dict, io=codecs):
headerfile = None
try:
headerfile = io.open(filename, 'r', 'utf8', 'replace')
except IOError:
return False
linenum = 0
for line in headerfile:
linenum += 1
clean_line = CleanseComments(line)
match = _RE_PATTERN_INCLUDE.search(cl... | Fill up the include_dict with new includes found from the file.
Args:
filename: the name of the header to read.
include_dict: a dictionary in which the headers are inserted.
io: The io factory to use to read the file. Provided for testability.
Returns:
True if a header was successfully added. False otherwise. | juraj-google-style |
def get_frame(self, index):
frame_num = self.frame_index[index]
onset = float(frame_num) / self.fps
if index < self.n_frames - 1:
next_frame_num = self.frame_index[index + 1]
end = float(next_frame_num) / self.fps
else:
end = float(self.dura... | Get video frame at the specified index.
Args:
index (int): Positional index of the desired frame. | juraj-google-style |
def add_member_to_list(self, username, listname, member_type='USER'):
    """Add a member to an existing list.

    Args:
        username (str): The username of the user to add.
        listname (str): The name of the list to add the user to.
        member_type (str): Normally "USER"; use "LIST" when adding a list
            as a member of another list.

    Returns:
        The service response from the addMemberToList call.
    """
    service = self.client.service
    return service.addMemberToList(listname, username, member_type, self.proxy_id)
Args:
username (str): The username of the user to add
listname (str): The name of the list to add the user to
member_type (str): Normally, this should be "USER".
If you are adding a list as a member of another list,
set this to "LIST", instead. | codesearchnet |
def pull(self, platform=None):
    """Pull the image digest.

    Args:
        platform (str): The platform to pull the image for.
            Default: ``None``.

    Returns:
        (:py:class:`Image`): A reference to the pulled image.
    """
    # Only the repository part of the image name is needed; the digest
    # itself is passed as the tag.
    repository = parse_repository_tag(self.image_name)[0]
    return self.collection.pull(repository, tag=self.id, platform=platform)
Args:
platform (str): The platform to pull the image for.
Default: ``None``
Returns:
(:py:class:`Image`): A reference to the pulled image. | juraj-google-style |
def render_template_inplace(template_path, info, dry_run=False, extra_filters=None, resolver=None):
filters = {}
if (resolver is not None):
filters['find_product'] = _create_resolver_filter(resolver)
if (extra_filters is not None):
filters.update(extra_filters)
basedir = os.path.dirname(... | Render a template file in place.
This function expects template path to be a path to a file
that ends in .tpl. It will be rendered to a file in the
same directory with the .tpl suffix removed.
Args:
template_path (str): The path to the template file
that we want to render in place.
info (dict): A dictionary of varia... | codesearchnet |
def print_start_command(self, command):
    """Send a print-start command to the device.

    Args:
        command: the command payload to send (at most 20 characters).

    Returns:
        None

    Raises:
        RuntimeError: if the command is longer than 20 characters.
    """
    size = len(command)
    if size > 20:
        raise RuntimeError('Command too long')
    # The protocol encodes the payload length as two separate digit bytes
    # (tens and ones). Use divmod / integer division: on Python 3 the
    # original `size/10` is true division, and chr() on a float raises
    # TypeError.
    tens, ones = divmod(size, 10)
    self.send('^PS' + chr(tens) + chr(ones) + command)
Args:
command: the type of command you desire.
Returns:
None
Raises:
RuntimeError: Command too long. | juraj-google-style |
def db(self, entity, query_filters='size=10'):
if (self.entity_api_key == ''):
return {'status': 'failure', 'response': 'No API key found in request'}
historic_url = ((self.base_url + 'api/0.1.0/historicData?') + query_filters)
historic_headers = {'apikey': self.entity_api_key, 'Content-Type': 'appl... | This function allows an entity to access the historic data.
Args:
entity (string): Name of the device to listen to
query_filters (string): Elastic search response format string
example, "pretty=true&size=10" | codesearchnet |
def decorate_set_on_listener(prototype):
    """Annotate a method with listener event information.

    Private decorator for use in the editor; allows the Editor to create
    listener methods.

    Args:
        prototype (str): The parameter list for the listener method
            (e.g. "(self, new_value)").

    Returns:
        The decorator that attaches the ``_event_info`` annotation.
    """
    def add_annotation(method):
        # Record the event name and signature prototype on the method itself.
        method._event_info = {'name': method.__name__, 'prototype': prototype}
        return method
    return add_annotation
Allows the Editor to create listener methods.
Args:
params (str): The list of parameters for the listener
method (es. "(self, new_value)") | codesearchnet |
def __call__(self, **kwargs):
if len(kwargs) != len(self._inputs):
raise ValueError('Invalid number of inputs provided for running a SignatureDef, expected %s vs provided %s' % (len(self._inputs), len(kwargs)))
for input_name, value in kwargs.items():
if input_name not in self._inputs:
... | Runs the SignatureDef given the provided inputs in arguments.
Args:
**kwargs: key,value for inputs to the model. Key is the SignatureDef input
name. Value is numpy array with the value.
Returns:
dictionary of the results from the model invoke.
Key in the dictionary is SignatureDef output name.
Value is the result Ten... | github-repos |
def getContext(self, context_name = 'default'):
    """Get a context by name, creating the default context on first use.

    Args:
        context_name (str): name of the context to fetch.

    Raises:
        KeyError: if a non-default context name does not exist.

    Returns:
        The context registered under ``context_name``.
    """
    # Calling the instance itself registers a context; lazily create the
    # built-in 'default' context the first time it is requested.
    if context_name == 'default' and 'default' not in self.contexts:
        self('default')
    return self.contexts[context_name]
Params:
context_name (string):
Context name
Raises:
KeyError:
If the context name does not exist
Returns:
bubbler.Bubbler:
Named context | juraj-google-style |
def added_tokens_decoder(self) -> dict[int, AddedToken]:
    """Return the added tokens in the vocabulary, keyed by index.

    Returns:
        `Dict[int, AddedToken]`: The added tokens, sorted by ascending index.
    """
    decoder = self._added_tokens_decoder
    return {index: decoder[index] for index in sorted(decoder)}
Returns:
`Dict[str, int]`: The added tokens. | github-repos |
def _ssl_context_factory(parameters):
client_cert = None
ca_cert = None
key = config.conf['tls']['keyfile']
cert = config.conf['tls']['certfile']
ca_file = config.conf['tls']['ca_cert']
if ca_file:
with open(ca_file, 'rb') as fd:
ca_cert = ssl.Certificate.loadPEM(fd.read())
... | Produce a Twisted SSL context object from a pika connection parameter object.
This is necessary as Twisted manages the connection, not Pika.
Args:
parameters (pika.ConnectionParameters): The connection parameters built
from the fedora_messaging configuration. | codesearchnet |
def register_mbr_plugin(self, fs_id, plugin):
    """Associate a plugin's detection method with a filesystem id.

    Used in a plugin's registration routine.

    Args:
        fs_id: filesystem id that is read from an MBR partition entry.
        plugin: plugin that supports this filesystem.
    """
    plugin_name = self.__get_plugin_name(plugin)
    self.logger.debug('MBR: {}, FS ID: {}'.format(plugin_name, fs_id))
    self.__mbr_plugins[fs_id].append(plugin)
to associate it's detection method with given filesystem id
Args:
fs_id: filesystem id that is read from MBR partition entry
plugin: plugin that supports this filesystem | juraj-google-style |
def __init__(self, configuration, provider=None):
    """Initialize the backend.

    Subclasses should initialize the module and its configuration, and
    raise an exception if a required component is not available.

    Args:
        configuration (BackendConfiguration): backend configuration.
        provider (Optional[BaseProvider]): provider responsible for this
            backend.
    """
    self._configuration = configuration
    self._provider = provider
This method should initialize the module and its configuration, and
raise an exception if a component of the module is
not available.
Args:
configuration (BackendConfiguration): backend configuration
provider (BaseProvider): provider responsible for this backend
Raises:
FileNotFoundError if ... | juraj-google-style |
def plot_ax(self, ax=None, fontsize=12, **kwargs):
ax, fig, plt = get_ax_fig_plt(ax=ax)
color = kwargs.get("color", "r")
label = kwargs.get("label", "{} fit".format(self.__class__.__name__))
lines = ["Equation of State: %s" % self.__class__.__name__,
"Minimum e... | Plot the equation of state on axis `ax`
Args:
ax: matplotlib :class:`Axes` or None if a new figure should be created.
fontsize: Legend fontsize.
color (str): plot color.
label (str): Plot label
text (str): Legend text (options)
Returns:
Matplotlib figure object. | juraj-google-style |
def _get_backend_instance(self, backend_cls):
try:
backend_instance = backend_cls(provider=self)
except Exception as err:
raise QiskitError(('Backend %s could not be instantiated: %s' % (backend_cls, err)))
return backend_instance | Return an instance of a backend from its class.
Args:
backend_cls (class): Backend class.
Returns:
BaseBackend: a backend instance.
Raises:
QiskitError: if the backend could not be instantiated. | codesearchnet |
def write(self, offset, data):
if (not isinstance(offset, (int, long))):
raise TypeError('Invalid offset type, should be integer.')
if (not isinstance(data, (bytes, bytearray, list))):
raise TypeError('Invalid data type, expected bytes, bytearray, or list.')
offset = self._adjust_offset(offs... | Write a string of bytes to the specified `offset` in bytes, relative
to the base physical address of the MMIO region.
Args:
offset (int, long): offset from base physical address, in bytes.
data (bytes, bytearray, list): a byte array or list of 8-bit
integers to write.
Raises:
TypeError: if `offset` or `data` type are... | codesearchnet |
def _compress_json(self, j):
compressed_json = copy.copy(j)
compressed_json.pop('users', None)
compressed_data = zlib.compress(
json.dumps(j['users']).encode('utf-8'),
self.zlib_compression_strength
)
b64_data = base64.b64encode(compressed_data).... | Compress the BLOB data portion of the usernotes.
Arguments:
j: the JSON in Schema v5 format (dict)
Returns a dict with the 'users' key removed and 'blob' key added | juraj-google-style |
def get_first_model_with_rest_name(cls, rest_name):
    """Get the first model corresponding to a rest_name.

    Args:
        rest_name: the rest name to look up.

    Returns:
        The first matching model, or None if there is no match.
    """
    models = cls.get_models_with_rest_name(rest_name)
    return models[0] if models else None
return None | Get the first model corresponding to a rest_name
Args:
rest_name: the rest name | juraj-google-style |
def RemoveEventAttribute(self, attribute_name):
    """Removes an attribute from being set on all events produced.

    Args:
        attribute_name (str): name of the attribute to remove.

    Raises:
        KeyError: if the event attribute is not set.
    """
    try:
        del self._extra_event_attributes[attribute_name]
    except KeyError:
        raise KeyError('Event attribute: {0:s} not set'.format(attribute_name))
Args:
attribute_name (str): name of the attribute to remove.
Raises:
KeyError: if the event attribute is not set. | codesearchnet |
def GetEntries(
self, parser_mediator, cookie_data=None, url=None, **kwargs):
fields = cookie_data.split('.')
number_of_fields = len(fields)
if number_of_fields not in (1, 4):
parser_mediator.ProduceExtractionWarning(
'unsupported number of fields: {0:d} in cookie: {1:s}'.format(... | Extracts event objects from the cookie.
Args:
parser_mediator (ParserMediator): parser mediator.
cookie_data (bytes): cookie data.
url (str): URL or path where the cookie got set. | juraj-google-style |
def __init__(self, name, aliases=None, description=None, urls=None):
    """Initializes an UUID data type definition.

    Args:
        name (str): name.
        aliases (Optional[list[str]]): aliases.
        description (Optional[str]): description.
        urls (Optional[list[str]]): URLs.
    """
    super(UUIDDefinition, self).__init__(
        name, aliases=aliases, description=description, urls=urls)
    # A UUID is always 16 bytes.
    self.size = 16
Args:
name (str): name.
aliases (Optional[list[str]]): aliases.
description (Optional[str]): description.
urls (Optional[list[str]]): URLs. | juraj-google-style |
def read_raw(self, key):
    """Read method of CRUD operation for raw data.

    Args:
        key (string): The variable to read from the DB.

    Returns:
        (any): Results retrieved from the DB, or None when no key is given.
    """
    if key is None:
        # Nothing to look up; warn and fall through with no data.
        self.tcex.log.warning(u'The key field was None.')
        return None
    return self.db.read(key.strip())
Args:
key (string): The variable to read from the DB.
Returns:
(any): Results retrieved from DB. | codesearchnet |
def console_from_file(filename: str) -> tcod.console.Console:
    """Return a new console object loaded from a file.

    The file format is automatically determined. This can load REXPaint
    `.xp`, ASCII Paint `.apf`, or non-delimited ASCII `.asc` files.

    Args:
        filename (Text): The path to the file, as a string.

    Returns:
        A new :any:`Console` instance.
    """
    cdata = lib.TCOD_console_from_file(filename.encode('utf-8'))
    return tcod.console.Console._from_cdata(cdata)
The file format is automactially determined. This can load REXPaint `.xp`,
ASCII Paint `.apf`, or Non-delimited ASCII `.asc` files.
Args:
filename (Text): The path to the file, as a string.
Returns: A new :any`Console` instance. | codesearchnet |
def create_endpoint(self, endpoint_name, config_name, tags=None, wait=True):
LOGGER.info('Creating endpoint with name {}'.format(endpoint_name))
tags = (tags or [])
self.sagemaker_client.create_endpoint(EndpointName=endpoint_name, EndpointConfigName=config_name, Tags=tags)
if wait:
self.wait_for... | Create an Amazon SageMaker ``Endpoint`` according to the endpoint configuration specified in the request.
Once the ``Endpoint`` is created, client applications can send requests to obtain inferences.
The endpoint configuration is created using the ``CreateEndpointConfig`` API.
Args:
endpoint_name (str): Name of the A... | codesearchnet |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.