code
stringlengths 20
4.93k
| docstring
stringlengths 33
1.27k
| source
stringclasses 3
values |
|---|---|---|
def sendmail(subject, text, mailto, sender=None):
    """Send an e-mail with the unix ``sendmail`` binary.

    Args:
        subject: String with the subject of the mail.
        text: String with the body of the mail.
        mailto: String or list of strings with the recipients.
        sender: String with the sender address. If None,
            username@hostname is used.

    Returns:
        int: -1 if the sendmail binary cannot be found, otherwise the
        length of the data sendmail wrote to stderr (0 on success).
    """
    def user_at_host():
        from socket import gethostname
        return os.getlogin() + '@' + gethostname()

    if sender is None:
        try:
            sender = user_at_host()
        except OSError:
            # os.getlogin() can fail e.g. without a controlling terminal.
            sender = 'abipyscheduler@youknowwhere'

    if is_string(mailto):
        mailto = [mailto]

    from email.mime.text import MIMEText
    mail = MIMEText(text)
    mail['Subject'] = subject
    mail['From'] = sender
    mail['To'] = ', '.join(mailto)
    msg = mail.as_string()

    from subprocess import Popen, PIPE
    import sys

    # FIX: use a distinct local name; the original assigned to `sendmail`,
    # shadowing this function's own name.
    sendmail_bin = which('sendmail')
    if sendmail_bin is None:
        return -1
    if sys.version_info[0] < 3:
        p = Popen([sendmail_bin, '-t'], stdin=PIPE, stderr=PIPE)
    else:
        # text mode so communicate() accepts a str payload on Python 3
        p = Popen([sendmail_bin, '-t'], stdin=PIPE, stderr=PIPE, universal_newlines=True)
    _, errdata = p.communicate(msg)
    return len(errdata)
|
Sends an e-mail with unix sendmail.
Args:
subject: String with the subject of the mail.
text: String with the body of the mail.
mailto: String or list of string with the recipients.
sender: string with the sender address.
If sender is None, username@hostname is used.
Returns:
Exit status
|
codesearchnet
|
def _init_request_logging(self, app):
    """Wrap the Flask WSGI app with Application Insights request logging.

    Does nothing when ``APPINSIGHTS_DISABLE_REQUEST_LOGGING`` is set in
    the Flask config.

    Args:
        app (flask.Flask): the Flask application to instrument.
    """
    if app.config.get(CONF_DISABLE_REQUEST_LOGGING, False):
        return
    middleware = WSGIApplication(
        self._key, app.wsgi_app, telemetry_channel=self._channel)
    self._requests_middleware = middleware
    app.wsgi_app = middleware
|
Sets up request logging unless ``APPINSIGHTS_DISABLE_REQUEST_LOGGING``
is set in the Flask config.
Args:
app (flask.Flask): the Flask application for which to initialize the extension.
|
codesearchnet
|
def readme_verify():
    """Render the README template and compare it to the on-disk README.

    Raises:
        ValueError: if the current README disagrees with the expected
            value computed from the template.
    """
    expected = populate_readme(REVISION, RTD_VERSION)
    with open(README_FILE, 'r') as file_obj:
        actual = file_obj.read()
    if actual == expected:
        print('README contents are as expected.')
        return
    diff = get_diff(actual, expected, 'README.rst.actual', 'README.rst.expected')
    raise ValueError('\n' + diff)
|
Populate the template and compare to ``README``.
Raises:
ValueError: If the current README doesn't agree with the expected
value computed from the template.
|
codesearchnet
|
def update_clinvar_id(self, clinvar_id, submission_id):
    """Save an official ClinVar submission ID on a submission document.

    Args:
        clinvar_id (str): ID in the format SUB[0-9]+, obtained from the
            ClinVar portal when starting a new submission.
        submission_id (str): ``_id`` of the submission to update.

    Returns:
        dict: the updated ClinVar submission document.
    """
    update = {
        '$set': {
            'clinvar_subm_id': clinvar_id,
            'updated_at': datetime.now(),
        }
    }
    return self.clinvar_submission_collection.find_one_and_update(
        {'_id': ObjectId(submission_id)},
        update,
        upsert=True,
        return_document=pymongo.ReturnDocument.AFTER,
    )
|
saves an official clinvar submission ID in a clinvar submission object
Args:
clinvar_id(str): a string with a format: SUB[0-9]. It is obtained from clinvar portal when starting a new submission
submission_id(str): id of the submission to be updated
Returns:
updated_submission(obj): a clinvar submission object, updated
|
juraj-google-style
|
def shutdown(self, wait=True):
    """Shut down the worker message handler and scheduler threads.

    Args:
        wait: if True, block until both threads have shut down;
            if False, signal them to stop and return immediately.

    Returns:
        None
    """
    threads = (self.scheduler_thread, self.worker_message_handler_thread)
    for thread in threads:
        thread.stop()
    if not wait:
        return
    for thread in threads:
        thread.join()
|
Shut down the worker message handler and scheduler threads.
Args:
wait: If true, block until both threads have successfully shut down. If False, return immediately.
Returns: None
|
juraj-google-style
|
def sg_min(tensor, opt):
    r"""Compute the minimum of elements across an axis of a tensor.

    Thin wrapper around ``tf.reduce_min()``.

    Args:
        tensor: A `Tensor` (automatically given by chain).
        opt: Option container providing ``axis`` (int or tuple/list of
            ints), ``keep_dims`` (bool) and ``name`` (str or None).

    Returns:
        A `Tensor` holding the reduced minimum.
    """
    kwargs = dict(axis=opt.axis, keep_dims=opt.keep_dims, name=opt.name)
    return tf.reduce_min(tensor, **kwargs)
|
r"""Computes the minimum of elements across axis of a tensor.
See `tf.reduce_min()` in tensorflow.
Args:
tensor: A `Tensor` (automatically given by chain).
opt:
axis : A tuple/list of integers or an integer. The axis to reduce.
keep_dims: If true, retains reduced dimensions with length 1.
name: If provided, replace current tensor's name.
Returns:
A `Tensor`.
|
codesearchnet
|
def get_tabular_rows(self, url, dict_rows=False, **kwargs):
    """Get an iterator for reading rows from tabular data at ``url``.

    Args:
        url (str): URL to download.
        dict_rows (bool): return each row as a dict (requires the
            ``headers`` kwarg) instead of a list. Defaults to False.
        **kwargs: forwarded to ``get_tabular_stream`` (``headers``,
            ``file_type``, ``delimiter``, ...).

    Returns:
        Iterator[Union[List, Dict]]: one list or dict per row.
    """
    stream = self.get_tabular_stream(url, **kwargs)
    return stream.iter(keyed=dict_rows)
|
Get iterator for reading rows from tabular data. Each row is returned as a dictionary.
Args:
url (str): URL to download
dict_rows (bool): Return dict (requires headers parameter) or list for each row. Defaults to False (list).
**kwargs:
headers (Union[int, List[int], List[str]]): Number of row(s) containing headers or list of headers
file_type (Optional[str]): Type of file. Defaults to inferring.
delimiter (Optional[str]): Delimiter used for values in each row. Defaults to inferring.
Returns:
Iterator[Union[List,Dict]]: Iterator where each row is returned as a list or dictionary.
|
codesearchnet
|
def get(account_id, account_type_id=None):
    """Return an account by ID (or name) and optional type.

    Args:
        account_id (int or str): unique account identifier; a string is
            treated as the account name.
        account_type_id (str): optional type of account to get.

    Returns:
        Account: the matching Account object, or None if not found.
    """
    # isinstance instead of type() == str so str subclasses also match
    if isinstance(account_id, str):
        args = {'account_name': account_id}
    else:
        args = {'account_id': account_id}
    if account_type_id:
        args['account_type_id'] = account_type_id
    return db.Account.find_one(**args)
|
Return account by ID and type
Args:
account_id (`int`, `str`): Unique Account identifier
account_type_id (str): Type of account to get
Returns:
:obj:`Account`: Returns an Account object if found, else None
|
codesearchnet
|
def decrypt(self, cipherText):
    """Decrypt an arbitrary-length block of data.

    The input is processed in ``BLOCK_SIZE`` chunks; a short final chunk
    is zero-padded before decryption, so the result is always a whole
    number of blocks (trailing NULs can be stripped with
    ``.rstrip('\\x00')``).

    Args:
        cipherText (str): data to decrypt.

    Returns:
        str: decrypted data, a multiple of BLOCK_SIZE in length.
    """
    # collect blocks and join once instead of quadratic += concatenation
    blocks = []
    for index in range(0, len(cipherText), BLOCK_SIZE):
        block = cipherText[index:index + BLOCK_SIZE]
        if len(block) < BLOCK_SIZE:
            block = zero_pad(block, BLOCK_SIZE)
        blocks.append(self.decrypt_block(block))
    return ''.join(blocks)
|
Decrypt an arbitrary-length block of data.
NOTE: This function formerly worked only on 16-byte blocks of `cipherText`.
Code that assumed this behavior will still work fine, but can optionally be
modified to call `decrypt_block` instead.
Args:
cipherText (str): data to decrypt. If the data is not a multiple of 16
bytes long, it will be padded with null (0x00) bytes until it is.
WARNING: this should almost certainly never need to happen for
correctly-encrypted data.
Returns:
decrypted data. Note that this will always be a multiple of 16 bytes
long. If the original data was not a multiple of 16 bytes, the
result will contain trailing null bytes, which can be removed with
`.rstrip('\x00')`
|
codesearchnet
|
def tensor_float_32_execution_enabled():
    """Return whether TensorFloat-32 execution is currently enabled.

    TensorFloat-32 is enabled by default; it can be changed with
    `tf.config.experimental.enable_tensor_float_32_execution`.

    Returns:
        True if TensorFloat-32 is enabled (the default), False otherwise.
    """
    enabled = _pywrap_tensor_float_32_execution.is_enabled()
    return enabled
|
Returns whether TensorFloat-32 is enabled.
By default, TensorFloat-32 is enabled, but this can be changed with
`tf.config.experimental.enable_tensor_float_32_execution`.
Returns:
True if TensorFloat-32 is enabled (the default) and False otherwise
|
github-repos
|
def resolve(self, sourcepath, paths, library_paths=None):
    """Resolve import paths against a source file's directory and libraries.

    Paths from import rules are resolved from the directory of the
    source file where the rules were found, then from ``library_paths``.
    An ambiguous rule (two candidate files) raises an error.

    Args:
        sourcepath (str): source file path; its directory anchors the
            resolution. Should be absolute to avoid resolution errors.
        paths (list): relative paths (from ``sourcepath``) to resolve.
        library_paths (list): directory paths for libraries, tried when
            the base source path fails. Defaults to None.

    Raises:
        UnclearResolution: if two files are candidates for one rule.
        UnresolvablePath: if a path does not exist and
            ``STRICT_PATH_VALIDATION`` is True.

    Returns:
        list: list of resolved paths.
    """
    basedir, _ = os.path.split(sourcepath)
    basepaths = [basedir]
    # NOTE: the compound condition is kept as-is — a string already present
    # in basepaths falls through to the elif and is iterated character-wise,
    # exactly as before.
    if (library_paths and isinstance(library_paths, string_types)
            and library_paths not in basepaths):
        basepaths.append(library_paths)
    elif library_paths:
        for libdir in list(library_paths):
            if libdir not in basepaths:
                basepaths.append(libdir)

    resolved = []
    for import_rule in paths:
        candidates = self.candidate_paths(import_rule)
        matches = []
        for basepath in basepaths:
            found = self.check_candidate_exists(basepath, candidates)
            if found:
                matches.extend(found)
        if len(matches) > 1:
            raise UnclearResolution("rule '{}' This is not clear for these paths: {}".format(import_rule, ', '.join(matches)))
        if len(matches) == 1:
            resolved.append(os.path.normpath(matches[0]))
        elif self.STRICT_PATH_VALIDATION:
            raise UnresolvablePath("Imported path '{}' does not exist in '{}'".format(import_rule, basedir))
    return resolved
|
Resolve given paths from given base paths
Return resolved path list.
Note:
The resolving strategy mirrors libsass: paths in import rules are
resolved relative to the source file where the import rules were found.
If an import rule is not explicit enough and two files are candidates
for the same rule, an error is raised. But contrary to libsass, this
also happens for files from the given libraries in ``library_paths``
(as opposed to libsass just silently taking the first candidate).
Args:
sourcepath (str): Source file path, its directory is used to
resolve given paths. The path must be an absolute path to
avoid errors on resolving.
paths (list): Relative paths (from ``sourcepath``) to resolve.
library_paths (list): List of directory paths for libraries to
resolve paths if resolving fails on the base source path.
Default to None.
Raises:
UnresolvablePath: If a path does not exist and
``STRICT_PATH_VALIDATION`` attribute is ``True``.
Returns:
list: List of resolved path.
|
codesearchnet
|
def capability_installed(name, source=None, limit_access=False, image=None, restart=False):
    """Salt state ensuring a DISM capability is installed.

    Args:
        name (str): the capability to install (see
            ``dism.available_capabilities`` for valid names).
        source (str): optional source of the capability.
        limit_access (bool): prevent DISM from contacting Windows Update
            for online images.
        image (Optional[str]): root directory of an offline Windows
            image; None targets the running OS. Default is None.
        restart (Optional[bool]): reboot the machine if required.

    Returns:
        dict: standard Salt state return dictionary.
    """
    ret = {'name': name,
           'result': True,
           'comment': '',
           'changes': {}}

    old = __salt__['dism.installed_capabilities']()
    if name in old:
        ret['comment'] = 'The capability {0} is already installed'.format(name)
        return ret

    if __opts__['test']:
        ret['result'] = None
        ret['changes']['capability'] = '{0} will be installed'.format(name)
        return ret

    status = __salt__['dism.add_capability'](name, source, limit_access, image, restart)
    # NOTE(review): 1641/3010 are presumably reboot-required success codes —
    # confirm against the dism module.
    if status['retcode'] not in [0, 1641, 3010]:
        ret['result'] = False
        ret['comment'] = 'Failed to install {0}: {1}'.format(name, status['stdout'])

    new = __salt__['dism.installed_capabilities']()
    changes = salt.utils.data.compare_lists(old, new)
    if changes:
        ret['comment'] = 'Installed {0}'.format(name)
        ret['changes'] = status
        ret['changes']['capability'] = changes
    return ret
|
Install a DISM capability
Args:
name (str): The capability to install
source (str): The optional source of the capability
limit_access (bool): Prevent DISM from contacting Windows Update for
online images
image (Optional[str]): The path to the root directory of an offline
Windows image. If `None` is passed, the running operating system is
targeted. Default is None.
restart (Optional[bool]): Reboot the machine if required by the install
Example:
Run ``dism.available_capabilities`` to get a list of available
capabilities. This will help you get the proper name to use.
.. code-block:: yaml
install_dotnet35:
dism.capability_installed:
- name: NetFX3~~~~
|
codesearchnet
|
def observe(self, value):
    """Sample an observation's value.

    The value is appended to the internal buffer; once the buffer holds
    ``_BUFFER_SIZE`` entries it is flushed.

    Args:
        value: a numeric value signifying the value to be sampled.
    """
    buf = self._buffer
    buf.append(value)
    if len(buf) == _BUFFER_SIZE:
        self._flush()
|
Samples an observation's value.
Args:
value: A numeric value signifying the value to be sampled.
|
juraj-google-style
|
def write(self, originalPrefix, newPrefix=None):
    """Render this project card as a line of project-file text.

    Args:
        originalPrefix (str): original name given to files following the
            project naming convention (e.g. ``prefix.gag``).
        newPrefix (str, optional): replacement prefix; when given, any
            occurrence of ``originalPrefix`` in the value is rewritten.
            Defaults to None.

    Returns:
        str: card and value as written to the project file.
    """
    if self.value is None:
        return '%s\n' % self.name
    if self.name == 'WMS':
        # WMS cards use a single space separator instead of column padding.
        return '%s %s\n' % (self.name, self.value)
    padding = ' ' * max(2, 25 - len(self.name))
    value = self.value
    if newPrefix is not None and originalPrefix in value:
        value = value.replace(originalPrefix, newPrefix)
    return '%s%s%s\n' % (self.name, padding, value)
|
Write project card to string.
Args:
originalPrefix (str): Original name to give to files that follow the project naming convention
(e.g: prefix.gag).
newPrefix (str, optional): If new prefix is desired, pass in this parameter. Defaults to None.
Returns:
str: Card and value as they would be written to the project file.
|
juraj-google-style
|
def get_num_image_channels(module_or_spec, signature=None, input_name=None):
    """Return the expected num_channels dimension of an image input.

    For advanced users handling modules whose image inputs may not have
    the usual 3 RGB channels.

    Args:
        module_or_spec: a Module or ModuleSpec that accepts image inputs.
        signature: key of the signature in question; None means the
            default signature.
        input_name: name of the image input; None means the conventional
            ``images`` input of the default signature.

    Returns:
        int: the number of input channels to the module.

    Raises:
        ValueError: if the channel information is missing or malformed.
    """
    if input_name is None:
        input_name = 'images'
    input_info_dict = module_or_spec.get_input_info_dict(signature)
    try:
        shape = input_info_dict[input_name].get_shape()
    except KeyError:
        raise ValueError("Module is missing input '%s' in signature '%s'." % (input_name, signature or 'default'))
    try:
        _, _, _, num_channels = shape.as_list()
    except ValueError:
        # wrong rank: fall through to the shared error below
        num_channels = None
    if num_channels is None:
        raise ValueError('Shape of module input is %s, expected [batch_size, height, width, num_channels] with known num_channels' % shape)
    return num_channels
|
Returns expected num_channels dimensions of an image input.
This is for advanced users only who expect to handle modules with
image inputs that might not have the 3 usual RGB channels.
Args:
module_or_spec: a Module or ModuleSpec that accepts image inputs.
signature: a string with the key of the signature in question.
If None, the default signature is used.
input_name: a string with the input name for images. If None, the
conventional input name `images` for the default signature is used.
Returns:
An integer with the number of input channels to the module.
Raises:
ValueError: If the channel information is missing or malformed.
|
codesearchnet
|
def slice_batch_indices(indices):
    """Convert a Tensor of indices into a Dataset of batched indices.

    Reshaping the first ``num_full_batches * batch_size`` indices into
    full batches (and handling the final partial batch separately) gives
    a much more favorable memory access pattern than slicing per batch.

    NOTE(review): this is a closure — ``num_full_batches``, ``batch_size``
    and ``self`` come from the enclosing scope, which is not visible here.

    Args:
        indices: Tensor which determines the data order for an entire
            epoch.

    Returns:
        A Dataset of batched indices.
    """
    num_in_full_batch = num_full_batches * batch_size
    # Take the indices belonging to complete batches and reshape them so
    # each dataset element is one full batch of indices.
    first_k_indices = tf.slice(indices, [0], [num_in_full_batch])
    first_k_indices = tf.reshape(first_k_indices, [num_full_batches, batch_size])
    flat_dataset = tf.data.Dataset.from_tensor_slices(first_k_indices)
    if self._partial_batch_size:
        # Append the leftover indices as a single, smaller final batch.
        index_remainder = tf.data.Dataset.from_tensors(tf.slice(indices, [num_in_full_batch], [self._partial_batch_size]))
        flat_dataset = flat_dataset.concatenate(index_remainder)
    return flat_dataset
|
Convert a Tensor of indices into a dataset of batched indices.
This step can be accomplished in several ways. The most natural is
to slice the Tensor in a Dataset map. (With a condition on the upper
index to handle the partial batch.) However it turns out that
coercing the Tensor into a shape which is divisible by the batch
size (and handling the last partial batch separately) allows for a
much more favorable memory access pattern and improved performance.
Args:
indices: Tensor which determines the data order for an entire
epoch.
Returns:
A Dataset of batched indices.
|
github-repos
|
def save_aggregate_reports_to_splunk(self, aggregate_reports):
    """Save aggregate DMARC reports to Splunk via the HTTP Event Collector.

    Args:
        aggregate_reports: a list of aggregate report dictionaries (a
            single dictionary is also accepted) to save in Splunk.

    Raises:
        SplunkError: if the HEC request fails or Splunk reports an error.
    """
    logger.debug("Saving aggregate reports to Splunk")
    # isinstance instead of type() == dict so dict subclasses also work
    if isinstance(aggregate_reports, dict):
        aggregate_reports = [aggregate_reports]
    if len(aggregate_reports) < 1:
        return
    data = self._common_data.copy()
    # collect one serialized event per record and join once, instead of
    # quadratic string += in the loop
    events = []
    for report in aggregate_reports:
        for record in report["records"]:
            # flatten report metadata + per-record fields into one event
            new_report = dict()
            for metadata in report["report_metadata"]:
                new_report[metadata] = report["report_metadata"][metadata]
            new_report["published_policy"] = report["policy_published"]
            new_report["source_ip_address"] = record["source"]["ip_address"]
            new_report["source_country"] = record["source"]["country"]
            new_report["source_reverse_dns"] = record["source"]["reverse_dns"]
            new_report["source_base_domain"] = record["source"]["base_domain"]
            new_report["message_count"] = record["count"]
            new_report["disposition"] = record["policy_evaluated"]["disposition"]
            new_report["spf_aligned"] = record["alignment"]["spf"]
            new_report["dkim_aligned"] = record["alignment"]["dkim"]
            new_report["passed_dmarc"] = record["alignment"]["dmarc"]
            new_report["header_from"] = record["identifiers"]["header_from"]
            new_report["envelope_from"] = record["identifiers"]["envelope_from"]
            if "dkim" in record["auth_results"]:
                new_report["dkim_results"] = record["auth_results"]["dkim"]
            if "spf" in record["auth_results"]:
                new_report["spf_results"] = record["auth_results"]["spf"]
            data["sourcetype"] = "dmarc:aggregate"
            timestamp = human_timestamp_to_timestamp(new_report["begin_date"])
            data["time"] = timestamp
            # copy so later iterations don't mutate already-queued events
            data["event"] = new_report.copy()
            events.append("{0}\n".format(json.dumps(data)))
    json_str = "".join(events)
    if not self.session.verify:
        logger.debug("Skipping certificate verification for Splunk HEC")
    try:
        response = self.session.post(self.url, data=json_str,
                                     timeout=self.timeout)
        response = response.json()
    except Exception as e:
        raise SplunkError(e.__str__())
    if response["code"] != 0:
        raise SplunkError(response["text"])
|
Saves aggregate DMARC reports to Splunk
Args:
aggregate_reports: A list of aggregate report dictionaries
to save in Splunk
|
juraj-google-style
|
def IoU(cm, ignore_index=None):
    """Calculate Intersection over Union from a confusion matrix metric.

    Args:
        cm (ConfusionMatrix): instance of the confusion matrix metric.
        ignore_index (int, optional): class index to drop from the
            result, e.g. a background index.

    Returns:
        MetricsLambda: metric computing the per-class IoU vector.
    """
    if not isinstance(cm, ConfusionMatrix):
        raise TypeError('Argument cm should be instance of ConfusionMatrix, but given {}'.format(type(cm)))
    if ignore_index is not None:
        index_ok = isinstance(ignore_index, numbers.Integral) and 0 <= ignore_index < cm.num_classes
        if not index_ok:
            raise ValueError('ignore_index should be non-negative integer, but given {}'.format(ignore_index))
    cm = cm.type(torch.float64)
    intersection = cm.diag()
    union = cm.sum(dim=1) + cm.sum(dim=0) - intersection
    # epsilon avoids division by zero for classes absent from both axes
    iou = intersection / (union + 1e-15)
    if ignore_index is None:
        return iou

    def ignore_index_fn(iou_vector):
        if ignore_index >= len(iou_vector):
            raise ValueError('ignore_index {} is larger than the length of IoU vector {}'.format(ignore_index, len(iou_vector)))
        indices = list(range(len(iou_vector)))
        indices.remove(ignore_index)
        return iou_vector[indices]

    return MetricsLambda(ignore_index_fn, iou)
|
Calculates Intersection over Union
Args:
cm (ConfusionMatrix): instance of confusion matrix metric
ignore_index (int, optional): index to ignore, e.g. background index
Returns:
MetricsLambda
Examples:
.. code-block:: python
train_evaluator = ...
cm = ConfusionMatrix(num_classes=num_classes)
IoU(cm, ignore_index=0).attach(train_evaluator, 'IoU')
state = train_evaluator.run(train_dataset)
# state.metrics['IoU'] -> tensor of shape (num_classes - 1, )
|
codesearchnet
|
def _EuclidianDistances(self, slist):
    """Calculate approximate euclidian distances between adjacent stops.

    Uses the stops' long/lat to approximate distances between stations,
    building the y-coordinates for the horizontal lines in the graph.

    Args:
        slist: list of transitfeed ``Stop`` objects.

    Returns:
        list: one approximate distance per consecutive pair of stops,
        e.g. ``[0, 33, 140, ..., X]``.
    """
    # FIX: builtin zip instead of itertools.izip, which does not exist on
    # Python 3; semantics here are identical.
    return [transitfeed.ApproximateDistanceBetweenStops(stop, tail)
            for stop, tail in zip(slist, slist[1:])]
|
Calculate euclidian distances between stops.
Uses the stoplists long/lats to approximate distances
between stations and build a list with y-coordinates for the
horizontal lines in the graph.
Args:
# Class Stop is defined in transitfeed.py
slist: [Stop, Stop, ...]
Returns:
# One integer for each pair of stations
# indicating the approximate distance
[0,33,140, ... ,X]
|
juraj-google-style
|
def from_json(cls, fh):
    """Build an instance from JSON text or an open file handle.

    Args:
        fh (str or file): JSON string, or file handle to load from.

    Returns:
        An instance of ``cls`` wrapping the parsed data.
    """
    if isinstance(fh, str):
        payload = json.loads(fh)
    else:
        payload = json.load(fh)
    return cls(payload)
|
Load json from file handle.
Args:
fh (file): File handle to load from.
Example:
>>> with open('data.json', 'r') as json:
>>> data = composite.load(json)
|
juraj-google-style
|
def convert(cls, content, input_format, output_format):
    """Convert transcript ``content`` between 'srt' and 'sjson' formats.

    Args:
        content: transcript content byte-stream.
        input_format (str): input transcript format ('srt' or 'sjson').
        output_format (str): output transcript format ('srt' or 'sjson').

    Returns:
        str: the converted transcript text.

    Raises:
        TranscriptsGenerationException: on invalid srt content when
            converting from srt to sjson.
    """
    assert input_format in ('srt', 'sjson')
    assert output_format in ('srt', 'sjson')
    # utf-8-sig strips a BOM if present
    content = content.decode('utf-8-sig')
    if input_format == output_format:
        return content
    if input_format == 'srt' and output_format == 'sjson':
        try:
            srt_subs = SubRipFile.from_string(content, error_handling=SubRipFile.ERROR_RAISE)
        except Error as ex:
            raise TranscriptsGenerationException(text_type(ex))
        return json.dumps(cls.generate_sjson_from_srt(srt_subs))
    if input_format == 'sjson' and output_format == 'srt':
        return cls.generate_srt_from_sjson(json.loads(content))
|
Convert transcript `content` from `input_format` to `output_format`.
Arguments:
content: Transcript content byte-stream.
input_format: Input transcript format.
output_format: Output transcript format.
Accepted input formats: sjson, srt.
Accepted output format: srt, sjson.
Raises:
TranscriptsGenerationException: On parsing the invalid srt
content during conversion from srt to sjson.
|
juraj-google-style
|
def write(self, fb):
    """Write a single function benchmark to ``self.file``.

    Args:
        fb (FunctionBenchmark): FunctionBenchmark instance; call
            ``fb.benchmark()`` before passing it here.
    """
    out = self.file
    print('[{}.{}]'.format(fb.module, fb.func.__name__), file=out)
    print('class = {}'.format(fb.func_ins.name), file=out)
    print('inspecs = {}'.format(repr(fb.inspecs)), file=out)
    print('func_args = {}'.format(repr(fb.func_args)), file=out)
    print('func_kwargs = {}'.format(repr(fb.func_kwargs)), file=out)
    print('ext = ({}, {})'.format(repr(fb.ext), repr(fb.ext_kwargs)), file=out)
    if self.setup_stat is not None:
        self._write_a_stat('setup', self.setup_stat)
    # BUG FIX: the guard previously tested the misspelled attribute
    # `self.foward_stat` while writing `self.forward_stat`.
    if self.forward_stat is not None:
        self._write_a_stat('forward', self.forward_stat)
    if self.backward_stat is not None:
        self._write_a_stat('backward', self.backward_stat)
|
Write a single function benchmark.
Args:
fb (FunctionBenchmark): FunctionBenchmark class instance.
Before passing to this, you should call ``fb.benchmark()``.
|
codesearchnet
|
def SetSerializersProfiler(self, serializers_profiler):
    """Set the serializers profiler, propagating it to an open storage file.

    Args:
        serializers_profiler (SerializersProfiler): serializers profiler.
    """
    self._serializers_profiler = serializers_profiler
    storage_file = self._storage_file
    if storage_file:
        storage_file.SetSerializersProfiler(serializers_profiler)
|
Sets the serializers profiler.
Args:
serializers_profiler (SerializersProfiler): serializers profiler.
|
juraj-google-style
|
def flatten(dictionary, separator='.', prefix=''):
    """Flatten a nested dictionary, joining nested keys with ``separator``.

    Mappings nested inside lists are flattened in place within the list.
    Key conflicts after flattening clobber earlier values.

    Args:
        dictionary (dict): the dictionary to be flattened.
        separator (str): separator between joined keys (default '.').
        prefix (str): key prefix, used for recursive calls.

    Returns:
        dict: the flattened dictionary.
    """
    # FIX: collections.MutableMapping was removed in Python 3.10; the
    # abc submodule is the compatible home for it.
    from collections.abc import MutableMapping

    new_dict = {}
    for key, value in dictionary.items():
        new_key = prefix + separator + key if prefix else key
        if isinstance(value, MutableMapping):
            new_dict.update(flatten(value, separator, new_key))
        elif isinstance(value, list):
            new_value = []
            for item in value:
                if isinstance(item, MutableMapping):
                    new_value.append(flatten(item, separator, new_key))
                else:
                    new_value.append(item)
            new_dict[new_key] = new_value
        else:
            new_dict[new_key] = value
    return new_dict
|
Flatten the dictionary keys are separated by separator
Arguments:
dictionary {dict} -- The dictionary to be flattened.
Keyword Arguments:
separator {str} -- The separator to use (default is '.'). It will
crush items with key conflicts.
prefix {str} -- Used for recursive calls.
Returns:
dict -- The flattened dictionary.
|
codesearchnet
|
def addRow(self, *value):
    """Add a row to the DataFrame.

    Args:
        value: a single tuple/list containing all the values for the row,
            or multiple arguments with the value for each column. The
            number of values must equal the number of columns.
    """
    if len(value) == 1 and isinstance(value[0], (tuple, list)):
        (value,) = value
    assert len(value) == self.getNumCols()
    self._impl.addRow(Tuple(value)._impl)
|
Add a row to the DataFrame. The size of the tuple must be equal to the
total number of columns in the dataframe.
Args:
value: A single argument with a tuple containing all the values
for the row to be added, or multiple arguments with the values for
each column.
|
juraj-google-style
|
def is_param_method(obj, has_deps=False):
    """Whether the object is a method on a parameterized object.

    Args:
        obj: object to check.
        has_deps (bool, optional): also require that the method was
            annotated with ``param.depends``.

    Returns:
        Whether the object is a method on a Parameterized object and, if
        ``has_deps``, whether it declares any dependencies.
    """
    if not inspect.ismethod(obj):
        return False
    parameterized = isinstance(get_method_owner(obj), param.Parameterized)
    if parameterized and has_deps:
        return getattr(obj, "_dinfo", {}).get('dependencies')
    return parameterized
|
Whether the object is a method on a parameterized object.
Args:
obj: Object to check
has_deps (boolean, optional): Check for dependencies
Whether to also check whether the method has been annotated
with param.depends
Returns:
A boolean value indicating whether the object is a method
on a Parameterized object and if enabled whether it has any
dependencies
|
juraj-google-style
|
def insert(self, key, value, name=None):
    """Associate ``key`` with ``value`` in the hash table.

    Args:
        key: scalar key to insert.
        value: scalar value to be associated with the key.
        name: a name for the operation (optional).

    Returns:
        The created Operation.

    Raises:
        TypeError: when ``key`` or ``value`` doesn't match the table
            data types.
    """
    scope_name = name or '%s_lookup_table_insert' % self._name
    with tf.name_scope(scope_name):
        key_tensor = tf.convert_to_tensor(key, self._key_dtype, name='key')
        value_tensor = tf.convert_to_tensor(value, self._value_dtype, name='value')
        return gen_simple_hash_table_op.examples_simple_hash_table_insert(self.resource_handle, key_tensor, value_tensor)
|
Associates `key` with `value`.
Args:
key: Scalar key to insert.
value: Scalar value to be associated with key.
name: A name for the operation (optional).
Returns:
The created Operation.
Raises:
TypeError: when `key` or `value` doesn't match the table data
types.
|
github-repos
|
def is_hermitian(matrix: np.ndarray, *, rtol: float=1e-05, atol: float=1e-08) -> bool:
    """Determine whether a matrix is approximately Hermitian.

    A matrix is Hermitian if it is square and equal to its own adjoint
    (conjugate transpose).

    Args:
        matrix: the matrix to check.
        rtol: per-matrix-entry relative tolerance on equality.
        atol: per-matrix-entry absolute tolerance on equality.

    Returns:
        Whether the matrix is Hermitian within the given tolerance.
    """
    if matrix.shape[0] != matrix.shape[1]:
        return False
    adjoint = np.conj(matrix.T)
    return np.allclose(matrix, adjoint, rtol=rtol, atol=atol)
|
Determines if a matrix is approximately Hermitian.
A matrix is Hermitian if it's square and equal to its adjoint.
Args:
matrix: The matrix to check.
rtol: The per-matrix-entry relative tolerance on equality.
atol: The per-matrix-entry absolute tolerance on equality.
Returns:
Whether the matrix is Hermitian within the given tolerance.
|
codesearchnet
|
def Get(self, request, global_params=None):
    """Return details of a `WorkerPool`.

    Args:
        request: (CloudbuildProjectsLocationsWorkerPoolsGetRequest) input
            message.
        global_params: (StandardQueryParameters, default: None) global
            arguments.

    Returns:
        (WorkerPool) The response message.
    """
    method_config = self.GetMethodConfig('Get')
    return self._RunMethod(method_config, request, global_params=global_params)
|
Returns details of a `WorkerPool`.
Args:
request: (CloudbuildProjectsLocationsWorkerPoolsGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(WorkerPool) The response message.
|
github-repos
|
def rh45(msg):
    """Decode radio height from a message.

    Args:
        msg (str): 28-byte hexadecimal message string.

    Returns:
        int: radio height in ft, or None when the status bit is not set.
    """
    bits = hex2bin(data(msg))
    # status bit for the radio-height field
    if bits[38] == '0':
        return None
    # 12-bit field with an LSB of 16 ft
    return bin2int(bits[39:51]) * 16
|
Radio height.
Args:
msg (String): 28 bytes hexadecimal message string
Returns:
int: radio height in ft
|
juraj-google-style
|
def local_predict(training_dir, data):
    """Run local prediction on the prediction graph.

    Runs local prediction and returns the result in a Pandas DataFrame.
    For prediction on a large dataset or to save the results, use
    local_batch_prediction or batch_prediction. Input data should fully
    match the schema used at training, minus the target column.

    Args:
        training_dir: local path to the trained output folder; must
            contain a ``model`` subfolder.
        data: list of csv strings, or a Pandas DataFrame, matching the
            model schema.

    Returns:
        pandas.DataFrame with the prediction results.

    Raises:
        ValueError: if training_dir does not contain the folder 'model'.
        FileNotFoundError: if the prediction results are not found.
    """
    from .prediction import predict as predict_module
    tmp_dir = tempfile.mkdtemp()
    # Write the input data to a temporary csv file for the predictor.
    (_, input_file_path) = tempfile.mkstemp(dir=tmp_dir, suffix='.csv', prefix='input')
    try:
        if isinstance(data, pd.DataFrame):
            data.to_csv(input_file_path, header=False, index=False)
        else:
            with open(input_file_path, 'w') as f:
                for line in data:
                    f.write((line + '\n'))
        model_dir = os.path.join(training_dir, 'model')
        if (not file_io.file_exists(model_dir)):
            raise ValueError('training_dir should contain the folder model')
        cmd = ['predict.py', ('--predict-data=%s' % input_file_path), ('--trained-model-dir=%s' % model_dir), ('--output-dir=%s' % tmp_dir), '--output-format=csv', '--batch-size=16', '--mode=prediction', '--no-shard-files']
        runner_results = predict_module.main(cmd)
        runner_results.wait_until_finish()
        # Column names for the output come from the generated schema file.
        schema_file = os.path.join(tmp_dir, 'csv_schema.json')
        with open(schema_file, 'r') as f:
            schema = json.loads(f.read())
        # Surface (but do not raise on) per-row prediction errors.
        errors_file = glob.glob(os.path.join(tmp_dir, 'errors*'))
        if (errors_file and (os.path.getsize(errors_file[0]) > 0)):
            print('Warning: there are errors. See below:')
            with open(errors_file[0], 'r') as f:
                text = f.read()
            print(text)
        prediction_file = glob.glob(os.path.join(tmp_dir, 'predictions*'))
        if (not prediction_file):
            raise FileNotFoundError('Prediction results not found')
        predictions = pd.read_csv(prediction_file[0], header=None, names=[col['name'] for col in schema])
        return predictions
    finally:
        # Always clean up the scratch directory.
        shutil.rmtree(tmp_dir)
|
Runs local prediction on the prediction graph.
Runs local prediction and returns the result in a Pandas DataFrame. For
running prediction on a large dataset or saving the results, run
local_batch_prediction or batch_prediction. Input data should fully match
the schema that was used at training, except the target column should not
exist.
Args:
training_dir: local path to the trained output folder.
data: List of csv strings or a Pandas DataFrame that match the model schema.
Raises:
ValueError: if training_dir does not contain the folder 'model'.
FileNotFoundError: if the prediction data is not found.
|
codesearchnet
|
def available_writers(as_dict=False):
    """List writers available in the current configuration.

    Args:
        as_dict (bool): optionally return full writer information
            dictionaries instead of just names. Default: False.

    Returns:
        list: available writer names, or dictionaries with additional
        writer information when ``as_dict`` is True.
    """
    writers = []
    for writer_configs in configs_for_writer():
        try:
            writer_info = read_writer_config(writer_configs)
        except (KeyError, IOError, yaml.YAMLError):
            # skip unreadable/invalid configs but leave a trace in the logs
            LOG.warning("Could not import writer config from: %s", writer_configs)
            LOG.debug("Error loading YAML", exc_info=True)
            continue
        entry = writer_info if as_dict else writer_info['name']
        writers.append(entry)
    return writers
|
Available writers based on current configuration.
Args:
as_dict (bool): Optionally return writer information as a dictionary.
Default: False
Returns: List of available writer names. If `as_dict` is `True` then
a list of dictionaries including additionally writer information
is returned.
|
juraj-google-style
|
def compose_tree_url(tree, issn_url=False):
    """Compose a full URL for ``tree`` with protocol, server address and port.

    Args:
        tree (obj): :class:`.Tree` instance.
        issn_url (bool, default False): compose the URL using ISSN.

    Returns:
        str: URL of the tree.
    """
    url = compose_tree_path(tree, issn_url)
    # NOTE(review): the return statements were truncated in this copy of the
    # source ('return "%s:'). Reconstructed below from the surrounding
    # WEB_PORT logic: omit the port for the default HTTP port 80.
    # TODO: confirm the protocol/host constant names against the original module.
    if WEB_PORT == 80:
        return "%s://%s%s" % (PROTOCOL, WEB_ADDR, url)
    return "%s://%s:%d%s" % (PROTOCOL, WEB_ADDR, WEB_PORT, url)
|
Compose full url for given `tree`, with protocol, server's address and
port.
Args:
tree (obj): :class:`.Tree` instance.
issn_url (bool, default False): Compose URL using ISSN.
Returns:
str: URL of the tree
|
juraj-google-style
|
def _check_classes(var: 'cfg.Variable | None', check: 'Callable[[_base.BaseValue], bool]') -> bool:
    """Check whether the cls of each value in `var` passes `check`.

    Values that are themselves classes are checked directly; otherwise
    the value's class is checked when it is a distinct Class.

    Args:
        var: A cfg.Variable or empty.
        check: (BaseValue) -> bool predicate.

    Returns:
        bool: False for an empty/missing var or any failing check,
        otherwise True.
    """
    if not var:
        return False
    for value in var.data:
        if isinstance(value, class_mixin.Class):
            target = value
        elif isinstance(value.cls, class_mixin.Class) and value.cls != value:
            target = value.cls
        else:
            # non-class value with no distinct class: nothing to check
            continue
        if not check(target):
            return False
    return True
|
Check whether the cls of each value in `var` is a class and passes `check`.
Args:
var: A cfg.Variable or empty.
check: (BaseValue) -> bool.
Returns:
Whether the check passes.
|
github-repos
|
def __init__(self, path, mode):
    """Open a DB in file `path` in mode `mode`.

    Args:
        path: the path to the DB file.
        mode: the mode in which to open the DB; see the `Mode` enum.

    Raises:
        FileNotFoundError: if `mode` is `Mode.open` and `path` does not
            exist.
    """
    must_exist = mode == WorkDB.Mode.open
    if must_exist and not os.path.exists(path):
        raise FileNotFoundError('Requested file {} not found'.format(path))
    self._path = path
    self._conn = sqlite3.connect(path)
    self._init_db()
|
Open a DB in file `path` in mode `mode`.
Args:
path: The path to the DB file.
mode: The mode in which to open the DB. See the `Mode` enum for
details.
Raises:
FileNotFoundError: If `mode` is `Mode.open` and `path` does not
exist.
|
juraj-google-style
|
def ProduceEventSource(self, event_source):
    """Produce an event source into the storage writer.

    Args:
        event_source (EventSource): an event source.

    Raises:
        RuntimeError: when the storage writer is not set.
    """
    if not self._storage_writer:
        raise RuntimeError('Storage writer not set.')
    self._storage_writer.AddEventSource(event_source)
    self._number_of_event_sources += 1
    # record when this producer last did work, for activity monitoring
    self.last_activity_timestamp = time.time()
|
Produces an event source.
Args:
event_source (EventSource): an event source.
Raises:
RuntimeError: when storage writer is not set.
|
codesearchnet
|
def set_output_embeddings(self, value):
    """Set the model's output embeddings.

    Args:
        value (`tf.Variable`): the new weights mapping hidden states to
            vocabulary.
    """
    if self.get_lm_head() is None:
        return
    lm_head = self.get_lm_head()
    try:
        lm_head.set_output_embeddings(value)
    except AttributeError:
        # the head may not exist until the model is built; build and retry
        logger.info('Building the model')
        self.build_in_name_scope()
        lm_head.set_output_embeddings(value)
|
Set model's output embeddings
Args:
value (`tf.Variable`):
The new weights mapping hidden states to vocabulary.
|
github-repos
|
def slice_arrays(arrays, start=None, stop=None):
    """Slice an array, or each array in a list of arrays.

    Produces ``arrays[start:stop]`` for a single array-like, or
    ``[x[start:stop] for x in arrays]`` for a list. ``start`` may instead
    be a list/array of indices, in which case ``stop`` must be None.

    Args:
        arrays: single array or list of arrays (or None).
        start: integer start index, or a list/array of indices.
        stop: integer stop index; should be None when ``start`` is a list.

    Returns:
        A slice of the array(s).

    Raises:
        ValueError: if the value of start is a list and stop is not None.
    """
    if arrays is None:
        return [None]
    if isinstance(start, list) and stop is not None:
        raise ValueError('The stop argument has to be None if the value of start is a list.')
    if isinstance(arrays, list):
        if hasattr(start, '__len__'):
            # fancy indexing with a list/array of indices
            if hasattr(start, 'shape'):
                start = start.tolist()
            return [None if x is None else x[start] for x in arrays]
        sliced = []
        for x in arrays:
            if x is None or not hasattr(x, '__getitem__'):
                sliced.append(None)
            else:
                sliced.append(x[start:stop])
        return sliced
    if hasattr(start, '__len__'):
        if hasattr(start, 'shape'):
            start = start.tolist()
        return arrays[start]
    if hasattr(start, '__getitem__'):
        return arrays[start:stop]
    return [None]
|
Slice an array or list of arrays.
This takes an array-like, or a list of
array-likes, and outputs:
- arrays[start:stop] if `arrays` is an array-like
- [x[start:stop] for x in arrays] if `arrays` is a list
Can also work on list/array of indices: `slice_arrays(x, indices)`
Args:
arrays: Single array or list of arrays.
start: can be an integer index (start index) or a list/array of indices
stop: integer (stop index); should be None if `start` was a list.
Returns:
A slice of the array(s).
Raises:
ValueError: If the value of start is a list and stop is not None.
|
github-repos
|
def forward(self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor]=None, layer_head_mask: Optional[torch.Tensor]=None, past_key_value: Optional[Tuple[torch.Tensor]]=None, output_attentions: Optional[bool]=False, use_cache: Optional[bool]=True) -> torch.Tensor:
    """Run one pre-norm decoder layer: self-attention then feed-forward.

    Args:
        hidden_states (`torch.FloatTensor`): input to the layer of shape
            `(batch, seq_len, embed_dim)`.
        attention_mask (`torch.FloatTensor`): attention mask of size
            `(batch, 1, tgt_len, src_len)` where padding elements are
            indicated by very large negative values.
        layer_head_mask (`torch.FloatTensor`): mask for attention heads in
            a given layer of size `(attention_heads,)`.
        past_key_value (`Tuple(torch.FloatTensor)`): cached past key and
            value projection states.
        output_attentions (`bool`, *optional*): whether to also return the
            self-attention weights.
        use_cache (`bool`, *optional*): whether to also return the present
            key/value states for caching.

    Returns:
        Tuple of `hidden_states`, plus optionally the self-attention
        weights and the present key/value states.
    """
    # --- self-attention sub-block (layer norm before attention, then residual add) ---
    residual = hidden_states
    hidden_states = self.self_attn_layer_norm(hidden_states)
    # the first two entries of past_key_value are the self-attention cache
    self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
    hidden_states, self_attn_weights, present_key_value = self.self_attn(hidden_states=hidden_states, past_key_value=self_attn_past_key_value, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions)
    hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
    hidden_states = residual + hidden_states
    # --- feed-forward sub-block (layer norm before FFN, then residual add) ---
    residual = hidden_states
    hidden_states = self.final_layer_norm(hidden_states)
    hidden_states = self.activation_fn(self.fc1(hidden_states))
    hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
    hidden_states = self.fc2(hidden_states)
    hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
    hidden_states = residual + hidden_states
    outputs = (hidden_states,)
    if output_attentions:
        outputs += (self_attn_weights,)
    if use_cache:
        outputs += (present_key_value,)
    return outputs
|
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size `(attention_heads,)`.
past_key_value (`Tuple(torch.FloatTensor)`): cached past key and value projection states
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
|
github-repos
|
def _ReadSpecificationFile(self, path):
    """Reads the format specification file.

    Each non-empty, non-comment line has the form:
    ``identifier offset pattern``. Malformed lines are logged and skipped.

    Args:
        path (str): path of the format specification file.

    Returns:
        FormatSpecificationStore: format specification store.
    """
    specification_store = specification.FormatSpecificationStore()
    with io.open(path, 'rt', encoding=self._SPECIFICATION_FILE_ENCODING) as file_object:
        for line in file_object.readlines():
            line = line.strip()
            # Skip empty lines and comment lines (the comment-character test
            # was truncated in the previous revision, leaving invalid syntax).
            if not line or line.startswith('#'):
                continue
            try:
                identifier, offset, pattern = line.split()
            except ValueError:
                logger.error('[skipping] invalid line: {0:s}'.format(line))
                continue
            try:
                offset = int(offset, 10)
            except ValueError:
                logger.error('[skipping] invalid offset in line: {0:s}'.format(line))
                continue
            try:
                # Interpret escape sequences (e.g. \x89) in the raw pattern.
                pattern = codecs.escape_decode(pattern)[0]
            except ValueError:
                logger.error('[skipping] invalid pattern in line: {0:s}'.format(line))
                continue
            format_specification = specification.FormatSpecification(identifier)
            format_specification.AddNewSignature(pattern, offset=offset)
            specification_store.AddSpecification(format_specification)
    return specification_store
|
Reads the format specification file.
Args:
path (str): path of the format specification file.
Returns:
FormatSpecificationStore: format specification store.
|
codesearchnet
|
def with_organisation(self, organisation):
    """Add an organisation segment.

    Args:
        organisation (str): Official name of an administrative body holding
            an election; ``None`` is treated as an empty name.

    Returns:
        IdBuilder

    Raises:
        ValueError
    """
    # Normalize None to '' before slugifying, then validate the slug.
    slug = slugify('' if organisation is None else organisation)
    self._validate_organisation(slug)
    self.organisation = slug
    return self
|
Add an organisation segment.
Args:
organisation (str): Official name of an administrative body
holding an election.
Returns:
IdBuilder
Raises:
ValueError
|
codesearchnet
|
def streaming_client(self, tasks_regex, tasks_negate, workers_regex, workers_negate):
    """Connect a client to the streaming capture, filtering its events.

    Args:
        tasks_regex (str): pattern selecting which task names to capture,
            e.g. '^dispatch|^email', 'dispatch.*123456' or just '123456'.
        tasks_negate (bool): if True, capture tasks that do NOT match.
        workers_regex (str): pattern selecting worker names, e.g.
            'service|priority'.
        workers_negate (bool): if True, capture workers that do NOT match.

    Yields:
        The queue on which the filtered events are delivered.
    """
    client = CapturingClient(
        Queue(),
        re.compile(tasks_regex), tasks_negate,
        re.compile(workers_regex), workers_negate,
    )
    # Register for the duration of the with-block, then unregister.
    self.observers.append(client)
    yield client.queue
    self.observers.remove(client)
|
Connects a client to the streaming capture, filtering the events that are sent
to it.
Args:
tasks_regex (str): a pattern to filter tasks to capture.
ex.: '^dispatch|^email' to filter names starting with that
or 'dispatch.*123456' to filter that exact name and number
or even '123456' to filter that exact number anywhere.
tasks_negate (bool): if True, finds tasks that do not match criteria
workers_regex (str): a pattern to filter workers to capture.
ex.: 'service|priority' to filter names containing that
workers_negate (bool): if True, finds workers that do not match criteria
|
codesearchnet
|
def log_(
    message: str,
    logger: logging.Logger,
    level: str = "info",
    extra: Optional[Dict] = None,
    trim: bool = False,
) -> None:
    """Log a request or response on a single line.

    Args:
        message: JSON-RPC request or response string.
        logger: Logger to emit the entry on.
        level: Name of the logger method to use (e.g. "info", "debug").
        extra: More details to include in the log entry.
        trim: Abbreviate long log messages.
    """
    if extra is None:
        extra = {}
    if message:
        # Collapse the payload onto one line: strip newlines, squeeze double
        # spaces and tighten "{ " left over from pretty-printed JSON.
        # (The previous `.replace(" ", " ")` was a no-op — the first argument
        # had lost its second space.)
        message = message.replace("\n", "").replace("  ", " ").replace("{ ", "{")
    if trim:
        message = _trim_message(message)
    getattr(logger, level)(message, extra=extra)
|
Log a request or response
Args:
message: JSON-RPC request or response string.
level: Log level.
extra: More details to include in the log entry.
trim: Abbreviate log messages.
|
juraj-google-style
|
def save_and_return_nodes(obj, export_dir, signatures=None, options: save_options.SaveOptions=None, experimental_skip_checkpoint=False):
    """Saves a SavedModel while returning all saved nodes and their paths.

    Please see `tf.saved_model.save` for details.

    Args:
        obj: A trackable object to export.
        export_dir: A directory in which to write the SavedModel.
        signatures: A function or dictionary of functions to save in the
            SavedModel as signatures.
        options: `tf.saved_model.SaveOptions` object for configuring save options.
        experimental_skip_checkpoint: If set to `True`, the checkpoint will not
            be written.

    Returns:
        A tuple of (a list of saved nodes in the order they are serialized to
        the `SavedObjectGraph`, dictionary mapping nodes to one possible path
        from the root node to the key node).
    """
    options = options or save_options.SaveOptions()
    saved_model = saved_model_pb2.SavedModel()
    meta_graph_def = saved_model.meta_graphs.add()
    # Build the meta graph; also yields the exported graph, checkpoint saver,
    # asset info, and the flattened node list plus root-to-node paths.
    _, exported_graph, object_saver, asset_info, saved_nodes, node_paths = _build_meta_graph(obj, signatures, options, meta_graph_def)
    saved_model.saved_model_schema_version = constants.SAVED_MODEL_SCHEMA_VERSION
    if not experimental_skip_checkpoint:
        # Write the variables checkpoint alongside the SavedModel proto.
        path_helpers.get_or_create_variables_dir(export_dir)
        ckpt_options = checkpoint_options.CheckpointOptions(experimental_io_device=options.experimental_io_device, experimental_sharding_callback=options.experimental_sharding_callback)
        object_saver.save(path_helpers.get_variables_path(export_dir), options=ckpt_options)
    builder_impl.copy_assets_to_destination_dir(asset_info.asset_filename_map, export_dir)
    if context.executing_eagerly():
        try:
            # Ensure pending asynchronous (e.g. cross-device) writes finished.
            context.async_wait()
        except errors.NotFoundError as err:
            raise FileNotFoundError(f"{err}\n You may be trying to save on a different device from the computational device. Consider setting the `experimental_io_device` option in `tf.saved_model.SaveOptions` to the io_device such as '/job:localhost'.") from err
    pywrap_saved_model.Save(export_dir)
    if options.experimental_image_format:
        # Chunked "image" format: the proto splitter may emit multiple files.
        prefix = file_io.join(compat.as_str(export_dir), 'saved_model')
        proto_splitter.SavedModelSplitter(saved_model).write(prefix)
    else:
        # Classic single saved_model.pb file, written atomically.
        path = file_io.join(compat.as_str(export_dir), compat.as_str(constants.SAVED_MODEL_FILENAME_PB))
        file_io.atomic_write_string_to_file(path, saved_model.SerializeToString(deterministic=True))
    fingerprinting_utils.write_fingerprint(export_dir)
    if options.save_debug_info:
        _export_debug_info(exported_graph, export_dir)
    # Record the save path for metrics, then free the exported graph.
    metrics.SetWritePath(saved_model_path=str(export_dir))
    ops.dismantle_graph(exported_graph)
    return (saved_nodes, node_paths)
|
Saves a SavedModel while returning all saved nodes and their paths.
Please see `tf.saved_model.save` for details.
Args:
obj: A trackable object to export.
export_dir: A directory in which to write the SavedModel.
signatures: A function or dictionary of functions to save in the SavedModel
as signatures.
options: `tf.saved_model.SaveOptions` object for configuring save options.
experimental_skip_checkpoint: If set to `True`, the checkpoint will not be
written.
Returns:
A tuple of (a list of saved nodes in the order they are serialized to the
`SavedObjectGraph`, dictionary mapping nodes to one possible path from
the root node to the key node)
|
github-repos
|
def set_exception(self, exc_class, exc_info, exc_stack):
    """Record an exception as the result of this operation and finish it.

    Args:
        exc_class (object): The exception type.
        exc_info (object): The exception value or info.
        exc_stack (object): The stack/traceback associated with the exception.

    Raises:
        InternalError: If the response has already finished.
    """
    # Guard against double-completion: a finished response is immutable.
    if self.is_finished():
        raise InternalError(
            'set_exception called on finished AsynchronousResponse',
            result=self._result,
            exception=self._exception,
        )
    self._exception = (exc_class, exc_info, exc_stack)
    self.finish()
|
Set an exception as the result of this operation.
Args:
exc_class (object): The exception type
exc_info (object): The exception value or info
exc_stack (object): The stack/traceback associated with the exception
|
codesearchnet
|
def on_run_end(self, request):
    """Callback invoked on run() calls to the debug-wrapper session.

    This is a blocking callback. The invocation happens right before the
    wrapper exits its run() call.

    Args:
        request: (`OnRunEndRequest`) callback request object carrying
            information such as the actual action performed by the session
            wrapper for the run() call.

    Returns:
        An instance of `OnRunStartResponse`.
    """
|
Callback invoked on run() calls to the debug-wrapper session.
This is a blocking callback.
The invocation happens right before the wrapper exits its run() call.
Args:
request: (`OnRunEndRequest`) callback request object carrying information
such as the actual action performed by the session wrapper for the
run() call.
Returns:
An instance of `OnRunStartResponse`.
|
github-repos
|
def schedule(
        time: Union[datetime.time, datetime.datetime],
        callback: Callable, *args):
    """Schedule ``callback(*args)`` to run once at the given wall-clock time.

    Args:
        time: Time to run the callback. A bare :class:`datetime.time` is
            interpreted as today at that time.
        callback: Callable scheduled to run.
        args: Arguments to call the callback with.
    """
    target = _fillDate(time)
    # Delay (possibly negative) from now until the target, in seconds.
    delay_seconds = (target - datetime.datetime.now(target.tzinfo)).total_seconds()
    asyncio.get_event_loop().call_later(delay_seconds, callback, *args)
|
Schedule the callback to be run at the given time with
the given arguments.
Args:
time: Time to run callback. If given as :py:class:`datetime.time`
then use today as date.
callback: Callable scheduled to run.
args: Arguments for to call callback with.
|
juraj-google-style
|
def _select_mgmt_networks(self, conf):
nets = conf['nets']
mgmts = sorted([name for (name, net) in nets.iteritems() if (net.get('management') is True)])
if (len(mgmts) == 0):
mgmt_name = sorted(nets.keys())[0]
LOGGER.debug('No management network configured, selecting network %s', mgmt_name)
nets[mgmt_name]['management'] = True
mgmts.append(mgmt_name)
for mgmt_name in mgmts:
if (nets[mgmt_name].get('dns_domain_name', None) is None):
nets[mgmt_name]['dns_domain_name'] = 'lago.local'
return mgmts
|
Select management networks. If no management network is found, it will
mark the first network found by sorted the network lists. Also adding
default DNS domain, if none is set.
Args:
conf(spec): spec
|
codesearchnet
|
def AddColumn(self, column, default='', col_index=(- 1)):
    """Appends a new column to the table.

    Args:
        column: A string, name of the column to add.
        default: Default value for existing row entries. Defaults to ''.
        col_index: Integer index at which to insert the new column; -1
            appends it at the end.

    Raises:
        TableError: Column name already exists.
    """
    if column in self.table:
        raise TableError('Column %r already in table.' % column)
    append_at_end = (col_index == -1)
    # Row 0 is the header row: it stores the column name itself.
    if append_at_end:
        self._table[0][column] = column
    else:
        self._table[0].Insert(column, column, col_index)
    # Every data row gets the default value in the new column.
    for row_index in range(1, len(self._table)):
        if append_at_end:
            self._table[row_index][column] = default
        else:
            self._table[row_index].Insert(column, default, col_index)
|
Appends a new column to the table.
Args:
column: A string, name of the column to add.
default: Default value for entries. Defaults to ''.
col_index: Integer index for where to insert new column.
Raises:
TableError: Column name already exists.
|
codesearchnet
|
def read(self, x):
    """Read from the memory via content-based addressing.

    An external component can use the results via a simple MLP,
    e.g., fn(x W_x + retrieved_mem W_m).

    Args:
        x: a tensor in the shape of [batch_size, length, depth].

    Returns:
        access_logits: the logits for accessing the memory in shape of
            [batch_size, length, memory_size].
        retrieved_mem: the retrieved results in the shape of
            [batch_size, length, val_depth].
    """
    access_logits = self._address_content(x)
    # Softmax over the memory slots, then take the weighted sum of values.
    weights = tf.nn.softmax(access_logits)
    weighted_vals = tf.multiply(
        tf.expand_dims(weights, 3),
        tf.expand_dims(self.mem_vals, axis=1))
    retrieved_mem = tf.reduce_sum(weighted_vals, axis=2)
    return access_logits, retrieved_mem
|
Read from the memory.
An external component can use the results via a simple MLP,
e.g., fn(x W_x + retrieved_mem W_m).
Args:
x: a tensor in the shape of [batch_size, length, depth].
Returns:
access_logits: the logits for accessing the memory in shape of
[batch_size, length, memory_size].
retrieved_mem: the retrieved results in the shape of
[batch_size, length, val_depth].
|
juraj-google-style
|
def save(self):
    """Send out a password reset if the provided data is valid.

    If the provided email address exists and is verified, a reset email is
    sent to the address.

    Returns:
        The password reset token if one was created and ``None`` otherwise.
    """
    address = self.validated_data['email']
    try:
        email = models.EmailAddress.objects.get(email=address, is_verified=True)
    except models.EmailAddress.DoesNotExist:
        # Unknown or unverified address: do nothing (avoids leaking whether
        # the address is registered).
        return None
    token = models.PasswordResetToken.objects.create(email=email)
    token.send()
    return token
|
Send out a password reset if the provided data is valid.
If the provided email address exists and is verified, a reset
email is sent to the address.
Returns:
The password reset token if it was returned and ``None``
otherwise.
|
codesearchnet
|
def as_vartype(vartype):
    """Cast various inputs to a valid vartype object.

    Args:
        vartype (:class:`.Vartype`/str/set): Variable type. Accepted inputs:
            * :class:`.Vartype.SPIN`, ``'SPIN'``, ``{-1, 1}``
            * :class:`.Vartype.BINARY`, ``'BINARY'``, ``{0, 1}``

    Returns:
        :class:`.Vartype`: Either :class:`.Vartype.SPIN` or
        :class:`.Vartype.BINARY`.

    Raises:
        TypeError: If the input cannot be interpreted as a vartype.
    """
    # Already a Vartype: nothing to do.
    if isinstance(vartype, Vartype):
        return vartype
    try:
        if isinstance(vartype, str):
            # Look up by member name, e.g. 'SPIN'.
            return Vartype[vartype]
        if not isinstance(vartype, frozenset):
            # Allow any iterable of values, e.g. {0, 1} or [-1, 1].
            vartype = frozenset(vartype)
        return Vartype(vartype)
    except (ValueError, KeyError):
        raise TypeError("expected input vartype to be one of: Vartype.SPIN, 'SPIN', {-1, 1}, Vartype.BINARY, 'BINARY', or {0, 1}.")
|
Cast various inputs to a valid vartype object.
Args:
vartype (:class:`.Vartype`/str/set):
Variable type. Accepted input values:
* :class:`.Vartype.SPIN`, ``'SPIN'``, ``{-1, 1}``
* :class:`.Vartype.BINARY`, ``'BINARY'``, ``{0, 1}``
Returns:
:class:`.Vartype`: Either :class:`.Vartype.SPIN` or
:class:`.Vartype.BINARY`.
See also:
:func:`~dimod.decorators.vartype_argument`
|
codesearchnet
|
def __init__(self, msg):
    """Initialize CoTError with the malformed-payload exit status.

    Args:
        msg (string): the error message
    """
    # Chain-of-Trust failures always map to the malformed-payload status.
    exit_code = STATUSES['malformed-payload']
    super(CoTError, self).__init__(msg, exit_code=exit_code)
|
Initialize CoTError.
Args:
msg (string): the error message
|
juraj-google-style
|
def quat_to_rot(quat: torch.Tensor) -> torch.Tensor:
    """Converts a quaternion to a rotation matrix.

    Args:
        quat: [*, 4] quaternions

    Returns:
        [*, 3, 3] rotation matrices
    """
    # Pairwise products q_i * q_j -> [*, 4, 4].
    products = quat[..., None] * quat[..., None, :]
    # Constant lookup tensor mapping quaternion products to matrix entries.
    qtr_mat = _get_quat('_QTR_MAT', dtype=quat.dtype, device=quat.device)
    qtr_mat = qtr_mat.view((1,) * len(products.shape[:-2]) + qtr_mat.shape)
    # Weight each basis matrix by its product term and sum over (i, j).
    weighted = products[..., None, None] * qtr_mat
    return torch.sum(weighted, dim=(-3, -4))
|
Converts a quaternion to a rotation matrix.
Args:
quat: [*, 4] quaternions
Returns:
[*, 3, 3] rotation matrices
|
github-repos
|
def _process_stack_frames(self):
    """Process the current Python stack frames for debug-event logging.

    Assigns a stable ID to every (file, line, function) frame and writes each
    newly-seen frame (plus its source-file content, on a best-effort basis)
    to the debug events writer exactly once.

    Returns:
        A `CodeLocation` proto carrying the host name and the IDs of all
        frames in the current call stack.
    """
    stack_frames = tf_stack.extract_stack()
    stack_frame_ids = []
    writer = None
    for file_path, lineno, func, _ in stack_frames:
        abs_path = os.path.abspath(file_path)
        # Fast path: frame already registered; avoid taking the lock.
        if (abs_path, lineno, func) in self._stack_frame_to_id:
            stack_frame_ids.append(self._stack_frame_to_id[abs_path, lineno, func])
            continue
        with self._stack_frame_to_id_lock:
            # Double-checked under the lock: another thread may have
            # registered the frame between the check above and here.
            if (abs_path, lineno, func) not in self._stack_frame_to_id:
                stack_frame_id = _get_id()
                self._stack_frame_to_id[abs_path, lineno, func] = stack_frame_id
                file_index = self._write_source_file_content(abs_path)
                file_line_col = graph_debug_info_pb2.GraphDebugInfo.FileLineCol(file_index=file_index, line=lineno, func=func)
                stack_frame_with_id = debug_event_pb2.StackFrameWithId(id=stack_frame_id, file_line_col=file_line_col)
                writer = self.get_writer()
                writer.WriteStackFrameWithId(stack_frame_with_id)
            stack_frame_ids.append(self._stack_frame_to_id[abs_path, lineno, func])
    code_location = debug_event_pb2.CodeLocation(host_name=self._hostname, stack_frame_ids=stack_frame_ids)
    return code_location
|
Process stack frames.
Send the content of source-files, on a best-effort basis.
Returns:
A list of stack frame IDs.
|
github-repos
|
def add_entry(self, path_object):
    """Adds a child FakeFile to this directory.

    Args:
        path_object: FakeFile instance to add as a child of this directory.

    Raises:
        OSError: if the directory has no write permission (Posix only)
        OSError: if the file or directory to be added already exists
    """
    if (not is_root() and not self.st_mode & PERM_WRITE and
            not self.filesystem.is_windows_fs):
        # Python 2 raises IOError for permission errors; Python 3 OSError.
        exception = IOError if IS_PY2 else OSError
        raise exception(errno.EACCES, 'Permission Denied', self.path)
    if path_object.name in self.contents:
        self.filesystem.raise_os_error(errno.EEXIST, self.path)
    self.contents[path_object.name] = path_object
    path_object.parent_dir = self
    # Both the directory and the new entry gain a hard-link reference.
    self.st_nlink += 1
    path_object.st_nlink += 1
    path_object.st_dev = self.st_dev
    if path_object.st_nlink == 1:
        # First link to this object: account for its size on this device.
        self.filesystem.change_disk_usage(
            path_object.size, path_object.name, self.st_dev)
|
Adds a child FakeFile to this directory.
Args:
path_object: FakeFile instance to add as a child of this directory.
Raises:
OSError: if the directory has no write permission (Posix only)
OSError: if the file or directory to be added already exists
|
juraj-google-style
|
def predict(self, X, break_ties="random", return_probs=False, **kwargs):
    """Predicts (int) labels for an input X on all tasks.

    Args:
        X: The input for the predict_proba method
        break_ties: A tie-breaking policy (see Classifier._break_ties())
        return_probs: Return the predicted probabilities as well
        **kwargs: Passed through to predict_proba.

    Returns:
        Y_p: An n-dim np.ndarray of predictions in {1,...,k}
        [Optionally: Y_s: An [n, k] np.ndarray of predicted probabilities]
    """
    Y_s = self._to_numpy(self.predict_proba(X, **kwargs))
    # `np.int` was deprecated in NumPy 1.20 and removed in 1.24; the builtin
    # `int` is the documented drop-in replacement and what the alias meant.
    Y_p = self._break_ties(Y_s, break_ties).astype(int)
    if return_probs:
        return Y_p, Y_s
    return Y_p
|
Predicts (int) labels for an input X on all tasks
Args:
X: The input for the predict_proba method
break_ties: A tie-breaking policy (see Classifier._break_ties())
return_probs: Return the predicted probabilities as well
Returns:
Y_p: An n-dim np.ndarray of predictions in {1,...k}
[Optionally: Y_s: An [n, k] np.ndarray of predicted probabilities]
|
juraj-google-style
|
def delete(self, version_name):
    """Delete a version of this model.

    Blocks until the service's long-running delete operation completes.

    Args:
        version_name: the name of the version in short form, such as "v1".

    Raises:
        Exception: If the service response does not contain an operation name.
    """
    full_name = '%s/versions/%s' % (self._full_model_name, version_name)
    response = (self._api.projects().models().versions()
                .delete(name=full_name).execute())
    if 'name' not in response:
        raise Exception('Invalid response from service. "name" is not found.')
    # The delete is asynchronous on the server; wait for it to finish.
    _util.wait_for_long_running_operation(response['name'])
|
Delete a version of model.
Args:
version_name: the name of the version in short form, such as "v1".
|
codesearchnet
|
def _validate_alias_file_content(alias_file_path, url=''):
    """Make sure each alias name and command in the alias file is valid.

    Args:
        alias_file_path: The alias file path to import aliases from.
        url: Optional URL shown in error messages instead of the local path.

    Raises:
        CLIError: If the file cannot be parsed or any alias is invalid.
    """
    alias_table = get_config_parser()
    try:
        alias_table.read(alias_file_path)
        for alias_name, alias_command in reduce_alias_table(alias_table):
            # Each validator raises on the first problem it finds.
            _validate_alias_name(alias_name)
            _validate_alias_command(alias_command)
            _validate_alias_command_level(alias_name, alias_command)
            _validate_pos_args_syntax(alias_name, alias_command)
    except Exception as exception:  # surface any parsing/validation failure as a CLIError
        message = CONFIG_PARSING_ERROR % AliasManager.process_exception_message(exception)
        # Show the remote URL, if given, rather than the local temp path.
        raise CLIError(message.replace(alias_file_path, url or alias_file_path))
|
Make sure the alias name and alias command in the alias file is in valid format.
Args:
alias_file_path (str): The alias file path to import aliases from.
url (str): Optional URL shown in error messages instead of the local path.
|
juraj-google-style
|
def get_gpu_ids():
    """Get the IDs of the GPUs that are available to the worker.

    If CUDA_VISIBLE_DEVICES was set when the worker started, the returned IDs
    are a subset of the IDs in CUDA_VISIBLE_DEVICES; otherwise they fall in
    [0, NUM_GPUS - 1] for the node.

    Returns:
        A list of GPU IDs.

    Raises:
        Exception: If called in local (PYTHON) mode.
    """
    if _mode() == LOCAL_MODE:
        raise Exception('ray.get_gpu_ids() currently does not work in PYTHON MODE.')
    gpu_resources = global_worker.raylet_client.resource_ids().get('GPU', [])
    assigned_ids = [resource_id for resource_id, _ in gpu_resources]
    # Translate relative IDs back to the original CUDA_VISIBLE_DEVICES IDs,
    # when those were recorded at startup.
    original_ids = global_worker.original_gpu_ids
    if original_ids is not None:
        assigned_ids = [original_ids[gpu_id] for gpu_id in assigned_ids]
    return assigned_ids
|
Get the IDs of the GPUs that are available to the worker.
If the CUDA_VISIBLE_DEVICES environment variable was set when the worker
started up, then the IDs returned by this method will be a subset of the
IDs in CUDA_VISIBLE_DEVICES. If not, the IDs will fall in the range
[0, NUM_GPUS - 1], where NUM_GPUS is the number of GPUs that the node has.
Returns:
A list of GPU IDs.
|
codesearchnet
|
def if_features(self, stmt: Statement, mid: ModuleId) -> bool:
    """Evaluate ``if-feature`` substatements on a statement, if any.

    Args:
        stmt: YANG statement whose if-features are tested.
        mid: Identifier of the module in which `stmt` is present.

    Returns:
        True iff every if-feature expression evaluates to true (trivially
        True when there are none).

    Raises:
        ModuleNotRegistered: If `mid` is not registered in the data model.
        InvalidFeatureExpression: If an if-feature expression is not
            syntactically correct.
        UnknownPrefix: If a prefix specified in a feature name is not
            declared.
    """
    # all() over the (possibly empty) substatements: evaluates each
    # expression in order and short-circuits on the first false one.
    return all(
        FeatureExprParser(iff.argument, self, mid).parse()
        for iff in stmt.find_all('if-feature'))
|
Evaluate ``if-feature`` substatements on a statement, if any.
Args:
stmt: Yang statement that is tested on if-features.
mid: Identifier of the module in which `stmt` is present.
Raises:
ModuleNotRegistered: If `mid` is not registered in the data model.
InvalidFeatureExpression: If a if-feature expression is not
syntactically correct.
UnknownPrefix: If a prefix specified in a feature name is not
declared.
|
codesearchnet
|
def enable_traceback_filtering():
    """Enable filtering out TensorFlow-internal frames in exception stack traces.

    TensorFlow filters internal frames in most exceptions it raises to keep
    stack traces short and focused on user code. If filtering was previously
    disabled via `tf.debugging.disable_traceback_filtering()`, this turns it
    back on.

    Raises:
        RuntimeError: If Python version is not at least 3.7.
    """
    version = sys.version_info
    if version.major != 3 or version.minor < 7:
        raise RuntimeError(f'Traceback filtering is only available with Python 3.7 or higher. This Python version: {sys.version}')
    global _ENABLE_TRACEBACK_FILTERING
    _ENABLE_TRACEBACK_FILTERING.value = True
|
Enable filtering out TensorFlow-internal frames in exception stack traces.
Raw TensorFlow stack traces involve many internal frames, which can be
challenging to read through, while not being actionable for end users.
By default, TensorFlow filters internal frames in most exceptions that it
raises, to keep stack traces short, readable, and focused on what's
actionable for end users (their own code).
If you have previously disabled traceback filtering via
`tf.debugging.disable_traceback_filtering()`, you can re-enable it via
`tf.debugging.enable_traceback_filtering()`.
Raises:
RuntimeError: If Python version is not at least 3.7.
|
github-repos
|
def section(title, element_list):
    """Returns a dictionary representing a new section.

    Sections contain a list of elements that are displayed separately from
    the global elements on the page.

    Args:
        title: The title of the section to be displayed.
        element_list: The list of elements (or a single element, which is
            wrapped in a list) to display within the section.

    Returns:
        A dictionary with metadata specifying that it is to be rendered as
        a section containing multiple elements.
    """
    # A bare element is wrapped so 'Elements' is always a list.
    elements = element_list if isinstance(element_list, list) else [element_list]
    return {
        'Type': 'Section',
        'Title': title,
        'Elements': elements,
    }
|
Returns a dictionary representing a new section. Sections
contain a list of elements that are displayed separately from
the global elements on the page.
Args:
title: The title of the section to be displayed
element_list: The list of elements to display within the section
Returns:
A dictionary with metadata specifying that it is to be rendered as
a section containing multiple elements
|
juraj-google-style
|
def add_request(self, request):
    """Add a request to the queue.

    Args:
        request (:class:`nyawc.http.Request`): The request to add.

    Returns:
        :class:`nyawc.QueueItem`: The created queue item.
    """
    # Pair the request with a placeholder response for its URL.
    item = QueueItem(request, Response(request.url))
    self.add(item)
    return item
|
Add a request to the queue.
Args:
request (:class:`nyawc.http.Request`): The request to add.
Returns:
:class:`nyawc.QueueItem`: The created queue item.
|
juraj-google-style
|
def _rescale(vector):
min_val = min(vector)
vector = [v - min_val for v in vector]
max_val = float(max(vector))
try:
return [v / max_val for v in vector]
except ZeroDivisionError:
return [1.0] * len(vector)
|
Scale values in vector to the range [0, 1].
Args:
vector: A list of real values.
|
juraj-google-style
|
def _preprocess_token_ids(self, token_ids, skip_special_tokens: bool=False):
if skip_special_tokens:
prompt_token_id = self.convert_tokens_to_ids('<|startofprev|>')
decoder_start_token_id = self.convert_tokens_to_ids('<|startoftranscript|>')
token_ids = self._strip_prompt(token_ids, prompt_token_id, decoder_start_token_id)
return token_ids
|
Pre-process the token ids for decoding by removing the prompt tokens ids and timestamp token ids.
Args:
token_ids (`Union[int, List[int], np.ndarray, torch.Tensor, tf.Tensor]`):
List of tokenized input ids. Typically, obtained using the `__call__` method of the tokenizer.
skip_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not to remove special tokens from the token ids. If `True`, the prompt token ids will be
removed.
|
github-repos
|
def do_check(func, files, status):
    """Generic pre-receive check helper.

    Args:
        func (object): checker object with a ``parse(content, file_name)``
            method returning a failure description, or a falsy value on
            success.
        files (list): list of file paths to run the check against.
        status (list): list of pre-receive check failures to eventually
            print to the user; appended to in place.

    Returns:
        The status list of current pre-receive check failures. Might be an
        empty list.
    """
    for file_name in files:
        with open(file_name, 'r') as handle:
            failure = func.parse(handle.read(), file_name)
        if failure:
            status.append("{0}: {1}".format(file_name, failure))
    return status
|
Generic do_check helper method
Args:
func (function): Specific function to call
files (list): list of files to run against
status (list): list of pre-receive check failures to eventually print
to the user
Returns:
status list of current pre-redeive check failures. Might be an empty
list.
|
juraj-google-style
|
def get_custom_objects():
    """Retrieves a live reference to the global dictionary of custom objects.

    Updating and clearing custom objects using `custom_object_scope` is
    preferred, but `get_custom_objects` can be used to directly access the
    current collection of custom objects.

    Example:

    ```python
    get_custom_objects().clear()
    get_custom_objects()['MyObject'] = MyObject
    ```

    Returns:
        Global dictionary mapping registered names to classes
        (`_GLOBAL_CUSTOM_OBJECTS`).
    """
    return _GLOBAL_CUSTOM_OBJECTS
|
Retrieves a live reference to the global dictionary of custom objects.
Updating and clearing custom objects using `custom_object_scope`
is preferred, but `get_custom_objects` can
be used to directly access the current collection of custom objects.
Example:
```python
get_custom_objects().clear()
get_custom_objects()['MyObject'] = MyObject
```
Returns:
Global dictionary of names to classes (`_GLOBAL_CUSTOM_OBJECTS`).
|
github-repos
|
def create_from_settings(settings):
    """Create a connection with given settings.

    Args:
        settings (dict): A dictionary of settings with keys 'url',
            'base_url', 'user', 'password', 'authorizations' and 'debug'.

    Returns:
        :class:`Connection`. The connection
    """
    # Positional credentials first, optional behavior flags as keywords.
    return Connection(
        settings["url"],
        settings["base_url"],
        settings["user"],
        settings["password"],
        authorizations=settings["authorizations"],
        debug=settings["debug"],
    )
|
Create a connection with given settings.
Args:
settings (dict): A dictionary of settings
Returns:
:class:`Connection`. The connection
|
juraj-google-style
|
def codeblocks(start=None, end=None, full=True):
    """Iterate all `CodeBlock`s in a given range.

    Args:
        start: start address of the range. If `None` uses IDB start.
        end: end address of the range. If `None` uses IDB end.
        full: `True` is required to change node info (e.g. color).
            `False` causes faster iteration.

    Yields:
        The code blocks in the range.
    """
    if not full:
        # Fast path: one flow chart over the raw address bounds.
        start, end = fix_addresses(start, end)
        for block in FlowChart(bounds=(start, end)):
            yield block
        return
    # Full path: walk each function's own flow chart.
    for function in functions(start, end):
        for block in FlowChart(f=function.func_t):
            yield block
|
Get all `CodeBlock`s in a given range.
Args:
start - start address of the range. If `None` uses IDB start.
end - end address of the range. If `None` uses IDB end.
full - `True` is required to change node info (e.g. color). `False` causes faster iteration.
|
juraj-google-style
|
def get_accuracy(targets, outputs, k=1, ignore_index=None):
    """Get the top-k accuracy between two tensors.

    Args:
        targets (1 - 2D :class:`torch.Tensor`): Target or true vector against
            which to measure accuracy.
        outputs (1 - 3D :class:`torch.Tensor`): Prediction or output vector.
        k (int, optional): Number of top predictions considered per target.
        ignore_index (int, optional): Specifies a target index that is ignored.

    Returns:
        :class:`tuple` consisting of accuracy (:class:`float`), number
        correct (:class:`int`) and total (:class:`int`).
    """
    n_correct = 0.0
    for (target, output) in zip(targets, outputs):
        # Normalize plain numbers / scalar tensors into (nested) tensors.
        if ((not torch.is_tensor(target)) or is_scalar(target)):
            target = torch.LongTensor([target])
        if ((not torch.is_tensor(output)) or is_scalar(output)):
            output = torch.LongTensor([[output]])
        # Top-k candidates along dim 0 (k capped at the number of candidates).
        predictions = output.topk(k=min(k, len(output)), dim=0)[0]
        for prediction in predictions:
            # Count a hit if any top-k prediction matches the target,
            # ignoring positions equal to `ignore_index`, then stop.
            if torch_equals_ignore_index(target.squeeze(), prediction.squeeze(), ignore_index=ignore_index):
                n_correct += 1
                break
    return ((n_correct / len(targets)), int(n_correct), len(targets))
|
Get the accuracy top-k accuracy between two tensors.
Args:
targets (1 - 2D :class:`torch.Tensor`): Target or true vector against which to measure
accuracy
outputs (1 - 3D :class:`torch.Tensor`): Prediction or output vector
ignore_index (int, optional): Specifies a target index that is ignored
Returns:
:class:`tuple` consisting of accuracy (:class:`float`), number correct (:class:`int`) and
total (:class:`int`)
Example:
>>> import torch
>>> from torchnlp.metrics import get_accuracy
>>> targets = torch.LongTensor([1, 2, 3, 4, 5])
>>> outputs = torch.LongTensor([1, 2, 2, 3, 5])
>>> accuracy, n_correct, n_total = get_accuracy(targets, outputs, ignore_index=3)
>>> accuracy
0.8
>>> n_correct
4
>>> n_total
5
|
codesearchnet
|
def setDirname(self, dirname):
    """Set a new directory name for the sequence.

    A trailing path separator (matching the style of the given path) is
    appended if missing.

    Args:
        dirname (str): the new directory name
    """
    sep = utils._getPathSep(dirname)
    if not dirname.endswith(sep):
        dirname = dirname + sep
    self._dir = utils.asString(dirname)
|
Set a new directory name for the sequence.
Args:
dirname (str): the new directory name
|
codesearchnet
|
def get_sites_in_sphere(self, pt, r):
    """Find all sites within a sphere from a point.

    Args:
        pt (3x1 array): Cartesian coordinates of center of sphere.
        r (float): Radius of sphere.

    Returns:
        [(site, dist) ...] since most of the time, subsequent processing
        requires the distance.
    """
    # Pair every site with its distance, then keep those inside the sphere.
    site_distances = ((site, site.distance_from_point(pt)) for site in self._sites)
    return [(site, dist) for site, dist in site_distances if dist <= r]
|
Find all sites within a sphere from a point.
Args:
pt (3x1 array): Cartesian coordinates of center of sphere.
r (float): Radius of sphere.
Returns:
[(site, dist) ...] since most of the time, subsequent processing
requires the distance.
|
codesearchnet
|
def automatic_gamma_density(structure, kppa):
    """Returns an automatic Kpoint object based on a structure and a kpoint
    density. Uses Gamma centered meshes always. For GW.

    Algorithm:
        Scales the number of divisions along each reciprocal lattice vector
        proportionally to its length.

    Args:
        structure: Input structure
        kppa: Grid density
    """
    lengths = structure.lattice.abc
    grid_density = kppa / structure.num_sites
    multiplier = (grid_density * lengths[0] * lengths[1] * lengths[2]) ** (1 / 3)
    num_div = []
    for length in lengths:
        divisions = max(int(round(multiplier / length)), 1)
        if divisions <= 8:
            # Small grids: round up to the next even number.
            divisions += divisions % 2
        else:
            # Large grids: round down to the next odd number.
            divisions = divisions - divisions % 2 + 1
        num_div.append(divisions)
    comment = ('pymatgen 4.7.6+ generated KPOINTS with grid density = '
               + '{} / atom'.format(kppa))
    # num_kpts == 0 signals an automatically generated mesh.
    return Kpoints(comment, 0, Kpoints.supported_modes.Gamma, [num_div], [0, 0, 0])
|
Returns an automatic Kpoint object based on a structure and a kpoint
density. Uses Gamma centered meshes always. For GW.
Algorithm:
Uses a simple approach scaling the number of divisions along each
reciprocal lattice vector proportional to its length.
Args:
structure:
Input structure
kppa:
Grid density
|
codesearchnet
|
def dict_from_items_with_values(*dictionaries, **items):
    """Creates a dict with the inputted items; pruning any that are `None`.

    Args:
        *dictionaries(dict): Dictionaries of items to be pruned and included.
        **items: Items to be pruned and included.

    Returns:
        dict: A dictionary containing all of the items with a 'non-None'
        value. Later sources override earlier ones on key collisions, with
        keyword items applied last.
    """
    return {
        key: value
        for source in (*dictionaries, items)
        for key, value in source.items()
        if value is not None
    }
|
Creates a dict with the inputted items; pruning any that are `None`.
Args:
*dictionaries(dict): Dictionaries of items to be pruned and included.
**items: Items to be pruned and included.
Returns:
dict: A dictionary containing all of the items with a 'non-None' value.
|
codesearchnet
|
def bipartition_indices(N):
    """Return indices for undirected bipartitions of a sequence.

    Args:
        N (int): The length of the sequence.

    Returns:
        list: A list of tuples containing the indices for each of the two
        parts.

    Example:
        >>> bipartition_indices(3)
        [((), (0, 1, 2)), ((0,), (1, 2)), ((1,), (0, 2)), ((0, 1), (2,))]
    """
    if N <= 0:
        return []
    partitions = []
    # Each (N-1)-bit mask decides which side elements 0..N-2 go to; element
    # N-1 is pinned to one side so mirror partitions are not double-counted.
    for mask in range(2 ** (N - 1)):
        ones = tuple(n for n in range(N) if (mask >> n) & 1)
        zeros = tuple(n for n in range(N) if not (mask >> n) & 1)
        partitions.append((ones, zeros))
    return partitions
|
Return indices for undirected bipartitions of a sequence.
Args:
N (int): The length of the sequence.
Returns:
list: A list of tuples containing the indices for each of the two
parts.
Example:
>>> N = 3
>>> bipartition_indices(N)
[((), (0, 1, 2)), ((0,), (1, 2)), ((1,), (0, 2)), ((0, 1), (2,))]
|
codesearchnet
|
def trk50(msg):
    """True track angle, BDS 5,0 message.

    Args:
        msg (String): 28 bytes hexadecimal message (BDS50) string

    Returns:
        float: angle in degrees to true north (from 0 to 360), or None when
        the status bit marks the field as unavailable.
    """
    bits = hex2bin(data(msg))
    # Bit 12 (index 11) is the status bit for this field.
    if bits[11] == '0':
        return None
    # 10 magnitude bits plus a sign bit form an 11-bit two's-complement
    # value with a resolution of 90/512 degrees.
    value = bin2int(bits[13:23])
    if int(bits[12]):
        value -= 1024
    angle = value * 90.0 / 512.0
    if angle < 0:
        angle += 360
    return round(angle, 3)
|
True track angle, BDS 5,0 message
Args:
msg (String): 28 bytes hexadecimal message (BDS50) string
Returns:
float: angle in degrees to true north (from 0 to 360)
|
juraj-google-style
|
def handle_api_static_request(self, request, start_response):
    """Handler for requests to {base_path}/static/.*.

    This calls start_response and returns the response body.

    Args:
        request: An ApiRequest, the request from the user.
        start_response: A function with semantics defined in PEP-333.

    Returns:
        A string containing the response body.
    """
    # Anything other than the proxy page is unknown static content.
    if request.path != PROXY_PATH:
        _logger.debug('Unknown static url requested: %s',
                      request.relative_url)
        return util.send_wsgi_response('404 Not Found',
                                       [('Content-Type', 'text/plain')],
                                       'Not Found', start_response)
    return util.send_wsgi_response('200 OK',
                                   [('Content-Type', 'text/html')],
                                   PROXY_HTML, start_response)
|
Handler for requests to {base_path}/static/.*.
This calls start_response and returns the response body.
Args:
request: An ApiRequest, the request from the user.
start_response: A function with semantics defined in PEP-333.
Returns:
A string containing the response body.
|
juraj-google-style
|
def _to_row_partitions_from_lengths(lengths: Sequence[Union[int, Sequence[int]]]) -> Sequence[RowPartition]:
    """Allow ragged and uniform shapes to be specified.

    For example, [2, [2,1], 2] represents a shape like:
    [[[0, 0], [0, 0]], [[0, 0]]]

    Args:
        lengths: a list of integers (uniform dimensions) and lists of
            integers (ragged row lengths).

    Returns:
        a sequence of RowPartitions.
    """
    # The helper also returns the total number of values, which is unused here.
    result, _ = dynamic_ragged_shape._to_row_partitions_and_nvals_from_lengths(lengths)
    return result
|
Allow ragged and uniform shapes to be specified.
For example, [2, [2,1], 2] represents a shape like:
[[[0, 0], [0, 0]], [[0, 0]]]
Args:
lengths: a list of integers and lists of integers.
Returns:
a sequence of RowPartitions.
|
github-repos
|
def base64url_decode(input):
    """Decode a base64url-encoded value, restoring any stripped '=' padding.

    Args:
        input (bytes or str): A base64url-encoded value to decode. A str is
            encoded as ASCII first, so callers may pass either type
            (previously a str that needed padding raised TypeError).

    Returns:
        bytes: The decoded payload.
    """
    # Accept text transparently; tokens frequently arrive as str.
    if isinstance(input, str):
        input = input.encode('ascii')
    # base64url strips trailing '=' padding (RFC 7515 Appendix C); restore
    # it so urlsafe_b64decode accepts the value.
    rem = len(input) % 4
    if rem > 0:
        input += b'=' * (4 - rem)
    return base64.urlsafe_b64decode(input)
|
Helper method to base64url_decode a string.
Args:
input (str): A base64url_encoded string to decode.
|
juraj-google-style
|
def _unpad_modernbert_input(inputs: torch.Tensor, attention_mask: torch.Tensor, position_ids: Optional[torch.Tensor]=None, labels: Optional[torch.Tensor]=None) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, int, Optional[torch.Tensor], Optional[torch.Tensor]]:
seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
max_seqlen_in_batch = int(seqlens_in_batch.max().item())
cu_seqlens = torch.nn.functional.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
if inputs.dim() == 2:
unpadded_inputs = inputs.flatten()[indices]
else:
batch, seqlen, *rest = inputs.shape
shape = batch * seqlen
unpadded_inputs = inputs.view(shape, *rest)[indices]
unpadded_position_ids = position_ids.flatten()[indices] if position_ids is not None else None
unpadded_labels = labels.flatten()[indices] if labels is not None else None
return (unpadded_inputs, indices, cu_seqlens, max_seqlen_in_batch, unpadded_position_ids, unpadded_labels)
|
Remove padding from input sequences.
Args:
inputs: (batch, seqlen, ...) or (batch, seqlen)
attention_mask: (batch, seqlen), bool / int, 1 means valid and 0 means not valid.
position_ids: (batch, seqlen), int, position ids
labels: (batch, seqlen), int, labels
Returns:
unpadded_inputs: (total_nnz, ...), where total_nnz = number of tokens selected in attention_mask.
indices: (total_nnz)
cu_seqlens: (batch + 1), the cumulative sequence lengths
max_seqlen_in_batch: int
unpadded_position_ids: (total_nnz) or None
unpadded_labels: (total_nnz) or None
|
github-repos
|
def rho_hv(scatterer):
    """Copolarized correlation (rho_hv) for the current setup.

    Args:
        scatterer: a Scatterer instance.

    Returns:
        rho_hv.
    """
    Z = scatterer.get_Z()
    # Combinations of phase-matrix elements; see the scattering-matrix
    # definition of rho_hv for the derivation of these terms.
    numerator = (Z[2, 2] + Z[3, 3]) ** 2 + (Z[3, 2] - Z[2, 3]) ** 2
    denom_a = Z[0, 0] - Z[0, 1] - Z[1, 0] + Z[1, 1]
    denom_b = Z[0, 0] + Z[0, 1] + Z[1, 0] + Z[1, 1]
    return np.sqrt(numerator / (denom_a * denom_b))
|
Copolarized correlation (rho_hv) for the current setup.
Args:
scatterer: a Scatterer instance.
Returns:
rho_hv.
|
juraj-google-style
|
def copy_files_to_folder(files, target_folder, overwrite=True):
    """Copy a list of files to a target folder.

    Args:
        files: list of source file paths; a falsy value yields [].
        target_folder: destination directory.
        overwrite: when True, an existing destination file is removed and
            replaced; when False, it is skipped.

    Returns:
        A list of fullpath of the new files.
        NOTE(review): when a file's computed target equals its source path,
        the function returns that single path as a *str* (not a list) and
        aborts the loop early -- inconsistent with the documented list
        return; confirm whether callers rely on this.
    """
    if (not files):
        return []
    for f in files:
        target = os.path.join(target_folder, os.path.split(f)[(- 1)])
        if (target == f):
            # Source already lives inside the target folder.
            return target
        if os.path.exists(target):
            if overwrite:
                try:
                    os.remove(target)
                except Exception:
                    # NOTE(review): the message names the source `f`, though
                    # it is the *target* that failed to be removed.
                    raise IOError(('Failed to remove %s' % f))
                else:
                    # Removal succeeded; write the replacement copy.
                    shutil.copy(f, target)
            else:
                # Existing file kept untouched when overwrite is disabled.
                continue
        else:
            print(('Copying %s to %s' % (os.path.split(f)[(- 1)], os.path.normpath(target_folder))))
            shutil.copy(f, target)
    return [os.path.join(target_folder, os.path.split(f)[(- 1)]) for f in files]
|
Copy a list of files to a new target folder.
Returns:
A list of fullpath of the new files.
|
codesearchnet
|
def conversation(self, name=None, **kwargs):
    """Make a new conversation.

    Arguments:
        name: The key under which the conversation is stored in
            ``conversations``. If None the conversation will be stored in a
            list instead. Mixing both types results in an error.
        **kwargs: Keyword arguments to pass into the new conversation.
            These accept the same arguments as Cleverbot.

    Returns:
        The new conversation.
    """
    new_convo = Conversation(self, **kwargs)
    # Register the conversation under `name` via the base implementation.
    super().conversation(name, new_convo)
    return new_convo
|
Make a new conversation.
Arguments:
name: The key for the dictionary the conversation will be stored as
in conversations. If None the conversation will be stored as a
list instead. Mixing both types results in an error.
**kwargs: Keyword arguments to pass into the new conversation.
These accept the same arguments as Cleverbot.
Returns:
The new conversation.
|
juraj-google-style
|
def embedding_layer(token_indices=None, token_embedding_matrix=None, n_tokens=None, token_embedding_dim=None, name: str=None, trainable=True):
    """Token embedding layer. Creates a matrix for token embeddings.

    Can be initialized with a given matrix (for example pre-trained with the
    word2vec algorithm).

    Args:
        token_indices: token indices tensor of type tf.int32
        token_embedding_matrix: matrix of embeddings with dimensionality
            [n_tokens, embeddings_dimension]
        n_tokens: total number of unique tokens
        token_embedding_dim: dimensionality of embeddings, typically 100..300
        name: embedding matrix name (variable name)
        trainable: whether to set the matrix trainable or not

    Returns:
        embedded_tokens: tf tensor of size [B, T, E], where B - batch size,
            T - number of tokens, E - token_embedding_dim
    """
    if token_embedding_matrix is not None:
        tok_mat = token_embedding_matrix
        if trainable:
            # BUG FIX: the original built `Warning(...)` — constructing an
            # exception object and discarding it — so the message was never
            # shown. Actually emit it as a warning.
            import warnings
            warnings.warn('Matrix of embeddings is passed to the embedding_layer, possibly there is a pre-trained embedding matrix. Embeddings paramenters are set to Trainable!')
    else:
        # Random init scaled by 1/sqrt(dim), matching common embedding init.
        tok_mat = (np.random.randn(n_tokens, token_embedding_dim).astype(np.float32) / np.sqrt(token_embedding_dim))
    tok_emb_mat = tf.Variable(tok_mat, name=name, trainable=trainable)
    embedded_tokens = tf.nn.embedding_lookup(tok_emb_mat, token_indices)
    return embedded_tokens
|
Token embedding layer. Create matrix of for token embeddings.
Can be initialized with a given matrix (for example pre-trained
with the word2vec algorithm)
Args:
token_indices: token indices tensor of type tf.int32
token_embedding_matrix: matrix of embeddings with dimensionality
[n_tokens, embeddings_dimension]
n_tokens: total number of unique tokens
token_embedding_dim: dimensionality of embeddings, typical 100..300
name: embedding matrix name (variable name)
trainable: whether to set the matrix trainable or not
Returns:
embedded_tokens: tf tensor of size [B, T, E], where B - batch size
T - number of tokens, E - token_embedding_dim
|
codesearchnet
|
def _update_workflow_definition(pb_config: dict):
    """Update the PB configuration workflow definition in place.

    Flattens the nested 'workflow' section into top-level keys and attaches
    the stage list from the registered workflow definition, with each
    stage's status initialised to 'none'.

    Args:
        pb_config (dict): PB configuration dictionary

    Raises:
        RuntimeError: if the workflow definition (id, version) specified in
            the PB configuration is not known.
    """
    workflow_id = pb_config['workflow']['id']
    workflow_version = pb_config['workflow']['version']
    known_workflows = get_workflows()
    if (workflow_id not in known_workflows
            or workflow_version not in known_workflows[workflow_id]):
        raise RuntimeError("Unknown workflow definition: {}:{}"
                           .format(workflow_id, workflow_version))
    workflow = get_workflow(workflow_id, workflow_version)
    # Fresh definitions always start with every stage unexecuted.
    for stage in workflow['stages']:
        stage['status'] = 'none'
    # Flatten the nested 'workflow' section into top-level keys.
    pb_config['workflow_parameters'] = pb_config['workflow']['parameters']
    pb_config['workflow_id'] = workflow_id
    pb_config['workflow_version'] = workflow_version
    pb_config['workflow_stages'] = workflow['stages']
    pb_config.pop('workflow', None)
|
Update the PB configuration workflow definition.
Args:
pb_config (dict): PB configuration dictionary
Raises:
RunTimeError, if the workflow definition (id, version)
specified in the sbi_config is not known.
|
juraj-google-style
|
def in_builddir(sub='.'):
    """Decorate a project phase with a local working directory change.

    Args:
        sub: An optional subdirectory to change into.

    Returns:
        A decorator that runs the wrapped method with the CWD set to
        ``<self.builddir>/<sub>``.
    """
    from functools import wraps

    def wrap_in_builddir(func):
        'Wrap the function for the new build directory.'

        @wraps(func)
        def wrap_in_builddir_func(self, *args, **kwargs):
            'The actual function inside the wrapper for the new builddir.'
            p = (local.path(self.builddir) / sub)
            if (not p.exists()):
                LOG.error('%s does not exist.', p)
            if (p == local.cwd):
                LOG.debug('CWD already is %s', p)
                # BUG FIX: was `func(self, *args, *kwargs)`, which unpacks the
                # kwargs dict as extra positional arguments (its keys) instead
                # of forwarding the keyword arguments.
                return func(self, *args, **kwargs)
            with local.cwd(p):
                return func(self, *args, **kwargs)
        return wrap_in_builddir_func
    return wrap_in_builddir
|
Decorate a project phase with a local working directory change.
Args:
sub: An optional subdirectory to change into.
|
codesearchnet
|
def __handle_variable(self, shell_entry, output):
    """Save the captured output under the configured variable name.

    Args:
        shell_entry (dict): shell based configuration (shell, docker container or Python).
        output: list of strings representing output of the last shell
    """
    if 'variable' not in shell_entry:
        # No variable configured for this entry; nothing to store.
        return
    name = shell_entry['variable']
    self.pipeline.variables[name] = "\n".join(output)
|
Saving output for configured variable name.
Args:
shell_entry(dict): shell based configuration (shell, docker container or Python).
output: list of strings representing output of last shell
|
juraj-google-style
|
def capitalcase(string):
    """Convert string into capital case.

    Only the first letter is uppercased; the rest is left untouched.

    Args:
        string: String to convert.

    Returns:
        string: Capital case string.
    """
    text = str(string)
    if not text:
        return text
    head, tail = text[0], text[1:]
    return uppercase(head) + tail
|
Convert string into capital case.
First letters will be uppercase.
Args:
string: String to convert.
Returns:
string: Capital case string.
|
codesearchnet
|
def log_handler(self, handler):
    """Setter for the log handler function.

    No-op when the DLL connection is already open: the handler can only
    be installed before opening.

    Args:
        self (JLink): the ``JLink`` instance
        handler: log handler callable, or a falsy value to install a no-op

    Returns:
        ``None``
    """
    if self.opened():
        return
    callback = handler if handler else util.noop
    self._log_handler = enums.JLinkFunctions.LOG_PROTOTYPE(callback)
    self._dll.JLINKARM_EnableLog(self._log_handler)
|
Setter for the log handler function.
Args:
self (JLink): the ``JLink`` instance
Returns:
``None``
|
codesearchnet
|
def __savorize(self, node: yaml.Node, expected_type: Type) -> yaml.Node:
    """Remove syntactic sugar from the node.

    Calls yatiml_savorize(), first on the class's registered base
    classes (recursively), then on the class itself.

    Args:
        node: The node to modify.
        expected_type: The type to assume this node is.

    Returns:
        The desugared node.
    """
    logger.debug('Savorizing node assuming type {}'.format(expected_type.__name__))
    registered = self._registered_classes.values()
    for base in expected_type.__bases__:
        # Only recurse into bases that are themselves registered classes.
        if base in registered:
            node = self.__savorize(node, base)
    if hasattr(expected_type, 'yatiml_savorize'):
        logger.debug('Calling {}.yatiml_savorize()'.format(expected_type.__name__))
        wrapped = Node(node)
        expected_type.yatiml_savorize(wrapped)
        node = wrapped.yaml_node
    return node
|
Removes syntactic sugar from the node.
This calls yatiml_savorize(), first on the class's base \
classes, then on the class itself.
Args:
node: The node to modify.
expected_type: The type to assume this type is.
|
codesearchnet
|
def __init__(self, host: str, port: int, time_to_live: Union[int, timedelta], *, kwargs: Optional[Dict[str, Any]]=None, request_coder: Optional[coders.Coder], response_coder: Optional[coders.Coder], source_caller: Optional[Caller[RequestT, ResponseT]]=None):
    """Configure a write-mode Redis cache caller.

    Args:
        host (str): The hostname or IP address of the Redis server.
        port (int): The port number of the Redis server.
        time_to_live: (Union[int, timedelta]) The time-to-live (TTL) for
            records stored in Redis, as seconds or a `datetime.timedelta`.
        kwargs: Optional(Dict[str, Any]) extra keyword arguments required to
            connect to the Redis server. Same as `redis.Redis()`.
        request_coder: (Optional[`coders.Coder`]) coder for requests stored
            in Redis.
        response_coder: (Optional[`coders.Coder`]) coder for decoding
            responses received from Redis.
        source_caller: (Optional[`Caller`]) the source caller using this
            Redis cache, used when storing fetched requests in Redis.
    """
    self.request_coder = request_coder
    self.response_coder = response_coder
    # Delegate all Redis interaction to the shared caller, in WRITE mode.
    self.redis_caller = _RedisCaller(
        host,
        port,
        time_to_live,
        request_coder=request_coder,
        response_coder=response_coder,
        kwargs=kwargs,
        source_caller=source_caller,
        mode=_RedisMode.WRITE)
|
Args:
host (str): The hostname or IP address of the Redis server.
port (int): The port number of the Redis server.
time_to_live: `(Union[int, timedelta])` The time-to-live (TTL) for
records stored in Redis. Provide an integer (in seconds) or a
`datetime.timedelta` object.
kwargs: Optional(Dict[str, Any]) additional keyword arguments that
are required to connect to your redis server. Same as `redis.Redis()`.
request_coder: (Optional[`coders.Coder`]) coder for requests stored
in Redis.
response_coder: (Optional[`coders.Coder`]) coder for decoding responses
received from Redis.
source_caller: (Optional[`Caller`]): The source caller using this Redis
cache in case of fetching the cache request to store in Redis.
|
github-repos
|
def store_container(self, container):
    """Thread-safe method to store data in the state's store.

    Args:
        container (containers.interface.AttributeContainer): The data to store.
    """
    key = container.CONTAINER_TYPE
    # Guard the store with the lock so concurrent writers don't race on
    # creating or appending to the per-type list.
    with self._store_lock:
        if key not in self.store:
            self.store[key] = []
        self.store[key].append(container)
|
Thread-safe method to store data in the state's store.
Args:
container (containers.interface.AttributeContainer): The data to store.
|
juraj-google-style
|
def list_groups(refresh=False):
    """Return a list of groups.

    Args:
        refresh (bool):
            Refresh the info for all groups in ``__context__``. If False only
            the groups in ``__context__`` will be returned. If True, the
            ``__context__`` will be refreshed with current data and returned.
            Default is False

    Returns:
        list: A list of groups on the machine

    CLI Example:

    .. code-block:: bash

        salt '*' group.list_groups
    """
    cache_key = 'group.list_groups'
    # Serve from the context cache unless a refresh was requested.
    if not refresh and cache_key in __context__:
        return __context__[cache_key]
    names = [group.Name for group in _get_all_groups()]
    __context__[cache_key] = names
    return names
|
Return a list of groups
Args:
refresh (bool):
Refresh the info for all groups in ``__context__``. If False only
the groups in ``__context__`` will be returned. If True, the
``__context__`` will be refreshed with current data and returned.
Default is False
Returns:
list: A list of groups on the machine
CLI Example:
.. code-block:: bash
salt '*' group.list_groups
|
juraj-google-style
|
def merge_collections(collections, force_dense=False, sampling_rate='auto'):
    """Merge two or more collections at the same level of analysis.

    Args:
        collections (list): List of Collections to merge.
        force_dense: Unused; retained for interface compatibility.
        sampling_rate (int, str): Sampling rate to use if it becomes necessary
            to resample DenseRunVariables. Either an integer or 'auto' (see
            merge_variables docstring for further explanation).

    Returns:
        A BIDSVariableCollection or BIDSRunVariableCollection, depending
        on the type of the input collections.
    """
    # A single collection needs no merging; return the input unchanged.
    if len(listify(collections)) == 1:
        return collections
    levels = {c.level for c in collections}
    if len(levels) > 1:
        raise ValueError(("At the moment, it's only possible to merge Collections at the same level of analysis. You passed collections at levels: %s." % levels))
    variables = [v for c in collections for v in c.variables.values()]
    cls = type(collections[0])
    variables = cls.merge_variables(variables, sampling_rate=sampling_rate)
    if not isinstance(collections[0], BIDSRunVariableCollection):
        return cls(variables)
    if sampling_rate == 'auto':
        # Adopt the rate of the first dense variable, if any exists.
        dense_rates = [v.sampling_rate for v in variables
                       if isinstance(v, DenseRunVariable)]
        sampling_rate = dense_rates[0] if dense_rates else None
    return cls(variables, sampling_rate)
|
Merge two or more collections at the same level of analysis.
Args:
collections (list): List of Collections to merge.
sampling_rate (int, str): Sampling rate to use if it becomes necessary
to resample DenseRunVariables. Either an integer or 'auto' (see
merge_variables docstring for further explanation).
Returns:
A BIDSVariableCollection or BIDSRunVariableCollection, depending
on the type of the input collections.
|
codesearchnet
|
def get_container_instance_group(access_token, subscription_id, resource_group,
                                 container_group_name):
    """Get the JSON definition of a container group.

    Args:
        access_token (str): A valid Azure authentication token.
        subscription_id (str): Azure subscription id.
        resource_group (str): Azure resource group name.
        container_group_name (str): Name of container instance group.

    Returns:
        HTTP response. JSON body of container group.
    """
    endpoint = (
        '{}/subscriptions/{}/resourcegroups/{}'
        '/providers/Microsoft.ContainerInstance/ContainerGroups/{}'
        '?api-version={}'
    ).format(get_rm_endpoint(), subscription_id, resource_group,
             container_group_name, CONTAINER_API)
    return do_get(endpoint, access_token)
|
Get the JSON definition of a container group.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
container_group_name (str): Name of container instance group.
Returns:
HTTP response. JSON body of container group.
|
juraj-google-style
|
def file_modified_time(file_name) -> pd.Timestamp:
    """Return the last-modified time of a file.

    Args:
        file_name: file name

    Returns:
        pd.Timestamp: modification time, parsed from the ``time.ctime``
        string (second resolution, local time).
    """
    mtime = os.path.getmtime(file_name)
    return pd.to_datetime(time.ctime(mtime))
|
File modified time in python
Args:
file_name: file name
Returns:
pd.Timestamp
|
codesearchnet
|
def deregister_context(self, context_words):
    """Deregister a list of context words.

    The whole list is validated before anything is removed, so a bad
    word leaves the registry untouched.

    Args:
        context_words: A list of context words to deregister, as a list of str.

    Raises:
        KeyError: if there are word(s) in context_words that do not correspond
            to any registered contexts.
    """
    unknown = [w for w in context_words if w not in self._comp_dict]
    if unknown:
        raise KeyError('Cannot deregister unregistered context word "%s"' % unknown[0])
    for word in context_words:
        del self._comp_dict[word]
|
Deregister a list of context words.
Args:
context_words: A list of context words to deregister, as a list of str.
Raises:
KeyError: if there are word(s) in context_words that do not correspond
to any registered contexts.
|
github-repos
|
def to_geojson(self, filename, proj, metadata=None):
    """Output the data in the STObject to a geoJSON file.

    Args:
        filename: Name of the file.
        proj: PyProj object for converting the x and y coordinates back to
            latitude and longitude values.
        metadata: Metadata describing the object to be included in the
            top-level properties.
    """
    if metadata is None:
        metadata = {}
    json_obj = {"type": "FeatureCollection", "features": [], "properties": {}}
    json_obj['properties']['times'] = self.times.tolist()
    json_obj['properties']['dx'] = self.dx
    json_obj['properties']['step'] = self.step
    json_obj['properties']['u'] = self.u.tolist()
    json_obj['properties']['v'] = self.v.tolist()
    for k, v in metadata.items():
        json_obj['properties'][k] = v
    # Renamed loop variable (was `time`) to avoid shadowing the time module.
    for t, valid_time in enumerate(self.times):
        feature = {"type": "Feature",
                   "geometry": {"type": "Polygon"},
                   "properties": {}}
        boundary_coords = self.boundary_polygon(valid_time)
        # Convert projected x/y boundary coordinates back to lon/lat pairs.
        lonlat = np.vstack(proj(boundary_coords[0], boundary_coords[1], inverse=True))
        lonlat_list = lonlat.T.tolist()
        if len(lonlat_list) > 0:
            # geoJSON polygon rings must be closed; repeat the first vertex.
            lonlat_list.append(lonlat_list[0])
        feature["geometry"]["coordinates"] = [lonlat_list]
        for attr in ["timesteps", "masks", "x", "y", "i", "j"]:
            feature["properties"][attr] = getattr(self, attr)[t].tolist()
        feature["properties"]["attributes"] = {}
        for attr_name, steps in self.attributes.items():
            feature["properties"]["attributes"][attr_name] = steps[t].tolist()
        json_obj['features'].append(feature)
    # FIX: use a context manager so the file handle is closed even if
    # json.dump raises (the original leaked the handle on error).
    with open(filename, "w") as file_obj:
        json.dump(json_obj, file_obj, indent=1, sort_keys=True)
    return
|
Output the data in the STObject to a geoJSON file.
Args:
filename: Name of the file
proj: PyProj object for converting the x and y coordinates back to latitude and longitue values.
metadata: Metadata describing the object to be included in the top-level properties.
|
juraj-google-style
|
def set(msg_or_dict, key, value):
    """Set a key's value on a protobuf Message or dictionary.

    Dotted keys ("a.b.c") recurse into sub-objects, creating empty dict
    levels along the way when the target is a mapping.

    Args:
        msg_or_dict (Union[~google.protobuf.message.Message, Mapping]): the
            object.
        key (str): The key to set.
        value (Any): The value to set.

    Raises:
        TypeError: If ``msg_or_dict`` is not a Message or dictionary.
    """
    is_mapping = isinstance(msg_or_dict, collections_abc.MutableMapping)
    if not is_mapping and not isinstance(msg_or_dict, message.Message):
        raise TypeError('set() expected a dict or protobuf message, got {!r}.'.format(type(msg_or_dict)))
    basekey, subkey = _resolve_subkeys(key)
    if subkey is not None:
        # Descend one level and recurse with the remaining key path.
        if is_mapping:
            msg_or_dict.setdefault(basekey, {})
        set(get(msg_or_dict, basekey), subkey, value)
        return
    if is_mapping:
        msg_or_dict[key] = value
    else:
        _set_field_on_message(msg_or_dict, key, value)
|
Set a key's value on a protobuf Message or dictionary.
Args:
msg_or_dict (Union[~google.protobuf.message.Message, Mapping]): the
object.
key (str): The key to set.
value (Any): The value to set.
Raises:
TypeError: If ``msg_or_dict`` is not a Message or dictionary.
|
codesearchnet
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.