code
stringlengths 20
4.93k
| docstring
stringlengths 33
1.27k
| source
stringclasses 3
values |
|---|---|---|
class _TrainingTarget(object):
def __init__(self, target, feedable=False, skip_target_weights=True):
self._target = target
self._feedable = feedable
self._skip_target_weights = skip_target_weights
@property
def target(self):
return self._target
@property
def feedable(self):
return self._feedable
@property
def skip_target_weights(self):
return self._skip_target_weights
|
Container for a target tensor (y_true) and its metadata (shape, loss...).
Args:
target: A target tensor for the model. It may be `None` if the
output is excluded from loss computation. It is still kept as None
since each output of the model should have a corresponding target. If
the target is None, the rest of the attributes will be None as well.
feedable: Boolean, whether the target is feedable (requires data to be
passed in `fit` or `train_on_batch`), or not (model compiled with
`target_tensors` argument).
skip_target_weights: Boolean, whether the target should be skipped during
weights calculation.
|
github-repos
|
def energy(self, sample_like, dtype=float):
    """Return the energy of a single sample.

    Args:
        sample_like: A raw sample; an extension of NumPy's array_like
            structure (see :func:`.as_samples`).
        dtype: Data type of the returned energy. Defaults to ``float``.
            The previous default, ``np.float``, was merely an alias of the
            builtin ``float`` and was removed in NumPy 1.24, which made
            this function raise AttributeError at definition time on
            modern NumPy.

    Returns:
        The energy of the given sample.
    """
    # Delegate to the vectorized `energies` and unpack the single result.
    (energy,) = self.energies(sample_like, dtype=dtype)
    return energy
|
The energy of the given sample.
Args:
sample_like (samples_like):
A raw sample. `sample_like` is an extension of
NumPy's array_like structure. See :func:`.as_samples`.
dtype (:class:`numpy.dtype`, optional):
The data type of the returned energies. Defaults to float.
Returns:
The energy.
|
codesearchnet
|
def Unlock(fd, path):
    """Release the advisory flock held on a file.

    Args:
        fd: int, the file descriptor of the file to unlock.
        path: string, the name of the file (used only in error messages).

    Raises:
        IOError: raised from flock while attempting to release the lock.
    """
    try:
        fcntl.flock(fd, fcntl.LOCK_UN | fcntl.LOCK_NB)
    except IOError as e:
        # EWOULDBLOCK means another process holds the lock; report that
        # case distinctly from any other flock failure.
        if e.errno != errno.EWOULDBLOCK:
            raise IOError('Exception unlocking %s. %s.' % (path, str(e)))
        raise IOError('Exception unlocking %s. Locked by another process.' % path)
|
Release the lock on the file.
Args:
fd: int, the file descriptor of the file to unlock.
path: string, the name of the file to unlock.
Raises:
IOError, raised from flock while attempting to release a file lock.
|
juraj-google-style
|
def ParseOptions(self, options):
    """Parses the options and initializes the front-end.

    Args:
        options (argparse.Namespace): command line arguments.

    Raises:
        BadConfigOption: if the options are invalid (raised by the
            argument helpers / option parsers invoked below).
    """
    # Resolve the data location first; later option handling reads it.
    helpers_manager.ArgumentHelperManager.ParseOptions(
        options, self, names=['data_location'])
    signature_identifiers = self.ParseStringOption(
        options, 'signature_identifiers')
    # The literal value 'list' requests listing the available signature
    # identifiers; in that case all remaining option parsing is skipped.
    if signature_identifiers == 'list':
        self.list_signature_identifiers = True
    if self.list_signature_identifiers:
        return
    self._ParseInformationalOptions(options)
    self._ParseLogFileOptions(options)
    self._ParseStorageMediaOptions(options)
    # Default extraction destination when no path was supplied.
    self._destination_path = self.ParseStringOption(
        options, 'path', default_value='export')
    if not self._data_location:
        logger.warning('Unable to automatically determine data location.')
    argument_helper_names = ['artifact_definitions', 'process_resources']
    helpers_manager.ArgumentHelperManager.ParseOptions(
        options, self, names=argument_helper_names)
    self._ParseFilterOptions(options)
    # NOTE(review): both no_vss and include_duplicates turn duplicate
    # skipping off here — presumably intentional; confirm against the
    # tool's CLI documentation.
    if (getattr(options, 'no_vss', False) or
            getattr(options, 'include_duplicates', False)):
        self._skip_duplicates = False
    self._EnforceProcessMemoryLimit(self._process_memory_limit)
|
Parses the options and initializes the front-end.
Args:
options (argparse.Namespace): command line arguments.
Raises:
BadConfigOption: if the options are invalid.
|
juraj-google-style
|
def convert_and_export_with_cache(model: PreTrainedModel, example_input_ids: Optional[torch.Tensor]=None, example_cache_position: Optional[torch.Tensor]=None, dynamic_shapes: Optional[dict]=None, strict: Optional[bool]=None):
    """Convert a `PreTrainedModel` into an exportable module and export it
    using `torch.export`, ensuring compatibility with `ExecuTorch`.

    Args:
        model (`PreTrainedModel`): The pretrained model to be exported.
        example_input_ids (`Optional[torch.Tensor]`): Example input token ids
            used by `torch.export`; defaults to ``[[1]]``.
        example_cache_position (`Optional[torch.Tensor]`): Example current
            cache position; defaults to ``[0]``.
        dynamic_shapes (`Optional[dict]`): Dynamic shapes used by
            `torch.export`; ignored (with a warning) for torch < 2.6.0.
        strict (`Optional[bool]`): Flag instructing `torch.export` to use
            `torchdynamo`; ignored (with a warning) for torch < 2.6.0.

    Returns:
        `torch.export.ExportedProgram`: The exported program.

    Raises:
        ImportError: if torch < 2.3 is installed.
    """
    if not is_torch_greater_or_equal_than_2_3:
        raise ImportError('torch >= 2.3 is required.')
    import torch.export._trace
    # Register an sdpa attention variant that avoids vmap; presumably the
    # vmap-based mask path is not traceable by torch.export — confirm.
    ALL_MASK_ATTENTION_FUNCTIONS.register('sdpa_without_vmap', sdpa_mask_without_vmap)
    ALL_ATTENTION_FUNCTIONS.register('sdpa_without_vmap', ALL_ATTENTION_FUNCTIONS['sdpa'])
    model.config._attn_implementation = 'sdpa_without_vmap'
    with torch.no_grad():
        # Fill in minimal defaults so export always has concrete examples.
        example_input_ids = example_input_ids if example_input_ids is not None else torch.tensor([[1]], dtype=torch.long)
        example_cache_position = example_cache_position if example_cache_position is not None else torch.tensor([0], dtype=torch.long)
        if is_torch_greater_or_equal('2.6.0'):
            exported_program = torch.export.export(TorchExportableModuleWithStaticCache(model), args=(example_input_ids, example_cache_position), kwargs={}, dynamic_shapes=dynamic_shapes, strict=strict if strict is not None else True)
        else:
            # Older torch lacks dynamic shapes / strict control on the
            # private export path; warn when the caller passed them.
            if dynamic_shapes is not None:
                logging.warning('Dynamic shapes spec will be ignored by convert_and_export_with_cache for torch < 2.6.0.')
            if strict is not None:
                logging.warning('The strict flag will be ignored by convert_and_export_with_cache for torch < 2.6.0.')
            exported_program = torch.export._trace._export(TorchExportableModuleWithStaticCache(model), args=(example_input_ids,), kwargs={'cache_position': example_cache_position}, pre_dispatch=False, strict=True)
    return exported_program
|
Convert a `PreTrainedModel` into an exportable module and export it using `torch.export`,
ensuring the exported model is compatible with `ExecuTorch`.
Args:
model (`PreTrainedModel`): The pretrained model to be exported.
example_input_ids (`Optional[torch.Tensor]`): Example input token id used by `torch.export`.
example_cache_position (`Optional[torch.Tensor]`): Example current cache position used by `torch.export`.
dynamic_shapes(`Optional[dict]`): Dynamic shapes used by `torch.export`.
strict(`Optional[bool]`): Flag to instruct `torch.export` to use `torchdynamo`.
Returns:
Exported program (`torch.export.ExportedProgram`): The exported program generated via `torch.export`.
|
github-repos
|
def write(self, brightness):
    """Set the brightness of the LED to `brightness`.

    `brightness` can be a boolean for on/off (on means maximum
    brightness), or an integer value for a specific brightness.

    Args:
        brightness (bool, int): Brightness value to set.

    Raises:
        LEDError: if an I/O or OS error occurs.
        TypeError: if `brightness` type is not bool or int.
        ValueError: if an int `brightness` is out of range.
    """
    if not isinstance(brightness, (bool, int)):
        raise TypeError('Invalid brightness type, should be bool or int.')
    # bool is tested before the range check since bool subclasses int.
    if isinstance(brightness, bool):
        brightness = self._max_brightness if brightness else 0
    elif not 0 <= brightness <= self._max_brightness:
        raise ValueError('Invalid brightness value, should be between 0 and %d.' % self._max_brightness)
    try:
        os.write(self._fd, b'%d\n' % brightness)
    except OSError as e:
        raise LEDError(e.errno, 'Writing LED brightness: ' + e.strerror)
    # Rewind so the next write starts at the beginning of the sysfs file.
    try:
        os.lseek(self._fd, 0, os.SEEK_SET)
    except OSError as e:
        raise LEDError(e.errno, 'Rewinding LED brightness: ' + e.strerror)
|
Set the brightness of the LED to `brightness`.
`brightness` can be a boolean for on/off, or integer value for a
specific brightness.
Args:
brightness (bool, int): Brightness value to set.
Raises:
LEDError: if an I/O or OS error occurs.
TypeError: if `brightness` type is not bool or int.
|
codesearchnet
|
def InitFromNotification(self, notification, is_pending=False):
    """Initializes this object from an existing notification.

    Args:
        notification: A rdfvalues.flows.Notification object.
        is_pending: Indicates whether the user has already seen this
            notification or not.

    Returns:
        The current instance.
    """
    self.timestamp = notification.timestamp
    self.message = notification.message
    self.subject = str(notification.subject)
    self.is_pending = is_pending
    reference_type_enum = ApiNotificationReference.Type
    legacy_type = None
    # Notification types may be "legacy:new" pairs; split off the new name.
    # NOTE(review): maxsplit=2 allows three parts, which would make this
    # two-name unpack raise ValueError — presumably types contain at most
    # one colon; confirm.
    if ":" in notification.type:
        legacy_type, new_type = notification.type.split(":", 2)
        self.notification_type = new_type
    else:
        legacy_type = notification.type
    components = self._GetUrnComponents(notification)
    if legacy_type == "Discovery":
        # Discovery notifications point at the client itself.
        self.reference.type = reference_type_enum.CLIENT
        self.reference.client = ApiNotificationClientReference(
            client_id=components[0])
    elif legacy_type == "ViewObject":
        # Dispatch on the URN shape: hunts, cron jobs, flows, bare
        # clients, and finally VFS paths.
        if len(components) >= 2 and components[0] == "hunts":
            self.reference.type = reference_type_enum.HUNT
            self.reference.hunt.hunt_id = components[1]
        elif len(components) >= 2 and components[0] == "cron":
            self.reference.type = reference_type_enum.CRON
            self.reference.cron.cron_job_id = components[1]
        elif len(components) >= 3 and components[1] == "flows":
            self.reference.type = reference_type_enum.FLOW
            self.reference.flow.flow_id = components[2]
            self.reference.flow.client_id = components[0]
        elif len(components) == 1 and rdf_client.ClientURN.Validate(
            components[0]):
            self.reference.type = reference_type_enum.CLIENT
            self.reference.client.client_id = components[0]
        else:
            # Try to interpret the subject as a VFS path under one of the
            # known AFF4 prefixes; otherwise fall back to UNKNOWN.
            if notification.subject:
                path = notification.subject.Path()
                for prefix in itervalues(rdf_paths.PathSpec.AFF4_PREFIXES):
                    part = "/%s%s" % (components[0], prefix)
                    if path.startswith(part):
                        self.reference.type = reference_type_enum.VFS
                        self.reference.vfs.client_id = components[0]
                        self.reference.vfs.vfs_path = (prefix +
                                                       path[len(part):]).lstrip("/")
                        break
            if self.reference.type != reference_type_enum.VFS:
                self.reference.type = reference_type_enum.UNKNOWN
                self.reference.unknown.subject_urn = notification.subject
    elif legacy_type == "FlowStatus":
        # A flow status needs a valid client URN; otherwise it is opaque.
        if not components or not rdf_client.ClientURN.Validate(components[0]):
            self.reference.type = reference_type_enum.UNKNOWN
            self.reference.unknown.subject_urn = notification.subject
        else:
            self.reference.type = reference_type_enum.FLOW
            self.reference.flow.flow_id = notification.source.Basename()
            self.reference.flow.client_id = components[0]
    elif legacy_type == "GrantAccess":
        # Approval notifications: client, hunt or cron-job approvals share
        # the same trailing (username, approval_id) component layout.
        if rdf_client.ClientURN.Validate(components[1]):
            self.reference.type = reference_type_enum.CLIENT_APPROVAL
            self.reference.client_approval.client_id = components[1]
            self.reference.client_approval.approval_id = components[-1]
            self.reference.client_approval.username = components[-2]
        elif components[1] == "hunts":
            self.reference.type = reference_type_enum.HUNT_APPROVAL
            self.reference.hunt_approval.hunt_id = components[2]
            self.reference.hunt_approval.approval_id = components[-1]
            self.reference.hunt_approval.username = components[-2]
        elif components[1] == "cron":
            self.reference.type = reference_type_enum.CRON_JOB_APPROVAL
            self.reference.cron_job_approval.cron_job_id = components[2]
            self.reference.cron_job_approval.approval_id = components[-1]
            self.reference.cron_job_approval.username = components[-2]
    else:
        # Unrecognized legacy type: preserve the raw URNs for display.
        self.reference.type = reference_type_enum.UNKNOWN
        self.reference.unknown.subject_urn = notification.subject
        self.reference.unknown.source_urn = notification.source
    return self
|
Initializes this object from an existing notification.
Args:
notification: A rdfvalues.flows.Notification object.
is_pending: Indicates whether the user has already seen this notification
or not.
Returns:
The current instance.
|
juraj-google-style
|
def napalm_configure(task: Task, dry_run: Optional[bool]=None, filename: Optional[str]=None, configuration: Optional[str]=None, replace: bool=False) -> Result:
    """Load configuration into a network device using napalm.

    Arguments:
        dry_run: Whether to apply changes or not.
        filename: filename containing the configuration to load.
        configuration: configuration string to load into the device.
        replace: whether to replace or merge the configuration.

    Returns:
        Result with ``changed`` (bool) and ``diff`` (string) set.
    """
    device = task.host.get_connection('napalm', task.nornir.config)
    # Pick the candidate-loading strategy up front, then load once.
    loader = device.load_replace_candidate if replace else device.load_merge_candidate
    loader(filename=filename, config=configuration)
    diff = device.compare_config()
    dry_run = task.is_dry_run(dry_run)
    # Commit only when there is a diff and this is not a dry run.
    if diff and not dry_run:
        device.commit_config()
    else:
        device.discard_config()
    return Result(host=task.host, diff=diff, changed=len(diff) > 0)
|
Loads configuration into a network devices using napalm
Arguments:
dry_run: Whether to apply changes or not
filename: filename containing the configuration to load into the device
configuration: configuration to load into the device
replace: whether to replace or merge the configuration
Returns:
Result object with the following attributes set:
* changed (``bool``): whether the task is changing the system or not
* diff (``string``): change in the system
|
codesearchnet
|
def get_linux_config(browser: str) -> dict:
    """Get the settings for Chrome/Chromium cookies on Linux.

    Args:
        browser: Either "Chrome" or "Chromium" (case-insensitive).

    Returns:
        Config dictionary for Chrome/Chromium cookie decryption.

    Raises:
        ValueError: for any other browser name.
    """
    browser_key = browser.lower()
    if browser_key == 'chrome':
        cookie_file = '~/.config/google-chrome/Default/Cookies'
    elif browser_key == 'chromium':
        cookie_file = '~/.config/chromium/Default/Cookies'
    else:
        raise ValueError("Browser must be either Chrome or Chromium.")
    # Defaults used when no keyring-provided password is found.
    config = {
        'my_pass': 'peanuts',
        'iterations': 1,
        'cookie_file': cookie_file,
    }
    try:
        import gi
        gi.require_version('Secret', '1')
        from gi.repository import Secret
    except ImportError:
        # No GNOME keyring bindings available; keep the defaults.
        return config
    flags = Secret.ServiceFlags.LOAD_COLLECTIONS
    service = Secret.Service.get_sync(flags)
    collections = service.get_collections()
    unlocked_keyrings = service.unlock_sync(collections).unlocked
    keyring_name = "{} Safe Storage".format(browser.capitalize())
    # Scan every unlocked keyring; stop completely at the first item
    # whose label matches the browser's "Safe Storage" entry.
    found = False
    for keyring in unlocked_keyrings:
        for item in keyring.get_items():
            if item.get_label() == keyring_name:
                item.load_secret_sync()
                config['my_pass'] = item.get_secret().get_text()
                found = True
                break
        if found:
            break
    return config
|
Get the settings for Chrome/Chromium cookies on Linux.
Args:
browser: Either "Chrome" or "Chromium"
Returns:
Config dictionary for Chrome/Chromium cookie decryption
|
juraj-google-style
|
def record(self, flat_outputs, inference_args, input_tangents):
    """Record the function call operation on the gradient tape.

    _DelayedRewriteGradientFunctions supports only first-order backprop
    tape gradients (and then only when graph building). It does not work
    with higher-order tape gradients or forward autodiff, but does work
    with higher-order symbolic gradients (tf.gradients).

    Args:
        flat_outputs: The result of running `forward`.
        inference_args: A flat list of Tensors with inference inputs to
            the operation.
        input_tangents: A flat list of Tensors with input tangents
            consumed by the operation.
    """
    # _backward builds the backward function plus the tensors to record.
    backward_function, to_record = self._backward(flat_outputs)
    record.record_operation(self._inference_function.cached_definition.signature.name, to_record, inference_args + input_tangents, backward_function)
|
Record the function call operation.
_DelayedRewriteGradientFunctions supports only first-order backprop tape
gradients (and then only when graph building). It does not work with
higher-order tape gradients or forward autodiff, but does work with
higher-order symbolic gradients (tf.gradients).
Args:
flat_outputs: The result of running `forward`.
inference_args: A flat list of Tensors with inference inputs to the
operation.
input_tangents: A flat list of Tensors with input tangents consumed by the
operation.
|
github-repos
|
def _MultipleModulesFoundError(path, candidates):
    """Generate the error used when multiple module matches are found.

    Args:
        path: The breakpoint location path that the user provided.
        candidates: List of paths matching the user provided path. Must
            contain at least 2 entries (AssertionError otherwise).

    Returns:
        A (format, parameters) tuple for the description field of the
        breakpoint error status.
    """
    assert len(candidates) > 1
    # Always show the first two candidates; when there are more, append
    # the count of the remaining ones for the longer message format.
    params = [path] + _StripCommonPathPrefix(candidates[:2])
    if len(candidates) == 2:
        return (ERROR_LOCATION_MULTIPLE_MODULES_3, params)
    params.append(str(len(candidates) - 2))
    return (ERROR_LOCATION_MULTIPLE_MODULES_4, params)
|
Generates an error message to be used when multiple matches are found.
Args:
path: The breakpoint location path that the user provided.
candidates: List of paths that match the user provided path. Must
contain at least 2 entries (throws AssertionError otherwise).
Returns:
A (format, parameters) tuple that should be used in the description
field of the breakpoint error status.
|
codesearchnet
|
def isexe(*components):
    """Return whether a path is an executable file.

    Arguments:
        *components: path components of the file to check.

    Returns:
        bool: True if the path is a regular file and is executable by the
        current user, else False.
    """
    target = path(*components)
    return isfile(target) and os.access(target, os.X_OK)
|
Return whether a path is an executable file.
Arguments:
path (str): Path of the file to check.
Examples:
>>> fs.isexe("/bin/ls")
True
>>> fs.isexe("/home")
False
>>> fs.isexe("/not/a/real/path")
False
Returns:
bool: True if file is executable, else false.
|
juraj-google-style
|
def _find_dtype(value, preferred):
    """Return the preferred dtype of `value`, or `preferred` if not None.

    Used as an operator over multiple objects in decreasing order of
    priority until one yields a preferred dtype. Only int32, int64 or
    None are legal results.

    Args:
        value: a list, value, RowPartition, or tensor.
        preferred: a given dtype. If not None, this will be returned.

    Returns:
        an optional dtype.

    Raises:
        ValueError: if the resolved dtype is anything other than int32,
            int64 or None.
    """
    result = _find_dtype_helper(value, preferred)
    if result is None or result == dtypes.int32 or result == dtypes.int64:
        return result
    raise ValueError('Illegal dtype: ' + str(result))
|
Returns the preferred dtype of value or preferred if preferred != None.
This is used as an operator to pass over multiple objects in decreasing order
of priority until there is a preferred dtype for one. For example, if you were
adding three tensor-ish things (some tensors, some lists), and needed a
preferred dtype, you could use this as:
def adding(a, b, c, dtype = None):
dtype = _find_dtype(a, dtype)
dtype = _find_dtype(b, dtype)
dtype = _find_dtype(c, dtype)
if dtype is None:
dtype = tf.float32
...Code continues here...
Args:
value: a list, value, RowPartition, or tensor.
preferred: a given dtype. If not None, this will be returned.
Returns:
an optional dtype.
|
github-repos
|
def incoming_edges(self, node):
    """Return a tuple of incoming edges for a node object.

    Arguments:
        node: node object present in the graph to be queried for
            incoming edges (matched by identity, not equality).

    Returns:
        tuple of (out_node, in_node) pairs whose in_node is `node`.
    """
    return tuple((src, dst) for src, dst in self.edges() if node is dst)
|
Returns a ``tuple`` of incoming edges for a **node object**.
Arguments:
- node(``object``) **node object** present in the graph to be queried
for incoming edges.
|
juraj-google-style
|
def get_lock_request(name, version, patch_lock, weak=True):
    """Given a package and patch lock, return the equivalent request.

    For example, for object 'foo-1.2.1' and lock type 'lock_3', the
    equivalent request is '~foo-1.2', restricting updates to
    patch-or-lower version changes. For objects not versioned down to a
    given lock level, the closest possible lock is applied.

    Args:
        name (str): Package name.
        version (Version): Package version.
        patch_lock (PatchLock): Lock type to apply.
        weak (bool): Whether to emit a weak ('~') request.

    Returns:
        `PackageRequest` object, or None if there is no equivalent request.
    """
    prefix = '~' if weak else ''
    # A full lock pins the exact version.
    if patch_lock == PatchLock.lock:
        return PackageRequest('%s%s==%s' % (prefix, name, str(version)))
    # No lock (or no version to lock against) means no request at all.
    if patch_lock == PatchLock.no_lock or not version:
        return None
    # Trim the version to the lock's rank to get the closest lock.
    trimmed = version.trim(patch_lock.rank)
    return PackageRequest('%s%s-%s' % (prefix, name, str(trimmed)))
|
Given a package and patch lock, return the equivalent request.
For example, for object 'foo-1.2.1' and lock type 'lock_3', the equivalent
request is '~foo-1.2'. This restricts updates to foo to patch-or-lower
version changes only.
For objects not versioned down to a given lock level, the closest possible
lock is applied. So 'lock_3' applied to 'foo-1' would give '~foo-1'.
Args:
name (str): Package name.
version (Version): Package version.
patch_lock (PatchLock): Lock type to apply.
Returns:
`PackageRequest` object, or None if there is no equivalent request.
|
codesearchnet
|
def _tpu_service(self):
    """Creates a new Cloud TPU API object.

    This works around an issue where the underlying HTTP connection
    sometimes times out when the script has been running for too long;
    other methods call this to get a fresh API object whenever they need
    to communicate with the Cloud API.

    Raises:
        RuntimeError: If the Google API client package is missing.

    Returns:
        A Google Cloud TPU API object.
    """
    # A pre-configured service (e.g. injected for tests) takes priority.
    if self._service:
        return self._service
    if not _GOOGLE_API_CLIENT_INSTALLED:
        raise RuntimeError('Missing runtime dependency on the Google API client. Run `pip install cloud-tpu-client` to fix.')
    credentials = self._credentials
    if credentials is None or credentials == 'default':
        credentials = client.GoogleCredentials.get_application_default()
    # Honor a custom discovery endpoint when one was configured.
    if self._discovery_url:
        return discovery.build('tpu', 'v1', credentials=credentials, discoveryServiceUrl=self._discovery_url, cache_discovery=False)
    else:
        return discovery.build('tpu', 'v1', credentials=credentials, cache_discovery=False)
|
Creates a new Cloud TPU API object.
This works around an issue where the underlying HTTP connection sometimes
times out when the script has been running for too long. Other methods in
this object call this method to get a new API object whenever they need
to communicate with the Cloud API.
Raises:
RuntimeError: If the dependent Python packages are missing.
Returns:
A Google Cloud TPU API object.
|
github-repos
|
def build_gemini_query(self, query, extra_info):
    """Append a condition to a gemini SQL query.

    Args:
        query(str): The gemini query.
        extra_info(str): The condition text that should be added.

    Return:
        extended_query(str): `query` extended with AND when it already
        has a WHERE clause, or with a new WHERE clause otherwise.
    """
    joiner = "AND" if 'WHERE' in query else "WHERE"
    return "{0} {1} {2}".format(query, joiner, extra_info)
|
Append sql to a gemini query
Args:
query(str): The gemini query
extra_info(str): The text that should be added
Return:
extended_query(str)
|
juraj-google-style
|
def sample_variants(self, variants, sample_name, category='snv'):
    """Given a list of variants, get variant objects carried by a sample.

    Args:
        variants(list): a list of variant ids
        sample_name(str): a sample display name
        category(str): 'snv', 'sv' ..

    Returns:
        result(iterable(Variant))
    """
    LOG.info('Retrieving variants for subject : {0}'.format(sample_name))
    # A genotype call containing '1' or '2' means the sample carries at
    # least one alternative allele.
    carries_allele = re.compile('1|2')
    query = {
        '$and': [
            {'_id': {'$in': variants}},
            {'category': category},
            {'samples': {
                '$elemMatch': {
                    'display_name': sample_name,
                    'genotype_call': {'$regex': carries_allele},
                }
            }},
        ]
    }
    return self.variant_collection.find(query)
|
Given a list of variants get variant objects found in a specific patient
Args:
variants(list): a list of variant ids
sample_name(str): a sample display name
category(str): 'snv', 'sv' ..
Returns:
result(iterable(Variant))
|
juraj-google-style
|
def __call__(self, shape, dtype=dtypes.float32, **kwargs):
    """Returns a tensor object initialized as specified by the initializer.

    Args:
        shape: Shape of the tensor.
        dtype: Optional dtype of the tensor. Only floating point types
            are supported.
        **kwargs: Additional keyword arguments.

    Raises:
        ValueError: If the dtype is not floating point.
    """
    self._validate_kwargs(kwargs)
    dtype = _assert_float_dtype(dtype)
    # When variable partitioning is active, initialize only this
    # partition's shard shape rather than the full variable shape.
    if _PARTITION_SHAPE in kwargs:
        shape = kwargs[_PARTITION_SHAPE]
    return self._random_generator.random_normal(shape, self.mean, self.stddev, dtype)
|
Returns a tensor object initialized as specified by the initializer.
Args:
shape: Shape of the tensor.
dtype: Optional dtype of the tensor. Only floating point types are
supported.
**kwargs: Additional keyword arguments.
Raises:
ValueError: If the dtype is not floating point
|
github-repos
|
def recipe_bulkdozer(config, recipe_timezone, account_id, dcm_profile_id, sheet_url):
    """Run the Bulkdozer feed for Campaign Manager bulk editing.

    Args:
        recipe_timezone (timezone) - Timezone for report dates.
        account_id (string) - Campaign Manager Network ID (optional if
            profile id provided).
        dcm_profile_id (string) - Campaign Manager Profile ID (optional if
            account id provided).
        sheet_url (string) - Feed Sheet URL.
    """
    task = {
        'hour': [],
        'account_id': account_id,
        'dcm_profile_id': dcm_profile_id,
        'auth': 'user',
        'sheet_url': sheet_url,
        'timezone': recipe_timezone,
    }
    traffic(config, task)
|
Bulkdozer is a tool that can reduce trafficking time in Campaign Manager by up
to 80% by providing automated bulk editing capabilities.
Args:
recipe_timezone (timezone) - Timezone for report dates.
account_id (string) - Campaign Manager Network ID (optional if profile id provided)
dcm_profile_id (string) - Campaign Manager Profile ID (optional if account id provided)
sheet_url (string) - Feed Sheet URL
|
github-repos
|
def kron_with_controls(*matrices: np.ndarray) -> np.ndarray:
    """Computes the kronecker product of a sequence of matrices and controls.

    Use linalg.CONTROL_TAG to represent controls. Any entry of the output
    matrix corresponding to a situation where the control is not satisfied
    will be overwritten by identity matrix elements.

    The control logic works by imbuing NaN with the meaning "failed to
    meet one or more controls". The plain kronecker product spreads the
    per-item NaNs to every entry that must become an identity element;
    those NaNs are then rewritten here. Because NaN is re-interpreted as
    a control-failed marker, genuine error-indicating NaNs in the input
    are not propagated to the output.

    Args:
        *matrices: The matrices and controls to combine with the
            kronecker product.

    Returns:
        The resulting matrix.
    """
    product = kron(*matrices)
    # Vectorized replacement for the original per-entry Python loop:
    # every NaN cell takes its identity-matrix value (1 on the diagonal,
    # 0 elsewhere) in a single pass.
    failed = np.isnan(product)
    if failed.any():
        identity = np.eye(product.shape[0], product.shape[1])
        product[failed] = identity[failed]
    return product
|
Computes the kronecker product of a sequence of matrices and controls.
Use linalg.CONTROL_TAG to represent controls. Any entry of the output
matrix corresponding to a situation where the control is not satisfied will
be overwritten by identity matrix elements.
The control logic works by imbuing NaN with the meaning "failed to meet one
or more controls". The normal kronecker product then spreads the per-item
NaNs to all the entries in the product that need to be replaced by identity
matrix elements. This method rewrites those NaNs. Thus CONTROL_TAG can be
the matrix [[NaN, 0], [0, 1]] or equivalently [[NaN, NaN], [NaN, 1]].
Because this method re-interprets NaNs as control-failed elements, it won't
propagate error-indicating NaNs from its input to its output in the way
you'd otherwise expect.
Args:
*matrices: The matrices and controls to combine with the kronecker
product.
Returns:
The resulting matrix.
|
codesearchnet
|
def cloud_train(train_dataset,
                eval_dataset,
                analysis_dir,
                output_dir,
                features,
                model_type,
                max_steps,
                num_epochs,
                train_batch_size,
                eval_batch_size,
                min_eval_frequency,
                top_n,
                layer_sizes,
                learning_rate,
                epsilon,
                job_name,
                job_name_prefix,
                config):
    """Train model using CloudML.

    See local_train() for a description of the shared args.

    Args:
        config: A CloudTrainingConfig object.
        job_name: Training job name. A default will be picked if None.

    Returns:
        The submitted training `ml.Job`.

    Raises:
        ValueError: if the datasets are not single file patterns, the
            output_dir already exists, or config has the wrong type.
    """
    import google.datalab.ml as ml

    if len(train_dataset.input_files) != 1 or len(eval_dataset.input_files) != 1:
        raise ValueError('CsvDataSets must be built with a file pattern, not list '
                         'of files.')
    if file_io.file_exists(output_dir):
        raise ValueError('output_dir already exist. Use a new output path.')
    if isinstance(features, dict):
        # Write the in-memory features dict out so the trainer can read it.
        if not file_io.file_exists(output_dir):
            file_io.recursive_create_dir(output_dir)
        features_file = os.path.join(output_dir, 'features_file.json')
        file_io.write_string_to_file(
            features_file,
            json.dumps(features))
    else:
        features_file = features
    if not isinstance(config, ml.CloudTrainingConfig):
        raise ValueError('cloud should be an instance of '
                         'google.datalab.ml.CloudTrainingConfig for cloud training.')
    # Cloud training requires every referenced path to live on GCS.
    _assert_gcs_files([output_dir, train_dataset.input_files[0], eval_dataset.input_files[0],
                       features_file, analysis_dir])
    args = ['--train-data-paths=%s' % train_dataset.input_files[0],
            '--eval-data-paths=%s' % eval_dataset.input_files[0],
            '--preprocess-output-dir=%s' % analysis_dir,
            '--transforms-file=%s' % features_file,
            '--model-type=%s' % model_type,
            '--max-steps=%s' % str(max_steps),
            '--train-batch-size=%s' % str(train_batch_size),
            '--eval-batch-size=%s' % str(eval_batch_size),
            '--min-eval-frequency=%s' % str(min_eval_frequency),
            '--learning-rate=%s' % str(learning_rate),
            '--epsilon=%s' % str(epsilon)]
    if num_epochs:
        args.append('--num-epochs=%s' % str(num_epochs))
    if top_n:
        args.append('--top-n=%s' % str(top_n))
    if layer_sizes:
        for i in range(len(layer_sizes)):
            args.append('--layer-size%s=%s' % (i + 1, str(layer_sizes[i])))
    job_request = {
        'package_uris': [_package_to_staging(output_dir), _TF_GS_URL, _PROTOBUF_GS_URL],
        'python_module': 'mltoolbox._structured_data.trainer.task',
        'job_dir': output_dir,
        'args': args
    }
    job_request.update(dict(config._asdict()))
    if not job_name:
        job_name = job_name_prefix or 'structured_data_train'
        job_name += '_' + datetime.datetime.now().strftime('%y%m%d_%H%M%S')
    job = ml.Job.submit_training(job_request, job_name)
    print('Job request send. View status of job at')
    # The original statement was truncated in the source ("print('https:");
    # reconstructed console-URL print — confirm the exact path against the
    # upstream datalab repository.
    print('https://console.developers.google.com/ml/jobs?project=%s' %
          _default_project())
    return job
|
Train model using CloudML.
See local_train() for a description of the args.
Args:
config: A CloudTrainingConfig object.
job_name: Training job name. A default will be picked if None.
|
juraj-google-style
|
def to_diff_dict(self) -> dict[str, Any]:
    """Serializes this config to a dict, keeping only non-default values.

    Removes all attributes whose value matches the default config
    attributes for better readability, while always retaining keys such
    as `transformers_version` and the `model_type` of sub-configs.

    Returns:
        Dict[str, Any]: Dictionary of all the attributes that make up
        this configuration instance, minus the defaults.
    """
    config_dict = self.to_dict()
    # Defaults from the base class, and from this specific config class
    # when it can be instantiated without arguments.
    default_config_dict = PretrainedConfig().to_dict()
    class_config_dict = self.__class__().to_dict() if not self.has_no_defaults_at_init else {}
    serializable_config_dict = {}
    for key, value in config_dict.items():
        # Sub-configs are diffed recursively against their own defaults.
        # NOTE(review): `and` binds tighter than `or` here, so the
        # `key in self.sub_configs` clause alone also triggers the
        # recursive branch — presumably intentional.
        if isinstance(getattr(self, key, None), PretrainedConfig) and key in class_config_dict and isinstance(class_config_dict[key], dict) or key in self.sub_configs:
            diff = recursive_diff_dict(value, default_config_dict, config_obj=getattr(self, key, None))
            if 'model_type' in value:
                # Always keep model_type so the sub-config stays loadable.
                diff['model_type'] = value['model_type']
            serializable_config_dict[key] = diff
        elif key not in default_config_dict or key == 'transformers_version' or key == 'vocab_file' or (value != default_config_dict[key]) or (key in default_config_dict and value != class_config_dict.get(key, value)):
            serializable_config_dict[key] = value
    self._remove_keys_not_serialized(serializable_config_dict)
    if '_name_or_path' in serializable_config_dict:
        del serializable_config_dict['_name_or_path']
    if hasattr(self, 'quantization_config'):
        serializable_config_dict['quantization_config'] = self.quantization_config.to_dict() if not isinstance(self.quantization_config, dict) else self.quantization_config
    # Torch dtype objects are not JSON-serializable; stringify them.
    self.dict_torch_dtype_to_str(serializable_config_dict)
    return serializable_config_dict
|
Removes all attributes from the configuration that correspond to the default config attributes for
better readability, while always retaining the `config` attribute from the class. Serializes to a
Python dictionary.
Returns:
Dict[str, Any]: Dictionary of all the attributes that make up this configuration instance.
|
github-repos
|
def service_messages(self, short_name):
    """Get the messages stored for a service.

    Args:
        short_name (string): The short name of the service to get
            messages for.

    Returns:
        list(ServiceMessage): The ServiceMessages stored for this service.

    Raises:
        ArgumentError: if the service name is unknown.
    """
    if short_name not in self.services:
        raise ArgumentError("Unknown service name", short_name=short_name)
    service_state = self.services[short_name]['state']
    return list(service_state.messages)
|
Get the messages stored for a service.
Args:
short_name (string): The short name of the service to get messages for
Returns:
list(ServiceMessage): A list of the ServiceMessages stored for this service
|
juraj-google-style
|
def _PrintExtractionStatusUpdateWindow(self, processing_status):
    """Prints an extraction status update in window mode.

    Args:
        processing_status (ProcessingStatus): processing status.
    """
    # In window mode the whole screen is redrawn on every update.
    if self._stdout_output_writer:
        self._ClearScreen()
    output_text = 'plaso - {0:s} version {1:s}\n\n'.format(self._tool_name, plaso.__version__)
    self._output_writer.Write(output_text)
    self.PrintExtractionStatusHeader(processing_status)
    table_view = views.CLITabularTableView(column_names=['Identifier', 'PID', 'Status', 'Memory', 'Sources', 'Events', 'File'], column_sizes=[15, 7, 15, 15, 15, 15, 0])
    # One row for the foreman followed by one row per worker process.
    self._AddExtractionProcessStatusTableRow(processing_status.foreman_status, table_view)
    for worker_status in processing_status.workers_status:
        self._AddExtractionProcessStatusTableRow(worker_status, table_view)
    table_view.Write(self._output_writer)
    self._output_writer.Write('\n')
    if processing_status.aborted:
        self._output_writer.Write('Processing aborted - waiting for clean up.\n\n')
    # Flush so the redraw becomes visible on stdout immediately.
    if self._stdout_output_writer:
        sys.stdout.flush()
|
Prints an extraction status update in window mode.
Args:
processing_status (ProcessingStatus): processing status.
|
codesearchnet
|
class StackedRNNCells(Layer):
    """Wrapper allowing a stack of RNN cells to behave as a single cell.

    Used to implement efficient stacked RNNs.

    Args:
        cells: List of RNN cell instances.
    """

    def __init__(self, cells, **kwargs):
        super().__init__(**kwargs)
        # Validate the minimal RNN-cell interface up front so misuse
        # fails at construction rather than at call time.
        for cell in cells:
            if 'call' not in dir(cell):
                raise ValueError(f'All cells must have a `call` method. Received cell without a `call` method: {cell}')
            if 'state_size' not in dir(cell):
                raise ValueError(f'All cells must have a `state_size` attribute. Received cell without a `state_size`: {cell}')
        self.cells = cells

    @property
    def state_size(self):
        # One state-size entry per wrapped cell.
        return [c.state_size for c in self.cells]

    @property
    def output_size(self):
        # Prefer the last cell's declared output size; otherwise fall
        # back to (the first element of) its state size.
        if getattr(self.cells[-1], 'output_size', None) is not None:
            return self.cells[-1].output_size
        elif isinstance(self.cells[-1].state_size, (list, tuple)):
            return self.cells[-1].state_size[0]
        else:
            return self.cells[-1].state_size

    def get_initial_state(self, batch_size=None):
        """Build the initial state for every cell in the stack."""
        initial_states = []
        for cell in self.cells:
            get_initial_state_fn = getattr(cell, 'get_initial_state', None)
            if get_initial_state_fn:
                initial_states.append(get_initial_state_fn(batch_size=batch_size))
            elif isinstance(cell.state_size, int):
                initial_states.append(ops.zeros((batch_size, cell.state_size), dtype=self.compute_dtype))
            else:
                initial_states.append([ops.zeros((batch_size, d), dtype=self.compute_dtype) for d in cell.state_size])
        return initial_states

    def call(self, inputs, states, training=False, **kwargs):
        """Run `inputs` through each cell in turn, threading the states."""
        new_states = []
        # NOTE: the loop variable deliberately shadows `states`: each cell
        # consumes its own slice of the incoming per-cell states list.
        for cell, states in zip(self.cells, states):
            state_is_list = tree.is_nested(states)
            states = list(states) if tree.is_nested(states) else [states]
            if isinstance(cell, Layer) and cell._call_has_training_arg:
                kwargs['training'] = training
            else:
                kwargs.pop('training', None)
            cell_call_fn = cell.__call__ if callable(cell) else cell.call
            inputs, states = cell_call_fn(inputs, states, **kwargs)
            # Unwrap single-element states when the input was not a list.
            if len(states) == 1 and (not state_is_list):
                states = states[0]
            new_states.append(states)
        if len(new_states) == 1:
            new_states = new_states[0]
        return (inputs, new_states)

    def build(self, input_shape):
        # Build each cell against the output shape of the previous one.
        for cell in self.cells:
            if isinstance(cell, Layer) and (not cell.built):
                cell.build(input_shape)
                cell.built = True
            if getattr(cell, 'output_size', None) is not None:
                output_dim = cell.output_size
            elif isinstance(cell.state_size, (list, tuple)):
                output_dim = cell.state_size[0]
            else:
                output_dim = cell.state_size
            batch_size = tree.flatten(input_shape)[0]
            input_shape = (batch_size, output_dim)

    def get_config(self):
        cells = []
        for cell in self.cells:
            cells.append(serialization_lib.serialize_keras_object(cell))
        config = {'cells': cells}
        base_config = super().get_config()
        return {**base_config, **config}

    @classmethod
    def from_config(cls, config, custom_objects=None):
        cells = []
        for cell_config in config.pop('cells'):
            cells.append(serialization_lib.deserialize_keras_object(cell_config, custom_objects=custom_objects))
        return cls(cells, **config)
|
Wrapper allowing a stack of RNN cells to behave as a single cell.
Used to implement efficient stacked RNNs.
Args:
cells: List of RNN cell instances.
Example:
```python
batch_size = 3
sentence_length = 5
num_features = 2
new_shape = (batch_size, sentence_length, num_features)
x = np.reshape(np.arange(30), new_shape)
rnn_cells = [keras.layers.LSTMCell(128) for _ in range(2)]
stacked_lstm = keras.layers.StackedRNNCells(rnn_cells)
lstm_layer = keras.layers.RNN(stacked_lstm)
result = lstm_layer(x)
```
|
github-repos
|
def _update_data(self, data):
self.data = data
child_change_dict = {}
for name in self.children:
child_data = getattr(data, name, None)
if (child_data is None):
child_change_dict[name] = [[]]
else:
child_change_dict[name] = [[], child_data]
return child_change_dict
|
Set our data and notify any subscribers of children what has changed
Args:
data (object): The new data
Returns:
dict: {child_name: [path_list, optional child_data]} of the change
that needs to be passed to a child as a result of this
|
codesearchnet
|
def _BatchNormGrad(grad_y, x, scale, pop_mean, pop_var, epsilon, data_format, is_training=True):
    """Returns the gradients for the 3 inputs of BatchNorm.

    Args:
        grad_y: A `Tensor` of 4 or 5 dimensions for gradient for y.
        x: A `Tensor` of 4 or 5 dimensions for x.
        scale: A `Tensor` of 1 dimension for scaling.
        pop_mean: A `Tensor` of 1 dimension for the population mean. Only
            used when is_training=False.
        pop_var: A `Tensor` of 1 dimension for the population variance.
            Only used when is_training=False.
        epsilon: A small float number added to the variance of x.
        data_format: The data format for input. Either b"NHWC" or b"NCHW"
            (the unhandled else branches cover the 5-D b"NDHWC"/b"NCDHW"
            variants).
        is_training: A bool value to indicate the operation is for
            training (default) or inference.

    Returns:
        A tuple (grad_x, grad_scale, grad_offset), where grad_x is the
        gradient for x, grad_scale the gradient for scale, and
        grad_offset the gradient for offset.
    """
    x_dtype = x.dtype.base_dtype
    # Compute in float32 for numerical stability with half-precision
    # inputs; grad_x is cast back to the input dtype before returning.
    if x_dtype == dtypes.float16 or x_dtype == dtypes.bfloat16:
        x = math_ops.cast(x, dtypes.float32)
        grad_y = math_ops.cast(grad_y, dtypes.float32)
    if is_training:
        # Reduce over all axes except channels. For the channel-first
        # (NC*) formats, keep dims and reshape `scale` so it broadcasts
        # along the channel axis.
        if data_format == b'NHWC':
            keepdims = False
            reduce_axis = [0, 1, 2]
        elif data_format == b'NDHWC':
            keepdims = False
            reduce_axis = [0, 1, 2, 3]
        elif data_format == b'NCHW':
            keepdims = True
            reduce_axis = [0, 2, 3]
            shape = [1, array_ops.size(scale), 1, 1]
            scale = array_ops.reshape(scale, shape)
        else:
            keepdims = True
            reduce_axis = [0, 2, 3, 4]
            shape = [1, array_ops.size(scale), 1, 1, 1]
            scale = array_ops.reshape(scale, shape)
        mean_grad_y = math_ops.reduce_mean(grad_y, reduce_axis, keepdims=keepdims)
        mean_x = math_ops.reduce_mean(x, reduce_axis, keepdims=keepdims)
        var_x = math_ops.reduce_mean(math_ops.squared_difference(x, array_ops.stop_gradient(mean_x)), reduce_axis, keepdims=keepdims)
        grad_y_offset = grad_y - mean_grad_y
        x_offset = x - mean_x
        mean = math_ops.reduce_mean(grad_y * x_offset, axis=reduce_axis, keepdims=keepdims)
        grad_x = scale * math_ops.rsqrt(var_x + epsilon) * (grad_y_offset - math_ops.reciprocal(var_x + epsilon) * mean * x_offset)
        grad_scale = math_ops.rsqrt(var_x + epsilon) * math_ops.reduce_sum(grad_y * x_offset, axis=reduce_axis, keepdims=keepdims)
        # Collapse grad_scale's broadcast shape back to 1-D for NC*.
        if data_format == b'NCHW' or data_format == b'NCDHW':
            grad_scale = array_ops.squeeze(grad_scale)
        grad_offset = math_ops.reduce_sum(grad_y, axis=reduce_axis)
        return (math_ops.cast(grad_x, x_dtype), grad_scale, grad_offset)
    else:
        # Inference path: use the population statistics instead of the
        # batch statistics.
        if data_format == b'NHWC':
            reduce_axis = [0, 1, 2]
        elif data_format == b'NDHWC':
            reduce_axis = [0, 1, 2, 3]
        elif data_format == b'NCHW':
            reduce_axis = [0, 2, 3]
            shape = [1, array_ops.size(pop_mean), 1, 1]
            pop_mean = array_ops.reshape(pop_mean, shape)
            pop_var = array_ops.reshape(pop_var, shape)
            scale = array_ops.reshape(scale, shape)
        else:
            reduce_axis = [0, 2, 3, 4]
            shape = [1, array_ops.size(pop_mean), 1, 1, 1]
            pop_mean = array_ops.reshape(pop_mean, shape)
            pop_var = array_ops.reshape(pop_var, shape)
            scale = array_ops.reshape(scale, shape)
        grad_offset = math_ops.reduce_sum(grad_y, axis=reduce_axis)
        var_rsqrt = math_ops.rsqrt(pop_var + epsilon)
        grad_scale = math_ops.reduce_sum(grad_y * (x - pop_mean) * var_rsqrt, axis=reduce_axis)
        grad_x = grad_y * scale * var_rsqrt
        return (math_ops.cast(grad_x, x_dtype), grad_scale, grad_offset)
|
Returns the gradients for the 3 inputs of BatchNorm.
Args:
grad_y: A `Tensor` of 4 or 5 dimensions for gradient for y.
x: A `Tensor` of 4 or 5 dimensions for x.
scale: A `Tensor` of 1 dimension for scaling.
pop_mean: A `Tensor` of 1 dimension for the population mean. Only used when
is_training=False.
pop_var: A `Tensor` of 1 dimension for the population variance. Only used
when is_training=False.
epsilon: A small float number added to the variance of x.
data_format: The data format for input. Either b"NHWC" or b"NCHW".
is_training: A bool value to indicate the operation is for training
(default) or inference.
Returns:
A tuple (grad_x, grad_scale, grad_offset), where grad_x is the gradient
for x, grad_scale the gradient for scale, and grad_offset the gradient
for offset.
|
github-repos
|
def _predictResponseSize(mode, functioncode, payloadToSlave):
    """Calculate the number of bytes that should be received from the slave.

    Args:
        mode (str): The Modbus protocol mode (MODE_RTU or MODE_ASCII).
        functioncode (int): Modbus function code.
        payloadToSlave (str): The raw request to be sent to the slave
            (not a hex encoded string).

    Returns:
        int: The predicted number of bytes in the response.

    Raises:
        ValueError, TypeError.
    """
    MIN_PAYLOAD_LENGTH = 4
    BYTERANGE_FOR_GIVEN_SIZE = slice(2, 4)  # bytes 2-3 hold the requested count
    NUMBER_OF_PAYLOAD_BYTES_IN_WRITE_CONFIRMATION = 4
    NUMBER_OF_PAYLOAD_BYTES_FOR_BYTECOUNTFIELD = 1
    RTU_TO_ASCII_PAYLOAD_FACTOR = 2  # ASCII mode hex-encodes each payload byte
    NUMBER_OF_RTU_RESPONSE_STARTBYTES = 2
    NUMBER_OF_RTU_RESPONSE_ENDBYTES = 2
    NUMBER_OF_ASCII_RESPONSE_STARTBYTES = 5
    NUMBER_OF_ASCII_RESPONSE_ENDBYTES = 4

    _checkMode(mode)
    _checkFunctioncode(functioncode, None)
    _checkString(payloadToSlave, description='payload', minlength=MIN_PAYLOAD_LENGTH)

    if functioncode in [5, 6, 15, 16]:
        # Write commands: the slave echoes a fixed-size confirmation.
        response_payload_size = NUMBER_OF_PAYLOAD_BYTES_IN_WRITE_CONFIRMATION
    elif functioncode in [1, 2, 3, 4]:
        given_size = _twoByteStringToNum(payloadToSlave[BYTERANGE_FOR_GIVEN_SIZE])
        if functioncode in (1, 2):
            # Bit reads: byte count field plus the requested count.
            response_payload_size = (
                NUMBER_OF_PAYLOAD_BYTES_FOR_BYTECOUNTFIELD + given_size)
        else:
            # Register reads (3, 4): byte count field plus the register bytes.
            response_payload_size = (
                NUMBER_OF_PAYLOAD_BYTES_FOR_BYTECOUNTFIELD
                + given_size * _NUMBER_OF_BYTES_PER_REGISTER)
    else:
        raise ValueError('Wrong functioncode: {}. The payload is: {!r}'.format(
            functioncode, payloadToSlave))

    if mode == MODE_ASCII:
        return (NUMBER_OF_ASCII_RESPONSE_STARTBYTES
                + response_payload_size * RTU_TO_ASCII_PAYLOAD_FACTOR
                + NUMBER_OF_ASCII_RESPONSE_ENDBYTES)
    return (NUMBER_OF_RTU_RESPONSE_STARTBYTES
            + response_payload_size
            + NUMBER_OF_RTU_RESPONSE_ENDBYTES)
|
Calculate the number of bytes that should be received from the slave.
Args:
* mode (str): The modbus protocol mode (MODE_RTU or MODE_ASCII)
* functioncode (int): Modbus function code.
* payloadToSlave (str): The raw request that is to be sent to the slave (not hex encoded string)
Returns:
The predicted number of bytes (int) in the response.
Raises:
ValueError, TypeError.
|
juraj-google-style
|
def after_request(response):
    """Attach permissive CORS headers to an outgoing response.

    Args:
        response (response): Flask response object.

    Returns:
        response: The same response object, with CORS headers added.
    """
    cors_headers = {
        'Access-Control-Allow-Origin': '*',
        'Access-Control-Allow-Headers': 'Content-Type,Authorization',
        'Access-Control-Allow-Methods': 'GET,PUT,POST,DELETE',
    }
    for header_name, header_value in cors_headers.items():
        response.headers.add(header_name, header_value)
    return response
|
Modifies the response object prior to sending it to the client. Used to add CORS headers to the request
Args:
response (response): Flask response object
Returns:
    response: The modified Flask response object (not `None`)
|
juraj-google-style
|
def decode_bu64(b):
    """Decode bytes from the URL-safe flavor of Base64 used by JWTs.

    Reverse of encode_bu64().

    Args:
        b: bytes. URL-safe Base64 encoded bytes to decode.

    Returns:
        bytes: Decoded bytes.

    Raises:
        ValueError: If the input length is invalid for Base64url.
    """
    # Map the URL-safe alphabet back to the standard Base64 alphabet.
    s = b.replace(b'-', b'+').replace(b'_', b'/')
    remainder = len(s) % 4
    if remainder == 1:
        # A Base64 quantum can never leave exactly one trailing character.
        raise ValueError('Illegal Base64url string')
    if remainder:
        # Restore the padding that Base64url strips.
        s += b'=' * (4 - remainder)
    return base64.standard_b64decode(s)
|
Decode bytes from a URL safe flavor of Base64 used by JWTs.
- Reverse of encode_bu64().
Args:
    b: bytes
    URL safe Base64 encoded bytes to decode.
Returns:
bytes: Decoded bytes.
|
juraj-google-style
|
def get_init_tokens_op(self, num_tokens=-1):
    """Returns the op to fill the sync_token_queue with tokens.

    Intended to run at the start of the chief/sync thread so replicas can
    proceed even when total_num_replicas < replicas_to_aggregate. Requires
    `num_tokens >= replicas_to_aggregate - total_num_replicas`.

    Args:
        num_tokens: Number of tokens to add to the queue; -1 means
            `replicas_to_aggregate`.

    Returns:
        An op for the chief/sync replica to fill the token queue.

    Raises:
        ValueError: If called before apply_gradients(), or if num_tokens is
            smaller than replicas_to_aggregate - total_num_replicas.
    """
    if self._gradients_applied is False:
        raise ValueError('get_init_tokens_op() should be called after apply_gradients().')

    tokens_needed = self._replicas_to_aggregate - self._total_num_replicas
    if num_tokens == -1:
        num_tokens = self._replicas_to_aggregate
    elif num_tokens < tokens_needed:
        raise ValueError('Too few tokens to finish the first step: %d (given) vs %d (needed)' % (num_tokens, tokens_needed))

    if num_tokens <= 0:
        # Nothing to enqueue; return a placeholder op.
        return control_flow_ops.no_op(name='no_init_tokens')
    with ops.device(self._global_step.device), ops.name_scope(''):
        tokens = array_ops.fill([num_tokens], self._global_step)
        return self._sync_token_queue.enqueue_many((tokens,))
|
Returns the op to fill the sync_token_queue with the tokens.
This is supposed to be executed in the beginning of the chief/sync thread
so that even if the total_num_replicas is less than replicas_to_aggregate,
the model can still proceed as the replicas can compute multiple steps per
variable update. Make sure:
`num_tokens >= replicas_to_aggregate - total_num_replicas`.
Args:
num_tokens: Number of tokens to add to the queue.
Returns:
An op for the chief/sync replica to fill the token queue.
Raises:
ValueError: If this is called before apply_gradients().
ValueError: If num_tokens are smaller than replicas_to_aggregate -
total_num_replicas.
|
github-repos
|
def _GetFieldByName(message_descriptor, field_name):
try:
return message_descriptor.fields_by_name[field_name]
except KeyError:
raise ValueError(('Protocol message %s has no "%s" field.' % (message_descriptor.name, field_name)))
|
Returns a field descriptor by field name.
Args:
message_descriptor: A Descriptor describing all fields in message.
field_name: The name of the field to retrieve.
Returns:
The field descriptor associated with the field name.
|
codesearchnet
|
def reflection(normal, origin=(0, 0, 0)):
    """Returns reflection symmetry operation.

    Args:
        normal (3x1 array): Vector of the normal to the plane of reflection.
        origin (3x1 array): A point the mirror plane passes through.

    Returns:
        SymmOp for the reflection about the plane.
    """
    # Unit normal of the mirror plane.
    u, v, w = np.array(normal, dtype=float) / np.linalg.norm(normal)

    # Householder reflection about a plane through the coordinate origin.
    xy = -2 * u * v
    xz = -2 * u * w
    yz = -2 * v * w
    mirror_mat = [[1 - 2 * u ** 2, xy, xz, 0],
                  [xy, 1 - 2 * v ** 2, yz, 0],
                  [xz, yz, 1 - 2 * w ** 2, 0],
                  [0, 0, 0, 1]]

    if np.linalg.norm(origin) > 1e-6:
        # Conjugate with a translation so the plane passes through `origin`.
        translation = np.eye(4)
        translation[0:3, 3] = -np.array(origin)
        mirror_mat = np.dot(np.linalg.inv(translation),
                            np.dot(mirror_mat, translation))
    return SymmOp(mirror_mat)
|
Returns reflection symmetry operation.
Args:
normal (3x1 array): Vector of the normal to the plane of
reflection.
origin (3x1 array): A point in which the mirror plane passes
through.
Returns:
SymmOp for the reflection about the plane
|
juraj-google-style
|
def _save_state_and_schedule_next(self, shard_state, tstate, task_directive):
    """Save state and schedule task.

    Transactionally saves shard state to datastore and, depending on
    `task_directive`, schedules the next slice / retry / recovery task.
    No modification to any shard_state or tstate.

    Args:
        shard_state: model.ShardState for current shard.
        tstate: model.TransientShardState for current shard.
        task_directive: enum _TASK_DIRECTIVE.

    Returns:
        The task to retry if applicable, otherwise None.
    """
    spec = tstate.mapreduce_spec
    if task_directive == self._TASK_DIRECTIVE.DROP_TASK:
        # Spurious execution: drop without saving state or scheduling.
        return
    if task_directive in (self._TASK_DIRECTIVE.RETRY_SLICE,
                          self._TASK_DIRECTIVE.RETRY_TASK):
        # Ask the task queue to retry this task (see retry_task).
        return self.retry_task()
    elif task_directive == self._TASK_DIRECTIVE.ABORT_SHARD:
        logging.info("Aborting shard %d of job '%s'",
                     shard_state.shard_number, shard_state.mapreduce_id)
        # No follow-up task is scheduled for aborted shards.
        task = None
    elif task_directive == self._TASK_DIRECTIVE.FAIL_TASK:
        logging.critical("Shard %s failed permanently.", shard_state.shard_id)
        task = None
    elif task_directive == self._TASK_DIRECTIVE.RETRY_SHARD:
        logging.warning("Shard %s is going to be attempted for the %s time.",
                        shard_state.shard_id,
                        shard_state.retries + 1)
        task = self._state_to_task(tstate, shard_state)
    elif task_directive == self._TASK_DIRECTIVE.RECOVER_SLICE:
        logging.warning("Shard %s slice %s is being recovered.",
                        shard_state.shard_id,
                        shard_state.slice_id)
        task = self._state_to_task(tstate, shard_state)
    else:
        # Normal progress: schedule the next slice after a countdown.
        assert task_directive == self._TASK_DIRECTIVE.PROCEED_TASK
        countdown = self._get_countdown_for_next_slice(spec)
        task = self._state_to_task(tstate, shard_state, countdown=countdown)
    queue_name = os.environ.get("HTTP_X_APPENGINE_QUEUENAME",
                                "default")
    config = util.create_datastore_write_config(spec)
    @db.transactional(retries=5)
    def _tx():
        """Flush shard state and enqueue the task in a single transaction."""
        fresh_shard_state = model.ShardState.get_by_shard_id(tstate.shard_id)
        if not fresh_shard_state:
            raise db.Rollback()
        if (not fresh_shard_state.active or
            "worker_active_state_collision" in _TEST_INJECTED_FAULTS):
            # Another worker already finished or owns this shard; drop ours.
            logging.warning("Shard %s is not active. Possible spurious task "
                            "execution. Dropping this task.", tstate.shard_id)
            logging.warning("Datastore's %s", str(fresh_shard_state))
            logging.warning("Slice's %s", str(shard_state))
            return
        fresh_shard_state.copy_from(shard_state)
        fresh_shard_state.put(config=config)
        if fresh_shard_state.active:
            # Only enqueue a follow-up while the shard remains active.
            self._add_task(task, spec, queue_name)
    try:
        _tx()
    # Python 2 except-clause syntax preserved from the original source.
    except (datastore_errors.Error,
            taskqueue.Error,
            runtime.DeadlineExceededError,
            apiproxy_errors.Error), e:
        logging.warning(
            "Can't transactionally continue shard. "
            "Will retry slice %s %s for the %s time.",
            tstate.shard_id,
            tstate.slice_id,
            self.task_retry_count() + 1)
        # Free the lease so another worker can pick the shard up.
        self._try_free_lease(shard_state)
        raise e
|
Save state and schedule task.
Save shard state to datastore.
Schedule next slice if needed.
Set HTTP response code.
No modification to any shard_state or tstate.
Args:
shard_state: model.ShardState for current shard.
tstate: model.TransientShardState for current shard.
task_directive: enum _TASK_DIRECTIVE.
Returns:
The task to retry if applicable.
|
juraj-google-style
|
def __call__(self, shape, dtype=None, **kwargs):
    """Returns a tensor object initialized as specified by the initializer.

    Abstract hook: subclasses must override this method.

    Args:
        shape: Shape of the tensor.
        dtype: Optional dtype of the tensor.
        **kwargs: Additional keyword arguments.

    Raises:
        NotImplementedError: Always, in this base implementation.
    """
    raise NotImplementedError
|
Returns a tensor object initialized as specified by the initializer.
Args:
shape: Shape of the tensor.
dtype: Optional dtype of the tensor.
**kwargs: Additional keyword arguments.
|
github-repos
|
def tarfile_extract(fileobj, dest_path):
    """Extract a tarfile described by a file object to a specified path.

    Args:
        fileobj (file): File object wrapping the target tarfile.
        dest_path (str): Path to extract the contents of the tarfile to.
    """
    tar = tarfile.open(mode='r|', fileobj=fileobj, bufsize=pipebuf.PIPE_BUF_BYTES)
    dest_path = os.path.realpath(dest_path)
    extracted_files = []
    try:
        for member in tar:
            # NOTE(review): only absolute names are rejected; names containing
            # '..' components could still escape dest_path -- confirm members
            # are trusted upstream.
            assert (not member.name.startswith('/'))
            relpath = os.path.join(dest_path, member.name)
            if member.issym():
                # Recreate symlinks manually, replacing stale ones in place.
                target_path = os.path.join(dest_path, member.name)
                try:
                    os.symlink(member.linkname, target_path)
                except OSError as e:
                    if (e.errno == errno.EEXIST):
                        os.remove(target_path)
                        os.symlink(member.linkname, target_path)
                    else:
                        raise
                continue
            if (member.isreg() and (member.size >= pipebuf.PIPE_BUF_BYTES)):
                # Large regular files take the pipe-buffered extraction path.
                cat_extract(tar, member, relpath)
            else:
                tar.extract(member, path=dest_path)
            filename = os.path.realpath(relpath)
            extracted_files.append(filename)
            # Amortize fsync cost over batches of extracted files.
            if (len(extracted_files) > 1000):
                _fsync_files(extracted_files)
                del extracted_files[:]
    finally:
        # Bug fix: close the archive even if extraction raises, so the
        # stream/file handle is not leaked.
        tar.close()
    _fsync_files(extracted_files)
|
Extract a tarfile described by a file object to a specified path.
Args:
fileobj (file): File object wrapping the target tarfile.
dest_path (str): Path to extract the contents of the tarfile to.
|
codesearchnet
|
def iterator_chain(variables: VarType, parent: str = None) -> Iterable[VarMatrix]:
    """Chain the expanded values of each list element into one list.

    Each item in `variables` is expanded with `variable_matrix` (using the
    "product" strategy) and all resulting values are concatenated into a
    single list, yielded once.

    Args:
        variables: The variables object; must be a list.
        parent: Unused, forwarded to `variable_matrix`.

    Raises:
        ValueError: If `variables` is not a list.
    """
    logger.debug("Yielding from append iterator")
    if not isinstance(variables, list):
        raise ValueError(
            f"Append keyword only takes a list of arguments, got {variables} of type {type(variables)}"
        )
    expanded = (variable_matrix(item, parent, "product") for item in variables)
    yield list(chain.from_iterable(expanded))
|
This successively appends each element of an array to a single list of values.
This takes a list of values and puts all the values generated for each element in
the list into a single list of values. It uses the :func:`itertools.chain` function to
achieve this. This function is particularly useful for specifying multiple types of
simulations with different parameters.
Args:
variables: The variables object
parent: Unused
|
juraj-google-style
|
def process_rewards(self, rewards):
    """Clip raw rewards to `self.reward_range`, round, and cast to int64.

    Args:
        rewards: numpy array of raw (float) rewards.

    Returns:
        processed_rewards: numpy array of np.int64.
    """
    low, high = self.reward_range
    clipped = np.clip(rewards, low, high)
    return np.around(clipped, decimals=0).astype(np.int64)
|
Clips, rounds, and changes to integer type.
Args:
rewards: numpy array of raw (float) rewards.
Returns:
processed_rewards: numpy array of np.int64
|
codesearchnet
|
def get_arrays(self, type_img):
    """Return the (X, Y, Z) arrays for the region of interest.

    Args:
        type_img (str): Either "lola" or "wac" (case insensitive).

    Returns:
        Tuple of three arrays (X longitudes, Y latitudes, Z values).

    Raises:
        ValueError: If type_img is neither "lola" nor "wac".
    """
    kind = type_img.lower()
    if kind == 'lola':
        return LolaMap(self.ppdlola, *self.window,
                       path_pdsfile=self.path_pdsfiles).image()
    if kind == 'wac':
        return WacMap(self.ppdwac, *self.window,
                      path_pdsfile=self.path_pdsfiles).image()
    raise ValueError('The img type has to be either "Lola" or "Wac"')
|
Return arrays the region of interest
Args:
type_img (str): Either lola or wac.
Returns:
A tupple of three arrays ``(X,Y,Z)`` with ``X`` contains the
longitudes, ``Y`` contains the latitude and ``Z`` the values
extracted for the region of interest.
Note:
The argument has to be either lola or wac. Note case sensitive.
All return arrays have the same size.
All coordinates are in degree.
|
juraj-google-style
|
def _get_object_checkpoint_renames(path, variable_names):
    """Returns a dictionary mapping variable names to checkpoint keys.

    For object-based checkpoints variable names differ from checkpoint keys,
    so this builds the name -> key map; for name-based checkpoints it
    returns an empty dict.

    Args:
        path: path to checkpoint directory or file.
        variable_names: list of variable names to load from the checkpoint.

    Returns:
        dict mapping variable names to checkpoint keys (empty for
        name-based checkpoints).

    Raises:
        ValueError: If the object-based checkpoint is missing variables.
    """
    checkpoint_filename = checkpoint_utils._get_checkpoint_filename(path)
    try:
        names_to_keys = saver_lib.object_graph_key_mapping(checkpoint_filename)
    except errors.NotFoundError:
        # Name-based checkpoint: no renaming is required.
        return {}
    missing_names = set(variable_names) - set(names_to_keys.keys())
    if missing_names:
        raise ValueError('Attempting to warm-start from an object-based checkpoint, but found that the checkpoint did not contain values for all variables. The following variables were missing: {}'.format(missing_names))
    return dict((name, names_to_keys[name]) for name in variable_names)
|
Returns a dictionary mapping variable names to checkpoint keys.
The warm-starting utility expects variable names to match with the variable
names in the checkpoint. For object-based checkpoints, the variable names
and names in the checkpoint are different. Thus, for object-based checkpoints,
this function is used to obtain the map from variable names to checkpoint
keys.
Args:
path: path to checkpoint directory or file.
variable_names: list of variable names to load from the checkpoint.
Returns:
If the checkpoint is object-based, this function returns a map from variable
names to their corresponding checkpoint keys.
If the checkpoint is name-based, this returns an empty dict.
Raises:
ValueError: If the object-based checkpoint is missing variables.
|
github-repos
|
def emit(self, record):
    """Handle the given record; entry point from the python logging facility.

    Tags the record with the current task, reacts to task start/end marker
    messages, and either shows the record immediately or buffers it on the
    current task for a later dump.

    Args:
        record (logging.LogRecord): log record to handle.

    Returns:
        None
    """
    record.task = self.cur_task
    # A record at or above the dump level marks the current task as failed
    # and forces the task's records to be shown.
    if record.levelno >= self.dump_level and self.cur_task:
        self.tasks[self.cur_task].failed = True
        self.tasks[self.cur_task].force_show = True
    is_start = START_TASK_REG.match(str(record.msg))
    if is_start:
        self.handle_new_task(is_start.groupdict()['task_name'], record)
        return
    is_end = END_TASK_REG.match(str(record.msg))
    if is_end:
        self.handle_closed_task(is_end.groupdict()['task_name'], record)
        return
    # Records explicitly marked always-show are emitted immediately, with
    # the marker stripped from the message first.
    force_show_record = ALWAYS_SHOW_REG.match(str(record.msg))
    if force_show_record:
        record.msg = force_show_record.groupdict()['message']
        self.pretty_emit(record)
    if (
        not force_show_record and self.should_show_by_level(record)
        and self.should_show_by_depth()
    ):
        self.pretty_emit(record)
        return
    # Anything not returned above (including force-shown records) is
    # buffered on the current task so it can be dumped if the task fails.
    if self.cur_task:
        self.tasks[self.cur_task].append(record)
|
Handle the given record, this is the entry point from the python
logging facility
Params:
record (logging.LogRecord): log record to handle
Returns:
None
|
juraj-google-style
|
def indent(self, node, dirty=True):
    """Indent an item. Does nothing if the target has subitems.

    Args:
        node (gkeepapi.node.ListItem): Item to indent.
        dirty (bool): Whether this node should be marked dirty.
    """
    if not node.subitems:
        # Only leaf items may be indented beneath this one.
        self._subitems[node.id] = node
        node.super_list_item_id = self.id
        node.parent_item = self
        if dirty:
            node.touch(True)
|
Indent an item. Does nothing if the target has subitems.
Args:
node (gkeepapi.node.ListItem): Item to indent.
dirty (bool): Whether this node should be marked dirty.
|
juraj-google-style
|
def __init__(self, details):
    """Constructor.

    Initialises the instance.

    Arguments:
        details {dict} -- Details describing the type of values allowed for
            the node. Must contain a '__hash__' entry describing the key
            type; may contain an '__optional__' entry.

    Raises:
        KeyError
        ValueError

    Returns:
        HashNode
    """
    if not isinstance(details, dict):
        raise ValueError('details')
    if '__hash__' not in details:
        raise KeyError('__hash__')
    # '__optional__' is removed before building the child node and restored
    # after.
    # NOTE(review): a falsy value (e.g. __optional__: False) is deleted here
    # but never restored below -- confirm this is intended.
    if '__optional__' in details:
        bOptional = details['__optional__']
        del details['__optional__']
    else:
        bOptional = None
    # A bare True means "any string key".
    if details['__hash__'] is True:
        details['__hash__'] = {"__type__":"string"}
    self._key = Node(details['__hash__'])
    del details['__hash__']
    self._node = _child(details)
    if bOptional:
        details['__optional__'] = bOptional
    super(HashNode, self).__init__(details, 'HashNode')
|
Constructor
Initialises the instance
Arguments:
details {dict} -- Details describing the type of values allowed for
the node
Raises:
KeyError
ValueError
Returns:
HashNode
|
juraj-google-style
|
def validate(bo, error_level: str = "WARNING") -> "BEL":
    """Semantically validate a BEL object's AST.

    Adds errors and warnings to bo.validation_messages. Error levels work
    like log levels: "WARNING" runs both function and argument-value
    validation; any other level runs function validation only.

    Args:
        bo: main BEL language object (must expose .ast,
            .validation_messages and .parse_valid).
        error_level: "WARNING" to include warnings, otherwise errors only.

    Returns:
        BEL: the same object, with validation_messages populated and
        parse_valid set to False when an ERROR was recorded.
    """
    # Bug fix: the return annotation previously claimed
    # Tuple[bool, List[Tuple[str, str]]], but the function returns the BEL
    # object itself.
    if bo.ast:
        bo = validate_functions(bo.ast, bo)
        if error_level == "WARNING":
            bo = validate_arg_values(bo.ast, bo)
    else:
        bo.validation_messages.append(("ERROR", "Invalid BEL Statement - cannot parse"))

    for msg in bo.validation_messages:
        if msg[0] == "ERROR":
            bo.parse_valid = False
            break

    return bo
|
Semantically validate BEL AST
Add errors and warnings to bel_obj.validation_messages
Error Levels are similar to log levels - selecting WARNING includes both
WARNING and ERROR, selecting ERROR just includes ERROR
Args:
bo: main BEL language object
error_level: return ERRORs only or also WARNINGs
Returns:
Tuple[bool, List[Tuple[str, str]]]: (is_valid, messages)
|
juraj-google-style
|
def verify_tensor_all_finite(t=None, msg=None, name=None, x=None, message=None):
    """Assert that the tensor does not contain any NaN's or Inf's.

    Deprecated-argument shim: `t`/`msg` are aliases for `x`/`message`.

    Args:
        t: Tensor to check.
        msg: Message to log on failure.
        name: A name for this operation (optional).
        x: Alias for t.
        message: Alias for msg.

    Returns:
        Same tensor as `t`.
    """
    tensor = deprecation.deprecated_argument_lookup('x', x, 't', t)
    error_message = deprecation.deprecated_argument_lookup('message', message, 'msg', msg)
    return verify_tensor_all_finite_v2(tensor, error_message, name)
|
Assert that the tensor does not contain any NaN's or Inf's.
Args:
t: Tensor to check.
msg: Message to log on failure.
name: A name for this operation (optional).
x: Alias for t.
message: Alias for msg.
Returns:
Same tensor as `t`.
|
github-repos
|
def get_svg_layers(svg_sources):
layers = []
(width, height) = (None, None)
def extract_length(attr):
'Extract length in pixels.'
match = CRE_MM_LENGTH.match(attr)
if match:
return (INKSCAPE_PPmm.magnitude * float(match.group('length')))
else:
return float(attr)
for svg_source_i in svg_sources:
xml_root = etree.parse(svg_source_i)
svg_root = xml_root.xpath('/svg:svg', namespaces=INKSCAPE_NSMAP)[0]
width = max(extract_length(svg_root.attrib['width']), width)
height = max(extract_length(svg_root.attrib['height']), height)
layers += svg_root.xpath('
for (i, layer_i) in enumerate(layers):
layer_i.attrib['id'] = ('layer%d' % (i + 1))
return ((width, height), layers)
|
Collect layers from input svg sources.
Args:
svg_sources (list) : A list of file-like objects, each containing
one or more XML layers.
Returns
-------
(width, height), layers : (int, int), list
The first item in the tuple is the shape of the largest layer, and the
second item is a list of ``Element`` objects (from :mod:`lxml.etree`
module), one per SVG layer.
|
codesearchnet
|
def normalize_keypoints(keypoints: torch.Tensor, height: int, width: int) -> torch.Tensor:
    """Normalize keypoint locations based on the image shape.

    Args:
        keypoints (`torch.Tensor` of shape `(batch_size, num_keypoints, 2)`):
            Keypoints locations in (x, y) format.
        height (`int`): Image height.
        width (`int`): Image width.

    Returns:
        `torch.Tensor` of shape `(batch_size, num_keypoints, 2)` with
        normalized keypoint locations.
    """
    size = torch.tensor([width, height], device=keypoints.device, dtype=keypoints.dtype).unsqueeze(0)
    center = size / 2
    # 0.7 * max(width, height) keeps coordinates roughly within [-1, 1].
    scaling = 0.7 * size.max(dim=1, keepdim=True).values
    return (keypoints - center.unsqueeze(1)) / scaling.unsqueeze(1)
|
Normalize keypoints locations based on image image_shape
Args:
keypoints (`torch.Tensor` of shape `(batch_size, num_keypoints, 2)`):
Keypoints locations in (x, y) format.
height (`int`):
Image height.
width (`int`):
Image width.
Returns:
Normalized keypoints locations of shape (`torch.Tensor` of shape `(batch_size, num_keypoints, 2)`).
|
github-repos
|
def read_string_array(self, key, embedded=True):
    """Read method of CRUD operation for string array data.

    Args:
        key (string): The variable to read from the DB.
        embedded (boolean): Resolve embedded variables.

    Returns:
        (list): Results retrieved from DB, or None when unavailable.
    """
    if key is None:
        self.tcex.log.warning(u'The key field was None.')
        return None
    key_type = self.variable_type(key)
    data = self.db.read(key.strip())
    if embedded:
        data = self.read_embedded(data, key_type)
    if data is not None:
        try:
            data = json.loads(data, object_pairs_hook=OrderedDict)
        except ValueError as e:
            # Malformed data is fatal for the app: log and exit.
            err = u'Failed loading JSON data ({}). Error: ({})'.format(data, e)
            self.tcex.log.error(err)
            self.tcex.message_tc(err)
            self.tcex.exit(1)
    return data
|
Read method of CRUD operation for string array data.
Args:
key (string): The variable to read from the DB.
embedded (boolean): Resolve embedded variables.
Returns:
(list): Results retrieved from DB.
|
juraj-google-style
|
def _OpenFile(self, path):
if not self._registry_file_reader:
return None
return self._registry_file_reader.Open(
path, ascii_codepage=self._ascii_codepage)
|
Opens a Windows Registry file.
Args:
path (str): path of the Windows Registry file.
Returns:
WinRegistryFile: Windows Registry file or None if not available.
|
juraj-google-style
|
def _validate_oneof_field_multi_mapping(src_pb, dest_pb, ignored_fields):
    """Validates whether any oneof field on src_pb maps to multiple fields.

    Args:
        src_pb: the proto to check oneofs from.
        dest_pb: the proto to check oneofs against.
        ignored_fields: fields that skip the check.

    Raises:
        NotImplementedError: if any oneof field in src_pb maps to multiple
            fields from dest_pb.
    """
    ignored = set(ignored_fields)
    dest_oneof_dict = _get_fields_to_oneof_dict(dest_pb.DESCRIPTOR.oneofs_by_name)
    dest_field_names = set(dest_pb.DESCRIPTOR.fields_by_name.keys())
    for oneof_name, oneof_desc in src_pb.DESCRIPTOR.oneofs_by_name.items():
        targets = set()
        for field_desc in oneof_desc.fields:
            field_name = field_desc.name
            if field_name in ignored:
                continue
            if field_name in dest_oneof_dict:
                # Field lands inside a destination oneof.
                targets.add(dest_oneof_dict[field_name])
            elif field_name in dest_field_names:
                targets.add(field_name)
        if len(targets) > 1:
            raise NotImplementedError('Oneof field {} in proto {} maps to more than one field, all fields in the oneof must be explicitly handled or ignored.'.format(oneof_name, src_pb.DESCRIPTOR.name))
|
Validates if the oneof field on src_pb maps to multiple fields.
Args:
src_pb: the proto to check oneof from.
dest_pb: the proto to check oneof against.
ignored_fields: fields that skip the check.
Exception: Raises NotImplementedError if any oneof field in src_pb maps to
multiple fields from dest_pb.
|
github-repos
|
def is_likely_link(text):
    """Return whether the text is likely to be a link.

    This function assumes that leading/trailing whitespace has already been
    removed.

    Returns:
        bool
    """
    text = text.lower()
    # NOTE(review): this check was truncated in the source; reconstructed as
    # an explicit URL-scheme test -- confirm against upstream.
    if text.startswith(('http://', 'https://')):
        return True
    (dummy, dot, file_extension) = text.rpartition('.')
    if (dot and file_extension and (len(file_extension) <= 4)):
        file_extension_set = frozenset(file_extension)
        if (file_extension_set and (file_extension_set <= ALPHANUMERIC_CHARS) and (not (file_extension_set <= NUMERIC_CHARS))):
            # A known TLD suffix means a bare hostname, not a file link.
            if (file_extension in COMMON_TLD):
                return False
            file_type = mimetypes.guess_type(text, strict=False)[0]
            if file_type:
                return True
            else:
                return False
|
Return whether the text is likely to be a link.
This function assumes that leading/trailing whitespace has already been
removed.
Returns:
bool
|
codesearchnet
|
def GetMerger(self, cls):
    """Looks for an added DataSetMerger derived from the given class.

    Args:
        cls: A class derived from DataSetMerger.

    Returns:
        The matching DataSetMerger instance.

    Raises:
        LookupError: No matching DataSetMerger has been added.
    """
    match = next((m for m in self._mergers if isinstance(m, cls)), None)
    if match is None:
        raise LookupError('No matching DataSetMerger found')
    return match
|
Looks for an added DataSetMerger derived from the given class.
Args:
cls: A class derived from DataSetMerger.
Returns:
The matching DataSetMerger instance.
Raises:
LookupError: No matching DataSetMerger has been added.
|
codesearchnet
|
def __init__(self, nrows=None, nvals=None, uniform_row_length=None, dtype=dtypes.int64):
nrows = tensor_shape.TensorShape([nrows])
nvals = tensor_shape.TensorShape([nvals])
if not isinstance(uniform_row_length, tensor_shape.TensorShape):
uniform_row_length = tensor_shape.TensorShape([uniform_row_length])
else:
uniform_row_length = uniform_row_length.with_rank(1)
self._nrows = nrows
self._nvals = nvals
self._uniform_row_length = uniform_row_length
self._dtype = dtypes.as_dtype(dtype)
if self._dtype not in (dtypes.int32, dtypes.int64):
raise ValueError('dtype must be tf.int32 or tf.int64')
nrows = tensor_shape.dimension_value(nrows[0])
nvals = tensor_shape.dimension_value(nvals[0])
ncols = tensor_shape.dimension_value(uniform_row_length[0])
if nrows == 0:
if nvals is None:
self._nvals = tensor_shape.TensorShape([0])
elif nvals != 0:
raise ValueError('nvals=%s is not compatible with nrows=%s' % (nvals, nrows))
if ncols == 0:
if nvals is None:
self._nvals = tensor_shape.TensorShape([0])
elif nvals != 0:
raise ValueError('nvals=%s is not compatible with uniform_row_length=%s' % (nvals, uniform_row_length))
if ncols is not None and nvals is not None:
if ncols != 0 and nvals % ncols != 0:
raise ValueError("nvals=%s is not compatible with uniform_row_length=%s (doesn't divide evenly)" % (nvals, ncols))
if nrows is not None and nvals != ncols * nrows:
raise ValueError('nvals=%s is not compatible with nrows=%s and uniform_row_length=%s' % (nvals, nrows, ncols))
if nrows is None and ncols != 0:
self._nrows = tensor_shape.TensorShape([nvals
if ncols is not None and nrows is not None and (nvals is None):
self._nvals = tensor_shape.TensorShape([ncols * nrows])
|
Constructs a new RowPartitionSpec.
Args:
nrows: The number of rows in the RowPartition, or `None` if unspecified.
nvals: The number of values partitioned by the RowPartition, or `None` if
unspecified.
uniform_row_length: The number of values in each row for this
RowPartition, or `None` if rows are ragged or row length is unspecified.
dtype: The data type used to encode the partition. One of `tf.int64` or
`tf.int32`.
|
github-repos
|
def _check_dep(self, depinfo, deptile, resolver):
try:
settings = self._load_depsettings(deptile)
except IOError:
return False
if (settings['resolver'] != resolver.__class__.__name__):
return None
resolver_settings = {}
if ('settings' in settings):
resolver_settings = settings['settings']
return resolver.check(depinfo, deptile, resolver_settings)
|
Check if a dependency tile is up to date
Returns:
bool: True if it is up to date, False if it not and None if this resolver
cannot assess whether or not it is up to date.
|
codesearchnet
|
def git_clone(prettyname: str, url: str, directory: str,
              branch: str = None,
              commit: str = None,
              clone_options: List[str] = None,
              run_func: Callable[[List[str]], Any] = None) -> bool:
    """Fetches a Git repository, unless we have it already.

    Args:
        prettyname: name to display to user
        url: URL
        directory: destination directory
        branch: repository branch
        commit: repository commit tag
        clone_options: additional options to pass to ``git clone``
        run_func: function to use to call an external command

    Returns:
        did we need to do anything?
    """
    run_func = run_func or subprocess.check_call
    if os.path.isdir(directory):
        # Existing checkout: leave it untouched.
        log.info("Not re-cloning {} Git repository: using existing source "
                 "in {}".format(prettyname, directory))
        return False
    log.info("Fetching {} source from {} into {}",
             prettyname, url, directory)
    require_executable(GIT)
    args = [GIT, "clone"] + (clone_options or [])
    if branch:
        args += ["--branch", branch]
    run_func(args + [url, directory])
    if commit:
        # Pin the working tree to the requested commit.
        log.info("Resetting {} local Git repository to commit {}",
                 prettyname, commit)
        run_func([GIT,
                  "-C", directory,
                  "reset", "--hard", commit])
    return True
|
Fetches a Git repository, unless we have it already.
Args:
prettyname: name to display to user
url: URL
directory: destination directory
branch: repository branch
commit: repository commit tag
clone_options: additional options to pass to ``git clone``
run_func: function to use to call an external command
Returns:
did we need to do anything?
|
juraj-google-style
|
def _GetDistinctValues(self, field_name):
self._cursor.execute('SELECT {0:s}, COUNT({0:s}) FROM log2timeline GROUP BY {0:s}'.format(field_name))
result = {}
row = self._cursor.fetchone()
while row:
if row[0]:
result[row[0]] = row[1]
row = self._cursor.fetchone()
return result
|
Query database for unique field types.
Args:
field_name (str): name of the field to retrieve.
Returns:
dict[str, int]: counts of field types by name.
|
codesearchnet
|
def HasStorage(self):
    """Flag indicating if storage is available.

    Returns:
        bool: True if available. False otherwise.
    """
    # Imported lazily, mirroring the original (avoids import cycles).
    from neo.Core.State.ContractState import ContractPropertyState
    storage_flag = ContractPropertyState.HasStorage
    return (self.ContractProperties & storage_flag) > 0
|
Flag indicating if storage is available.
Returns:
bool: True if available. False otherwise.
|
codesearchnet
|
def download_artifact_bundle(self, id_or_uri, file_path):
    """Download the Artifact Bundle.

    Args:
        id_or_uri: ID or URI of the Artifact Bundle.
        file_path (str): Destination file path.

    Returns:
        bool: Successfully downloaded.
    """
    bundle_id = extract_id_from_uri(id_or_uri)
    download_uri = self.DOWNLOAD_PATH + '/' + bundle_id
    return self._client.download(download_uri, file_path)
|
Download the Artifact Bundle.
Args:
id_or_uri: ID or URI of the Artifact Bundle.
file_path(str): Destination file path.
Returns:
bool: Successfully downloaded.
|
codesearchnet
|
def time_range_to_frame_range(self, start, end, sr):
    """Calculate the frames containing samples from a time range in seconds.

    Args:
        start (float): Start time in seconds.
        end (float): End time in seconds.
        sr (int): The sampling rate to use for time-to-sample conversion.

    Returns:
        tuple: Start and end (exclusive) frame indices.
    """
    first_sample = seconds_to_sample(start, sr)
    # The end time is exclusive, hence the last covered sample is end - 1.
    last_sample = seconds_to_sample(end, sr) - 1
    start_frame = self.sample_to_frame_range(first_sample)[0]
    end_frame = self.sample_to_frame_range(last_sample)[1]
    return (start_frame, end_frame)
|
Calculate the frames containing samples from the given time range in seconds.
Args:
start (float): Start time in seconds.
end (float): End time in seconds.
sr (int): The sampling rate to use for time-to-sample conversion.
Returns:
tuple: A tuple containing the start and end (exclusive) frame indices.
|
codesearchnet
|
def canonicalize(self, namespace_targets: Mapping[(str, List[str])]=None) -> 'BEL':
    """Canonicalize the AST in place and return the BEL object.

    Args:
        namespace_targets (Mapping[str, List[str]]): override default
            canonicalization settings of the BEL.bio API.

    Returns:
        BEL: returns self
    """
    if not self.ast:
        return self
    obj = self
    if not obj.ast.collected_nsarg_norms:
        # Namespace-argument normalizations must be collected first.
        obj = obj.collect_nsarg_norms()
    obj.ast.canonicalize()
    return obj
|
Takes an AST and returns a canonicalized BEL statement string.
Args:
namespace_targets (Mapping[str, List[str]]): override default canonicalization
settings of BEL.bio API api_url - see {api_url}/status to get default canonicalization settings
Returns:
BEL: returns self
|
codesearchnet
|
def mod(x1, x2):
    """Returns the element-wise remainder of division.

    Dispatches to a symbolic `Mod` op when either input is a symbolic
    tensor; otherwise computes eagerly via the backend.

    Args:
        x1: First tensor.
        x2: Second tensor.

    Returns:
        Output tensor, element-wise remainder of division.
    """
    if any_symbolic_tensors((x1, x2)):
        return Mod().symbolic_call(x1, x2)
    return backend.numpy.mod(x1, x2)
|
Returns the element-wise remainder of division.
Args:
x1: First tensor.
x2: Second tensor.
Returns:
Output tensor, element-wise remainder of division.
|
github-repos
|
async def snap(self, user=None, view=None):
    """Return a Snap for the given (or default) view and user.

    Args:
        user: User to bind to the snap; defaults to the root user.
        view: View to snap; defaults to self.view.

    Returns:
        (synapse.lib.snap.Snap)

    NOTE: This must be used in a with block.
    """
    if view is None:
        view = self.view
    if user is None:
        user = self.auth.getUserByName('root')
    return await view.snap(user)
|
Return a transaction object for the default view.
Args:
    user: User to bind to the snap (defaults to the root user).
    view: View to snap (defaults to the default view).
Returns:
(synapse.lib.snap.Snap)
NOTE: This must be used in a with block.
|
codesearchnet
|
def set_json(self, obj, status=HttpStatusCodes.HTTP_200):
    """Helper method to set a JSON response.

    Args:
        obj (:obj:`object`): JSON serializable object.
        status (:obj:`str`, optional): Status code of the response.
    """
    # Fall back to str() for anything json cannot serialize natively.
    payload = json.dumps(obj, sort_keys=True, default=lambda x: str(x))
    self.set_status(status)
    self.set_header(HttpResponseHeaders.CONTENT_TYPE, 'application/json')
    self.set_content(payload)
|
Helper method to set a JSON response.
Args:
obj (:obj:`object`): JSON serializable object
status (:obj:`str`, optional): Status code of the response
|
juraj-google-style
|
def placeOrder(self, contract: Contract, order: Order) -> Trade:
    """Place a new order or modify an existing order.

    Returns a Trade that is kept live updated with status changes,
    fills, etc.

    Args:
        contract: Contract to use for order.
        order: The order to be placed.
    """
    orderId = (order.orderId or self.client.getReqId())
    self.client.placeOrder(orderId, contract, order)
    now = datetime.datetime.now(datetime.timezone.utc)
    key = self.wrapper.orderKey(self.wrapper.clientId, orderId, order.permId)
    trade = self.wrapper.trades.get(key)
    if trade:
        # An existing live trade under the same key means this call is a
        # modification of that order.
        assert (trade.orderStatus.status not in OrderStatus.DoneStates)
        logEntry = TradeLogEntry(now, trade.orderStatus.status, 'Modify')
        trade.log.append(logEntry)
        self._logger.info(f'placeOrder: Modify order {trade}')
        trade.modifyEvent.emit(trade)
        self.orderModifyEvent.emit(trade)
    else:
        # New order: register a fresh Trade in PendingSubmit state.
        order.clientId = self.wrapper.clientId
        order.orderId = orderId
        orderStatus = OrderStatus(status=OrderStatus.PendingSubmit)
        logEntry = TradeLogEntry(now, orderStatus.status, '')
        trade = Trade(contract, order, orderStatus, [], [logEntry])
        self.wrapper.trades[key] = trade
        self._logger.info(f'placeOrder: New order {trade}')
        self.newOrderEvent.emit(trade)
    return trade
|
Place a new order or modify an existing order.
Returns a Trade that is kept live updated with
status changes, fills, etc.
Args:
contract: Contract to use for order.
order: The order to be placed.
|
codesearchnet
|
def _ParseLastRunTime(self, parser_mediator, fixed_length_section):
    """Parses the last run time from a fixed-length data section.

    Args:
        parser_mediator (ParserMediator): mediates interactions between
            parsers and other components, such as storage and dfvfs.
        fixed_length_section (job_fixed_length_data_section): a Windows
            Scheduled Task job fixed-length data section.

    Returns:
        dfdatetime.DateTimeValues: last run date and time or None if not
        available.
    """
    systemtime = fixed_length_section.last_run_time
    system_time_tuple = (
        systemtime.year, systemtime.month,
        systemtime.weekday, systemtime.day_of_month,
        systemtime.hours, systemtime.minutes,
        systemtime.seconds, systemtime.milliseconds)
    if system_time_tuple == self._EMPTY_SYSTEM_TIME_TUPLE:
        # An all-zero system time means "never run".
        return None
    try:
        return dfdatetime_systemtime.Systemtime(
            system_time_tuple=system_time_tuple)
    except ValueError:
        parser_mediator.ProduceExtractionWarning(
            'invalid last run time: {0!s}'.format(system_time_tuple))
        return None
|
Parses the last run time from a fixed-length data section.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
fixed_length_section (job_fixed_length_data_section): a Windows
Scheduled Task job fixed-length data section.
Returns:
dfdatetime.DateTimeValues: last run date and time or None if not
available.
|
juraj-google-style
|
def add_notification_listener(self, notification_type, notification_callback):
    """Add a notification callback to the notification center.

    Args:
        notification_type: A string representing the notification type from
            .helpers.enums.NotificationTypes.
        notification_callback: closure or function to call when the event
            is triggered.

    Returns:
        Integer notification id used to remove the notification, or -1 if
        the callback has already been added for this type.
    """
    listeners = self.notifications.setdefault(notification_type, [])
    # Idiom fix: the duplicate check previously used an opaque
    # reduce(filter(lambda ...)) count; any() states the intent directly.
    if any(callback == notification_callback for _, callback in listeners):
        return -1
    listeners.append((self.notification_id, notification_callback))
    ret_val = self.notification_id
    self.notification_id += 1
    return ret_val
|
Add a notification callback to the notification center.
Args:
notification_type: A string representing the notification type from .helpers.enums.NotificationTypes
notification_callback: closure of function to call when event is triggered.
Returns:
Integer notification id used to remove the notification or -1 if the notification has already been added.
|
codesearchnet
|
def list_permissions(self, group_name=None, resource=None):
    """List permission sets, optionally filtered by group and/or resource.

    Args:
        group_name (string): Name of group.
        resource (intern.resource.boss.Resource): Identifies which data
            model object to operate on.

    Returns:
        (list): List of permissions.

    Raises:
        requests.HTTPError on failure.
    """
    service = self.project_service
    service.set_auth(self._token_project)
    return service.list_permissions(group_name, resource)
|
List permission sets associated filtering by group and/or resource.
Args:
group_name (string): Name of group.
resource (intern.resource.boss.Resource): Identifies which data
model object to operate on.
Returns:
(list): List of permissions.
Raises:
requests.HTTPError on failure.
|
juraj-google-style
|
def _Upgrade2To3(self, data):
    """Upgrade data from Version 2 to Version 3.

    Version 3 moved read-only tensor data out of the tensors and into a
    shared ``buffers`` table; each tensor then refers to a buffer by index.
    Index 0 is a shared empty buffer for tensors that carry no data.

    Args:
        data: Dictionary representing the TensorFlow Lite data to be
            upgraded. Modified in place.
    """
    # Buffer 0 is the sentinel empty buffer.
    buffers = [{'data': []}]
    for subgraph in data['subgraphs']:
        for tensor in subgraph.get('tensors', []):
            # Remove the inline payload (if any) and point the tensor at
            # its new home in the buffers table.
            payload = tensor.pop('data_buffer', None)
            if payload:
                tensor['buffer'] = len(buffers)
                buffers.append({'data': payload})
            else:
                tensor['buffer'] = 0
    data['buffers'] = buffers
|
Upgrade data from Version 2 to Version 3.
Changed actual read-only tensor data to be in a buffers table instead
of inline with the tensor.
Args:
data: Dictionary representing the TensorFlow lite data to be upgraded.
This will be modified in-place to be an upgraded version.
|
github-repos
|
def resume(self, email, master_token, state=None, sync=True):
    """Authenticate to Google with the provided master token & sync.

    Args:
        email (str): The account to use.
        master_token (str): The master token.
        state (dict): Serialized state to load.
        sync (bool): Whether to sync after loading.

    Raises:
        LoginException: If there was a problem logging in.
    """
    auth = APIAuth(self.OAUTH_SCOPES)
    # The machine's MAC address serves as the android_id for this login.
    ok = auth.load(email, master_token, android_id=get_mac())
    if ok:
        self.load(auth, state, sync)
    return ok
|
Authenticate to Google with the provided master token & sync.
Args:
email (str): The account to use.
master_token (str): The master token.
state (dict): Serialized state to load.
Raises:
LoginException: If there was a problem logging in.
|
juraj-google-style
|
class Embedding:
    """Represents vector embeddings.

    Args:
        dense_embedding: Dense vector representation.
        sparse_embedding: Optional sparse vector representation for hybrid
            search.
    """

    # Dense vector of floats; None when no dense representation is supplied.
    dense_embedding: Optional[List[float]] = None
    # Sparse representation as a (List[int], List[float]) tuple — presumably
    # (indices, values) parallel lists; confirm against the producer.
    sparse_embedding: Optional[Tuple[List[int], List[float]]] = None
|
Represents vector embeddings.
Args:
dense_embedding: Dense vector representation
sparse_embedding: Optional sparse vector representation for hybrid
search
|
github-repos
|
def VisitFunction(self, f):
    """Rebuild the function with the expanded signatures.

    Called after the children have been visited (i.e. when VisitSignature
    has already converted each signature into a list); replaces the
    function's signatures with the concatenation of every expansion.

    Args:
        f: A pytd.Function instance.

    Returns:
        A copy of the function carrying the expanded signatures.
    """
    expanded = []
    for signature in f.signatures:
        expanded.extend(ExpandSignature(signature))
    return f.Replace(signatures=tuple(expanded))
|
Rebuild the function with the new signatures.
This is called after its children (i.e. when VisitSignature has already
converted each signature into a list) and rebuilds the function using the
new signatures.
Arguments:
f: A pytd.Function instance.
Returns:
Function with the new signatures.
|
github-repos
|
def __init__(self, channel):
    """Creates stubs for the tensorflow.ProfileAnalysis gRPC service.

    Each attribute is a unary-unary callable bound to one RPC method on the
    given channel, with request serializer / response deserializer taken
    from the generated profiler_analysis_pb2 module.

    Args:
        channel: A grpc.Channel.
    """
    # RPC: create a new profiling session.
    self.NewSession = channel.unary_unary('/tensorflow.ProfileAnalysis/NewSession', request_serializer=third__party_dot_tensorflow_dot_core_dot_profiler_dot_profiler__analysis__pb2.NewProfileSessionRequest.SerializeToString, response_deserializer=third__party_dot_tensorflow_dot_core_dot_profiler_dot_profiler__analysis__pb2.NewProfileSessionResponse.FromString)
    # RPC: enumerate existing profile sessions and their available tools.
    self.EnumSessions = channel.unary_unary('/tensorflow.ProfileAnalysis/EnumSessions', request_serializer=third__party_dot_tensorflow_dot_core_dot_profiler_dot_profiler__analysis__pb2.EnumProfileSessionsAndToolsRequest.SerializeToString, response_deserializer=third__party_dot_tensorflow_dot_core_dot_profiler_dot_profiler__analysis__pb2.EnumProfileSessionsAndToolsResponse.FromString)
    # RPC: fetch the data a given tool produced for a session.
    self.GetSessionToolData = channel.unary_unary('/tensorflow.ProfileAnalysis/GetSessionToolData', request_serializer=third__party_dot_tensorflow_dot_core_dot_profiler_dot_profiler__analysis__pb2.ProfileSessionDataRequest.SerializeToString, response_deserializer=third__party_dot_tensorflow_dot_core_dot_profiler_dot_profiler__analysis__pb2.ProfileSessionDataResponse.FromString)
|
Constructor.
Args:
channel: A grpc.Channel.
|
github-repos
|
def traverse_inorder(self, leaves=True, internal=True):
    """Perform an inorder traversal starting at this ``Node`` object.

    Only binary (0- or 2-child) nodes are supported; a node with any other
    child count raises ``RuntimeError(INORDER_NONBINARY)``.

    Args:
        ``leaves`` (``bool``): ``True`` to include leaves, otherwise ``False``
        ``internal`` (``bool``): ``True`` to include internal nodes, otherwise ``False``
    """
    stack = deque()
    node = self
    while True:
        if node is None:
            if not stack:
                break
            # Left subtree exhausted: visit the deferred node, then descend
            # into its right subtree.
            node = stack.pop()
            if (leaves and node.is_leaf()) or (internal and not node.is_leaf()):
                yield node
            if not node.children:
                node = None
            elif len(node.children) == 2:
                node = node.children[1]
            else:
                raise RuntimeError(INORDER_NONBINARY)
        else:
            # Defer this node and keep walking down the left spine.
            stack.append(node)
            if not node.children:
                node = None
            elif len(node.children) == 2:
                node = node.children[0]
            else:
                raise RuntimeError(INORDER_NONBINARY)
|
Perform an inorder traversal starting at this ``Node`` object
Args:
``leaves`` (``bool``): ``True`` to include leaves, otherwise ``False``
``internal`` (``bool``): ``True`` to include internal nodes, otherwise ``False``
|
codesearchnet
|
def find_in_mailbox(cls, session, mailbox_or_id):
    """Get the users that are associated to a Mailbox.

    Args:
        session (requests.sessions.Session): Authenticated session.
        mailbox_or_id (MailboxRef or int): Mailbox, or the ID of the
            mailbox, to get the users for.

    Returns:
        RequestPaginator(output_type=helpscout.models.User): Users
        iterator.
    """
    # Accept either a mailbox object (use its ``id``) or a raw integer id.
    mailbox_id = getattr(mailbox_or_id, 'id', mailbox_or_id)
    return cls('/mailboxes/%d/users.json' % mailbox_id, session=session)
|
Get the users that are associated to a Mailbox.
Args:
session (requests.sessions.Session): Authenticated session.
mailbox_or_id (MailboxRef or int): Mailbox, or the ID of the
mailbox, to get the users for.
Returns:
RequestPaginator(output_type=helpscout.models.User): Users
iterator.
|
juraj-google-style
|
def make_df_from_batch(batch_name, batch_col="b01", reader=None, reader_label=None):
    """Create a pandas DataFrame with the info needed for ``cellpy`` to load
    the runs.

    Args:
        batch_name (str): Name of the batch.
        batch_col (str): The column where the batch name is in the db.
        reader (method): the db-loader; if None, one is created from
            ``reader_label``.
        reader_label (str): the label for the db-loader (only used when
            ``reader`` is not given).

    Returns:
        info_df (pandas DataFrame): sorted by group, indexed by file name.
    """
    # (Removed no-op self-assignments of batch_name/batch_col.)
    logger.debug(f"batch_name, batch_col: {batch_name}, {batch_col}")
    if reader is None:
        # Instantiate the db reader class selected by the label.
        reader = get_db_reader(reader_label)()
    srnos = reader.select_batch(batch_name, batch_col)
    logger.debug("srnos:" + str(srnos))
    info_dict = _create_info_dict(reader, srnos)
    info_df = pd.DataFrame(info_dict)
    info_df = info_df.sort_values(["groups", "filenames"])
    info_df = _make_unique_groups(info_df)
    info_df["labels"] = info_df["filenames"].apply(create_labels)
    info_df.set_index("filenames", inplace=True)
    return info_df
|
Create a pandas DataFrame with the info needed for ``cellpy`` to load
the runs.
Args:
batch_name (str): Name of the batch.
batch_col (str): The column where the batch name is in the db.
reader (method): the db-loader method.
reader_label (str): the label for the db-loader (if db-loader method is
not given)
Returns: info_df (pandas DataFrame)
|
juraj-google-style
|
def remove_config(reset=False):
    """Remove the current DSC Configuration (current, pending and previous).

    .. versionadded:: 2017.7.5

    Args:
        reset (bool): When True, also attempt to reset the DSC environment
            by removing DSCStatusHistory.mof, DSCEngineCache.mof and the
            ConfigurationStatus directory from
            ``C:\\Windows\\System32\\Configuration``. Default is False.

    Returns:
        bool: True if successful.

    Raises:
        CommandExecutionError: On failure.

    CLI Example:

    .. code-block:: bash

        salt '*' dsc.remove_config True
    """
    cmd = 'Stop-DscConfiguration'
    log.info('DSC: Stopping Running Configuration')
    try:
        _pshell(cmd)
    except CommandExecutionError as exc:
        # A zero retcode despite the exception is treated as success; only
        # non-zero retcodes are re-raised.
        if (exc.info['retcode'] != 0):
            raise CommandExecutionError('Failed to Stop DSC Configuration', info=exc.info)
        log.info('DSC: %s', exc.info['stdout'])
    cmd = 'Remove-DscConfigurationDocument -Stage Current, Pending, Previous -Force'
    log.info('DSC: Removing Configuration')
    try:
        _pshell(cmd)
    except CommandExecutionError as exc:
        if (exc.info['retcode'] != 0):
            raise CommandExecutionError('Failed to remove DSC Configuration', info=exc.info)
        log.info('DSC: %s', exc.info['stdout'])
    if (not reset):
        return True

    def _remove_fs_obj(path):
        # Remove a file/dir via salt's file.remove; raise on failure.
        if os.path.exists(path):
            log.info('DSC: Removing %s', path)
            if (not __salt__['file.remove'](path)):
                error = 'Failed to remove {0}'.format(path)
                log.error('DSC: %s', error)
                raise CommandExecutionError(error)
    dsc_config_dir = '{0}\\System32\\Configuration'.format(os.getenv('SystemRoot', 'C:\\Windows'))
    _remove_fs_obj('{0}\\DSCStatusHistory.mof'.format(dsc_config_dir))
    _remove_fs_obj('{0}\\DSCEngineCache.mof'.format(dsc_config_dir))
    _remove_fs_obj('{0}\\ConfigurationStatus'.format(dsc_config_dir))
    return True
|
Remove the current DSC Configuration. Removes current, pending, and previous
dsc configurations.
.. versionadded:: 2017.7.5
Args:
reset (bool):
Attempts to reset the DSC configuration by removing the following
from ``C:\\Windows\\System32\\Configuration``:
- File: DSCStatusHistory.mof
- File: DSCEngineCache.mof
- Dir: ConfigurationStatus
Default is False
.. warning::
``remove_config`` may fail to reset the DSC environment if any
of the files in the ``ConfigurationStatus`` directory are in use.
If you wait a few minutes and run again, it may complete successfully.
Returns:
bool: True if successful
Raises:
CommandExecutionError: On failure
CLI Example:
.. code-block:: bash
salt '*' dsc.remove_config True
|
codesearchnet
|
def json(self, json):
    """Defines the JSON body to match.

    ``json`` can be a JSON string, a JSON-serializable Python structure
    such as a ``dict`` or ``list``, or a regular expression used to match
    the body.

    Args:
        json (str|dict|list|regex): body JSON to match.
    """
    self._request.json = json
    json_matcher = matcher('JSONMatcher', json)
    self.add_matcher(json_matcher)
|
Defines the JSON body to match.
``json`` argument can be an JSON string, a JSON serializable
Python structure, such as a ``dict`` or ``list`` or it can be
a regular expression used to match the body.
Arguments:
json (str|dict|list|regex): body JSON to match.
Returns:
self: current Mock instance.
|
juraj-google-style
|
def get_gpus(num_gpu=1, worker_index=-1):
    """Get a list of free GPUs according to ``nvidia-smi``.

    Retries up to ``MAX_RETRIES`` times (with growing sleeps) until the
    requested number of GPUs are free.

    Args:
        num_gpu: number of GPUs desired.
        worker_index: index "hint" for allocation of available GPUs; -1
            picks randomly from the free set.

    Returns:
        Comma-delimited string of GPU indices.

    Raises:
        Exception: if the requested number of free GPUs cannot be found.
    """
    list_gpus = subprocess.check_output(["nvidia-smi", "--list-gpus"]).decode()
    logging.debug("all GPUs:\n{0}".format(list_gpus))
    gpus = [x for x in list_gpus.split('\n') if len(x) > 0]

    def parse_gpu(gpu_str):
        # Extract (uuid, index) from one `nvidia-smi --list-gpus` line.
        cols = gpu_str.split(' ')
        return cols[5].split(')')[0], cols[1].split(':')[0]
    gpu_list = [parse_gpu(gpu) for gpu in gpus]
    free_gpus = []
    retries = 0
    while len(free_gpus) < num_gpu and retries < MAX_RETRIES:
        # A GPU is "busy" if any compute app currently reports its uuid.
        smi_output = subprocess.check_output(["nvidia-smi", "--format=csv,noheader,nounits", "--query-compute-apps=gpu_uuid"]).decode()
        logging.debug("busy GPUs:\n{0}".format(smi_output))
        busy_uuids = [x for x in smi_output.split('\n') if len(x) > 0]
        for uuid, index in gpu_list:
            if uuid not in busy_uuids:
                free_gpus.append(index)
        if len(free_gpus) < num_gpu:
            # Not enough free GPUs: back off (30s, 60s, ...) and re-scan.
            logging.warn("Unable to find available GPUs: requested={0}, available={1}".format(num_gpu, len(free_gpus)))
            retries += 1
            time.sleep(30 * retries)
            free_gpus = []
    logging.info("Available GPUs: {}".format(free_gpus))
    if len(free_gpus) < num_gpu:
        # Give a detailed per-process report before failing.
        smi_output = subprocess.check_output(["nvidia-smi", "--format=csv", "--query-compute-apps=gpu_uuid,pid,process_name,used_gpu_memory"]).decode()
        logging.info(": {0}".format(smi_output))
        raise Exception("Unable to find {} free GPU(s)\n{}".format(num_gpu, smi_output))
    num_available = len(free_gpus)
    if worker_index == -1:
        # No placement hint: pick num_gpu free GPUs at random.
        random.shuffle(free_gpus)
        proposed_gpus = free_gpus[:num_gpu]
    else:
        # Deterministic slice per worker; wrap around if the hint overflows
        # the number of currently free GPUs.
        if worker_index * num_gpu + num_gpu > num_available:
            worker_index = worker_index * num_gpu % num_available
        proposed_gpus = free_gpus[worker_index * num_gpu:(worker_index * num_gpu + num_gpu)]
    logging.info("Proposed GPUs: {}".format(proposed_gpus))
    return ','.join(str(x) for x in proposed_gpus)
|
Get list of free GPUs according to nvidia-smi.
This will retry for ``MAX_RETRIES`` times until the requested number of GPUs are available.
Args:
:num_gpu: number of GPUs desired.
:worker_index: index "hint" for allocation of available GPUs.
Returns:
Comma-delimited string of GPU ids, or raises an Exception if the requested number of GPUs could not be found.
|
juraj-google-style
|
def prettyprint_cfg_tree(root, decorate_after_node=0, full=False, forward=False):
    """Pretty print a cfg tree with the bindings at each node.

    Args:
        root: The root node.
        decorate_after_node: Don't print bindings unless node_id > this.
        full: Print the full string representation of a binding's data.
        forward: Traverse the tree forwards (outgoing edges) if true,
            otherwise backwards (incoming edges).

    Returns:
        A prettyprinted tree.
    """
    def get_children(node):
        return node.outgoing if forward else node.incoming

    def get_description(node):
        return prettyprint_cfg_node(node, decorate_after_node, full)

    return ascii_tree(root, get_children=get_children, get_description=get_description)
|
Pretty print a cfg tree with the bindings at each node.
Args:
root: The root node.
decorate_after_node: Don't print bindings unless node_id > this.
full: Print the full string representation of a binding's data
forward: Traverse the tree forwards if true.
Returns:
A prettyprinted tree.
|
github-repos
|
def _convert_bytes_to_cc_source(data, array_name, max_line_width=80, include_guard=None, include_path=None, use_tensorflow_license=False):
    """Returns strings representing a C++ constant array containing `data`.

    NOTE(review): several string literals below (include_line, license_text
    and the source/header templates) appear truncated in this copy of the
    file — restore them from the upstream source before relying on output.

    Args:
        data: Byte array that will be converted into a C++ constant.
        array_name: String to use as the variable name for the constant array.
        max_line_width: The longest line length, for formatting purposes.
        include_guard: Name to use for the include guard macro definition.
        include_path: Optional path to include in the source file.
        use_tensorflow_license: Whether to include the standard TensorFlow
            Apache2 license in the generated files.

    Returns:
        (source_text, header_text) tuple of C++ source and header strings.
    """
    starting_pad = '   '
    array_lines = []
    array_line = starting_pad
    # Emit ' 0x%02x,' entries, wrapping before max_line_width is exceeded.
    for value in bytearray(data):
        if len(array_line) + 4 > max_line_width:
            array_lines.append(array_line + '\n')
            array_line = starting_pad
        array_line += ' 0x%02x,' % value
    if len(array_line) > len(starting_pad):
        array_lines.append(array_line + '\n')
    array_values = ''.join(array_lines)
    if include_guard is None:
        include_guard = 'TENSORFLOW_LITE_UTIL_' + array_name.upper() + '_DATA_H_'
    if include_path is not None:
        include_line = '
    else:
        include_line = ''
    if use_tensorflow_license:
        license_text = '\n/* Copyright {year} The TensorFlow Authors. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the "License");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http:
    else:
        license_text = ''
    source_template = '{license_text}\n
    source_text = source_template.format(array_name=array_name, array_length=len(data), array_values=array_values, license_text=license_text, include_line=include_line)
    header_template = '\n{license_text}\n\n
    header_text = header_template.format(array_name=array_name, include_guard=include_guard, license_text=license_text)
    return (source_text, header_text)
|
Returns strings representing a C++ constant array containing `data`.
Args:
data: Byte array that will be converted into a C++ constant.
array_name: String to use as the variable name for the constant array.
max_line_width: The longest line length, for formatting purposes.
include_guard: Name to use for the include guard macro definition.
include_path: Optional path to include in the source file.
use_tensorflow_license: Whether to include the standard TensorFlow Apache2
license in the generated files.
Returns:
Text that can be compiled as a C++ source file to link in the data as a
literal array of values.
Text that can be used as a C++ header file to reference the literal array.
|
github-repos
|
def create_from_binary(cls, binary_view):
    """Creates a new DataRuns object from a binary stream.

    The stream may be a byte string, bytearray or a memoryview of the
    bytearray. Runs are decoded until a zero header byte is reached. Each
    header byte packs the byte-width of the run length (low nibble) and of
    the run offset (high nibble); offsets are signed and relative to the
    previous run. A zero offset width yields an offset of None.

    Args:
        binary_view (memoryview of bytearray): binary stream holding the
            attribute contents.

    Returns:
        DataRuns: new object built from the binary stream.
    """
    nw_obj = cls()
    pos = 0
    prev_offset = 0
    header_size = cls._INFO.size
    while binary_view[pos] != 0:
        header = cls._INFO.unpack(binary_view[pos:pos + header_size])[0]
        length_width = header & 0x0F
        offset_width = (header & 0xF0) >> 4
        length_end = pos + header_size + length_width
        run_length = int.from_bytes(
            binary_view[pos + header_size:length_end], "little", signed=False)
        if offset_width:
            run_offset = prev_offset + int.from_bytes(
                binary_view[length_end:length_end + offset_width], "little", signed=True)
            prev_offset = run_offset
        else:
            run_offset = None
        pos += header_size + length_width + offset_width
        nw_obj.data_runs.append((run_length, run_offset))
    _MOD_LOGGER.debug("DataRuns object created successfully")
    return nw_obj
|
Creates a new object DataRuns from a binary stream. The binary
stream can be represented by a byte string, bytearray or a memoryview of the
bytearray.
Args:
binary_view (memoryview of bytearray) - A binary stream with the
information of the attribute
Returns:
DataRuns: New object using hte binary stream as source
|
juraj-google-style
|
def create_nsg(access_token, subscription_id, resource_group, nsg_name, location):
    """Create network security group (use create_nsg_rule() to add rules to it).

    Args:
        access_token (str): A valid Azure authentication token.
        subscription_id (str): Azure subscription id.
        resource_group (str): Azure resource group name.
        nsg_name (str): Name of the new NSG.
        location (str): Azure data center location. E.g. westus.

    Returns:
        HTTP response. NSG JSON body.
    """
    endpoint = (
        get_rm_endpoint()
        + '/subscriptions/' + subscription_id
        + '/resourceGroups/' + resource_group
        + '/providers/Microsoft.Network/networkSecurityGroups/' + nsg_name
        + '?api-version=' + NETWORK_API)
    body = json.dumps({'location': location})
    return do_put(endpoint, body, access_token)
|
Create network security group (use create_nsg_rule() to add rules to it).
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
nsg_name (str): Name of the new NSG.
location (str): Azure data center location. E.g. westus.
Returns:
HTTP response. NSG JSON body.
|
juraj-google-style
|
def build_frontend(self, frontend_node):
    """Parse a `frontend` section and return a config.Frontend.

    The listening host/port come from the section's service address when
    one is present; otherwise the first `bind` line in the config block is
    used.

    Args:
        frontend_node (TreeNode): parse-tree node of the `frontend` section.

    Raises:
        Exception: if neither a service address nor a `bind` line provides
            a host and port.

    Returns:
        config.Frontend: the parsed frontend object.
    """
    header = frontend_node.frontend_header
    proxy_name = header.proxy_name.text
    address = header.service_address
    config_block_lines = self.__build_config_block(frontend_node.config_block)
    host, port = '', ''
    if isinstance(address, pegnode.ServiceAddress):
        host = address.host.text
        port = address.port.text
    else:
        # No explicit service address: fall back to the first bind line.
        for line in config_block_lines:
            if isinstance(line, config.Bind):
                host, port = line.host, line.port
                break
        else:
            raise Exception('Not specify host and port in `frontend` definition')
    return config.Frontend(name=proxy_name, host=host, port=port, config_block=config_block_lines)
|
Parse a `frontend` section and return a config.Frontend.
Args:
frontend_node (TreeNode): parse-tree node of a `frontend` section.
Raises:
Exception: if no host and port can be determined from the service address or a `bind` line.
Returns:
config.Frontend: the parsed frontend object.
|
codesearchnet
|
def compose(*parameter_functions):
    """Composes multiple modification functions in order.

    Args:
        *parameter_functions: The functions to compose; each takes
            (var_name, variable, phase) and returns the modified variable.

    Returns:
        A parameter modification function that applies all the provided
        functions in order.
    """
    def composed_fn(var_name, variable, phase):
        for transform in parameter_functions:
            variable = transform(var_name, variable, phase)
        return variable
    return composed_fn
|
Composes multiple modification functions in order.
Args:
*parameter_functions: The functions to compose.
Returns:
A parameter modification function that consists of applying all the provided
functions.
|
juraj-google-style
|
def get_import(self, file_prefixes_to_strip: Sequence[str], module_prefix: str, use_lazy_loading: bool) -> str:
    """Returns the import statement for this entrypoint.

    NOTE(review): both return f-strings below appear truncated in this copy
    of the file (unterminated literals) — restore from upstream before use.

    Args:
        file_prefixes_to_strip: List of prefixes to strip from the file name.
        module_prefix: A prefix to add to the import.
        use_lazy_loading: Whether to emit a lazy-loading registry entry
            instead of a plain `from ... import ...` statement.
    """
    module_import_path = _get_import_path(self.exported_symbol.file_name, file_prefixes_to_strip, module_prefix)
    alias = ''
    symbol_name = self.exported_symbol.symbol_name
    # Alias the import only when the exported name differs from the symbol.
    if self.name != symbol_name:
        alias = f' as {self.name}'
    if not use_lazy_loading:
        return f'from {module_import_path} import {symbol_name}{alias}
    else:
        return f" '{self.name}': ('{module_import_path}', '{symbol_name}'),
|
Returns the import statement for this entrypoint.
Args:
file_prefixes_to_strip: List of prefixes to strip from the file name.
module_prefix: A prefix to add to the import.
use_lazy_loading: Whether to use lazy loading or not.
|
github-repos
|
def extract_issuer_ca_cert_url(cert_obj):
    """Extract the issuer CA certificate URL from a certificate.

    Certificates may include an Authority Information Access extension with
    a URL where the CA certificate used for signing can be downloaded.
    Returns that URL if present.

    Args:
        cert_obj: cryptography.Certificate

    Returns:
        str: Issuer certificate URL if present, else None.
    """
    for extension in cert_obj.extensions:
        if extension.oid.dotted_string != AUTHORITY_INFO_ACCESS_OID:
            continue
        # Within the AIA extension, look for the CA Issuers access method.
        for access_description in extension.value:
            if access_description.access_method.dotted_string == CA_ISSUERS_OID:
                return access_description.access_location.value
|
Extract issuer CA certificate URL from certificate.
Certificates may include a URL where the root certificate for the CA which was used
for signing the certificate can be downloaded. This function returns the URL if
present.
The primary use for this is to fix validation failure due to non-trusted issuer by
downloading the root CA certificate from the URL and installing it in the local
trust store.
Args:
cert_obj: cryptography.Certificate
Returns:
str: Issuer certificate URL if present, else None
|
codesearchnet
|
def WriteEvent(self, event):
    """Writes the event to the output.

    Formatter lookup failures are reported as event errors instead of being
    raised, so output continues with subsequent events.

    Args:
        event (EventObject): event.
    """
    self.WriteEventStart()
    try:
        self.WriteEventBody(event)
    except errors.NoFormatterFound as exception:
        self._ReportEventError(
            event, 'unable to retrieve formatter with error: {0!s}'.format(exception))
    except errors.WrongFormatter as exception:
        self._ReportEventError(
            event, 'wrong formatter with error: {0!s}'.format(exception))
    self.WriteEventEnd()
|
Writes the event to the output.
Args:
event (EventObject): event.
|
codesearchnet
|
def parse_float(value: Any) -> Numeric:
    """Parse a floating point value from the provided value.

    Args:
        value: any value accepted by ``float()``.

    Returns:
        The parsed float.

    Raises:
        ValueError: if ``value`` cannot be parsed as a float.
        TypeError: if ``value`` is of a type ``float()`` rejects outright.
    """
    return float(value)
|
Attempts to parse a valid floating point value from the provided value.
Args:
* value: of Any type
Returns:
* float value: if valid
Raises:
* ValueError: if parsing failed
|
github-repos
|
def time_to_readable_str(value_us, force_time_unit=None):
    """Convert a time value in microseconds to a human-readable string.

    Args:
        value_us: time value in microseconds.
        force_time_unit: force the output to use this time unit; must be in
            TIME_UNITS.

    Returns:
        Human-readable string representation of the time value.

    Raises:
        ValueError: if force_time_unit value is not in TIME_UNITS.
    """
    # Zero (or falsy) durations are rendered without a unit.
    if not value_us:
        return '0'
    if force_time_unit:
        if force_time_unit not in TIME_UNITS:
            raise ValueError('Invalid time unit: %s' % force_time_unit)
        magnitude = TIME_UNITS.index(force_time_unit)
        return '{:.10g}{}'.format(
            value_us / math.pow(10.0, 3 * magnitude), force_time_unit)
    # Choose the largest unit that keeps the mantissa readable, capped at
    # the biggest available unit.
    magnitude = min(len(TIME_UNITS) - 1, int(math.log(value_us, 10) / 3))
    return '{:.3g}{}'.format(
        value_us / math.pow(10.0, 3 * magnitude), TIME_UNITS[magnitude])
|
Convert time value to human-readable string.
Args:
value_us: time value in microseconds.
force_time_unit: force the output to use the specified time unit. Must be
in TIME_UNITS.
Returns:
Human-readable string representation of the time value.
Raises:
ValueError: if force_time_unit value is not in TIME_UNITS.
|
github-repos
|
def write_weights(file_path: str, weights: Array, features: typing.List[str]) -> None:
    """Writes learned weights and corresponding features to a file.

    Each output line is the feature identifier and its weight (six decimal
    places), tab-separated; ``weights[i]`` pairs with ``features[i]``.

    Args:
        file_path: A file path for the weights file.
        weights: A weight vector.
        features: A list of feature identifiers.
    """
    lines = ['%s\t%.6f' % (feature, weights[index])
             for index, feature in enumerate(features)]
    with open(file_path, 'w') as out:
        out.write('\n'.join(lines))
|
Writes learned weights and corresponsing features to a file.
Args:
file_path: A file path for the weights file.
weights: A weight vector.
features: A list of feature identifiers.
|
github-repos
|
def probe_services(self, handle, conn_id, callback):
    """Probe a connected device for its GATT services and characteristics.

    Args:
        handle (int): a handle to the connection on the BLED112 dongle.
        conn_id (int): a unique identifier for this connection on the
            DeviceManager that owns this adapter.
        callback (callable): called when the procedure finishes.
    """
    context = {'connection_id': conn_id, 'handle': handle}
    self._command_task.async_command(['_probe_services', handle], callback, context)
|
Given a connected device, probe for its GATT services and characteristics
Args:
handle (int): a handle to the connection on the BLED112 dongle
conn_id (int): a unique identifier for this connection on the DeviceManager
that owns this adapter.
callback (callable): Callback to be called when this procedure finishes
|
codesearchnet
|
def AddFilesWithUnknownHashes(client_path_blob_refs, use_external_stores=True):
    """Adds new files consisting of given blob references.

    Single-blob (or empty) files are hashed directly from the blob id; for
    multi-blob files the blobs are read back in batches so their sizes,
    offsets and SHA-256 can be verified before the references are written.

    Args:
        client_path_blob_refs: A dictionary mapping `db.ClientPath` instances
            to lists of blob references.
        use_external_stores: A flag indicating if the files should also be
            added to external file stores.

    Returns:
        A dictionary mapping `db.ClientPath` to hash ids of the file.

    Raises:
        BlobNotFoundError: If one of the referenced blobs cannot be found.
        ValueError: If a blob's recorded size or offset conflicts with the
            actual blob contents.
    """
    hash_id_blob_refs = dict()
    client_path_hash_id = dict()
    metadatas = dict()
    all_client_path_blob_refs = list()
    for (client_path, blob_refs) in iteritems(client_path_blob_refs):
        if (len(blob_refs) <= 1):
            # Fast path: the file hash is derivable from the single blob id
            # (or from empty data), no blob read-back needed.
            if blob_refs:
                hash_id = rdf_objects.SHA256HashID.FromBytes(blob_refs[0].blob_id.AsBytes())
            else:
                hash_id = rdf_objects.SHA256HashID.FromData(b'')
            client_path_hash_id[client_path] = hash_id
            hash_id_blob_refs[hash_id] = blob_refs
            metadatas[hash_id] = FileMetadata(client_path=client_path, blob_refs=blob_refs)
        else:
            # Multi-blob files must be verified by reading the blobs back.
            for blob_ref in blob_refs:
                all_client_path_blob_refs.append((client_path, blob_ref))
    # Per-path running offset and SHA-256 accumulated while verifying blobs.
    client_path_offset = collections.defaultdict((lambda : 0))
    client_path_sha256 = collections.defaultdict(hashlib.sha256)
    verified_client_path_blob_refs = collections.defaultdict(list)
    client_path_blob_ref_batches = collection.Batch(items=all_client_path_blob_refs, size=_BLOBS_READ_BATCH_SIZE)
    for client_path_blob_ref_batch in client_path_blob_ref_batches:
        blob_id_batch = set((blob_ref.blob_id for (_, blob_ref) in client_path_blob_ref_batch))
        blobs = data_store.BLOBS.ReadBlobs(blob_id_batch)
        for (client_path, blob_ref) in client_path_blob_ref_batch:
            blob = blobs[blob_ref.blob_id]
            if (blob is None):
                message = 'Could not find one of referenced blobs: {}'.format(blob_ref.blob_id)
                raise BlobNotFoundError(message)
            offset = client_path_offset[client_path]
            # Blob metadata must agree with the bytes actually stored.
            if (blob_ref.size != len(blob)):
                raise ValueError(('Got conflicting size information for blob %s: %d vs %d.' % (blob_ref.blob_id, blob_ref.size, len(blob))))
            if (blob_ref.offset != offset):
                raise ValueError(('Got conflicting offset information for blob %s: %d vs %d.' % (blob_ref.blob_id, blob_ref.offset, offset)))
            verified_client_path_blob_refs[client_path].append(blob_ref)
            client_path_offset[client_path] = (offset + len(blob))
            client_path_sha256[client_path].update(blob)
    # Finalize the hash for every verified multi-blob file.
    for client_path in iterkeys(client_path_sha256):
        sha256 = client_path_sha256[client_path].digest()
        hash_id = rdf_objects.SHA256HashID.FromBytes(sha256)
        client_path_hash_id[client_path] = hash_id
        hash_id_blob_refs[hash_id] = verified_client_path_blob_refs[client_path]
    data_store.REL_DB.WriteHashBlobReferences(hash_id_blob_refs)
    if use_external_stores:
        for client_path in iterkeys(verified_client_path_blob_refs):
            metadatas[client_path_hash_id[client_path]] = FileMetadata(client_path=client_path, blob_refs=verified_client_path_blob_refs[client_path])
        EXTERNAL_FILE_STORE.AddFiles(metadatas)
    return client_path_hash_id
|
Adds new files consisting of given blob references.
Args:
client_path_blob_refs: A dictionary mapping `db.ClientPath` instances to
lists of blob references.
use_external_stores: A flag indicating if the files should also be added to
external file stores.
Returns:
A dictionary mapping `db.ClientPath` to hash ids of the file.
Raises:
BlobNotFoundError: If one of the referenced blobs cannot be found.
|
codesearchnet
|
def delete_document(project_id, knowledge_base_id, document_id):
    """Deletes a Document.

    Args:
        project_id: The GCP project linked with the agent.
        knowledge_base_id: Id of the Knowledge base.
        document_id: Id of the Document.
    """
    import dialogflow_v2beta1 as dialogflow
    client = dialogflow.DocumentsClient()
    path = client.document_path(project_id, knowledge_base_id, document_id)
    operation = client.delete_document(path)
    print('operation running:\n {}'.format(operation.operation))
    print('Waiting for results...')
    # result() blocks until the long-running delete operation completes.
    print('Done.\n {}'.format(operation.result()))
|
Deletes a Document.
Args:
project_id: The GCP project linked with the agent.
knowledge_base_id: Id of the Knowledge base.
document_id: Id of the Document.
|
codesearchnet
|
def __init__(self, size=3, **kwargs):
    """Initializes the pool.

    Args:
        size: size of pool (default 3).
        **kwargs: arguments forwarded to Browser(...).
    """
    self.size = size
    self.kwargs = kwargs
    # Tracks pool members currently handed out — presumably browsers;
    # confirm against the pool's acquire/release methods.
    self._in_use = set()
    # Guards _in_use against concurrent access.
    self._lock = threading.Lock()
|
Initializes the pool.
Args:
size: size of pool (default 3)
**kwargs: arguments for Browser(...)
|
juraj-google-style
|
def __init__(self, rfile, maxlen):
    """Initialize SizeCheckWrapper instance.

    Args:
        rfile (file): file of a limited size.
        maxlen (int): maximum length of the file being read.
    """
    self.rfile = rfile
    self.maxlen = maxlen
    # Running total of bytes consumed — presumably compared against maxlen
    # by the wrapper's read methods (not visible here).
    self.bytes_read = 0
|
Initialize SizeCheckWrapper instance.
Args:
rfile (file): file of a limited size
maxlen (int): maximum length of the file being read
|
juraj-google-style
|
def _plot_depth_track(self, ax, md, kind='MD'):
    """Private function. Depth track plotting.

    Args:
        ax (ax): A matplotlib axis.
        md (ndarray): The measured depths of the track.
        kind (str): The kind of track to plot ('MD' or 'TVD').

    Returns:
        ax.

    Raises:
        Exception: if kind is neither 'MD' nor 'TVD'.
    """
    # 'bounded' and 'piecewise' are custom y-scales — presumably registered
    # elsewhere in this package; confirm before reuse.
    if (kind == 'MD'):
        ax.set_yscale('bounded', vmin=md.min(), vmax=md.max())
    elif (kind == 'TVD'):
        tvd = self.location.md2tvd(md)
        ax.set_yscale('piecewise', x=tvd, y=md)
    else:
        raise Exception('Kind must be MD or TVD')
    for sp in ax.spines.values():
        sp.set_color('gray')
    # Tick labels are pulled inside the plot; alignment and padding depend
    # on the axis' column position in the figure grid.
    if ax.is_first_col():
        pad = (- 10)
        ax.spines['left'].set_color('none')
        ax.yaxis.set_ticks_position('right')
        for label in ax.get_yticklabels():
            label.set_horizontalalignment('right')
    elif ax.is_last_col():
        pad = (- 10)
        ax.spines['right'].set_color('none')
        ax.yaxis.set_ticks_position('left')
        for label in ax.get_yticklabels():
            label.set_horizontalalignment('left')
    else:
        pad = (- 30)
        for label in ax.get_yticklabels():
            label.set_horizontalalignment('center')
    ax.tick_params(axis='y', colors='gray', labelsize=12, pad=pad)
    # NOTE(review): the next two calls both clear the x ticks — the second
    # looks redundant.
    ax.set_xticks([])
    ax.set(xticks=[])
    # Flag consumed by the track-layout code to identify depth tracks.
    ax.depth_track = True
    return ax
|
Private function. Depth track plotting.
Args:
ax (ax): A matplotlib axis.
md (ndarray): The measured depths of the track.
kind (str): The kind of track to plot.
Returns:
ax.
|
codesearchnet
|
class QuantileThreshold(ThresholdFn):
    """Applies a quantile-based dynamic threshold to anomaly scores.

    Stateful `ThresholdFn` that tracks the given quantile of incoming
    scores and uses the tracked value as the outlier threshold, adapting as
    more data is processed.

    Args:
        quantile (Optional[float]): The quantile to be tracked (e.g. 0.95
            for the 95th percentile). Defaults to 0.95.
        quantile_tracker (Optional[QuantileTracker]): Pre-initialized
            tracker to use; when None, a `BufferedSlidingQuantileTracker`
            with window size 100 is created.
        **kwargs: Additional keyword arguments for the base `ThresholdFn`.
    """

    def __init__(self, quantile: Optional[float]=0.95, quantile_tracker: Optional[QuantileTracker]=None, **kwargs):
        super().__init__(**kwargs)
        # Prefer an explicitly supplied tracker; otherwise build the default.
        self._tracker = (quantile_tracker if quantile_tracker is not None
                         else BufferedSlidingQuantileTracker(window_size=100, q=quantile))

    @property
    def is_stateful(self) -> bool:
        """This threshold adapts over time, so it is stateful."""
        return True

    @property
    def threshold(self) -> float:
        """Current quantile estimate used as the decision boundary."""
        return self._tracker.get()

    def apply(self, score: Optional[float]) -> Optional[int]:
        """Classify one score as normal/outlier/missing, updating state."""
        if score is None:
            # Keep the window aligned even for absent scores.
            self._tracker.push(float('NaN'))
            return None
        self._tracker.push(score)
        if math.isnan(score):
            return self._missing_label
        return self._normal_label if score < self.threshold else self._outlier_label
|
Applies a quantile-based dynamic threshold to anomaly scores.
This `ThresholdFn` is stateful and uses a quantile tracker to dynamically
determine the threshold for anomaly detection. It estimates the specified
quantile of the incoming anomaly scores and uses this quantile value as the
threshold.
The threshold adapts over time as more data is processed, making it suitable
for scenarios where the distribution of anomaly scores may change.
Args:
quantile (Optional[float]): The quantile to be tracked (e.g., 0.95 for the
95th percentile). This value determines the dynamic threshold. Defaults to
0.95.
quantile_tracker (Optional[BufferedQuantileTracker]): An optional
pre-initialized quantile tracker. If provided, this tracker will be used;
otherwise, a `BufferedSlidingQuantileTracker` will be created with a
default window size of 100.
**kwargs: Additional keyword arguments to be passed to the base
`ThresholdFn` constructor.
|
github-repos
|
def _ReadFloatingPointDataTypeDefinition(self, definitions_registry, definition_values, definition_name, is_member=False):
    """Reads a floating-point data type definition.

    Delegates to the generic fixed-size reader, restricting the supported
    sizes to 4 and 8 bytes.

    Args:
        definitions_registry (DataTypeDefinitionsRegistry): data type
            definitions registry.
        definition_values (dict[str, object]): definition values.
        definition_name (str): name of the definition.
        is_member (Optional[bool]): True if the data type definition is a
            member data type definition.

    Returns:
        FloatingPointDefinition: floating-point data type definition.
    """
    return self._ReadFixedSizeDataTypeDefinition(
        definitions_registry,
        definition_values,
        data_types.FloatingPointDefinition,
        definition_name,
        self._SUPPORTED_ATTRIBUTES_FIXED_SIZE_DATA_TYPE,
        is_member=is_member,
        supported_size_values=(4, 8))
|
Reads a floating-point data type definition.
Args:
definitions_registry (DataTypeDefinitionsRegistry): data type definitions
registry.
definition_values (dict[str, object]): definition values.
definition_name (str): name of the definition.
is_member (Optional[bool]): True if the data type definition is a member
data type definition.
Returns:
FloatingPointDefinition: floating-point data type definition.
|
juraj-google-style
|
def create_context(pip_version=None, python_version=None):
    """Create a context containing the specific pip and python.

    Args:
        pip_version (str or `Version`): Version of pip to use, or latest if
            None.
        python_version (str or `Version`): Python version to use, or latest
            if None.

    Returns:
        `ResolvedContext`: Context containing pip and python.
    """
    pip_req = "pip-%s" % str(pip_version) if pip_version else "pip"

    if python_version:
        major_minor_ver = Version(str(python_version)).trim(2)
    else:
        latest_python = get_latest_package("python")
        if latest_python:
            major_minor_ver = latest_python.version.trim(2)
        else:
            # No python rez package found: fall back to the interpreter
            # currently running this code.
            major_minor_ver = '.'.join(map(str, sys.version_info[:2]))
    py_req = "python-%s" % str(major_minor_ver)

    request = [pip_req, py_req]
    with convert_errors(from_=(PackageFamilyNotFoundError, PackageNotFoundError),
                        to=BuildError, msg="Cannot run - pip or python rez "
                        "package is not present"):
        context = ResolvedContext(request)

    pip_variant = context.get_resolved_package("pip")
    pip_package = pip_variant.parent
    print_info("Using %s (%s)" % (pip_package.qualified_name, pip_variant.uri))
    return context
|
Create a context containing the specific pip and python.
Args:
pip_version (str or `Version`): Version of pip to use, or latest if None.
python_version (str or `Version`): Python version to use, or latest if
None.
Returns:
`ResolvedContext`: Context containing pip and python.
|
juraj-google-style
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.