| code (string, 20-4.93k chars) | docstring (string, 33-1.27k chars) | source (string, 3 classes) |
|---|---|---|
def sget_voltage(self, cycle, step, set_number=None):
time_00 = time.time()
set_number = self._validate_dataset_number(set_number)
if set_number is None:
self._report_empty_dataset()
return
cycle_index_header = self.headers_normal.cycle_index_txt
voltage_header = self.headers_normal.voltage_txt
step_index_header = self.headers_normal.step_index_txt
test = self.datasets[set_number].dfdata
if isinstance(step, (list, tuple)):
warnings.warn(f"The varialbe step is a list."
f"Should be an integer."
f"{step}")
step = step[0]
c = test[(test[cycle_index_header] == cycle) &
(test[step_index_header] == step)]
self.logger.debug(f"(dt: {(time.time() - time_00):4.2f}s)")
if not self.is_empty(c):
v = c[voltage_header]
return v
else:
return None
|
Returns voltage for cycle, step.
Convenience function; same as issuing
dfdata[(dfdata[cycle_index_header] == cycle) &
(dfdata[step_index_header] == step)][voltage_header]
Args:
cycle: cycle number
step: step number
set_number: the dataset number (automatic selection if None)
Returns:
pandas.Series or None if empty
|
juraj-google-style
|
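The docstring above points out that `sget_voltage` is just a convenience wrapper around a boolean-mask selection on the raw-data DataFrame. A minimal sketch of that selection on a toy frame (the column names below are placeholders, not cellpy's actual header strings):

```python
import pandas as pd

# Toy raw-data table; "cycle_index", "step_index" and "voltage" stand in for
# the header strings normally taken from self.headers_normal.
dfdata = pd.DataFrame({
    "cycle_index": [1, 1, 1, 2, 2],
    "step_index":  [1, 1, 2, 1, 1],
    "voltage":     [3.2, 3.3, 3.4, 3.5, 3.6],
})

cycle, step = 1, 1
selection = dfdata[(dfdata["cycle_index"] == cycle) &
                   (dfdata["step_index"] == step)]["voltage"]
print(selection)        # pandas.Series with the two matching voltage values
print(selection.empty)  # False; sget_voltage would return None when True
```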
def add_constant(self, stream, value):
if stream in self.constant_database:
raise ArgumentError("Attempted to set the same constant twice", stream=stream, old_value=self.constant_database[stream], new_value=value)
self.constant_database[stream] = value
|
Store a constant value for use in this sensor graph.
Constant assignments occur after all sensor graph nodes have been
allocated since they must be propagated to all appropriate virtual
stream walkers.
Args:
stream (DataStream): The constant stream to assign the value to
value (int): The value to assign.
|
juraj-google-style
|
def GrabObject(self, identifier):
if identifier not in self._values:
raise KeyError('Missing cached object for identifier: {0:s}'.format(
identifier))
cache_value = self._values[identifier]
if not cache_value:
raise RuntimeError('Missing cache value for identifier: {0:s}'.format(
identifier))
cache_value.IncrementReferenceCount()
|
Grabs a cached object based on the identifier.
This method increments the cache value reference count.
Args:
identifier (str): VFS object identifier.
Raises:
KeyError: if the VFS object is not found in the cache.
RuntimeError: if the cache value is missing.
|
juraj-google-style
|
def get_average_record(self, n):
history_deque = collections.deque()
averages = []
for d in self.data_points:
history_deque.appendleft(d)
if (len(history_deque) > n):
history_deque.pop()
avg = (sum(history_deque) / len(history_deque))
averages.append(round(avg, self.lr))
return averages
|
Returns a list of average current numbers, each representing the
average over the last n data points.
Args:
n: Number of data points to average over.
Returns:
A list of average current values.
|
codesearchnet
|
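The deque-based windowing in `get_average_record` can be exercised on its own. A small sketch, assuming `self.lr` is the number of decimal places to round to and `self.data_points` is a plain list of numbers:

```python
import collections

def rolling_averages(data_points, n, decimals=2):
    # Mirrors get_average_record: average over at most the last n points.
    history = collections.deque()
    averages = []
    for d in data_points:
        history.appendleft(d)
        if len(history) > n:
            history.pop()
        averages.append(round(sum(history) / len(history), decimals))
    return averages

print(rolling_averages([1, 2, 3, 4, 5], n=3))
# [1.0, 1.5, 2.0, 3.0, 4.0]
```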
class PatchTSMixerPatchify(nn.Module):
def __init__(self, config: PatchTSMixerConfig):
super().__init__()
self.sequence_length = config.context_length
self.patch_length = config.patch_length
self.patch_stride = config.patch_stride
if self.sequence_length <= self.patch_length:
raise ValueError(f'Sequence length ({self.sequence_length}) has to be greater than the patch length ({self.patch_length})')
        self.num_patches = (max(self.sequence_length, self.patch_length) - self.patch_length) // self.patch_stride + 1
new_sequence_length = self.patch_length + self.patch_stride * (self.num_patches - 1)
self.sequence_start = self.sequence_length - new_sequence_length
def forward(self, past_values: torch.Tensor):
sequence_length = past_values.shape[-2]
if sequence_length != self.sequence_length:
raise ValueError(f"Input sequence length ({sequence_length}) doesn't match model configuration ({self.sequence_length}).")
output = past_values[:, self.sequence_start:, :]
output = output.unfold(dimension=-2, size=self.patch_length, step=self.patch_stride)
output = output.transpose(-2, -3).contiguous()
return output
|
A class to patchify the time series sequence into different patches
Returns:
`torch.Tensor` of shape `(batch_size, num_channels, num_patches, patch_length)`
|
github-repos
|
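The patchify step boils down to `Tensor.unfold` over the time dimension plus a transpose. A standalone shape check with made-up sizes (these are not defaults from `PatchTSMixerConfig`):

```python
import torch

batch_size, num_channels = 2, 3
sequence_length, patch_length, patch_stride = 16, 4, 4

past_values = torch.randn(batch_size, sequence_length, num_channels)

num_patches = (sequence_length - patch_length) // patch_stride + 1
new_sequence_length = patch_length + patch_stride * (num_patches - 1)
sequence_start = sequence_length - new_sequence_length

output = past_values[:, sequence_start:, :]
output = output.unfold(dimension=-2, size=patch_length, step=patch_stride)
output = output.transpose(-2, -3).contiguous()

print(output.shape)  # torch.Size([2, 3, 4, 4]) -> (batch, channels, num_patches, patch_length)
```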
def get_data_xls(file_name, file_contents=None, on_demand=False):
def tuple_to_iso_date(tuple_date):
"\n Turns a gregorian (year, month, day, hour, minute, nearest_second) into a\n standard YYYY-MM-DDTHH:MM:SS ISO date. If the date part is all zeros, it's\n assumed to be a time; if the time part is all zeros it's assumed to be a date;\n if all of it is zeros it's taken to be a time, specifically 00:00:00 (midnight).\n\n Note that datetimes of midnight will come back as date-only strings. A date\n of month=0 and day=0 is meaningless, so that part of the coercion is safe.\n For more on the hairy nature of Excel date/times see\n http:
(y, m, d, hh, mm, ss) = tuple_date
non_zero = (lambda n: (n != 0))
date = (('%04d-%02d-%02d' % (y, m, d)) if list(filter(non_zero, (y, m, d))) else '')
time = (('T%02d:%02d:%02d' % (hh, mm, ss)) if (list(filter(non_zero, (hh, mm, ss))) or (not date)) else '')
return (date + time)
def format_excel_val(book, val_type, value, want_tuple_date):
'Cleans up the incoming excel data'
if (val_type == 2):
if (value == int(value)):
value = int(value)
elif (val_type == 3):
datetuple = xlrd.xldate_as_tuple(value, book.datemode)
value = (datetuple if want_tuple_date else tuple_to_iso_date(datetuple))
elif (val_type == 5):
value = xlrd.error_text_from_code[value]
return value
def xlrd_xsl_to_array(file_name, file_contents=None):
        """
        Returns:
            A list of 2-D tables holding the converted cells of each sheet
        """
book = xlrd.open_workbook(file_name, file_contents=file_contents, on_demand=on_demand)
formatter = (lambda t_v: format_excel_val(book, t_v[0], t_v[1], False))
row_builder = (lambda s, r: list(map(formatter, zip(s.row_types(r), s.row_values(r)))))
data = [SheetYielder(book, index, row_builder) for index in range(book.nsheets)]
if (not on_demand):
for sheet in data:
sheet.load()
book.release_resources()
return data
return xlrd_xsl_to_array(file_name, file_contents)
|
Loads the old excel format files. New format files will automatically
get loaded as well.
Args:
file_name: The name of the local file, or the holder for the
extension type when the file_contents are supplied.
file_contents: The file-like object holding contents of file_name.
If left as None, then file_name is directly loaded.
on_demand: Requests that a yielder be used in place of a full data
copy.
|
codesearchnet
|
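The inner `tuple_to_iso_date` helper is easy to verify in isolation. A self-contained re-implementation of the same coercion logic, with a few sample inputs:

```python
def tuple_to_iso_date(tuple_date):
    # (year, month, day, hour, minute, second) -> ISO date, time, or both.
    y, m, d, hh, mm, ss = tuple_date
    non_zero = lambda n: n != 0
    date = '%04d-%02d-%02d' % (y, m, d) if list(filter(non_zero, (y, m, d))) else ''
    time = 'T%02d:%02d:%02d' % (hh, mm, ss) if (list(filter(non_zero, (hh, mm, ss))) or not date) else ''
    return date + time

print(tuple_to_iso_date((2021, 7, 14, 0, 0, 0)))   # '2021-07-14'  (midnight -> date only)
print(tuple_to_iso_date((0, 0, 0, 9, 30, 0)))      # 'T09:30:00'   (no date part)
print(tuple_to_iso_date((2021, 7, 14, 9, 30, 0)))  # '2021-07-14T09:30:00'
print(tuple_to_iso_date((0, 0, 0, 0, 0, 0)))       # 'T00:00:00'   (all zeros -> midnight time)
```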
def IsDefault(self):
if ((not self._tsk_attribute) or (not self._file_system)):
return True
if self._file_system.IsHFS():
attribute_type = getattr(self._tsk_attribute.info, 'type', None)
return (attribute_type in (pytsk3.TSK_FS_ATTR_TYPE_HFS_DEFAULT, pytsk3.TSK_FS_ATTR_TYPE_HFS_DATA))
if self._file_system.IsNTFS():
return (not bool(self.name))
return True
|
Determines if the data stream is the default data stream.
Returns:
bool: True if the data stream is the default data stream, false if not.
|
codesearchnet
|
def _GetProcessedStorageFilePath(self, task):
filename = '{0:s}.plaso'.format(task.identifier)
return os.path.join(self._processed_task_storage_path, filename)
|
Retrieves the path of a task storage file in the processed directory.
Args:
task (Task): task.
Returns:
str: path of a task storage file in the processed directory.
|
codesearchnet
|
def _process_worker(call_queue, result_queue, initializer, initargs, processes_management_lock, timeout, worker_exit_lock, current_depth):
if (initializer is not None):
try:
initializer(*initargs)
except BaseException:
_base.LOGGER.critical('Exception in initializer:', exc_info=True)
return
global _CURRENT_DEPTH
_CURRENT_DEPTH = current_depth
_process_reference_size = None
_last_memory_leak_check = None
pid = os.getpid()
mp.util.debug(('Worker started with timeout=%s' % timeout))
while True:
try:
call_item = call_queue.get(block=True, timeout=timeout)
if (call_item is None):
mp.util.info('Shutting down worker on sentinel')
except queue.Empty:
mp.util.info(('Shutting down worker after timeout %0.3fs' % timeout))
if processes_management_lock.acquire(block=False):
processes_management_lock.release()
call_item = None
else:
mp.util.info('Could not acquire processes_management_lock')
continue
except BaseException as e:
previous_tb = traceback.format_exc()
try:
result_queue.put(_RemoteTraceback(previous_tb))
except BaseException:
print(previous_tb)
sys.exit(1)
if (call_item is None):
result_queue.put(pid)
with worker_exit_lock:
return
try:
r = call_item()
except BaseException as e:
exc = _ExceptionWithTraceback(e)
result_queue.put(_ResultItem(call_item.work_id, exception=exc))
else:
_sendback_result(result_queue, call_item.work_id, result=r)
del r
del call_item
if _USE_PSUTIL:
if (_process_reference_size is None):
_process_reference_size = _get_memory_usage(pid, force_gc=True)
_last_memory_leak_check = time()
continue
if ((time() - _last_memory_leak_check) > _MEMORY_LEAK_CHECK_DELAY):
mem_usage = _get_memory_usage(pid)
_last_memory_leak_check = time()
if ((mem_usage - _process_reference_size) < _MAX_MEMORY_LEAK_SIZE):
continue
mem_usage = _get_memory_usage(pid, force_gc=True)
_last_memory_leak_check = time()
if ((mem_usage - _process_reference_size) < _MAX_MEMORY_LEAK_SIZE):
continue
mp.util.info('Memory leak detected: shutting down worker')
result_queue.put(pid)
with worker_exit_lock:
return
elif ((_last_memory_leak_check is None) or ((time() - _last_memory_leak_check) > _MEMORY_LEAK_CHECK_DELAY)):
gc.collect()
_last_memory_leak_check = time()
|
Evaluates calls from call_queue and places the results in result_queue.
This worker is run in a separate process.
Args:
call_queue: A ctx.Queue of _CallItems that will be read and
evaluated by the worker.
result_queue: A ctx.Queue of _ResultItems that will be written
to by the worker.
initializer: A callable initializer, or None
initargs: A tuple of args for the initializer
processes_management_lock: A ctx.Lock avoiding worker timeout while some
workers are being spawned.
timeout: maximum time to wait for a new item in the call_queue. If that
time expires, the worker will shut down.
worker_exit_lock: Lock to avoid flagging the executor as broken on
worker timeout.
current_depth: Nested parallelism level, to avoid infinite spawning.
|
codesearchnet
|
def compose_path(pub, uuid_url=False):
if uuid_url:
return join(
"/",
UUID_DOWNLOAD_KEY,
str(pub.uuid)
)
return join(
"/",
DOWNLOAD_KEY,
basename(pub.file_pointer),
basename(pub.filename)
)
|
Compose absolute path for given `pub`.
Args:
pub (obj): :class:`.DBPublication` instance.
uuid_url (bool, default False): Compose URL using UUID.
Returns:
str: Absolute url-path of the publication, without server's address \
and protocol.
Raises:
PrivatePublicationError: When the `pub` is private publication.
|
juraj-google-style
|
def ExpandSubClasses(self, t):
queue = [t]
seen = set()
while queue:
item = queue.pop()
if item not in seen:
seen.add(item)
queue.extend(self._subclasses[item])
return seen
|
Generate a set of all (known) subclasses for a type.
Arguments:
t: A type. E.g. NamedType("int").
Returns:
A set of types. This set includes t as well as all its subclasses. For
example, this will return "int" and "bool" for "int".
|
github-repos
|
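The loop in `ExpandSubClasses` is a plain worklist transitive closure over a direct-subclass map. A self-contained sketch using strings in place of pytd types and an assumed subclass mapping:

```python
from collections import defaultdict

# Hypothetical direct-subclass map, analogous to self._subclasses.
subclasses = defaultdict(list, {
    "object": ["int", "str"],
    "int": ["bool"],
})

def expand_subclasses(t):
    queue, seen = [t], set()
    while queue:
        item = queue.pop()
        if item not in seen:
            seen.add(item)
            queue.extend(subclasses[item])
    return seen

print(sorted(expand_subclasses("int")))     # ['bool', 'int']
print(sorted(expand_subclasses("object")))  # ['bool', 'int', 'object', 'str']
```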
def victim(self, name, owner=None, **kwargs):
return Victim(self.tcex, name, owner=owner, **kwargs)
|
Create the Victim TI object.
Args:
owner: The owner of the victim. Optional.
name: The name of the victim.
**kwargs: Additional keyword arguments passed to the Victim object.
Return:
Victim: The created Victim TI object.
|
codesearchnet
|
def rh45(msg):
d = hex2bin(data(msg))
if (d[38] == '0'):
return None
rh = (bin2int(d[39:51]) * 16)
return rh
|
Radio height.
Args:
msg (String): 28 bytes hexadecimal message string
Returns:
int: radio height in ft
|
codesearchnet
|
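`rh45` reads a status bit at index 38 of the binary payload and then a 12-bit field scaled by 16 ft. A sketch of the same arithmetic on a hand-built bit string (the payload below is made up; pyModeS's `hex2bin`, `bin2int` and `data` helpers are not used):

```python
# Fake 56-bit payload: status bit at index 38 set, and the 12-bit field at
# bits 39..50 holding the raw value 100, which scales to 100 * 16 = 1600 ft.
bits = ['0'] * 56
bits[38] = '1'
bits[39:51] = format(100, '012b')
d = ''.join(bits)

rh = None if d[38] == '0' else int(d[39:51], 2) * 16
print(rh)  # 1600
```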
async def invoke(self, context):
try:
tasks = (await self._run_cancellable(claim_work(context)))
if ((not tasks) or (not tasks.get('tasks', []))):
(await self._run_cancellable(asyncio.sleep(context.config['poll_interval'])))
return None
status = None
for task_defn in tasks.get('tasks', []):
prepare_to_run_task(context, task_defn)
reclaim_fut = context.event_loop.create_task(reclaim_task(context, context.task))
try:
status = (await do_run_task(context, self._run_cancellable, self._to_cancellable_process))
artifacts_paths = filepaths_in_dir(context.config['artifact_dir'])
except WorkerShutdownDuringTask:
shutdown_artifact_paths = [os.path.join('public', 'logs', log_file) for log_file in ['chain_of_trust.log', 'live_backing.log']]
artifacts_paths = [path for path in shutdown_artifact_paths if os.path.isfile(os.path.join(context.config['artifact_dir'], path))]
status = STATUSES['worker-shutdown']
status = worst_level(status, (await do_upload(context, artifacts_paths)))
(await complete_task(context, status))
reclaim_fut.cancel()
cleanup(context)
return status
except asyncio.CancelledError:
return None
|
Claims and processes Taskcluster work.
Args:
context (scriptworker.context.Context): context of worker
Returns: status code of build
|
codesearchnet
|
def event_stream(self, from_token, timeout=30000):
warnings.warn("event_stream is deprecated. Use sync instead.",
DeprecationWarning)
path = "/events"
return self._send(
"GET", path, query_params={
"timeout": timeout,
"from": from_token
}
)
|
Deprecated. Use sync instead.
Performs /events
Args:
from_token (str): The 'from' query parameter.
timeout (int): Optional. The 'timeout' query parameter.
|
juraj-google-style
|
def get_nested_dmaps(dmap):
if not isinstance(dmap, DynamicMap):
return []
dmaps = [dmap]
for o in dmap.callback.inputs:
dmaps.extend(get_nested_dmaps(o))
return list(set(dmaps))
|
Recursively traverses a DynamicMap to find its DynamicMap inputs
Args:
dmap: The DynamicMap to recurse into, looking for DynamicMap inputs
Returns:
List of DynamicMap instances that were found
|
juraj-google-style
|
def __init__(self, weight_shape: Sequence[int], same_scale_op: str) -> None:
self.filters = np.random.uniform(low=-1.0, high=1.0, size=weight_shape)
self.same_scale_op = same_scale_op
|
Initializes a MatmulModel.
Args:
weight_shape: Shape of the weight tensor.
same_scale_op: Name of the same-scale op to be tested. Raises error
when an unknown name is given.
|
github-repos
|
def extract_all_content(
self,
path=None,
payload=None,
objectInput=None,
pretty_print=False,
convert_to_obj=False,
):
f = file_path(path, payload, objectInput)
switches = ["-J", "-t", "-r", f]
if not pretty_print:
switches.remove("-r")
result = self._command_template(switches)
if result and convert_to_obj:
            result = json.loads(result)  # "encoding" kwarg was removed in Python 3.9; input is already text
return result, path, f
|
This function returns a JSON of all contents and
metadata of the passed file.
Args:
path (string): Path of file to analyze
payload (string): Payload base64 to analyze
objectInput (object): file object/standard input to analyze
pretty_print (boolean): If True adds newlines and whitespace,
for better readability
convert_to_obj (boolean): If True convert JSON in object
|
juraj-google-style
|
def process_layer(layer_data):
layer_name = layer_data['name']
if 'module' not in layer_data:
layer = saving_utils.model_from_config(layer_data, custom_objects=custom_objects)
else:
layer = serialization_lib.deserialize_keras_object(layer_data, custom_objects=custom_objects)
if not isinstance(layer, Operation):
raise ValueError(f'Unexpected object from deserialization, expected a layer or operation, got a {type(layer)}')
created_layers[layer_name] = layer
inbound_nodes_data = layer_data['inbound_nodes']
for node_data in inbound_nodes_data:
add_unprocessed_node(layer, node_data)
|
Deserializes a layer and indexes its inbound nodes.
Args:
layer_data: layer config dict.
|
github-repos
|
def node_info(self, args, screen_info=None):
_ = screen_info
parsed = self._arg_parsers['node_info'].parse_args(args)
node_name, unused_slot = debug_graphs.parse_node_or_tensor_name(parsed.node_name)
if not self._debug_dump.node_exists(node_name):
output = cli_shared.error('There is no node named "%s" in the partition graphs' % node_name)
_add_main_menu(output, node_name=None, enable_list_tensors=True, enable_node_info=False, enable_list_inputs=False, enable_list_outputs=False)
return output
lines = ['Node %s' % node_name]
font_attr_segs = {0: [(len(lines[-1]) - len(node_name), len(lines[-1]), 'bold')]}
lines.append('')
lines.append(' Op: %s' % self._debug_dump.node_op_type(node_name))
lines.append(' Device: %s' % self._debug_dump.node_device(node_name))
output = debugger_cli_common.RichTextLines(lines, font_attr_segs=font_attr_segs)
inputs = self._exclude_denylisted_ops(self._debug_dump.node_inputs(node_name))
ctrl_inputs = self._exclude_denylisted_ops(self._debug_dump.node_inputs(node_name, is_control=True))
output.extend(self._format_neighbors('input', inputs, ctrl_inputs))
recs = self._exclude_denylisted_ops(self._debug_dump.node_recipients(node_name))
ctrl_recs = self._exclude_denylisted_ops(self._debug_dump.node_recipients(node_name, is_control=True))
output.extend(self._format_neighbors('recipient', recs, ctrl_recs))
if parsed.attributes:
output.extend(self._list_node_attributes(node_name))
if parsed.dumps:
output.extend(self._list_node_dumps(node_name))
if parsed.traceback:
output.extend(self._render_node_traceback(node_name))
_add_main_menu(output, node_name=node_name, enable_node_info=False)
return output
|
Command handler for node_info.
Query information about a given node.
Args:
args: Command-line arguments, excluding the command prefix, as a list of
str.
screen_info: Optional dict input containing screen information such as
cols.
Returns:
Output text lines as a RichTextLines object.
|
github-repos
|
def _example_from_complex_def(self, prop_spec):
if 'schema' not in prop_spec:
return [{}]
elif 'type' not in prop_spec['schema']:
definition_name = self.get_definition_name_from_ref(prop_spec['schema']['$ref'])
if self.build_one_definition_example(definition_name):
return self.definitions_example[definition_name]
elif prop_spec['schema']['type'] == 'array':
if 'items' in prop_spec.keys():
definition_name = self.get_definition_name_from_ref(prop_spec['items']['$ref'])
else:
if '$ref' in prop_spec['schema']['items']:
definition_name = self.get_definition_name_from_ref(prop_spec['schema']['items']['$ref'])
else:
definition_name = self.get_definition_name_from_ref(prop_spec['schema']['items']['type'])
return [definition_name]
return [self.definitions_example[definition_name]]
else:
return self.get_example_from_prop_spec(prop_spec['schema'])
|
Get an example from a property specification.
In case there is no "type" key in the root of the dictionary.
Args:
prop_spec: property specification you want an example of.
Returns:
An example.
|
juraj-google-style
|
def _load_config_section(self, section_name):
if self._config.has_section(section_name):
section = dict(self._config.items(section_name))
elif self._config.has_section('Default'):
section = dict(self._config.items('Default'))
else:
raise KeyError(("'{}' was not found in the configuration file and no default " + 'configuration was provided.').format(section_name))
if (('protocol' in section) and ('host' in section) and ('token' in section)):
return section
else:
raise KeyError(('Missing values in configuration data. ' + 'Must contain: protocol, host, token'))
|
Method to load the specific Service section from the config file if it
exists, or fall back to the default
Args:
section_name (str): The desired service section name
Returns:
(dict): the section parameters
|
codesearchnet
|
def assemble_concatenated_meta(concated_meta_dfs, remove_all_metadata_fields):
if remove_all_metadata_fields:
for df in concated_meta_dfs:
df.drop(df.columns, axis=1, inplace=True)
all_concated_meta_df = pd.concat(concated_meta_dfs, axis=0)
n_rows = all_concated_meta_df.shape[0]
logger.debug('all_concated_meta_df.shape[0]: {}'.format(n_rows))
n_rows_cumulative = sum([df.shape[0] for df in concated_meta_dfs])
assert (n_rows == n_rows_cumulative)
all_concated_meta_df_sorted = all_concated_meta_df.sort_index(axis=0).sort_index(axis=1)
return all_concated_meta_df_sorted
|
Assemble the concatenated metadata dfs together. For example,
if horizontally concatenating, the concatenated metadata dfs are the
column metadata dfs. Both indices are sorted.
Args:
concated_meta_dfs (list of pandas dfs)
Returns:
all_concated_meta_df_sorted (pandas df)
|
codesearchnet
|
def inspect_distribution(self, image, auth_config=None):
(registry, _) = auth.resolve_repository_name(image)
headers = {}
if (auth_config is None):
header = auth.get_config_header(self, registry)
if header:
headers['X-Registry-Auth'] = header
else:
log.debug('Sending supplied auth config')
headers['X-Registry-Auth'] = auth.encode_header(auth_config)
url = self._url('/distribution/{0}/json', image)
return self._result(self._get(url, headers=headers), True)
|
Get image digest and platform information by contacting the registry.
Args:
image (str): The image name to inspect
auth_config (dict): Override the credentials that are found in the
config for this request. ``auth_config`` should contain the
``username`` and ``password`` keys to be valid.
Returns:
(dict): A dict containing distribution data
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
|
codesearchnet
|
def parameterized_codec(raw, b64):
if isinstance(raw, bytes):
raw = raw.decode('utf-8')
result = _parameterize_string(raw)
return (Base64(result.data) if b64 else result)
|
Parameterize a string, possibly encoding it as Base64 afterwards
Args:
raw (`str` | `bytes`): String to be processed. Byte strings will be
interpreted as UTF-8.
b64 (`bool`): Whether to wrap the output in a Base64 CloudFormation
call
Returns:
:class:`troposphere.AWSHelperFn`: output to be included in a
CloudFormation template.
|
codesearchnet
|
def backward(ctx, grad_at_output: torch.Tensor):
multiplier, selected_experts, masked_gates = ctx.saved_tensors
grad_at_output = grad_at_output * multiplier
grad_at_scores_expanded = masked_gates * grad_at_output.mul(-1)
grad_at_scores_expanded.scatter_add_(dim=-1, index=selected_experts, src=grad_at_output)
return (grad_at_scores_expanded, None, None, None, None)
|
Backward pass for the custom autograd function.
Args:
ctx: Context object with saved tensors from the forward pass.
grad_at_output (torch.Tensor): Gradient at the output.
Returns:
Tuple[torch.Tensor, None, None, None, None]: Gradients for the inputs.
|
github-repos
|
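The backward pass above scales the incoming gradient by the selected gate, spreads the negative softmax term over all experts, and then uses `scatter_add_` to add the positive term back at the selected index. A shape-level sketch with made-up tensors (two tokens, four experts, top-1 routing):

```python
import torch

masked_gates = torch.softmax(torch.randn(2, 4), dim=-1)          # routing probabilities
selected_experts = masked_gates.argmax(dim=-1, keepdim=True)     # (2, 1) top-1 indices
multiplier = masked_gates.gather(-1, selected_experts)           # (2, 1) selected gate values

grad_at_output = torch.ones(2, 1)                                 # incoming gradient
grad_at_output = grad_at_output * multiplier                      # scale by the selected gate
grad_at_scores_expanded = masked_gates * grad_at_output.mul(-1)   # negative term over all experts
grad_at_scores_expanded.scatter_add_(dim=-1, index=selected_experts, src=grad_at_output)

print(grad_at_scores_expanded.shape)  # torch.Size([2, 4]), gradient w.r.t. the router scores
```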
def get_container_details(self, container_id_or_name: str) -> dict:
container = self._client.containers.get(container_id_or_name)
return container.attrs
|
Get details of a container.
Args:
container_id_or_name (string): docker container id or name
Returns:
dict, details of the container
|
juraj-google-style
|
def validate(obj, schema):
if isinstance(obj, str):
obj = json.loads(obj)
return JsonValidator(schema)._validate(obj)
|
Validate an object against a schema
Args:
obj (dict):
schema (dict):
|
juraj-google-style
|
def CheckForNonConstReference(filename, clean_lines, linenum, nesting_state, error):
line = clean_lines.elided[linenum]
if ('&' not in line):
return
if IsDerivedFunction(clean_lines, linenum):
return
if IsOutOfLineMethodDefinition(clean_lines, linenum):
return
if (linenum > 1):
previous = None
if Match('\\s*::(?:[\\w<>]|::)+\\s*&\\s*\\S', line):
previous = Search('\\b((?:const\\s*)?(?:[\\w<>]|::)+[\\w<>])\\s*$', clean_lines.elided[(linenum - 1)])
elif Match('\\s*[a-zA-Z_]([\\w<>]|::)+\\s*&\\s*\\S', line):
previous = Search('\\b((?:const\\s*)?(?:[\\w<>]|::)+::)\\s*$', clean_lines.elided[(linenum - 1)])
if previous:
line = (previous.group(1) + line.lstrip())
else:
endpos = line.rfind('>')
if (endpos > (- 1)):
(_, startline, startpos) = ReverseCloseExpression(clean_lines, linenum, endpos)
if ((startpos > (- 1)) and (startline < linenum)):
line = ''
for i in xrange(startline, (linenum + 1)):
line += clean_lines.elided[i].strip()
if (nesting_state.previous_stack_top and (not (isinstance(nesting_state.previous_stack_top, _ClassInfo) or isinstance(nesting_state.previous_stack_top, _NamespaceInfo)))):
return
if (linenum > 0):
for i in xrange((linenum - 1), max(0, (linenum - 10)), (- 1)):
previous_line = clean_lines.elided[i]
if (not Search('[),]\\s*$', previous_line)):
break
if Match('^\\s*:\\s+\\S', previous_line):
return
if Search('\\\\\\s*$', line):
return
if IsInitializerList(clean_lines, linenum):
return
whitelisted_functions = '(?:[sS]wap(?:<\\w:+>)?|operator\\s*[<>][<>]|static_assert|COMPILE_ASSERT)\\s*\\('
if Search(whitelisted_functions, line):
return
elif (not Search('\\S+\\([^)]*$', line)):
for i in xrange(2):
if ((linenum > i) and Search(whitelisted_functions, clean_lines.elided[((linenum - i) - 1)])):
return
decls = ReplaceAll('{[^}]*}', ' ', line)
for parameter in re.findall(_RE_PATTERN_REF_PARAM, decls):
if ((not Match(_RE_PATTERN_CONST_REF_PARAM, parameter)) and (not Match(_RE_PATTERN_REF_STREAM_PARAM, parameter))):
error(filename, linenum, 'runtime/references', 2, ('Is this a non-const reference? If so, make const or use a pointer: ' + ReplaceAll(' *<', '<', parameter)))
|
Check for non-const references.
Separate from CheckLanguage since it scans backwards from current
line, instead of scanning forward.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
|
codesearchnet
|
def call(self, input_ids: TFModelInputType | None=None, attention_mask: np.ndarray | tf.Tensor | None=None, token_type_ids: np.ndarray | tf.Tensor | None=None, position_ids: np.ndarray | tf.Tensor | None=None, head_mask: np.ndarray | tf.Tensor | None=None, inputs_embeds: np.ndarray | tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: Optional[bool]=False) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]:
outputs = self.tapas(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)
return outputs
|
Returns:
Examples:
```python
>>> from transformers import AutoTokenizer, TapasModel
>>> import pandas as pd
>>> tokenizer = AutoTokenizer.from_pretrained("google/tapas-base")
>>> model = TapasModel.from_pretrained("google/tapas-base")
>>> data = {
... "Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"],
... "Age": ["56", "45", "59"],
... "Number of movies": ["87", "53", "69"],
... }
>>> table = pd.DataFrame.from_dict(data)
>>> queries = ["How many movies has George Clooney played in?", "How old is Brad Pitt?"]
>>> inputs = tokenizer(table=table, queries=queries, padding="max_length", return_tensors="tf")
>>> outputs = model(**inputs)
>>> last_hidden_states = outputs.last_hidden_state
```
|
github-repos
|
def validate(self):
if (not isinstance(self.value, bytes)):
raise TypeError('secret value must be bytes')
elif (not isinstance(self.data_type, enums.SecretDataType)):
raise TypeError('secret data type must be a SecretDataType enumeration')
mask_count = len(self.cryptographic_usage_masks)
for i in range(mask_count):
mask = self.cryptographic_usage_masks[i]
if (not isinstance(mask, enums.CryptographicUsageMask)):
position = '({0} in list)'.format(i)
raise TypeError('secret data mask {0} must be a CryptographicUsageMask enumeration'.format(position))
name_count = len(self.names)
for i in range(name_count):
name = self.names[i]
if (not isinstance(name, six.string_types)):
position = '({0} in list)'.format(i)
raise TypeError('secret data name {0} must be a string'.format(position))
|
Verify that the contents of the SecretData object are valid.
Raises:
TypeError: if the types of any SecretData attributes are invalid.
|
codesearchnet
|
def intersection(self, other):
if not hasattr(other, "__iter__"):
other = [other]
bounds = self.bounds
for range in other:
bounds = self._intersection(bounds, range.bounds)
if not bounds:
return None
range = VersionRange(None)
range.bounds = bounds
return range
|
AND together version ranges.
Calculates the intersection of this range with one or more other ranges.
Args:
other: VersionRange object (or list of) to AND with.
Returns:
New VersionRange object representing the intersection, or None if
no ranges intersect.
|
juraj-google-style
|
def get_version(
here_path,
default_version=DEFAULT_VERSION,
):
if 'site-packages' in here_path:
return _version_from_file(here_path)
if os.environ.get('TRAVIS_TAG'):
if not TEST_MODE:
return os.environ.get('TRAVIS_TAG').replace('v', '')
else:
warnings.warn(
'Travis detected, but TEST_MODE enabled',
exceptions.ProsperVersionTestModeWarning)
try:
current_tag = _read_git_tags(default_version=default_version)
except Exception:
return _version_from_file(here_path)
with open(os.path.join(here_path, 'version.txt'), 'w') as v_fh:
v_fh.write(current_tag)
return current_tag
|
tries to resolve version number
Args:
here_path (str): path to project local dir
default_version (str): what version to return if all else fails
Returns:
str: semantic_version information for library
|
juraj-google-style
|
def get_oxi_state_decorated_structure(self, structure):
s = structure.copy()
if s.is_ordered:
valences = self.get_valences(s)
s.add_oxidation_state_by_site(valences)
else:
valences = self.get_valences(s)
s = add_oxidation_state_by_site_fraction(s, valences)
return s
|
Get an oxidation state decorated structure. This currently works
only for ordered structures.
Args:
structure: Structure to analyze
Returns:
A modified structure that is oxidation state decorated.
Raises:
ValueError if the valences cannot be determined.
|
codesearchnet
|
def restore_component(self, component_name, save_path):
component = self.get_component(component_name=component_name)
self._validate_savable(component=component, component_name=component_name)
component.restore(sess=self.session, save_path=save_path)
|
Restores a component's parameters from a save location.
Args:
component_name: The component to restore.
save_path: The save location.
|
juraj-google-style
|
def rest_error(self):
error_json = self.__format_error('errors')
return json.dumps(error_json, indent=1, sort_keys=True)
|
Format this error into a response to a REST request.
Returns:
A string containing the reformatted error response.
|
codesearchnet
|
def InitPrivateKey(self):
if self.private_key:
try:
self.common_name = rdf_client.ClientURN.FromPrivateKey(self.private_key)
logging.info('Starting client %s', self.common_name)
return self.private_key
except type_info.TypeValueError:
pass
key = rdf_crypto.RSAPrivateKey.GenerateKey(bits=config.CONFIG['Client.rsa_key_length'])
self.common_name = rdf_client.ClientURN.FromPrivateKey(key)
logging.info('Client pending enrolment %s', self.common_name)
self.SavePrivateKey(key)
return key
|
Makes sure this client has a private key set.
It first tries to load an RSA key from the certificate.
If no certificate is found, or it is invalid, we make a new random RSA key,
and store it as our certificate.
Returns:
An RSA key - either from the certificate or a new random key.
|
codesearchnet
|
def create_leaflet_viewer(self, idaho_image_results, filename):
description = self.describe_images(idaho_image_results)
if (len(description) > 0):
functionstring = ''
for (catid, images) in description.items():
for (partnum, part) in images['parts'].items():
num_images = len(list(part.keys()))
partname = None
if (num_images == 1):
partname = [p for p in list(part.keys())][0]
pan_image_id = ''
elif (num_images == 2):
                    partname = [p for p in list(part.keys()) if (p != 'PAN')][0]
pan_image_id = part['PAN']['id']
if (not partname):
self.logger.debug('Cannot find part for idaho image.')
continue
bandstr = {'RGBN': '0,1,2', 'WORLDVIEW_8_BAND': '4,2,1', 'PAN': '0'}.get(partname, '0,1,2')
part_boundstr_wkt = part[partname]['boundstr']
part_polygon = from_wkt(part_boundstr_wkt)
bucketname = part[partname]['bucket']
image_id = part[partname]['id']
(W, S, E, N) = part_polygon.bounds
functionstring += ("addLayerToMap('%s','%s',%s,%s,%s,%s,'%s');\n" % (bucketname, image_id, W, S, E, N, pan_image_id))
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
try:
with open(os.path.join(__location__, 'leafletmap_template.html'), 'r') as htmlfile:
data = htmlfile.read().decode('utf8')
except AttributeError:
with open(os.path.join(__location__, 'leafletmap_template.html'), 'r') as htmlfile:
data = htmlfile.read()
data = data.replace('FUNCTIONSTRING', functionstring)
data = data.replace('CENTERLAT', str(S))
data = data.replace('CENTERLON', str(W))
data = data.replace('BANDS', bandstr)
data = data.replace('TOKEN', self.gbdx_connection.access_token)
with codecs.open(filename, 'w', 'utf8') as outputfile:
self.logger.debug(('Saving %s' % filename))
outputfile.write(data)
else:
print('No items returned.')
|
Create a leaflet viewer html file for viewing idaho images.
Args:
idaho_image_results (dict): IDAHO image result set as returned from
the catalog.
filename (str): Where to save output html file.
|
codesearchnet
|
def _infer_graph(self, inputs, clusters):
assert isinstance(inputs, list)
scores = self._distance_graph(inputs, clusters, self._distance_metric)
output = []
if self._distance_metric == COSINE_DISTANCE and (not self._clusters_l2_normalized()):
with ops.colocate_with(clusters, ignore_existing=True):
clusters = nn_impl.l2_normalize(clusters, axis=1)
for inp, score in zip(inputs, scores):
with ops.colocate_with(inp, ignore_existing=True):
indices, distances = gen_clustering_ops.nearest_neighbors(inp, clusters, 1)
if self._distance_metric == COSINE_DISTANCE:
distances *= 0.5
output.append((score, array_ops.squeeze(distances, [-1]), array_ops.squeeze(indices, [-1])))
return zip(*output)
|
Maps input to closest cluster and the score.
Args:
inputs: list of input Tensors.
clusters: Tensor of cluster centers.
Returns:
List of tuple, where each value in tuple corresponds to a value in inp.
The tuple has following three elements:
all_scores: distance of each input to each cluster center.
score: distance of each input to closest cluster center.
cluster_idx: index of cluster center closest to the corresponding input.
|
github-repos
|
class SimpleSlidingQuantileTracker(WindowedTracker, QuantileTracker):
def __init__(self, window_size, q):
super().__init__(window_mode=WindowMode.SLIDING, window_size=window_size)
QuantileTracker.__init__(self, q)
def get(self):
with warnings.catch_warnings(record=False):
warnings.simplefilter('ignore')
return np.nanquantile(self._queue, self._q)
|
Sliding window quantile tracker using NumPy.
This tracker uses NumPy's `nanquantile` function to calculate the specified
quantile of the values currently in the sliding window. It's a simple,
non-incremental approach.
Args:
window_size: The size of the sliding window.
q: The quantile to calculate, a float between 0 and 1 (inclusive).
|
github-repos
|
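Stripped of the tracker base classes, `get()` is just `np.nanquantile` over whatever is currently in the window. A small sketch, assuming a plain bounded deque as the window:

```python
import collections
import warnings
import numpy as np

window_size, q = 4, 0.5
queue = collections.deque(maxlen=window_size)

for value in [1.0, 9.0, np.nan, 3.0, 5.0]:
    queue.append(value)
    with warnings.catch_warnings(record=False):
        warnings.simplefilter('ignore')  # silence all-NaN warnings, as the tracker does
        print(np.nanquantile(queue, q))
# 1.0, 5.0, 5.0, 3.0, then the median of [9.0, nan, 3.0, 5.0] -> 5.0
```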
def removeTags(dom):
try:
string_type = basestring
except NameError:
string_type = str
element_stack = None
if type(dom) in [list, tuple]:
element_stack = dom
elif isinstance(dom, HTMLElement):
element_stack = dom.childs if dom.isTag() else [dom]
elif isinstance(dom, string_type):
element_stack = parseString(dom).childs
else:
element_stack = dom
output = ""
while element_stack:
el = element_stack.pop(0)
if not (el.isTag() or el.isComment() or not el.getTagName()):
output += el.__str__()
if el.childs:
element_stack = el.childs + element_stack
return output
|
Remove all tags from `dom` and obtain plaintext representation.
Args:
dom (str, obj, array): str, HTMLElement instance or array of elements.
Returns:
str: Plain string without tags.
|
juraj-google-style
|
def radar_xsect(scatterer, h_pol=True):
Z = scatterer.get_Z()
if h_pol:
return 2 * np.pi * \
(Z[0,0] - Z[0,1] - Z[1,0] + Z[1,1])
else:
return 2 * np.pi * \
(Z[0,0] + Z[0,1] + Z[1,0] + Z[1,1])
|
Radar cross section for the current setup.
Args:
scatterer: a Scatterer instance.
h_pol: If True (default), use horizontal polarization.
If False, use vertical polarization.
Returns:
The radar cross section.
|
juraj-google-style
|
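The cross section is a fixed linear combination of the upper-left 2x2 block of the scattering matrix Z. A quick numeric check with a dummy matrix (a real Z comes from a pytmatrix `Scatterer`):

```python
import numpy as np

Z = np.array([[1.00, 0.10, 0.05, 0.0],
              [0.08, 0.90, 0.02, 0.0],
              [0.00, 0.00, 0.50, 0.0],
              [0.00, 0.00, 0.00, 0.4]])

xsect_h = 2 * np.pi * (Z[0, 0] - Z[0, 1] - Z[1, 0] + Z[1, 1])  # horizontal polarization
xsect_v = 2 * np.pi * (Z[0, 0] + Z[0, 1] + Z[1, 0] + Z[1, 1])  # vertical polarization
print(xsect_h, xsect_v)
```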
def _status(self):
job_id_list = ' '.join(self.resources.keys())
jobs_missing = list(self.resources.keys())
retcode, stdout, stderr = self.channel.execute_wait("qstat {0}".format(job_id_list), 3)
for line in stdout.split('\n'):
parts = line.split()
if not parts or parts[0].upper().startswith('JOB') or parts[0].startswith('---'):
continue
job_id = parts[0]
status = translate_table.get(parts[4], 'UNKNOWN')
self.resources[job_id]['status'] = status
jobs_missing.remove(job_id)
for missing_job in jobs_missing:
if self.resources[missing_job]['status'] in ['PENDING', 'RUNNING']:
self.resources[missing_job]['status'] = translate_table['E']
|
Internal: Do not call. Returns the status list for a list of job_ids
Args:
self
Returns:
[status...] : Status list of all jobs
|
juraj-google-style
|
def state_nums():
st_nums = {}
fname = pkg_resources.resource_filename(__name__, 'resources/States.csv')
    with open(fname, 'r') as csvfile:  # 'rU' mode was removed in Python 3.11
reader = csv.reader(csvfile, delimiter=',')
i = 0
for row in reader:
st_nums[row[0]] = i
i = (i + 1)
return st_nums
|
Get a dictionary of state names mapped to their 'legend' value.
Returns:
dictionary of state names mapped to their numeric value
|
codesearchnet
|
def _get_model_field(self, name: str):
field_name = self._normalize_field_name(name)
if ((field_name == 'pk') and self.query.model._meta.pk):
return self.query.model._meta.pk
for field in self.query.model._meta.local_concrete_fields:
if ((field.name == field_name) or (field.column == field_name)):
return field
return None
|
Gets the field on a model with the specified name.
Arguments:
name:
The name of the field to look for.
This can be both the actual field name, or
the name of the column, both will work :)
Returns:
The field with the specified name or None if
no such field exists.
|
codesearchnet
|
def select(self, inputs: List[Any], global_state: pg.geno.AttributeDict, step: int) -> List[Any]:
|
Select a list of outputs from the inputs.
A selector has two use cases:
* Used as parents selector, which selects individuals from the population
as parents for recombination. It will be called before the recombination
step within the :meth:`pyglove.evolution.Evolution.propose` method.
* Used as a population updater, which selects individuals from previous
population as a new population. It will be called every time the
population is updated, triggered by the
:meth:`pyglove.evolution.Evolution.feedback` method.
Args:
inputs: a list of objects as input.
global_state: An `AttributeDict` object as the global state container,
which is readable/writable during the operation.
step: Number of examples historically proposed, which can be used for
determining a cross over schedule.
|
github-repos
|
def save_pickle(obj, outfile, protocol=2):
with open(outfile, 'wb') as f:
pickle.dump(obj, f, protocol=protocol)
return outfile
|
Save the object as a pickle file
Args:
obj: Object to pickle.
outfile (str): Filename
protocol (int): Pickle protocol to use. Default is 2 to remain compatible with Python 2
Returns:
str: Path to pickle file
|
codesearchnet
|
def seq_int_arr(seqs):
return np.array([[NT_TO_INT[c] for c in x.upper()] for x in seqs])
|
Convert list of ACGT strings to matrix of 1-4 ints
Args:
seqs (list of str): nucleotide sequences with only 'ACGT' characters
Returns:
numpy.array of int: matrix of integers from 1 to 4 inclusive representing A, C, G, and T
str: nucleotide sequence string
|
juraj-google-style
|
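`seq_int_arr` depends on an `NT_TO_INT` lookup defined elsewhere in the module. A self-contained sketch with an assumed A/C/G/T -> 1..4 mapping:

```python
import numpy as np

NT_TO_INT = {'A': 1, 'C': 2, 'G': 3, 'T': 4}  # assumed mapping, not taken from the source

def seq_int_arr(seqs):
    return np.array([[NT_TO_INT[c] for c in x.upper()] for x in seqs])

print(seq_int_arr(['acgt', 'TTAA']))
# [[1 2 3 4]
#  [4 4 1 1]]
```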
def load_settings(self, path):
if not os.path.exists(path):
raise exceptions.ConfigurationError(
"The server configuration file ('{0}') could not be "
"located.".format(path)
)
self._logger.info(
"Loading server configuration settings from: {0}".format(path)
)
parser = configparser.ConfigParser()
parser.read(path)
self._parse_settings(parser)
self.parse_auth_settings(parser)
|
Load configuration settings from the file pointed to by path.
This will overwrite all current setting values.
Args:
path (string): The path to the configuration file containing
the settings to load. Required.
Raises:
ConfigurationError: Raised if the path does not point to an
existing file or if a setting value is invalid.
|
juraj-google-style
|
def swap_gain(mapping, node_id1, mapping_id1, node_id2, mapping_id2, weight_dict, match_num):
new_mapping_list = mapping[:]
new_mapping_list[node_id1] = mapping_id2
new_mapping_list[node_id2] = mapping_id1
if tuple(new_mapping_list) in match_triple_dict:
return match_triple_dict[tuple(new_mapping_list)] - match_num
gain = 0
new_mapping1 = (node_id1, mapping_id2)
new_mapping2 = (node_id2, mapping_id1)
old_mapping1 = (node_id1, mapping_id1)
old_mapping2 = (node_id2, mapping_id2)
if node_id1 > node_id2:
new_mapping2 = (node_id1, mapping_id2)
new_mapping1 = (node_id2, mapping_id1)
old_mapping1 = (node_id2, mapping_id2)
old_mapping2 = (node_id1, mapping_id1)
if new_mapping1 in weight_dict:
for key in weight_dict[new_mapping1]:
if key == -1:
gain += weight_dict[new_mapping1][-1]
elif new_mapping_list[key[0]] == key[1]:
gain += weight_dict[new_mapping1][key]
if new_mapping2 in weight_dict:
for key in weight_dict[new_mapping2]:
if key == -1:
gain += weight_dict[new_mapping2][-1]
elif key[0] == node_id1:
continue
elif new_mapping_list[key[0]] == key[1]:
gain += weight_dict[new_mapping2][key]
if old_mapping1 in weight_dict:
for key in weight_dict[old_mapping1]:
if key == -1:
gain -= weight_dict[old_mapping1][-1]
elif mapping[key[0]] == key[1]:
gain -= weight_dict[old_mapping1][key]
if old_mapping2 in weight_dict:
for key in weight_dict[old_mapping2]:
if key == -1:
gain -= weight_dict[old_mapping2][-1]
elif key[0] == node_id1:
continue
elif mapping[key[0]] == key[1]:
gain -= weight_dict[old_mapping2][key]
match_triple_dict[tuple(new_mapping_list)] = match_num + gain
return gain
|
Compute the triple match number gain from the swapping
Arguments:
mapping: current node mapping list
node_id1: node 1 index in AMR 1
mapping_id1: the node index in AMR 2 node 1 maps to (in the current mapping)
node_id2: node 2 index in AMR 1
mapping_id2: the node index in AMR 2 node 2 maps to (in the current mapping)
weight_dict: weight dictionary
match_num: the original matching triple number
Returns:
the gain number (might be negative)
|
juraj-google-style
|
def replace_dimensions(cls, dimensions, overrides):
from .dimension import Dimension
replaced = []
for d in dimensions:
if (d.name in overrides):
override = overrides[d.name]
elif (d.label in overrides):
override = overrides[d.label]
else:
override = None
if (override is None):
replaced.append(d)
elif isinstance(override, (util.basestring, tuple)):
replaced.append(d.clone(override))
elif isinstance(override, Dimension):
replaced.append(override)
elif isinstance(override, dict):
replaced.append(d.clone(override.get('name', None), **{k: v for (k, v) in override.items() if (k != 'name')}))
else:
raise ValueError('Dimension can only be overridden with another dimension or a dictionary of attributes')
return replaced
|
Replaces dimensions in list with dictionary of overrides.
Args:
dimensions: List of dimensions
overrides: Dictionary of dimension specs indexed by name
Returns:
list: List of dimensions with replacements applied
|
codesearchnet
|
def prep_itasser_modeling(self, itasser_installation, itlib_folder, runtype, create_in_dir=None, execute_from_dir=None, print_exec=False, **kwargs):
if (not create_in_dir):
if (not self.structure_dir):
raise ValueError('Output directory must be specified')
self.homology_models_dir = op.join(self.structure_dir, 'homology_models')
else:
self.homology_models_dir = create_in_dir
ssbio.utils.make_dir(self.homology_models_dir)
if (not execute_from_dir):
execute_from_dir = self.homology_models_dir
repseq = self.representative_sequence
itasser_kwargs = {'light': True, 'java_home': None, 'binding_site_pred': False, 'ec_pred': False, 'go_pred': False, 'job_scheduler_header': None, 'additional_options': None}
if kwargs:
itasser_kwargs.update(kwargs)
ITASSERPrep(ident=self.id, seq_str=repseq.seq_str, root_dir=self.homology_models_dir, itasser_path=itasser_installation, itlib_path=itlib_folder, runtype=runtype, print_exec=print_exec, execute_dir=execute_from_dir, java_home=itasser_kwargs['java_home'], light=itasser_kwargs['light'], binding_site_pred=itasser_kwargs['binding_site_pred'], ec_pred=itasser_kwargs['ec_pred'], go_pred=itasser_kwargs['go_pred'], job_scheduler_header=itasser_kwargs['job_scheduler_header'], additional_options=itasser_kwargs['additional_options'])
log.debug('Prepared I-TASSER modeling folder {}'.format(self.homology_models_dir))
|
Prepare to run I-TASSER homology modeling for the representative sequence.
Args:
itasser_installation (str): Path to I-TASSER folder, i.e. ``~/software/I-TASSER4.4``
itlib_folder (str): Path to ITLIB folder, i.e. ``~/software/ITLIB``
runtype: How you will be running I-TASSER - local, slurm, or torque
create_in_dir (str): Local directory where folders will be created
execute_from_dir (str): Optional path to execution directory - use this if you are copying the homology
models to another location such as a supercomputer for running
all_genes (bool): If all genes should be prepped, or only those without any mapped structures
print_exec (bool): If the execution statement should be printed to run modelling
Todo:
* Document kwargs - extra options for I-TASSER, SLURM or Torque execution
* Allow modeling of any sequence in sequences attribute, select by ID or provide SeqProp?
|
codesearchnet
|
def convert_graphdef(input_data, input_tensors, output_tensors, **kwargs):
model_flags = build_model_flags(**kwargs)
conversion_flags = build_conversion_flags(**kwargs)
saved_model_dir = kwargs.get('saved_model_dir', None)
input_shapes = kwargs.get('input_shapes', None)
quantized_input_stats = kwargs.get('quantized_input_stats', None)
debug_info = kwargs.get('debug_info', None)
for idx, input_tensor in enumerate(input_tensors):
input_array = model_flags.input_arrays.add()
if saved_model_dir:
input_array.name = input_tensor.name
else:
input_array.name = util.get_tensor_name(input_tensor)
input_array.data_type = convert_tensor_tf_type_to_tflite_type(input_tensor.dtype, usage='input type of the TensorFlow model')
if _is_quantized_input_stats_required(conversion_flags):
if quantized_input_stats:
input_array.mean_value, input_array.std_value = quantized_input_stats[idx]
else:
warnings.warn('Statistics for quantized inputs were expected, but not specified; continuing anyway.')
if input_shapes is None:
shape = input_tensor.shape
else:
shape = input_shapes[idx]
if shape.rank is not None:
dims = []
for dim in shape:
if dim is None or (isinstance(dim, tensor_shape.Dimension) and dim.value is None):
dims.append(-1)
else:
dims.append(int(dim))
input_array.shape.dims.extend(dims)
input_array.shape.unknown_rank = False
else:
input_array.shape.unknown_rank = True
for output_tensor in output_tensors:
if saved_model_dir:
model_flags.output_arrays.append(output_tensor.name)
else:
model_flags.output_arrays.append(util.get_tensor_name(output_tensor))
data = convert(model_flags, conversion_flags, input_data.SerializeToString(), debug_info_str=debug_info.SerializeToString() if debug_info else None)
return data
|
Convert a frozen GraphDef model using the TF Lite converter.
Conversion can be customized by providing arguments that are forwarded to
`build_model_flags` and `build_conversion_flags` (see documentation).
Args:
input_data: Input data (i.e. often `sess.graph_def`),
input_tensors: List of input tensors. Type and shape are computed using
`foo.shape` and `foo.dtype`.
output_tensors: List of output tensors (only .name is used from this).
**kwargs: See `build_model_flags` and `build_conversion_flags`.
Returns:
The converted data. For example if TFLite was the destination, then
this will be a tflite flatbuffer in a bytes array.
Raises:
Defined in `build_conversion_flags`.
|
github-repos
|
def table_update(self, table_name, table_info):
url = (Api._ENDPOINT + (Api._TABLES_PATH % table_name))
return datalab.utils.Http.request(url, method='PUT', data=table_info, credentials=self._credentials)
|
Updates the Table info.
Args:
table_name: the name of the table to update as a tuple of components.
table_info: the Table resource with updated fields.
|
codesearchnet
|
def inject_positional_args(self, method):
inspect = self._modules['inspect']
argspec = inspect.getargspec(method)
keyword_arg_index = ((- 1) * len((argspec.defaults or [])))
arg_names = argspec.args[:(keyword_arg_index or None)]
kwarg_names = argspec.args[len(arg_names):]
functools = self._modules['functools']
@functools.wraps(method)
def method_wrapper(**kwargs):
'Wrapper that pulls values from openhtf.util.conf.'
for kwarg in kwarg_names:
if (kwarg in self):
self._logger.warning('Keyword arg %s not set from configuration, but is a configuration key', kwarg)
final_kwargs = {name: self[name] for name in arg_names if (name in self)}
for overridden in (set(kwargs) & set(final_kwargs)):
self._logger.warning('Overriding configuration value for kwarg %s (%s) with provided kwarg value: %s', overridden, self[overridden], kwargs[overridden])
final_kwargs.update(kwargs)
if inspect.ismethod(method):
name = ('%s.%s' % (method.__self__.__class__.__name__, method.__name__))
else:
name = method.__name__
self._logger.debug('Invoking %s with %s', name, final_kwargs)
return method(**final_kwargs)
if (argspec.args[0] == 'self'):
@functools.wraps(method)
def self_wrapper(self, **kwargs):
'Wrapper that pulls values from openhtf.util.conf.'
kwargs['self'] = self
return method_wrapper(**kwargs)
return self_wrapper
return method_wrapper
|
Decorator for injecting positional arguments from the configuration.
This decorator wraps the given method, so that any positional arguments are
passed with corresponding values from the configuration. The name of the
positional argument must match the configuration key.
Keyword arguments are *NEVER* modified, even if their names match
configuration keys. Avoid naming keyword args names that are also
configuration keys to avoid confusion.
Additional positional arguments may be used that do not appear in the
configuration, but those arguments *MUST* be specified as keyword arguments
upon invocation of the method. This is to avoid ambiguity in which
positional arguments are getting which values.
Args:
method: The method to wrap.
Returns:
A wrapper that, when invoked, will call the wrapped method, passing in
configuration values for positional arguments.
|
codesearchnet
|
def process_latest_result(self, latest_results, current_time_ms, recognize_element):
if latest_results.shape[0] != self._label_count:
raise ValueError('The results for recognition should contain {} elements, but there are {} produced'.format(self._label_count, latest_results.shape[0]))
if self._previous_results.__len__() != 0 and current_time_ms < self._previous_results[0][0]:
raise ValueError('Results must be fed in increasing time order, but receive a timestamp of {}, which was earlier than the previous one of {}'.format(current_time_ms, self._previous_results[0][0]))
self._previous_results.append([current_time_ms, latest_results])
time_limit = current_time_ms - self._average_window_duration_ms
while time_limit > self._previous_results[0][0]:
self._previous_results.popleft()
how_many_results = self._previous_results.__len__()
earliest_time = self._previous_results[0][0]
sample_duration = current_time_ms - earliest_time
if how_many_results < self._minimum_count or sample_duration < self._average_window_duration_ms / 4:
recognize_element.founded_command = self._previous_top_label
recognize_element.score = 0.0
recognize_element.is_new_command = False
return
average_scores = np.zeros(self._label_count)
for item in self._previous_results:
score = item[1]
for i in range(score.size):
average_scores[i] += score[i] / how_many_results
sorted_averaged_index_score = []
for i in range(self._label_count):
sorted_averaged_index_score.append([i, average_scores[i]])
sorted_averaged_index_score = sorted(sorted_averaged_index_score, key=lambda p: p[1], reverse=True)
current_top_index = sorted_averaged_index_score[0][0]
current_top_label = self._labels[current_top_index]
current_top_score = sorted_averaged_index_score[0][1]
time_since_last_top = 0
if self._previous_top_label == '_silence_' or self._previous_top_time == -np.inf:
time_since_last_top = np.inf
else:
time_since_last_top = current_time_ms - self._previous_top_time
if current_top_score > self._detection_threshold and current_top_label != self._previous_top_label and (time_since_last_top > self._suppression_ms):
self._previous_top_label = current_top_label
self._previous_top_time = current_time_ms
recognize_element.is_new_command = True
else:
recognize_element.is_new_command = False
recognize_element.founded_command = current_top_label
recognize_element.score = current_top_score
|
Smoothing the results in average window when a new result is added in.
Receive a new result from inference and put the founded command into
a RecognizeResult instance after the smoothing procedure.
Args:
latest_results: A list containing the confidences of all labels.
current_time_ms: The start timestamp of the input audio clip.
recognize_element: An instance of RecognizeResult to store founded
command, its scores and if it is a new command.
Raises:
ValueError: The length of this result from inference doesn't match
label count.
ValueError: The timestamp of this result is earlier than the most
previous one in the average window
|
github-repos
|
def min(cls, x: 'TensorFluent', y: 'TensorFluent') -> 'TensorFluent':
return cls._binary_op(x, y, tf.minimum, tf.float32)
|
Returns a TensorFluent for the minimum function.
Args:
x: The first operand.
y: The second operand.
Returns:
A TensorFluent wrapping the minimum function.
|
juraj-google-style
|
def sanitize_filename(filename):
sanitized_filename = re.sub('[/\\\\:*?"<>|]', '-', filename)
sanitized_filename = sanitized_filename.replace('&', 'and')
sanitized_filename = sanitized_filename.replace('"', '')
sanitized_filename = sanitized_filename.replace("'", '')
sanitized_filename = sanitized_filename.replace('/', '')
sanitized_filename = sanitized_filename.replace('\\', '')
if (sanitized_filename[0] == '.'):
sanitized_filename = (u'dot' + sanitized_filename[1:])
return sanitized_filename
|
Make sure filenames are valid paths.
Returns:
str: The sanitized filename.
|
codesearchnet
|
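A quick check of the sanitizer's behavior, using a condensed copy of the body above so the example runs on its own:

```python
import re

def sanitize_filename(filename):
    # Replace path separators and other reserved characters, drop quotes,
    # spell out '&', and avoid a leading dot (hidden files).
    sanitized = re.sub('[/\\\\:*?"<>|]', '-', filename)
    sanitized = sanitized.replace('&', 'and').replace('"', '').replace("'", '')
    if sanitized[0] == '.':
        sanitized = u'dot' + sanitized[1:]
    return sanitized

print(sanitize_filename('report: Q1/Q2?'))   # 'report- Q1-Q2-'
print(sanitize_filename('.hidden & old'))    # 'dothidden and old'
```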
def recursively_convert_to_json_serializable(test_obj):
try:
if not isinstance(test_obj, list) and np.isnan(test_obj):
return None
except TypeError:
pass
except ValueError:
pass
if isinstance(test_obj, (string_types, integer_types, float, bool)):
return test_obj
elif isinstance(test_obj, dict):
new_dict = {}
for key in test_obj:
new_dict[str(key)] = recursively_convert_to_json_serializable(
test_obj[key])
return new_dict
elif isinstance(test_obj, (list, tuple, set)):
new_list = []
for val in test_obj:
new_list.append(recursively_convert_to_json_serializable(val))
return new_list
elif isinstance(test_obj, (np.ndarray, pd.Index)):
return [recursively_convert_to_json_serializable(x) for x in test_obj.tolist()]
elif test_obj is None:
return test_obj
elif isinstance(test_obj, (datetime.datetime, datetime.date)):
return str(test_obj)
elif np.issubdtype(type(test_obj), np.bool_):
return bool(test_obj)
elif np.issubdtype(type(test_obj), np.integer) or np.issubdtype(type(test_obj), np.uint):
return int(test_obj)
elif np.issubdtype(type(test_obj), np.floating):
return float(round(test_obj, sys.float_info.dig))
elif isinstance(test_obj, pd.DataFrame):
return recursively_convert_to_json_serializable(test_obj.to_dict(orient='records'))
elif isinstance(test_obj, decimal.Decimal):
return float(test_obj)
else:
raise TypeError('%s is of type %s which cannot be serialized.' % (
str(test_obj), type(test_obj).__name__))
|
Helper function to convert a dict object to one that is serializable
Args:
test_obj: an object to attempt to convert to a corresponding json-serializable object
Returns:
(dict) A converted test_object
Warning:
test_obj may also be converted in place.
|
juraj-google-style
|
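The point of the converter is that NumPy scalars, arrays and NaNs become plain Python values that `json.dumps` accepts. A trimmed sketch covering only the NumPy branches of the dispatch above:

```python
import json
import numpy as np

def to_json_serializable(obj):
    # Trimmed sketch of the same dispatch: containers recurse, NumPy scalars
    # are cast to built-ins, NaN becomes None.
    if isinstance(obj, dict):
        return {str(k): to_json_serializable(v) for k, v in obj.items()}
    if isinstance(obj, (list, tuple, set)):
        return [to_json_serializable(v) for v in obj]
    if isinstance(obj, np.ndarray):
        return [to_json_serializable(v) for v in obj.tolist()]
    if isinstance(obj, float) and np.isnan(obj):
        return None
    if isinstance(obj, np.bool_):
        return bool(obj)
    if isinstance(obj, np.integer):
        return int(obj)
    if isinstance(obj, np.floating):
        return float(obj)
    return obj

record = {"count": np.int64(3), "mean": np.float64(2.5), "ok": np.bool_(True),
          "values": np.array([1.0, np.nan])}
print(json.dumps(to_json_serializable(record)))
# {"count": 3, "mean": 2.5, "ok": true, "values": [1.0, null]}
```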
def validate_gcs_path(path, require_object):
(bucket, key) = datalab.storage._bucket.parse_name(path)
if (bucket is None):
raise Exception(('Invalid GCS path "%s"' % path))
if (require_object and (key is None)):
raise Exception(('It appears the GCS path "%s" is a bucket path but not an object path' % path))
|
Check whether a given path is a valid GCS path.
Args:
path: the GCS path to check.
require_object: if True, the path has to be an object path but not bucket path.
Raises:
Exception if the path is invalid
|
codesearchnet
|
def get_correct_answer(question, default=None, required=False, answer=None, is_answer_correct=None):
while 1:
if (default is None):
msg = u' - No Default Available'
else:
msg = u'\n[DEFAULT] -> {}\nPress Enter To Use Default'.format(default)
prompt = ((question + msg) + u'\n--> ')
if (answer is None):
answer = six.moves.input(prompt)
if ((answer == '') and required and (default is not None)):
print(u'You have to enter a value\n\n')
six.moves.input(u'Press enter to continue')
print(u'\n\n')
answer = None
continue
if ((answer == u'') and (default is not None)):
answer = default
_ans = ask_yes_no(u'You entered {}, is this correct?'.format(answer), answer=is_answer_correct)
if _ans:
return answer
else:
answer = None
|
u"""Ask user a question and confirm answer
Args:
question (str): Question to ask user
default (str): Default answer if no input from user
required (bool): Require user to input an answer
answer (str): Used for testing
is_answer_correct (str): Used for testing
|
codesearchnet
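A non-interactive usage sketch via the testing hooks (values are made up; it assumes ask_yes_no accepts 'yes' through its answer hook):
name = get_correct_answer('Project name?', default='demo', answer='my-app', is_answer_correct='yes')
# name == 'my-app'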
|
def _ParseAttribute(self, file_object):
file_offset = file_object.tell()
attribute_map = self._GetDataTypeMap('cups_ipp_attribute')
try:
attribute, _ = self._ReadStructureFromFileObject(
file_object, file_offset, attribute_map)
except (ValueError, errors.ParseError) as exception:
raise errors.ParseError(
'Unable to parse attribute with error: {0!s}'.format(exception))
value = None
if attribute.tag_value in self._INTEGER_TAG_VALUES:
value = self._ParseIntegerValue(attribute.value_data, file_offset)
elif attribute.tag_value == self._TAG_VALUE_BOOLEAN:
value = self._ParseBooleanValue(attribute.value_data)
elif attribute.tag_value == self._TAG_VALUE_DATE_TIME:
value = self._ParseDateTimeValue(attribute.value_data, file_offset)
elif attribute.tag_value in self._STRING_WITHOUT_LANGUAGE_VALUES:
value = attribute.value_data.decode(self._last_charset_attribute)
elif attribute.tag_value in self._ASCII_STRING_VALUES:
value = attribute.value_data.decode('ascii')
if attribute.tag_value == self._TAG_VALUE_CHARSET:
self._last_charset_attribute = value
else:
value = attribute.value_data
return attribute.name, value
|
Parses a CUPS IPP attribute from a file-like object.
Args:
file_object (dfvfs.FileIO): file-like object.
Returns:
tuple[str, object]: attribute name and value.
Raises:
ParseError: if the attribute cannot be parsed.
|
juraj-google-style
|
def _get_val_from_ddb_data(data, keylist):
next_type = None
for k in keylist:
for k1 in k:
if next_type is None:
data = data[k[k1]]
else:
temp_dict = data[next_type]
data = temp_dict[k[k1]]
next_type = k1
if next_type == 'L':
return _convert_ddb_list_to_list(data[next_type])
if next_type == 'N':
return int(data[next_type])
return str(data[next_type])
|
Given a dictionary of dynamodb data (including the datatypes) and a
properly structured keylist, it will return the value of the lookup
Args:
data (dict): the raw dynamodb data
keylist(list): a list of keys to lookup. This must include the
datatype
Returns:
various: It returns the value from the dynamodb record, and casts it
to a matching python datatype
|
juraj-google-style
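A worked sketch of the expected keylist structure (the sample item is made up):
item = {'config': {'M': {'port': {'N': '8080'}}}}
keylist = [{'M': 'config'}, {'N': 'port'}]
print(_get_val_from_ddb_data(item, keylist))  # 8080, cast to int because the final datatype is 'N'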
|
def next(self):
self._set_consumer_timeout_start()
while True:
try:
return six.next(self._get_message_iterator())
except StopIteration:
self._reset_message_iterator()
self._check_consumer_timeout()
|
Return the next available message
Blocks indefinitely unless consumer_timeout_ms > 0
Returns:
a single KafkaMessage from the message iterator
Raises:
ConsumerTimeout after consumer_timeout_ms and no message
Note:
This is also the method called internally during iteration
|
codesearchnet
|
def List(self, request, global_params=None):
config = self.GetMethodConfig('List')
return self._RunMethod(config, request, global_params=global_params)
|
Lists all row access policies on the specified table.
Args:
request: (BigqueryRowAccessPoliciesListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ListRowAccessPoliciesResponse) The response message.
|
github-repos
|
def username(self, value):
self._username = value
self._connectionXML.set('username', value)
|
Set the connection's username property.
Args:
value: New username value. String.
Returns:
Nothing.
|
juraj-google-style
|
def CheckMySQLConnection(db_options):
for tries_left in range(_MYSQL_MAX_RETRIES, -1, -1):
try:
connection_options = dict(
host=db_options["Mysql.host"],
port=db_options["Mysql.port"],
db=db_options["Mysql.database_name"],
user=db_options["Mysql.database_username"],
passwd=db_options["Mysql.database_password"],
charset="utf8")
ssl_enabled = "Mysql.client_key_path" in db_options
if ssl_enabled:
connection_options["ssl"] = {
"key": db_options["Mysql.client_key_path"],
"cert": db_options["Mysql.client_cert_path"],
"ca": db_options["Mysql.ca_cert_path"],
}
connection = MySQLdb.connect(**connection_options)
if ssl_enabled:
cursor = connection.cursor()
cursor.execute("SHOW VARIABLES LIKE 'have_ssl'")
res = cursor.fetchone()
if res[0] == "have_ssl" and res[1] == "YES":
print("SSL enabled successfully.")
else:
print("Unable to establish SSL connection to MySQL.")
return False
return True
except MySQLdb.OperationalError as mysql_op_error:
if len(mysql_op_error.args) < 2:
print("Unexpected exception type received from MySQL. %d attempts "
"left: %s" % (tries_left, mysql_op_error))
time.sleep(_MYSQL_RETRY_WAIT_SECS)
continue
if mysql_op_error.args[0] == mysql_conn_errors.CONNECTION_ERROR:
print("Failed to connect to MySQL. Is it running? %d attempts left." %
tries_left)
elif mysql_op_error.args[0] == mysql_conn_errors.UNKNOWN_HOST:
print("Unknown-hostname error encountered while trying to connect to "
"MySQL.")
return False
elif mysql_op_error.args[0] == general_mysql_errors.BAD_DB_ERROR:
return True
elif mysql_op_error.args[0] in (
general_mysql_errors.ACCESS_DENIED_ERROR,
general_mysql_errors.DBACCESS_DENIED_ERROR):
print("Permission error encountered while trying to connect to "
"MySQL: %s" % mysql_op_error)
return False
else:
print("Unexpected operational error encountered while trying to "
"connect to MySQL. %d attempts left: %s" %
(tries_left, mysql_op_error))
except MySQLdb.Error as mysql_error:
print("Unexpected error encountered while trying to connect to MySQL. "
"%d attempts left: %s" % (tries_left, mysql_error))
time.sleep(_MYSQL_RETRY_WAIT_SECS)
return False
|
Checks whether a connection can be established to MySQL.
Args:
db_options: A dict mapping GRR MySQL config options to their values.
Returns:
A boolean indicating whether a connection could be made to a MySQL server
instance with the given options.
|
juraj-google-style
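A sketch of the expected db_options mapping (values are hypothetical; only the keys are taken from the code above):
db_options = {
    "Mysql.host": "localhost",
    "Mysql.port": 3306,
    "Mysql.database_name": "grr",
    "Mysql.database_username": "grr",
    "Mysql.database_password": "secret",
    # Optional: the presence of Mysql.client_key_path enables SSL and then the two entries below are read as well.
    # "Mysql.client_key_path": "/etc/grr/client.key",
    # "Mysql.client_cert_path": "/etc/grr/client.crt",
    # "Mysql.ca_cert_path": "/etc/grr/ca.crt",
}
if CheckMySQLConnection(db_options):
    print("MySQL reachable with these options.")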
|
def is_test_executed(self, test_name):
for record in self.executed:
if record.test_name == test_name:
return True
return False
|
Checks if a specific test has been executed.
Args:
test_name: string, the name of the test to check.
Returns:
True if the test has been executed according to the test result,
False otherwise.
|
juraj-google-style
|
def Matches(self, file_entry):
if not self._date_time_ranges:
return None
for date_time_range in self._date_time_ranges:
time_attribute = self._TIME_VALUE_MAPPINGS.get(
date_time_range.time_value, None)
if not time_attribute:
continue
timestamp = getattr(file_entry, time_attribute, None)
if timestamp is None:
continue
if (date_time_range.start_date_time is not None and
timestamp < date_time_range.start_date_time):
return False
if (date_time_range.end_date_time is not None and
timestamp > date_time_range.end_date_time):
return False
return True
|
Compares the file entry against the filter.
Args:
file_entry (dfvfs.FileEntry): file entry to compare.
Returns:
bool: True if the file entry matches the filter, False if not or
None if the filter does not apply.
|
juraj-google-style
|
def list(self):
self._initialize_list()
interested = True
response = self._cloudFormation.list_stacks()
print('Stack(s):')
while interested:
if 'StackSummaries' in response:
for stack in response['StackSummaries']:
stack_status = stack['StackStatus']
if stack_status != 'DELETE_COMPLETE':
print(' [{}] - {}'.format(stack['StackStatus'], stack['StackName']))
next_token = response.get('NextToken', None)
if next_token:
response = self._cloudFormation.list_stacks(NextToken=next_token)
else:
interested = False
return True
|
List the existing stacks in the indicated region
Args:
None
Returns:
True when the listing completes.
Todo:
Figure out what could go wrong and take steps
to handle problems.
|
juraj-google-style
|
def wait_for_compilation_job(self, job, poll=5):
desc = _wait_until((lambda : _compilation_job_status(self.sagemaker_client, job)), poll)
self._check_job_status(job, desc, 'CompilationJobStatus')
return desc
|
Wait for an Amazon SageMaker Neo compilation job to complete.
Args:
job (str): Name of the compilation job to wait for.
poll (int): Polling interval in seconds (default: 5).
Returns:
(dict): Return value from the ``DescribeCompilationJob`` API.
Raises:
ValueError: If the compilation job fails.
|
codesearchnet
|
def transform(self, col):
out = pd.DataFrame()
out[self.col_name] = self.safe_datetime_cast(col)
out[self.col_name] = self.to_timestamp(out)
return out
|
Transform the datetime column and return the processed table.
Args:
col(pandas.DataFrame): Data to transform.
Returns:
pandas.DataFrame
|
juraj-google-style
|
def prefer_static_broadcast_shape(shape1,
shape2,
name="prefer_static_broadcast_shape"):
with tf.name_scope(name):
def make_shape_tensor(x):
return tf.convert_to_tensor(value=x, name="shape", dtype=tf.int32)
def get_tensor_shape(s):
if isinstance(s, tf.TensorShape):
return s
s_ = tf.get_static_value(make_shape_tensor(s))
if s_ is not None:
return tf.TensorShape(s_)
return None
def get_shape_tensor(s):
if not isinstance(s, tf.TensorShape):
return make_shape_tensor(s)
if tensorshape_util.is_fully_defined(s):
return make_shape_tensor(tensorshape_util.as_list(s))
raise ValueError("Cannot broadcast from partially "
"defined `TensorShape`.")
shape1_ = get_tensor_shape(shape1)
shape2_ = get_tensor_shape(shape2)
if shape1_ is not None and shape2_ is not None:
return tf.broadcast_static_shape(shape1_, shape2_)
shape1_ = get_shape_tensor(shape1)
shape2_ = get_shape_tensor(shape2)
return tf.broadcast_dynamic_shape(shape1_, shape2_)
|
Convenience function which statically broadcasts shape when possible.
Args:
shape1: `1-D` integer `Tensor`. Already converted to tensor!
shape2: `1-D` integer `Tensor`. Already converted to tensor!
name: A string name to prepend to created ops.
Returns:
The broadcast shape, either as `TensorShape` (if broadcast can be done
statically), or as a `Tensor`.
|
juraj-google-style
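A brief usage sketch (it assumes TensorFlow and the module defining the function are importable):
import tensorflow as tf

shape = prefer_static_broadcast_shape(tf.TensorShape([3, 1]), [1, 4])
# Both operands are statically known, so this returns TensorShape([3, 4]).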
|
def workspace_from_url(self, mets_url, dst_dir=None, clobber_mets=False, mets_basename=None, download=False, baseurl=None):
if (dst_dir and (not dst_dir.startswith('/'))):
dst_dir = abspath(dst_dir)
if (mets_url is None):
if (baseurl is None):
raise Exception('Must pass mets_url and/or baseurl to workspace_from_url')
else:
mets_url = ('file://%s/%s' % (baseurl, (mets_basename if mets_basename else 'mets.xml')))
if (baseurl is None):
baseurl = mets_url.rsplit('/', 1)[0]
log.debug("workspace_from_url\nmets_url='%s'\nbaseurl='%s'\ndst_dir='%s'", mets_url, baseurl, dst_dir)
if ('://' not in mets_url):
mets_url = ('file://' + abspath(mets_url))
if (dst_dir is None):
if mets_url.startswith('file://'):
dst_dir = dirname(mets_url[len('file://'):])
else:
dst_dir = tempfile.mkdtemp(prefix=TMP_PREFIX)
log.debug("Creating workspace '%s' for METS @ <%s>", dst_dir, mets_url)
if (mets_basename is None):
mets_basename = mets_url.rsplit('/', 1)[(- 1)].split('?')[0].split('#')[0]
dst_mets = join(dst_dir, mets_basename)
log.debug("Copying mets url '%s' to '%s'", mets_url, dst_mets)
if (('file://' + dst_mets) == mets_url):
log.debug('Target and source mets are identical')
elif (exists(dst_mets) and (not clobber_mets)):
raise Exception(("File '%s' already exists but clobber_mets is false" % dst_mets))
else:
self.download_to_directory(dst_dir, mets_url, basename=mets_basename)
workspace = Workspace(self, dst_dir, mets_basename=mets_basename, baseurl=baseurl)
if download:
for f in workspace.mets.find_files():
workspace.download_file(f)
return workspace
|
Create a workspace from a METS by URL.
Sets the mets.xml file
Arguments:
mets_url (string): Source mets URL
dst_dir (string, None): Target directory for the workspace
clobber_mets (boolean, False): Whether to overwrite existing mets.xml. By default existing mets.xml will raise an exception.
download (boolean, False): Whether to download all the files
baseurl (string, None): Base URL for resolving relative file locations
Returns:
Workspace
|
codesearchnet
|
def message_index(index_url):
idx = csv.reader(urllib2.urlopen(index_url), delimiter=':')
messages = []
for line in idx:
messages.append(line)
return messages
|
Get the message index of components using urllib2.
Args:
index_url (string): URL of the ':'-delimited index file.
Returns:
list: messages, one list of fields per index line.
|
codesearchnet
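A hedged usage sketch (the index URL is made up; each index line is expected to be ':'-separated):
for fields in message_index('http://example.com/messages/index.txt'):
    print(fields)  # one list of fields per index line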
|
def batch_predict_async(training_dir, prediction_input_file, output_dir, mode, batch_size=16, shard_files=True, output_format='csv', cloud=False):
import google.datalab.utils as du
with warnings.catch_warnings():
warnings.simplefilter('ignore')
if cloud:
runner_results = cloud_batch_predict(training_dir, prediction_input_file, output_dir, mode, batch_size, shard_files, output_format)
job = du.DataflowJob(runner_results)
else:
runner_results = local_batch_predict(training_dir, prediction_input_file, output_dir, mode, batch_size, shard_files, output_format)
job = du.LambdaJob((lambda : runner_results.wait_until_finish()), job_id=None)
return job
|
Local and cloud batch prediction.
Args:
training_dir: The output folder of training.
prediction_input_file: csv file pattern to a file. File must be on GCS if
running cloud prediction
output_dir: output location to save the results. Must be a GCS path if
running cloud prediction.
mode: 'evaluation' or 'prediction'. If 'evaluation', the input data must
contain a target column. If 'prediction', the input data must not
contain a target column.
batch_size: Int. How many instances to run in memory at once. Larger values
mean better performance but more memory consumed.
shard_files: If False, the output files are not sharded.
output_format: csv or json. Json files are newline-delimited json.
cloud: If true, does cloud batch prediction. If False, runs batch prediction
locally.
Returns:
A google.datalab.utils.Job object that can be used to query state from or wait.
|
codesearchnet
|
def get_q2(self, thetas=None, phis=None):
if ((thetas is not None) and (phis is not None)):
self.compute_trigonometric_terms(thetas, phis)
nnn = len(self._pow_sin_t[1])
nnn_range = range(nnn)
sqrt_15_2pi = sqrt((15.0 / (2.0 * pi)))
sqrt_5_pi = sqrt((5.0 / pi))
pre_y_2_2 = [((0.25 * sqrt_15_2pi) * val) for val in self._pow_sin_t[2]]
pre_y_2_1 = [(((0.5 * sqrt_15_2pi) * val[0]) * val[1]) for val in zip(self._pow_sin_t[1], self._pow_cos_t[1])]
acc = 0.0
real = imag = 0.0
for i in nnn_range:
real += (pre_y_2_2[i] * self._cos_n_p[2][i])
imag -= (pre_y_2_2[i] * self._sin_n_p[2][i])
acc += ((real * real) + (imag * imag))
real = imag = 0.0
for i in nnn_range:
real += (pre_y_2_1[i] * self._cos_n_p[1][i])
imag -= (pre_y_2_1[i] * self._sin_n_p[1][i])
acc += ((real * real) + (imag * imag))
real = imag = 0.0
for i in nnn_range:
real += ((0.25 * sqrt_5_pi) * ((3.0 * self._pow_cos_t[2][i]) - 1.0))
acc += (real * real)
real = imag = 0.0
for i in nnn_range:
real -= (pre_y_2_1[i] * self._cos_n_p[1][i])
imag -= (pre_y_2_1[i] * self._sin_n_p[1][i])
acc += ((real * real) + (imag * imag))
real = imag = 0.0
for i in nnn_range:
real += (pre_y_2_2[i] * self._cos_n_p[2][i])
imag += (pre_y_2_2[i] * self._sin_n_p[2][i])
acc += ((real * real) + (imag * imag))
q2 = sqrt((((4.0 * pi) * acc) / (5.0 * float((nnn * nnn)))))
return q2
|
Calculates the value of the bond orientational order parameter of
weight l=2. If the function is called with non-empty lists of
polar and azimuthal angles the corresponding trigonometric terms
are computed afresh. Otherwise, it is expected that the
compute_trigonometric_terms function has been just called.
Args:
thetas ([float]): polar angles of all neighbors in radians.
phis ([float]): azimuth angles of all neighbors in radians.
Returns:
float: bond orientational order parameter of weight l=2
corresponding to the input angles thetas and phis.
|
codesearchnet
|
def cast_vdata(vdata=None, vtype='REG_SZ'):
registry = Registry()
vtype_value = registry.vtype[vtype]
if (vtype_value in [win32con.REG_SZ, win32con.REG_EXPAND_SZ]):
return _to_unicode(vdata)
elif (vtype_value == win32con.REG_BINARY):
if isinstance(vdata, six.text_type):
return vdata.encode('utf-8')
return vdata
elif (vtype_value == win32con.REG_MULTI_SZ):
return [_to_unicode(i) for i in vdata]
elif (vtype_value == win32con.REG_QWORD):
return (vdata if six.PY3 else long(vdata))
else:
return int(vdata)
|
Cast the ``vdata`` value to the appropriate data type for the registry type
specified in ``vtype``
Args:
vdata (str, int, list, bytes): The data to cast
vtype (str):
The type of data to be written to the registry. Must be one of the
following:
- REG_BINARY
- REG_DWORD
- REG_EXPAND_SZ
- REG_MULTI_SZ
- REG_QWORD
- REG_SZ
Returns:
The vdata cast to the appropriate type. Will be unicode string, binary,
list of unicode strings, or int
Usage:
.. code-block:: python
import salt.utils.win_reg
winreg.cast_vdata(vdata='This is the string', vtype='REG_SZ')
|
codesearchnet
|
def _get_resource(self, label: str, source: dict, resource_type: str):
try:
return source[label]
except KeyError:
raise ValueError("Cannot find {0} with label '{1}'.\nExisting {0} labels: {2}".format(
resource_type, label, list(source.keys())))
|
Generic resource fetcher handling errors.
Args:
label (str): The label to fetch
source (dict): The dictionary to look up the label
resource_type (str): The display name of the resource type (used in errors)
|
juraj-google-style
|
def __init__(
self, resolver_context, file_system, path_spec, is_root=False,
is_virtual=False):
compressed_stream = resolver.Resolver.OpenFileObject(
path_spec, resolver_context=resolver_context)
if not compressed_stream:
raise errors.BackEndError(
'Unable to open compressed stream: {0:s}.'.format(
              path_spec.comparable))
super(CompressedStreamFileEntry, self).__init__(
resolver_context, file_system, path_spec, is_root=is_root,
is_virtual=is_virtual)
self._compressed_stream = compressed_stream
self.entry_type = definitions.FILE_ENTRY_TYPE_FILE
|
Initializes a file entry.
Args:
resolver_context (Context): resolver context.
file_system (FileSystem): file system.
path_spec (PathSpec): path specification.
is_root (Optional[bool]): True if the file entry is the root file entry
of the corresponding file system.
is_virtual (Optional[bool]): True if the file entry is a virtual file entry.
Raises:
BackEndError: when the compressed stream is missing.
|
juraj-google-style
|
def get_aws_session(account):
from cloud_inquisitor.config import dbconfig
from cloud_inquisitor.plugins.types.accounts import AWSAccount
if (not isinstance(account, AWSAccount)):
raise InquisitorError('Non AWSAccount passed to get_aws_session, got {}'.format(account.__class__.__name__))
session = get_local_aws_session()
if (session.get_credentials().method in ['iam-role', 'env', 'explicit']):
sts = session.client('sts')
else:
temp_sts = session.client('sts')
audit_sts_role = temp_sts.assume_role(RoleArn=app_config.aws_api.instance_role_arn, RoleSessionName='inquisitor')
sts = boto3.session.Session(audit_sts_role['Credentials']['AccessKeyId'], audit_sts_role['Credentials']['SecretAccessKey'], audit_sts_role['Credentials']['SessionToken']).client('sts')
role = sts.assume_role(RoleArn='arn:aws:iam::{}:role/{}'.format(account.account_number, dbconfig.get('role_name', default='cinq_role')), RoleSessionName='inquisitor')
sess = boto3.session.Session(role['Credentials']['AccessKeyId'], role['Credentials']['SecretAccessKey'], role['Credentials']['SessionToken'])
return sess
|
Function to return a boto3 Session based on the account passed in the first argument.
Args:
account (:obj:`Account`): Account to create the session object for
Returns:
:obj:`boto3:boto3.session.Session`
|
codesearchnet
|
def join(self):
c_api.TF_ServerJoin(self._server)
|
Blocks until the server has shut down.
This method currently blocks forever.
Raises:
tf.errors.OpError: Or one of its subclasses if an error occurs while
joining the TensorFlow server.
|
github-repos
|
def response_list(data, key):
if (key not in data):
return None
if isinstance(data[key], list):
return data[key]
else:
return [data[key]]
|
Obtain the relevant response data in a list.
If the response does not already contain the result in a list, a new one
will be created to ease iteration in the parser methods.
Args:
data (dict): API response.
key (str): Attribute of the response that contains the result values.
Returns:
List of response items (usually dict) or None if the key is not present.
|
codesearchnet
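A small usage sketch with a made-up API response:
data = {'user': {'id': 1}, 'tags': ['a', 'b']}
print(response_list(data, 'user'))     # [{'id': 1}] - wrapped in a list
print(response_list(data, 'tags'))     # ['a', 'b'] - already a list
print(response_list(data, 'missing'))  # None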
|
def initialize_plugs(self, plug_types=None):
types = plug_types if plug_types is not None else self._plug_types
for plug_type in types:
plug_logger = self.logger.getChild(plug_type.__name__)
if plug_type in self._plugs_by_type:
continue
try:
if not issubclass(plug_type, BasePlug):
raise InvalidPlugError(
'Plug type "%s" is not an instance of BasePlug' % plug_type)
if plug_type.logger != _LOG:
raise InvalidPlugError(
'Do not override "logger" in your plugs.', plug_type)
plug_type.logger = plug_logger
try:
plug_instance = plug_type()
finally:
plug_type.logger = _LOG
if plug_instance.logger != _LOG:
raise InvalidPlugError(
'Do not set "self.logger" in __init__ in your plugs', plug_type)
else:
plug_instance.logger = plug_logger
except Exception:
plug_logger.exception('Exception instantiating plug type %s', plug_type)
self.tear_down_plugs()
raise
self.update_plug(plug_type, plug_instance)
|
Instantiate required plugs.
Instantiates plug types and saves the instances in self._plugs_by_type for
use in provide_plugs().
Args:
plug_types: Plug types may be specified here rather than passed
into the constructor (this is used primarily for unit testing
phases).
|
juraj-google-style
|
def AddFiles(self, hash_id_metadatas):
for hash_id, metadata in iteritems(hash_id_metadatas):
self.AddFile(hash_id, metadata)
|
Adds multiple files to the file store.
Args:
hash_id_metadatas: A dictionary mapping hash ids to file metadata (a tuple
of hash client path and blob references).
|
juraj-google-style
|
def create_cloudwatch_event(app_name, env, region, rules):
session = boto3.Session(profile_name=env, region_name=region)
cloudwatch_client = session.client('events')
rule_name = rules.get('rule_name')
schedule = rules.get('schedule')
rule_description = rules.get('rule_description')
json_input = rules.get('json_input', {})
if (schedule is None):
LOG.critical('Schedule is required and no schedule is defined!')
raise InvalidEventConfiguration('Schedule is required and no schedule is defined!')
if (rule_name is None):
LOG.critical('Rule name is required and no rule_name is defined!')
raise InvalidEventConfiguration('Rule name is required and no rule_name is defined!')
else:
LOG.info('%s and %s', app_name, rule_name)
rule_name = '{}_{}'.format(app_name, rule_name.replace(' ', '_'))
if (rule_description is None):
rule_description = '{} - {}'.format(app_name, rule_name)
lambda_arn = get_lambda_arn(app=app_name, account=env, region=region)
account_id = get_env_credential(env=env)['accountId']
principal = 'events.amazonaws.com'
statement_id = '{}_cloudwatch_{}'.format(app_name, rule_name)
source_arn = 'arn:aws:events:{}:{}:rule/{}'.format(region, account_id, rule_name)
add_lambda_permissions(function=lambda_arn, statement_id=statement_id, action='lambda:InvokeFunction', principal=principal, source_arn=source_arn, env=env, region=region)
cloudwatch_client.put_rule(Name=rule_name, ScheduleExpression=schedule, State='ENABLED', Description=rule_description)
targets = []
json_payload = '{}'.format(json.dumps(json_input))
target = {'Id': app_name, 'Arn': lambda_arn, 'Input': json_payload}
targets.append(target)
put_targets_response = cloudwatch_client.put_targets(Rule=rule_name, Targets=targets)
LOG.debug('Cloudwatch put targets response: %s', put_targets_response)
LOG.info('Created Cloudwatch event "%s" with schedule: %s', rule_name, schedule)
|
Create cloudwatch event for lambda from rules.
Args:
app_name (str): name of the lambda function
env (str): Environment/Account for lambda function
region (str): AWS region of the lambda function
rules (dict): Trigger rules from the settings
|
codesearchnet
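A sketch of the rules mapping this function expects (values are hypothetical; the keys mirror the rules.get() calls above):
rules = {
    'rule_name': 'nightly trigger',            # becomes '<app_name>_nightly_trigger'
    'schedule': 'rate(1 day)',
    'rule_description': 'Run the lambda once a day',
    'json_input': {'action': 'rollup'},
}
create_cloudwatch_event(app_name='myapp', env='dev', region='us-east-1', rules=rules)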
|
def __init__(self, _args):
super(TcExRun, self).__init__(_args)
self._signal_handler_init()
self._config = None
self._profile = {}
self._staging_data = None
self.container = None
self.reports = Reports()
self.tcex = None
self.docker_image = 'tcintegrations/tci-dev:latest'
self.log = self._logger()
self._clear_redis_tracker = []
self.json_report = {}
self.max_diff = 10
self.sleep = 0
self.display_name = None
self.program_main = None
self.program_version = None
self.runtime_level = None
self.shell = False
|
Initialize Class properties.
Args:
_args (namespace): The argparser args Namespace.
|
juraj-google-style
|
def reschedule(cls,
mapreduce_state,
mapreduce_spec,
serial_id,
queue_name=None):
task_name = ControllerCallbackHandler.get_task_name(
mapreduce_spec, serial_id)
task_params = ControllerCallbackHandler.controller_parameters(
mapreduce_spec, serial_id)
if not queue_name:
queue_name = os.environ.get("HTTP_X_APPENGINE_QUEUENAME", "default")
controller_callback_task = model.HugeTask(
url=(mapreduce_spec.params["base_path"] + "/controller_callback/" +
mapreduce_spec.mapreduce_id),
name=task_name, params=task_params,
countdown=parameters.config._CONTROLLER_PERIOD_SEC,
parent=mapreduce_state,
headers=util._get_task_headers(mapreduce_spec.mapreduce_id))
if not _run_task_hook(mapreduce_spec.get_hooks(),
"enqueue_controller_task",
controller_callback_task,
queue_name):
try:
controller_callback_task.add(queue_name)
except (taskqueue.TombstonedTaskError,
taskqueue.TaskAlreadyExistsError), e:
logging.warning("Task %r with params %r already exists. %s: %s",
task_name, task_params, e.__class__, e)
|
Schedule new update status callback task.
Args:
mapreduce_state: mapreduce state as model.MapreduceState
mapreduce_spec: mapreduce specification as MapreduceSpec.
serial_id: id of the invocation as int.
queue_name: The queue to schedule this task on. Will use the current
queue of execution if not supplied.
|
juraj-google-style
|
def apply(self, func, *args, **kwargs):
ret = func(self._t, *args, **kwargs)
return LinearWrap(ret)
|
Apply a function on the wrapped tensor.
Returns:
LinearWrap: ``LinearWrap(func(self.tensor(), *args, **kwargs))``.
|
codesearchnet
|
def collect_trajectories(env, policy_fun, num_trajectories=1, policy='greedy', max_timestep=None, epsilon=0.1):
trajectories = []
for t in range(num_trajectories):
t_start = time.time()
rewards = []
actions = []
done = False
observation = env.reset()
observation_history = observation[(np.newaxis, np.newaxis, :)]
ts = 0
while ((not done) and ((not max_timestep) or (observation_history.shape[1] < max_timestep))):
ts_start = time.time()
predictions = policy_fun(observation_history)
predictions = np.squeeze(predictions, axis=0)[(- 1)]
action = None
if (policy == 'greedy'):
action = np.argmax(predictions)
elif (policy == 'epsilon-greedy'):
if (onp.random.random() < epsilon):
action = onp.random.randint(0, high=len(predictions))
else:
action = np.argmax(predictions)
elif (policy == 'categorical-sampling'):
predictions = np.exp(predictions)
action = onp.argwhere((onp.random.multinomial(1, predictions) == 1))
else:
raise ValueError(('Unknown policy: %s' % policy))
try:
action = int(action)
except TypeError as err:
logging.error('Cannot convert action into an integer: [%s]', err)
logging.error('action.shape: [%s]', action.shape)
logging.error('action: [%s]', action)
logging.error('predictions.shape: [%s]', predictions.shape)
logging.error('predictions: [%s]', predictions)
logging.error('observation_history: [%s]', observation_history)
raise err
(observation, reward, done, _) = env.step(action)
observation_history = np.concatenate([observation_history, observation[(np.newaxis, np.newaxis, :)]], axis=1)
rewards.append(reward)
actions.append(action)
ts += 1
logging.vlog(2, ' Collected time-step[ %5d] of trajectory[ %5d] in [%0.2f] msec.', ts, t, get_time(ts_start))
logging.vlog(2, ' Collected trajectory[ %5d] in [%0.2f] msec.', t, get_time(t_start))
assert (done or (max_timestep and (max_timestep >= observation_history.shape[1])))
observation_history = np.squeeze(observation_history, axis=0)
trajectories.append((observation_history, np.stack(actions), np.stack(rewards)))
return trajectories
|
Collect trajectories with the given policy net and behaviour.
Args:
env: A gym env interface, for now this is not-batched.
policy_fun: observations(B,T+1) -> log-probabs(B,T+1, A) callable.
num_trajectories: int, number of trajectories.
policy: string, "greedy", "epsilon-greedy", or "categorical-sampling" i.e.
how to use the policy_fun to return an action.
max_timestep: int or None, the index of the maximum time-step at which we
return the trajectory, None for ending a trajectory only when env
returns done.
epsilon: float, the epsilon for `epsilon-greedy` policy.
Returns:
trajectory: list of (observation, action, reward) tuples, where each element
`i` is a tuple of numpy arrays with shapes as follows:
observation[i] = (B, T_i + 1)
action[i] = (B, T_i)
reward[i] = (B, T_i)
|
codesearchnet
|
def __init__(self, resolver_context):
super(APFSFileSystem, self).__init__(resolver_context)
self._fsapfs_volume = None
|
Initializes an APFS file system.
Args:
resolver_context (Context): resolver context.
|
juraj-google-style
|
def validate_signature(self, signature, data, encoding='utf8'):
if isinstance(data, string_types):
data = bytearray(data, encoding)
if isinstance(signature, string_types):
signature = bytearray(signature, encoding)
secret_key = bytearray(self.secret_key, 'utf8')
hashed = hmac.new(secret_key, data, sha1)
encoded = b64encode(hashed.digest())
return (encoded.strip() == signature.strip())
|
Validate the signature for the provided data.
Args:
signature (str or bytes or bytearray): Signature that was provided
for the request.
data (str or bytes or bytearray): Data string to validate against
the signature.
encoding (str, optional): If a string was provided for ``data`` or
``signature``, this is the character encoding.
Returns:
bool: Whether the signature is valid for the provided data.
|
codesearchnet
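A sketch of building a matching signature for this check (the client object and secret are hypothetical):
import hmac
from base64 import b64encode
from hashlib import sha1

secret = b'shhh'
payload = b'{"event": "ping"}'
expected = b64encode(hmac.new(secret, payload, sha1).digest())
# An instance whose secret_key == 'shhh' would then return True:
# client.validate_signature(expected, payload)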
|
def add_nodes(self, root_id, current_node, indent=1):
if not current_node.children:
return
config.LOGGER.info("({count} of {total} uploaded) {indent}Processing {title} ({kind})".format(
count=self.node_count_dict['upload_count'],
total=self.node_count_dict['total_count'],
indent=" " * indent,
title=current_node.title,
kind=current_node.__class__.__name__)
)
try:
chunks = [current_node.children[x:x+10] for x in range(0, len(current_node.children), 10)]
for chunk in chunks:
payload_children = []
for child in chunk:
failed = [f for f in child.files if f.is_primary and (not f.filename or self.failed_uploads.get(f.filename))]
if any(failed):
if not self.failed_node_builds.get(root_id):
error_message = ""
for fail in failed:
reason = fail.filename + ": " + self.failed_uploads.get(fail.filename) if fail.filename else "File failed to download"
error_message = error_message + reason + ", "
self.failed_node_builds[root_id] = {'node': current_node, 'error': error_message[:-2]}
else:
payload_children.append(child.to_dict())
payload = {
'root_id': root_id,
'content_data': payload_children
}
response = config.SESSION.post(config.add_nodes_url(), data=json.dumps(payload))
if response.status_code != 200:
self.failed_node_builds[root_id] = {'node': current_node, 'error': response.reason}
else:
response_json = json.loads(response._content.decode("utf-8"))
self.node_count_dict['upload_count'] += len(chunk)
if response_json['root_ids'].get(child.get_node_id().hex):
for child in chunk:
self.add_nodes(response_json['root_ids'].get(child.get_node_id().hex), child, indent + 1)
except ConnectionError as ce:
self.failed_node_builds[root_id] = {'node': current_node, 'error': ce}
|
add_nodes: adds processed nodes to tree
Args:
root_id (str): id of parent node on Kolibri Studio
current_node (Node): node to publish children
indent (int): level of indentation for printing
Returns: link to uploaded channel
|
juraj-google-style
|
def run(self, dag):
num_dag_qubits = sum([qreg.size for qreg in dag.qregs.values()])
if num_dag_qubits > self.coupling_map.size():
raise TranspilerError('Number of qubits greater than device.')
best_sub = self._best_subset(num_dag_qubits)
layout = Layout()
map_iter = 0
for qreg in dag.qregs.values():
for i in range(qreg.size):
layout[(qreg, i)] = int(best_sub[map_iter])
map_iter += 1
self.property_set['layout'] = layout
|
Pick a convenient layout depending on the best matching
qubit connectivity, and set the property `layout`.
Args:
dag (DAGCircuit): DAG to find layout for.
Raises:
TranspilerError: if dag wider than self.coupling_map
|
juraj-google-style
|
def flatten(structure):
return tree_impl.flatten(structure)
|
Flattens a possibly nested structure into a list.
In the case of dict instances, the sequence consists of the values,
sorted by key to ensure deterministic behavior. However, instances of
`collections.OrderedDict` are handled differently: their sequence order is
used instead of the sorted keys. The same convention is followed in
`pack_sequence_as`. This correctly unflattens dicts and `OrderedDict` after
they have been flattened, or vice-versa.
Dictionaries with non-sortable keys are not supported.
Examples:
>>> keras.tree.flatten([[1, 2, 3], [4, [5], [[6]]]])
[1, 2, 3, 4, 5, 6]
>>> keras.tree.flatten(None)
[None]
>>> keras.tree.flatten(1)
[1]
>>> keras.tree.flatten({100: 'world!', 6: 'Hello'})
['Hello', 'world!']
Args:
structure: An arbitrarily nested structure.
Returns:
A list, the flattened version of the input `structure`.
|
github-repos
|
def _read_messages_until_true(self, predicate, timeout):
while (not predicate()):
self._message_received.acquire()
if self._reader_lock.acquire(False):
try:
self._message_received.release()
if predicate():
return
self._handle_message(self.adb_connection.read_for_stream(self, timeout))
with self._message_received:
self._message_received.notify_all()
finally:
self._reader_lock.release()
else:
try:
self._message_received.wait(timeout.remaining)
if timeout.has_expired():
raise usb_exceptions.AdbTimeoutError('%s timed out reading messages.', self)
finally:
self._message_received.release()
|
Read a message from this stream and handle it.
This method tries to read a message from this stream, blocking until a
message is read. Once read, it will handle it accordingly by calling
self._handle_message().
This is repeated as long as predicate() returns False. There is some
locking used internally here so that we don't end up with multiple threads
blocked on a call to read_for_stream when another thread has read the
message that caused predicate() to become True.
Args:
predicate: Callable, keep reading messages until it returns true. Note
that predicate() should not block, as doing so may cause this method to
hang beyond its timeout.
timeout: Timeout to use for this call.
Raises:
AdbStreamClosedError: If this stream is already closed.
|
codesearchnet
|
def assert_is_compatible_with(self, other):
if not self.is_compatible_with(other):
raise ValueError('Dimensions %s and %s are not compatible' % (self, other))
|
Raises an exception if `other` is not compatible with this Dimension.
Args:
other: Another Dimension.
Raises:
ValueError: If `self` and `other` are not compatible (see
is_compatible_with).
|
github-repos
|
def _IsBase64(cls, s):
try:
if base64.b64encode(base64.b64decode(s)).decode('utf-8') == s:
return True
except (TypeError, binascii.Error):
pass
return False
|
An imperfect but decent method for determining if a string is base64.
Args:
s: A string with the data to test.
Returns:
True if s is base64, else False.
|
juraj-google-style
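A quick usage sketch (SomeParser stands in for whatever class defines this helper):
print(SomeParser._IsBase64('aGVsbG8='))    # True  - decodes and re-encodes to the same string
print(SomeParser._IsBase64('not base64'))  # False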
|
def loss_l2(self, l2=0):
if isinstance(l2, (int, float)):
D = (l2 * torch.eye(self.d))
else:
D = torch.diag(torch.from_numpy(l2))
return (torch.norm((D @ (self.mu - self.mu_init))) ** 2)
|
L2 loss centered around mu_init, scaled optionally per-source.
In other words, diagonal Tikhonov regularization,
||D(\mu-\mu_{init})||_2^2
where D is diagonal.
Args:
- l2: A float or np.array representing the per-source regularization
strengths to use
|
codesearchnet
|
def get_table(e: exp.Expression) -> str:
table = e.find(exp.Table).args['this'].args['this']
if table in table_dataset_map:
table = table_dataset_map[table]
return table
|
Get the table name from an expression.
Args:
e (Expression): The expression containing table information.
Returns:
str: The table name.
|
github-repos
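A usage sketch with sqlglot (it assumes this runs in the module where table_dataset_map is defined; here the table is not in the map, so the raw name comes back):
import sqlglot

expression = sqlglot.parse_one('SELECT a FROM my_table')
print(get_table(expression))  # 'my_table'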
|