code | docstring | source
|---|---|---|
def select_executor(elem, doc):
executor = EXECUTORS['default']
if 'cmd' in elem.attributes.keys():
executor = elem.attributes['cmd']
elif 'runas' in elem.attributes.keys():
executor = EXECUTORS[elem.attributes['runas']]
elif elem.classes[0] != 'exec':
executor = EXECUTORS[elem.classes[0]]
return executor
|
Determines the executor for the code in `elem.text`.
The elem attributes and classes select the executor in this order of priority
(highest to lowest):
- custom commands (cmd=...)
- runas (runas=...) takes a key for the executors
- first element class (.class) determines language and thus executor
Args:
elem The AST element.
doc The document.
Returns:
The command to execute code.
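As an illustration, a minimal sketch with a hypothetical stub element and executor map (neither is part of the original source):
# Hypothetical stub mirroring the AST element interface used above.
class Elem:
    def __init__(self, attributes, classes):
        self.attributes = attributes
        self.classes = classes

EXECUTORS = {'default': 'sh', 'py': 'python3'}  # assumed mapping

select_executor(Elem({'cmd': 'bash -e'}, ['exec']), doc=None)  # -> 'bash -e' (cmd wins)
select_executor(Elem({'runas': 'py'}, ['exec']), doc=None)     # -> 'python3' (runas keys EXECUTORS)
select_executor(Elem({}, ['py']), doc=None)                    # -> 'python3' (first class)
select_executor(Elem({}, ['exec']), doc=None)                  # -> 'sh' (default)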
|
juraj-google-style
|
async def init(self, *, advertise_addr: str=None, listen_addr: str='0.0.0.0:2377', force_new_cluster: bool=False, swarm_spec: Mapping=None) -> str:
data = {'AdvertiseAddr': advertise_addr, 'ListenAddr': listen_addr, 'ForceNewCluster': force_new_cluster, 'Spec': swarm_spec}
response = (await self.docker._query_json('swarm/init', method='POST', data=data))
return response
|
Initialize a new swarm.
Args:
advertise_addr: address advertised to other nodes.
listen_addr: listen address used for inter-manager communication.
force_new_cluster: force creation of a new swarm.
swarm_spec: user-modifiable swarm configuration.
Returns:
id of the swarm node
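A usage sketch, assuming an aiodocker-style client where this method lives on `docker.swarm`:
# Hypothetical client setup; only the init() call is from the source above.
swarm_id = await docker.swarm.init(
    advertise_addr='192.168.1.10:2377',  # address advertised to other nodes
    listen_addr='0.0.0.0:2377',          # inter-manager listen address (default)
)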
|
codesearchnet
|
def Remove(self, row):
if ((row == 0) or (row > self.size)):
            raise TableError('Attempt to remove nonexistent or header row')
new_table = []
for t_row in self._table:
if (t_row.row != row):
new_table.append(t_row)
if (t_row.row > row):
t_row.row -= 1
self._table = new_table
|
Removes a row from the table.
Args:
row: int, the row number to delete. Must be >= 1, as the header
cannot be removed.
Raises:
TableError: Attempt to remove nonexistent or header row.
|
codesearchnet
|
def delete_url(self, url, token=''):
if (token == ''):
token = self._user_token
return requests.delete(url,
headers={
'Authorization': 'Token {}'.format(token)},
verify=False,)
|
Returns a delete request object taking in a URL and user token.
Arguments:
url (str): The URL to send the DELETE request to
token (str): The authentication token
Returns:
obj: Delete request object
|
juraj-google-style
|
def plot_state_paulivec(rho, title='', figsize=None, color=None):
if (not HAS_MATPLOTLIB):
raise ImportError('Must have Matplotlib installed.')
rho = _validate_input_state(rho)
if (figsize is None):
figsize = (7, 5)
num = int(np.log2(len(rho)))
labels = list(map((lambda x: x.to_label()), pauli_group(num)))
values = list(map((lambda x: np.real(np.trace(np.dot(x.to_matrix(), rho)))), pauli_group(num)))
numelem = len(values)
if (color is None):
        color = '#648fff'  # hex literal truncated in extraction; IBM-blue default assumed
ind = np.arange(numelem)
width = 0.5
(fig, ax) = plt.subplots(figsize=figsize)
ax.grid(zorder=0, linewidth=1, linestyle='--')
ax.bar(ind, values, width, color=color, zorder=2)
ax.axhline(linewidth=1, color='k')
ax.set_ylabel('Expectation value', fontsize=14)
ax.set_xticks(ind)
ax.set_yticks([(- 1), (- 0.5), 0, 0.5, 1])
ax.set_xticklabels(labels, fontsize=14, rotation=70)
ax.set_xlabel('Pauli', fontsize=14)
ax.set_ylim([(- 1), 1])
    ax.set_facecolor('#eeeeee')  # colour literal truncated in extraction; light grey assumed
for tick in (ax.xaxis.get_major_ticks() + ax.yaxis.get_major_ticks()):
tick.label.set_fontsize(14)
ax.set_title(title, fontsize=16)
plt.close(fig)
return fig
|
Plot the paulivec representation of a quantum state.
Plot a bargraph of the mixed state rho over the pauli matrices
Args:
rho (ndarray): Numpy array for state vector or density matrix
title (str): a string that represents the plot title
figsize (tuple): Figure size in inches.
color (list or str): Color of the expectation value bars.
Returns:
matplotlib.Figure: The matplotlib.Figure of the visualization
Raises:
ImportError: Requires matplotlib.
|
codesearchnet
|
def _unique_parameters(self) -> 'list[cfg.Variable]':
return []
|
Get unique parameter subtypes as variables.
This will retrieve 'children' of this value that contribute to the
type of it. So it will retrieve type parameters, but not attributes. To
keep the number of possible combinations reasonable, when we encounter
multiple instances of the same type, we include only one.
Returns:
A list of variables.
|
github-repos
|
def metric(self, name, description, data_type, interval, keyed=False):
from .tcex_metrics_v2 import TcExMetricsV2
return TcExMetricsV2(self, name, description, data_type, interval, keyed)
|
Get instance of the Metrics module.
Args:
name (string): The name for the metric.
description (string): The description of the metric.
data_type (string): The type of metric: Sum, Count, Min, Max, First, Last, and Average.
interval (string): The metric interval: Hourly, Daily, Weekly, Monthly, and Yearly.
keyed (boolean): Indicates whether the data will have a keyed value.
Returns:
(object): An instance of the Metrics Class.
|
codesearchnet
|
class FlaxBeamSearchOutput(ModelOutput):
sequences: Optional[jnp.ndarray] = None
scores: Optional[jnp.ndarray] = None
|
Flax Base class for outputs of decoder-only generation models using beam search.
Args:
sequences (`jnp.ndarray` of shape `(batch_size, max_length)`):
The generated sequences.
scores (`jnp.ndarray` of shape `(batch_size,)`):
The scores (log probabilities) of the generated sequences.
|
github-repos
|
def _dict_to_tensor(self, x, k1, k2):
return array_ops_stack.stack([array_ops_stack.stack([x[i, j] for j in range(k2)]) for i in range(k1)])
|
Convert a dictionary to a tensor.
Args:
x: A k1 * k2 dictionary.
k1: First dimension of x.
k2: Second dimension of x.
Returns:
A k1 * k2 tensor.
|
github-repos
|
def urlretrieve(url, filename, reporthook=None, data=None):
def chunk_read(response, chunk_size=8192, reporthook=None):
content_type = response.info().get('Content-Length')
total_size = -1
if content_type is not None:
total_size = int(content_type.strip())
count = 0
while True:
chunk = response.read(chunk_size)
count += 1
if reporthook is not None:
reporthook(count, chunk_size, total_size)
if chunk:
yield chunk
else:
break
response = urlopen(url, data)
with open(filename, 'wb') as fd:
for chunk in chunk_read(response, reporthook=reporthook):
fd.write(chunk)
|
Replacement for `urlretrieve` for Python 2.
Under Python 2, `urlretrieve` relies on `FancyURLopener` from legacy
`urllib` module, known to have issues with proxy management.
Args:
url: url to retrieve.
filename: where to store the retrieved data locally.
reporthook: a hook function that will be called once on establishment of
the network connection and once after each block read thereafter. The
hook will be passed three arguments; a count of blocks transferred so
far, a block size in bytes, and the total size of the file.
data: `data` argument passed to `urlopen`.
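A usage sketch with a simple progress reporthook (URL and filename are illustrative):
def progress(count, block_size, total_size):
    # total_size is -1 when the server sends no Content-Length header
    if total_size > 0:
        pct = min(100, 100 * count * block_size // total_size)
        print('downloaded ~{}%'.format(pct))

urlretrieve('https://example.org/data.bin', 'data.bin', reporthook=progress)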
|
github-repos
|
def by_type(blocks, slist=None):
layout = []
data = []
int_vol = []
unknown = []
for i in blocks:
if (slist and (i not in slist)):
continue
if (blocks[i].is_vtbl and blocks[i].is_valid):
layout.append(i)
elif (blocks[i].is_internal_vol and blocks[i].is_valid):
int_vol.append(i)
elif blocks[i].is_valid:
data.append(i)
else:
unknown.append(i)
return (layout, data, int_vol, unknown)
|
Sort blocks into layout, internal volume, data or unknown
Arguments:
Obj:blocks -- List of block objects.
List:slist -- (optional) List of block indexes.
Returns:
List:layout -- List of block indexes of blocks containing the
volume table records.
List:data -- List of block indexes containing filesystem data.
List:int_vol -- List of block indexes containing volume ids
greater than UBI_INTERNAL_VOL_START that are not
layout volumes.
List:unknown -- List of block indexes of blocks that failed validation
of crc in ed_hdr or vid_hdr.
|
codesearchnet
|
def plot_seebeck_mu(self, temp=600, output='eig', xlim=None):
import matplotlib.pyplot as plt
plt.figure(figsize=(9, 7))
seebeck = self._bz.get_seebeck(output=output, doping_levels=False)[
temp]
plt.plot(self._bz.mu_steps, seebeck,
linewidth=3.0)
self._plot_bg_limits()
self._plot_doping(temp)
if output == 'eig':
plt.legend(['S$_1$', 'S$_2$', 'S$_3$'])
if xlim is None:
plt.xlim(-0.5, self._bz.gap + 0.5)
else:
plt.xlim(xlim[0], xlim[1])
plt.ylabel("Seebeck \n coefficient ($\\mu$V/K)", fontsize=30.0)
plt.xlabel("E-E$_f$ (eV)", fontsize=30)
plt.xticks(fontsize=25)
plt.yticks(fontsize=25)
plt.tight_layout()
return plt
|
Plot the Seebeck coefficient as a function of Fermi level
Args:
temp:
the temperature
output:
'eig' plots the three Seebeck eigenvalues; other values are passed
through to get_seebeck
xlim:
a list of min and max fermi energy by default (0, and band gap)
Returns:
a matplotlib object
|
juraj-google-style
|
def are_symmetrically_related(self, point_a, point_b, tol=0.001):
if np.allclose(self.operate(point_a), point_b, atol=tol):
return True
if np.allclose(self.operate(point_b), point_a, atol=tol):
return True
return False
|
Checks if two points are symmetrically related.
Args:
point_a (3x1 array): First point.
point_b (3x1 array): Second point.
tol (float): Absolute tolerance for checking distance.
Returns:
True if self.operate(point_a) == point_b or vice versa.
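A usage sketch, assuming this is pymatgen's SymmOp (where the method appears to live):
from pymatgen.core.operations import SymmOp

inversion = SymmOp.inversion()  # maps x -> -x about the origin
inversion.are_symmetrically_related([0.1, 0.2, 0.3], [-0.1, -0.2, -0.3])  # True
inversion.are_symmetrically_related([0.1, 0.2, 0.3], [0.5, 0.5, 0.5])     # False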
|
codesearchnet
|
def Write(self, string):
if sys.version_info[0] < 3:
super(StdoutOutputWriter, self).Write(string)
else:
sys.stdout.write(string)
|
Writes a string to the output.
Args:
string (str): output.
|
juraj-google-style
|
def unique(ar):
import dask.array as da
if isinstance(ar, da.core.Array):
return da.unique(ar)
return _unique(ar)
|
r"""Find the unique elements of an array.
It uses ``dask.array.unique`` if necessary.
Args:
ar (array_like): Input array.
Returns:
array_like: the sorted unique elements.
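A usage sketch covering both the NumPy and the dask paths:
import numpy as np
import dask.array as da

unique(np.array([3, 1, 3, 2]))                        # -> array([1, 2, 3])
lazy = unique(da.from_array(np.array([3, 1, 3]), chunks=2))
lazy.compute()                                        # -> array([1, 3])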
|
juraj-google-style
|
def _RemoveDefaultAttrs(producer_op_list, graph_def):
producer_op_dict = {op.name: op for op in producer_op_list.op}
for node in graph_def.node:
if node.op in producer_op_dict:
op_def = op_def_registry.get(node.op)
if op_def is None:
continue
producer_op_def = producer_op_dict[node.op]
for key in list(node.attr):
if _FindAttrInOpDef(key, op_def) is None:
attr_def = _FindAttrInOpDef(key, producer_op_def)
if attr_def and attr_def.HasField('default_value') and (node.attr[key] == attr_def.default_value):
del node.attr[key]
|
Removes unknown default attrs according to `producer_op_list`.
Removes any unknown attrs in `graph_def` (i.e. attrs that do not appear in
registered OpDefs) that have a default value in `producer_op_list`.
Args:
producer_op_list: OpList proto.
graph_def: GraphDef proto
|
github-repos
|
def SetHasherNames(self, hasher_names_string):
hasher_names = hashers_manager.HashersManager.GetHasherNamesFromString(hasher_names_string)
debug_hasher_names = ', '.join(hasher_names)
logger.debug('Got hasher names: {0:s}'.format(debug_hasher_names))
self._hashers = hashers_manager.HashersManager.GetHashers(hasher_names)
self._hasher_names_string = hasher_names_string
|
Sets the hashers that should be enabled.
Args:
hasher_names_string (str): comma separated names of hashers to enable.
|
codesearchnet
|
def _set_auditpol_data(option, value):
auditpol_values = {'None': 'No Auditing', '0': 'No Auditing', '1': 'Success', '2': 'Failure', '3': 'Success and Failure'}
defaults = _get_audit_defaults(option)
return __utils__['auditpol.set_setting'](name=defaults['Auditpol Name'], value=auditpol_values[value])
|
Helper function that updates the current applied settings to match what has
just been set in the audit.csv files. We're doing it this way instead of
running `gpupdate`.
Args:
option (str): The name of the option to set
value (str): The value to set. ['None', '0', '1', '2', '3']
Returns:
bool: ``True`` if successful, otherwise ``False``
|
codesearchnet
|
def prepare_xml_read(data, objectify=False):
mod = (_objectify if objectify else etree)
if hasattr(data, 'readlines'):
data = mod.parse(data).getroot()
elif isinstance(data, list):
data = mod.fromstring(''.join(data))
elif isinstance(data, basestring):
data = mod.parse(open(data)).getroot()
else:
raise TypeError(('Unable to handle data of type %r' % type(data)))
return data
|
Prepare various input types for XML parsing.
Args:
data (iter): Data to read
objectify (bool): Parse using lxml's objectify data binding
Returns:
etree.ElementTree: Tree suitable for parsing
Raises:
TypeError: Invalid value for data
|
codesearchnet
|
def _parse_single_video(self, example_proto):
context_features = {'game_duration_loops': tf.io.FixedLenFeature([1], tf.int64), 'game_duration_seconds': tf.io.FixedLenFeature([1], tf.float32), 'n_steps': tf.io.FixedLenFeature([1], tf.int64), 'screen_size': tf.io.FixedLenFeature([2], tf.int64)}
sequence_features = {'rgb_screen': tf.io.FixedLenSequenceFeature([], tf.string)}
(_, seq_feat) = tf.io.parse_single_sequence_example(example_proto, context_features=context_features, sequence_features=sequence_features)
video_frames = tf.map_fn(tf.image.decode_png, seq_feat['rgb_screen'], dtype=tf.uint8)
return video_frames
|
Parses single video from the input tfrecords.
Args:
example_proto: tfExample proto with a single video.
Returns:
Tensor of decoded RGB frames for the video.
|
codesearchnet
|
def on_snapshot(self, proto):
TargetChange = firestore_pb2.TargetChange
target_changetype_dispatch = {
TargetChange.NO_CHANGE: self._on_snapshot_target_change_no_change,
TargetChange.ADD: self._on_snapshot_target_change_add,
TargetChange.REMOVE: self._on_snapshot_target_change_remove,
TargetChange.RESET: self._on_snapshot_target_change_reset,
TargetChange.CURRENT: self._on_snapshot_target_change_current,
}
target_change = proto.target_change
if str(target_change):
target_change_type = target_change.target_change_type
_LOGGER.debug("on_snapshot: target change: " + str(target_change_type))
meth = target_changetype_dispatch.get(target_change_type)
if meth is None:
_LOGGER.info(
"on_snapshot: Unknown target change " + str(target_change_type)
)
self.close(
reason="Unknown target change type: %s " % str(target_change_type)
)
else:
try:
meth(proto)
except Exception as exc2:
_LOGGER.debug("meth(proto) exc: " + str(exc2))
raise
elif str(proto.document_change):
_LOGGER.debug("on_snapshot: document change")
target_ids = proto.document_change.target_ids or []
removed_target_ids = proto.document_change.removed_target_ids or []
changed = False
removed = False
if WATCH_TARGET_ID in target_ids:
changed = True
if WATCH_TARGET_ID in removed_target_ids:
removed = True
if changed:
_LOGGER.debug("on_snapshot: document change: CHANGED")
document_change = proto.document_change
document = document_change.document
data = _helpers.decode_dict(document.fields, self._firestore)
document_name = document.name
db_str = self._firestore._database_string
db_str_documents = db_str + "/documents/"
if document_name.startswith(db_str_documents):
document_name = document_name[len(db_str_documents) :]
document_ref = self._firestore.document(document_name)
snapshot = self.DocumentSnapshot(
reference=document_ref,
data=data,
exists=True,
read_time=None,
create_time=document.create_time,
update_time=document.update_time,
)
self.change_map[document.name] = snapshot
elif removed:
_LOGGER.debug("on_snapshot: document change: REMOVED")
document = proto.document_change.document
self.change_map[document.name] = ChangeType.REMOVED
elif str(proto.document_delete):
_LOGGER.debug("on_snapshot: document change: DELETE")
name = proto.document_delete.document
self.change_map[name] = ChangeType.REMOVED
elif str(proto.document_remove):
_LOGGER.debug("on_snapshot: document change: REMOVE")
name = proto.document_remove.document
self.change_map[name] = ChangeType.REMOVED
elif proto.filter:
_LOGGER.debug("on_snapshot: filter update")
if proto.filter.count != self._current_size():
self._reset_docs()
else:
_LOGGER.debug("UNKNOWN TYPE. UHOH")
self.close(reason=ValueError("Unknown listen response type: %s" % proto))
|
Called every time there is a response from listen. Collect changes
and 'push' the changes in a batch to the customer when we receive
'current' from the listen response.
Args:
proto (`google.cloud.firestore_v1beta1.types.ListenResponse`):
The listen response to process.
|
juraj-google-style
|
def get_tick(self, index):
name = self.tick_name(index)
if name is None:
return [pack_error(ControllerSubsystem.SENSOR_GRAPH, Error.INVALID_ARRAY_KEY), 0]
return [Error.NO_ERROR, self.ticks[name]]
|
Get a tick's interval.
Args:
index (int): The index of the tick that you want to fetch.
Returns:
int, int: Error code and the tick's interval in seconds.
A value of 0 means that the tick is disabled.
|
juraj-google-style
|
def data_in_db(db_data, user_data):
if isinstance(user_data, list):
if (db_data in user_data):
return True
return False
|
Validate db data in user data.
Args:
db_data (str): The data stored in Redis.
user_data (list): The user provided data.
Returns:
bool: True if the data passed validation.
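Worked usage:
data_in_db('a', ['a', 'b'])   # True
data_in_db('c', ['a', 'b'])   # False
# Non-list user_data is never validated (the isinstance guard is skipped).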
|
codesearchnet
|
def add_timestamps(with_ms: bool=False, substream_name: str | None=None) -> processor.Processor:
if substream_name is None:
substream_name = ''
return processor.processor_function(functools.partial(_add_timestamps, with_ms=with_ms, substream_name=substream_name))
|
Adds timestamps to image chunks.
By default the timestamps are added with the format `mm:ss` where
`mm` is the number of minutes, `ss` is the number of seconds.
Args:
with_ms: Whether to add milliseconds to the timestamp. When `True`, the
timestamp is added with the format `mm:ss.SSS` where `SSS` is the number
of milliseconds.
substream_name: The substream name to use for the timestamps.
Returns:
A processor that adds timestamps after each image chunk.
|
github-repos
|
def set(config, section, opt, value):
if section not in config.keys():
config[section] = {}
config[section][opt] = value
|
Sets specified option in the config.
Args:
config (configobj.ConfigObj): config to work on.
section (str): section name.
opt (str): option name.
value: value to set option to.
|
juraj-google-style
|
def override(state, solution):
old_ast = state.solution_ast
new_ast = ast.parse(solution)
if ((not isinstance(old_ast, ast.Module)) and (len(new_ast.body) == 1)):
expr = new_ast.body[0]
candidates = ([expr, expr.value] if isinstance(expr, ast.Expr) else [expr])
for node in candidates:
if isinstance(node, old_ast.__class__):
new_ast = node
break
kwargs = (state.messages[(- 1)] if state.messages else {})
child = state.to_child(solution_ast=new_ast, student_ast=state.student_ast, highlight=state.highlight, append_message={'msg': '', 'kwargs': kwargs})
return child
|
Override the solution code with something arbitrary.
There might be cases in which you want to temporarily override the solution code
so you can allow for alternative ways of solving an exercise.
When you use ``override()`` in an SCT chain, the remainder of that SCT chain will
run as if the solution code you specified is the only code that was in the solution.
Check the glossary for an example (pandas plotting)
Args:
solution: solution code as a string that overrides the original solution code.
state: State instance describing student and solution code. Can be omitted if used with Ex().
|
codesearchnet
|
def get_pipeline_options(project: str, job_name: str, mode: str, num_workers: int=cfg.NUM_WORKERS, streaming: bool=True) -> PipelineOptions:
    job_name = f"{job_name}-{datetime.now().strftime('%Y%m%d%H%M%S')}"
staging_bucket = f'gs:
dataflow_options = {'runner': 'DirectRunner' if mode == 'local' else 'DataflowRunner', 'job_name': job_name, 'project': project, 'region': cfg.REGION, 'staging_location': f'{staging_bucket}/dflow-staging', 'temp_location': f'{staging_bucket}/dflow-temp', 'setup_file': './setup.py', 'streaming': streaming}
if num_workers:
dataflow_options.update({'num_workers': num_workers})
return PipelineOptions(flags=[], **dataflow_options)
|
Function to retrieve the pipeline options.
Args:
project: GCP project to run on
job_name: name of the Dataflow job (a timestamp suffix is appended)
mode: Indicator to run local, cloud or template
num_workers: Number of workers for running the job in parallel
streaming: Whether to run the pipeline in streaming mode
Returns:
Dataflow pipeline options
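A hypothetical invocation (`cfg` is the project's own config module, assumed to provide REGION and NUM_WORKERS):
options = get_pipeline_options(
    project='my-gcp-project',  # placeholder project id
    job_name='ingest',         # a timestamp suffix is appended automatically
    mode='local',              # 'local' -> DirectRunner, anything else -> DataflowRunner
    num_workers=4,
)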
|
github-repos
|
def check_provider_healthcheck(settings, default_provider='Discovery'):
ProviderHealthCheck = collections.namedtuple('ProviderHealthCheck', ['providers', 'has_healthcheck'])
eureka_enabled = settings['app']['eureka_enabled']
providers = settings['asg']['provider_healthcheck']
LOG.debug('Template defined Health Check Providers: %s', providers)
health_check_providers = []
has_healthcheck = False
normalized_default_provider = default_provider.capitalize()
if eureka_enabled:
LOG.info('Eureka enabled, enabling default Provider Health Check: %s', normalized_default_provider)
for (provider, active) in providers.items():
if (provider.lower() == normalized_default_provider.lower()):
providers[provider] = True
LOG.debug('Override defined Provider Health Check: %s -> %s', active, providers[provider])
break
else:
LOG.debug('Adding default Provider Health Check: %s', normalized_default_provider)
providers[normalized_default_provider] = True
for (provider, active) in providers.items():
if active:
health_check_providers.append(provider.capitalize())
LOG.info('Provider healthchecks: %s', health_check_providers)
if health_check_providers:
has_healthcheck = True
return ProviderHealthCheck(providers=health_check_providers, has_healthcheck=has_healthcheck)
|
Set Provider Health Check when specified.
Args:
settings (dict): loaded settings; `app.eureka_enabled` and
`asg.provider_healthcheck` are consulted.
default_provider (str): provider enabled by default when Eureka is on.
Returns:
collections.namedtuple: **ProviderHealthCheck** with attributes:
* providers (list): Providers set to use native Health Check.
* has_healthcheck (bool): If any native Health Checks requested.
|
codesearchnet
|
def __init__(self, raise_warnings=False):
self.raise_warnings = raise_warnings
self.accumulator = SimpleProblemAccumulator()
|
Initialise.
Args:
raise_warnings: If this is True then warnings are also raised as
exceptions.
If it is false, warnings are printed to the console using
SimpleProblemAccumulator.
|
juraj-google-style
|
def get_group(self, group_id):
group = self.group_id_map.get(group_id)
if group:
return group
self.logger.error('Group ID "%s" is not in datafile.' % group_id)
self.error_handler.handle_error(exceptions.InvalidGroupException(enums.Errors.INVALID_GROUP_ID_ERROR))
return None
|
Get group for the provided group ID.
Args:
group_id: Group ID for which group is to be determined.
Returns:
Group corresponding to the provided group ID.
|
juraj-google-style
|
def ParseFileEntry(self, parser_mediator, file_entry):
stat_object = file_entry.GetStat()
if not stat_object:
return
file_system_type = self._GetFileSystemTypeFromFileEntry(file_entry)
event_data = FileStatEventData()
event_data.file_entry_type = stat_object.type
event_data.file_size = getattr(stat_object, 'size', None)
event_data.file_system_type = file_system_type
event_data.is_allocated = file_entry.IsAllocated()
if file_entry.access_time:
event = time_events.DateTimeValuesEvent(
file_entry.access_time, definitions.TIME_DESCRIPTION_LAST_ACCESS)
parser_mediator.ProduceEventWithEventData(event, event_data)
if file_entry.creation_time:
event = time_events.DateTimeValuesEvent(
file_entry.creation_time, definitions.TIME_DESCRIPTION_CREATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
if file_entry.change_time:
event = time_events.DateTimeValuesEvent(
file_entry.change_time, definitions.TIME_DESCRIPTION_CHANGE)
parser_mediator.ProduceEventWithEventData(event, event_data)
if file_entry.modification_time:
event = time_events.DateTimeValuesEvent(
file_entry.modification_time,
definitions.TIME_DESCRIPTION_MODIFICATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
for time_attribute, usage in self._TIMESTAMP_DESCRIPTIONS.items():
posix_time = getattr(stat_object, time_attribute, None)
if posix_time is None:
continue
nano_time_attribute = '{0:s}_nano'.format(time_attribute)
nano_time_attribute = getattr(stat_object, nano_time_attribute, None)
timestamp = posix_time * 1000000
if nano_time_attribute is not None:
micro_time_attribute, _ = divmod(nano_time_attribute, 10)
timestamp += micro_time_attribute
if (file_entry.type_indicator == dfvfs_definitions.TYPE_INDICATOR_TSK and
not timestamp):
continue
date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(
timestamp=timestamp)
event = time_events.DateTimeValuesEvent(date_time, usage)
parser_mediator.ProduceEventWithEventData(event, event_data)
|
Parses a file entry.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_entry (dfvfs.FileEntry): a file entry.
|
juraj-google-style
|
def __verify_server_version(self):
if (compare_versions('.'.join([_lib_major_version, _lib_minor_version]), self.product_version) > 0):
logger.warning('Client version {} connecting to server with newer minor release {}.'.format(_lib_full_version, self.product_version))
if (compare_versions(_lib_major_version, self.product_version) != 0):
raise InvalidSwimlaneProductVersion(self, '{}.0'.format(_lib_major_version), '{}.0'.format(str((int(_lib_major_version) + 1))))
|
Verify the client is connected to a supported server product version
Notes:
Logs warning if connecting to a newer minor server version
Raises:
swimlane.exceptions.InvalidServerVersion: If server major version is higher than package major version
|
codesearchnet
|
def cosmic_link(variant_obj):
cosmic_ids = variant_obj.get('cosmic_ids')
if (not cosmic_ids):
return None
else:
cosmic_id = cosmic_ids[0]
        url_template = 'https://cancer.sanger.ac.uk/cosmic/search?q={}'  # URL truncated in extraction; domain assumed from upstream scout
return url_template.format(cosmic_id)
|
Compose link to COSMIC Database.
Args:
variant_obj(scout.models.Variant)
Returns:
url_template(str): Link to the COSMIC database if a cosmic id is present
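Worked usage (the variant is treated as a dict-like, since the function uses `.get`):
cosmic_link({'cosmic_ids': ['COSM12345']})  # URL formatted with the first id
cosmic_link({'cosmic_ids': None})           # None: no id, nothing to link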
|
codesearchnet
|
def call_fn(fn: TransitionOperator, args: Union[(Tuple[Any], Any)]) -> Any:
if (isinstance(args, (list, tuple)) and (not mcmc_util.is_namedtuple_like(args))):
return fn(*args)
else:
return fn(args)
|
Calls a transition operator with args, unpacking args if it's a sequence.
Args:
fn: A `TransitionOperator`.
args: Arguments to `fn`
Returns:
ret: Return value of `fn`.
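Worked usage:
call_fn(lambda a, b: a + b, (1, 2))  # tuple is unpacked: fn(1, 2) -> 3
call_fn(abs, -3)                     # passed as-is: fn(-3) -> 3
# Namedtuple-like args are deliberately NOT unpacked
# (mcmc_util.is_namedtuple_like guards against it).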
|
codesearchnet
|
def get_counter(self, name, combine_fn):
with self._lock:
counter = self.counters.get(name, None)
if counter:
assert counter.combine_fn == combine_fn
else:
if isinstance(combine_fn, cy_combiners.AccumulatorCombineFn):
counter = AccumulatorCombineFnCounter(name, combine_fn)
else:
counter = Counter(name, combine_fn)
self.counters[name] = counter
return counter
|
Returns a counter with the requested name.
Passing in the same name will return the same counter; the
combine_fn must agree.
Args:
name: the name of this counter. Typically has three parts:
"step-output-counter".
combine_fn: the CombineFn to use for aggregation
Returns:
A new or existing counter with the requested name.
|
github-repos
|
def _CreateEventTag(self, event, comment, labels):
event_identifier = event.GetIdentifier()
event_tag = events.EventTag(comment=comment)
event_tag.SetEventIdentifier(event_identifier)
event_tag.AddLabels(labels)
event_identifier_string = event_identifier.CopyToString()
logger.debug('Created event tag: {0:s} for event: {1:s}'.format(
comment, event_identifier_string))
return event_tag
|
Creates an event tag.
Args:
event (EventObject): event to tag.
comment (str): event tag comment.
labels (list[str]): event tag labels.
Returns:
EventTag: the event tag.
|
juraj-google-style
|
def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep) * [0]
return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
|
Create a mask from the two sequences passed to be used in a sequence-pair classification task. PhoBERT does not
make use of token type ids, therefore a list of zeros is returned.
Args:
token_ids_0 (`List[int]`):
List of IDs.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of zeros.
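A worked example of the resulting lengths, assuming `tokenizer` is a PhoBERT tokenizer instance (IDs are placeholders):
ids_0, ids_1 = [5, 6, 7], [8, 9]
tokenizer.create_token_type_ids_from_sequences(ids_0)
# -> [0, 0, 0, 0, 0]               # <s> ids_0 </s>: 1 + 3 + 1 tokens
tokenizer.create_token_type_ids_from_sequences(ids_0, ids_1)
# -> [0, 0, 0, 0, 0, 0, 0, 0, 0]   # <s> ids_0 </s></s> ids_1 </s>: 1+3+1+1+2+1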
|
github-repos
|
def nr_cases(self, snv_cases=None, sv_cases=None):
query = {}
if snv_cases:
query = {'vcf_path': {'$exists':True}}
if sv_cases:
query = {'vcf_sv_path': {'$exists':True}}
if snv_cases and sv_cases:
            query = {}  # count_documents requires a dict filter; empty matches all cases
return self.db.case.count_documents(query)
|
Return the number of cases in the database
Args:
snv_cases(bool): If only snv cases should be counted
sv_cases(bool): If only sv cases should be counted
Returns:
int: the number of matching cases
|
juraj-google-style
|
async def _open_connection_http(self, location):
sock = await connect_tcp(location[0], location[1], bind_host=self.source_address)
sock._active = True
return sock
|
Creates a normal async socket, returns it.
Args:
location (tuple(str, int)): A tuple of net location (eg
'127.0.0.1' or 'example.org') and port (eg 80 or 25000).
|
juraj-google-style
|
def importGurobiSolution(self, grbmodel):
self.eval(''.join(('let {} := {};'.format(var.VarName, var.X) for var in grbmodel.getVars() if ('$' not in var.VarName))))
|
Import the solution from a gurobipy.Model object.
Args:
grbmodel: A :class:`gurobipy.Model` object with the model solved.
|
codesearchnet
|
def get_use_xla_spmd(device_type):
return device_type == 'TPU' and '0' != os.environ.get('DTENSOR_TEST_USE_XLA_SPMD', '0')
|
Returns True when device_type is TPU and environment variable is set.
Args:
device_type: A str representing the type of device on the mesh.
Returns:
bool: True when device_type is TPU and environment variable is set.
|
github-repos
|
def __add__(self, other):
if isinstance(other, FieldPath):
parts = self.parts + other.parts
return FieldPath(*parts)
elif isinstance(other, six.string_types):
parts = self.parts + FieldPath.from_string(other).parts
return FieldPath(*parts)
else:
return NotImplemented
|
Adds `other` field path to end of this field path.
Args:
other (~google.cloud.firestore_v1beta1._helpers.FieldPath, str):
The field path to add to the end of this `FieldPath`.
Returns:
FieldPath: the combined field path, or NotImplemented if `other` is
neither a `FieldPath` nor a string.
|
juraj-google-style
|
def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
sep = [self.sep_token_id]
if token_ids_1 is None:
return len(sep + token_ids_0) * [0]
return len(sep + token_ids_0 + sep + sep + token_ids_1) * [0]
|
Create a mask from the two sequences passed to be used in a sequence-pair classification task. XLM-RoBERTa does
not make use of token type ids, therefore a list of zeros is returned.
Args:
token_ids_0 (`List[int]`):
List of IDs.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of zeros.
|
github-repos
|
def CheckAddressState(self, script_hash):
for (key, contract) in self._contracts.items():
if (contract.ScriptHash.ToBytes() == script_hash.ToBytes()):
return AddressState.InWallet
for watch in self._watch_only:
if (watch == script_hash):
return (AddressState.InWallet | AddressState.WatchOnly)
return AddressState.NoState
|
Determine the address state of the provided script hash.
Args:
script_hash (UInt160): a script hash to determine the address state of.
Returns:
AddressState: the address state.
|
codesearchnet
|
def mds(means, weights, d):
X = dim_reduce(means, weights, d)
if (X.shape[0] == 2):
return X.dot(weights)
else:
return X.T.dot(weights)
|
Dimensionality reduction using MDS.
Args:
means (array): genes x clusters
weights (array): clusters x cells
d (int): desired dimensionality
Returns:
W_reduced (array): array of shape (d, cells)
|
codesearchnet
|
def memory_write32(self, addr, data, zone=None):
return self.memory_write(addr, data, zone, 32)
|
Writes words to memory of a target system.
Args:
self (JLink): the ``JLink`` instance
addr (int): start address to write to
data (list): list of words to write
zone (str): optional memory zone to access
Returns:
Number of words written to target.
Raises:
JLinkException: on memory access error.
|
codesearchnet
|
def set_status(self, status, msg):
if (len(msg) > 2000):
msg = msg[:2000]
msg += '\n... snip ...\n'
if ((self.status == self.S_LOCKED) or (status == self.S_LOCKED)):
err_msg = ('Locked files must be explicitly unlocked before calling set_status but\ntask.status = %s, input status = %s' % (self.status, status))
raise RuntimeError(err_msg)
status = Status.as_status(status)
changed = True
if hasattr(self, '_status'):
changed = (status != self._status)
self._status = status
if (status == self.S_RUN):
if (self.datetimes.start is None):
self.datetimes.start = datetime.datetime.now()
if changed:
if (status == self.S_SUB):
self.datetimes.submission = datetime.datetime.now()
self.history.info(('Submitted with MPI=%s, Omp=%s, Memproc=%.1f [Gb] %s ' % (self.mpi_procs, self.omp_threads, self.mem_per_proc.to('Gb'), msg)))
elif (status == self.S_OK):
self.history.info('Task completed %s', msg)
elif (status == self.S_ABICRITICAL):
self.history.info('Status set to S_ABI_CRITICAL due to: %s', msg)
else:
self.history.info('Status changed to %s. msg: %s', status, msg)
if (status == self.S_DONE):
self._on_done()
if (status == self.S_OK):
if (not self.finalized):
self._on_ok()
if ((self.gc is not None) and (self.gc.policy == 'task')):
self.clean_output_files()
if (self.status == self.S_OK):
self.send_signal(self.S_OK)
return status
|
Set and return the status of the task.
Args:
status: Status object or string representation of the status
msg: string with human-readable message used in the case of errors.
|
codesearchnet
|
def deploy_ray_func(func, partition, kwargs):
try:
return func(partition, **kwargs)
except ValueError:
return func(partition.copy(), **kwargs)
|
Deploy a function to a partition in Ray.
Note: Ray functions are not detected by codecov (thus pragma: no cover)
Args:
func: The function to apply.
partition: The partition to apply the function to.
kwargs: A dictionary of keyword arguments for the function.
Returns:
The result of the function.
|
juraj-google-style
|
def __send_ses_email(self, recipients, subject, body_html, body_text):
source_arn = dbconfig.get('source_arn', NS_EMAIL)
return_arn = dbconfig.get('return_path_arn', NS_EMAIL)
session = get_local_aws_session()
ses = session.client('ses', region_name=dbconfig.get('ses_region', NS_EMAIL, 'us-west-2'))
body = {}
if body_html:
body['Html'] = {'Data': body_html}
if body_text:
body['Text'] = {'Data': body_text}
ses_options = {'Source': self.sender, 'Destination': {'ToAddresses': recipients}, 'Message': {'Subject': {'Data': subject}, 'Body': body}}
if (source_arn and return_arn):
ses_options.update({'SourceArn': source_arn, 'ReturnPathArn': return_arn})
ses.send_email(**ses_options)
|
Send an email using SES
Args:
recipients (`list` of `str`): List of recipient email addresses
subject (str): Subject of the email
body_html (str): HTML body of the email
body_text (str): Text body of the email
Returns:
`None`
|
codesearchnet
|
def _create_events_writer(self, directory):
total_size = 0
events_files = self._fetch_events_files_on_disk()
for file_name in events_files:
file_path = os.path.join(self._events_directory, file_name)
total_size += tf.io.gfile.stat(file_path).length
if (total_size >= self.total_file_size_cap_bytes):
for file_name in events_files:
if (total_size < self.total_file_size_cap_bytes):
break
file_path = os.path.join(self._events_directory, file_name)
file_size = tf.io.gfile.stat(file_path).length
try:
tf.io.gfile.remove(file_path)
total_size -= file_size
logger.info('Deleted %s because events files take up over %d bytes', file_path, self.total_file_size_cap_bytes)
except IOError as err:
logger.error('Deleting %s failed: %s', file_path, err)
self._events_file_count += 1
file_path = ('%s.%d.%d' % (os.path.join(directory, DEBUGGER_EVENTS_FILE_STARTING_TEXT), time.time(), self._events_file_count))
logger.info('Creating events file %s', file_path)
return pywrap_tensorflow.EventsWriter(tf.compat.as_bytes(file_path))
|
Creates a new events writer.
Args:
directory: The directory in which to write files containing events.
Returns:
A new events writer, which corresponds to a new events file.
|
codesearchnet
|
def register_list(self):
num_items = self.MAX_NUM_CPU_REGISTERS
buf = (ctypes.c_uint32 * num_items)()
num_regs = self._dll.JLINKARM_GetRegisterList(buf, num_items)
return buf[:num_regs]
|
Returns a list of the indices for the CPU registers.
The returned indices can be used to read the register content or grab
the register name.
Args:
self (JLink): the ``JLink`` instance
Returns:
List of registers.
|
codesearchnet
|
def GetUpdates(self, s3_client, bucket, obj, since):
try:
if since is not None:
response = s3_client.get_object(Bucket=bucket, IfModifiedSince=timestamps.FromTimestampToDateTime(since), Key=obj)
else:
response = s3_client.get_object(Bucket=bucket, Key=obj)
body = response['Body']
last_modified_ts = timestamps.FromDateTimeToTimestamp(response['LastModified'])
except ClientError as e:
error_code = int(e.response['Error']['Code'])
if error_code == 304:
return []
self.log.error('error getting S3 object ({}): {}'.format(obj, e))
raise error.SourceUnavailable('unable to download object from S3')
data_map = self.GetMap(cache_info=body)
data_map.SetModifyTimestamp(last_modified_ts)
return data_map
|
Get updates from a source.
Args:
s3_client: initialized s3 client
bucket: s3 bucket
obj: object with the data
since: a timestamp representing the last change (None to force-get)
Returns:
The map of updates with its modify timestamp set, or an empty list
if the object has not been modified since `since`
Raises:
ValueError: an object in the source map is malformed
ConfigurationError:
|
github-repos
|
def to_hour(num) -> str:
to_str = str(int(num))
return pd.Timestamp(f'{to_str[:-2]}:{to_str[-2:]}').strftime('%H:%M')
|
Convert YAML input to hours
Args:
num: number in YAML file, e.g., 900, 1700, etc.
Returns:
str
Examples:
>>> to_hour(900)
'09:00'
>>> to_hour(1700)
'17:00'
|
juraj-google-style
|
def make_initializable_iterator(self):
return self._make_initializable_iterator()
|
Get an initializable iterator for DistributedDatasetV1.
Note: This API is deprecated. Please use
`tf.compat.v1.data.make_initializable_iterator(dataset)` to create an
initializable iterator.
Returns:
A DistributedIteratorV1 instance.
|
github-repos
|
def _fluent_range_type(cls, fluents, ordering) -> Sequence[str]:
range_types = []
for name in ordering:
fluent = fluents[name]
range_type = fluent.range
range_types.append(range_type)
return tuple(range_types)
|
Returns the range types of `fluents` following the given `ordering`.
Args:
fluents: mapping from fluent name to fluent object.
ordering: sequence of fluent names defining the output order.
Returns:
Sequence[str]: A tuple of range types representing
the range of each fluent.
|
codesearchnet
|
def _scalar_to_vector(self, m):
if not isinstance(m.y0, numbers.Number):
return m
else:
m = copy.deepcopy(m)
t0 = 0.0
if isinstance(m.y0, numbers.Integral):
numtype = np.float64
else:
numtype = type(m.y0)
y0_orig = m.y0
m.y0 = np.array([m.y0], dtype=numtype)
def make_vector_fn(fn):
def newfn(y, t):
return np.array([fn(y[0], t)], dtype=numtype)
newfn.__name__ = fn.__name__
return newfn
def make_matrix_fn(fn):
def newfn(y, t):
return np.array([[fn(y[0], t)]], dtype=numtype)
newfn.__name__ = fn.__name__
return newfn
def make_coupling_fn(fn):
def newfn(source_y, target_y, weight):
return np.array([fn(source_y[0], target_y[0], weight)])
newfn.__name__ = fn.__name__
return newfn
if isinstance(m.f(y0_orig, t0), numbers.Number):
m.f = make_vector_fn(m.f)
if hasattr(m, 'G') and isinstance(m.G(y0_orig,t0), numbers.Number):
m.G = make_matrix_fn(m.G)
if (hasattr(m, 'coupling') and
isinstance(m.coupling(y0_orig, y0_orig, 0.5),
numbers.Number)):
m.coupling = make_coupling_fn(m.coupling)
return m
|
Allow submodels with scalar equations. Convert to 1D vector systems.
Args:
m (Model)
|
juraj-google-style
|
def main():
parser = argparse.ArgumentParser(description='Cherry picking automation.')
parser.add_argument('--filename', help='path to whl file we are copying', required=True)
parser.add_argument('--new_py_ver', help='two digit py version eg. 27 or 33', required=True)
args = parser.parse_args()
args.filename = os.path.abspath(args.filename)
check_existence(args.filename)
regex_groups = re.search(TF_NIGHTLY_REGEX, args.filename)
directory = regex_groups.group(1)
package = regex_groups.group(2)
version = regex_groups.group(3)
origin_tag = regex_groups.group(4)
old_py_ver = re.search('(cp\\d\\d)', origin_tag).group(1)
new_tag = origin_tag.replace(old_py_ver, 'cp' + args.new_py_ver)
copy_binary(directory, origin_tag, new_tag, version, package)
|
This script copies binaries.
Requirements:
filename: The path to the whl file
AND
new_py_ver: The two-digit Python version to retag the wheel with (e.g. 27 or 33)
Raises:
RuntimeError: If the whl file was not found
|
github-repos
|
def delete(self, option=None):
write_pb = _helpers.pb_for_delete(self._document_path, option)
commit_response = self._client._firestore_api.commit(self._client._database_string, [write_pb], transaction=None, metadata=self._client._rpc_metadata)
return commit_response.commit_time
|
Delete the current document in the Firestore database.
Args:
option (Optional[~.firestore_v1beta1.client.WriteOption]): A
write option to make assertions / preconditions on the server
state of the document before applying changes.
Returns:
google.protobuf.timestamp_pb2.Timestamp: The time that the delete
request was received by the server. If the document did not exist
when the delete was sent (i.e. nothing was deleted), this method
will still succeed and will still return the time that the
request was received by the server.
|
codesearchnet
|
def make_message(self, text, channel):
try:
channel_id = self.slack.channel_from_name(channel)['id']
except ValueError:
channel_id = channel
return pack({'text': text, 'type': 'message', 'channel': channel_id, 'id': self.message_id})
|
High-level function for creating messages. Return packed bytes.
Args:
text: {str}
channel: {str} Either name or ID
|
codesearchnet
|
def extract(self, destdir, decompress='auto'):
for e in self.mardata.index.entries:
name = e.name
entry_path = safejoin(destdir, name)
entry_dir = os.path.dirname(entry_path)
mkdir(entry_dir)
with open(entry_path, 'wb') as f:
write_to_file(self.extract_entry(e, decompress), f)
os.chmod(entry_path, e.flags)
|
Extract the entire MAR file into a directory.
Args:
destdir (str): A local directory on disk into which the contents of
this MAR file will be extracted. Required parent directories
will be created as necessary.
decompress (obj, optional): Controls whether files are decompressed
when extracted. Must be one of 'auto' or None. Defaults to
'auto'.
|
codesearchnet
|
def linear_extrapolation_plot(log_prob_adv_array, y, file_name, min_epsilon=(- 10), max_epsilon=10, num_points=21):
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
figure = plt.figure()
figure.canvas.set_window_title('Cleverhans: Linear Extrapolation Plot')
correct_idx = np.argmax(y, axis=0)
fig = plt.figure()
plt.xlabel('Epsilon')
plt.ylabel('Logits')
x_axis = np.linspace(min_epsilon, max_epsilon, num_points)
plt.xlim((min_epsilon - 1), (max_epsilon + 1))
for i in range(y.shape[0]):
if (i == correct_idx):
ls = '-'
linewidth = 5
else:
ls = '--'
linewidth = 2
        plt.plot(x_axis, log_prob_adv_array[:, i], ls=ls, linewidth=linewidth, label='{}'.format(i))
plt.legend(loc='best', fontsize=14)
plt.show()
fig.savefig(file_name)
plt.clf()
return figure
|
Generate linear extrapolation plot.
Args:
log_prob_adv_array: Numpy array containing log probabilities
y: Tf placeholder for the labels
file_name: Plot filename
min_epsilon: Minimum value of epsilon over the interval
max_epsilon: Maximum value of epsilon over the interval
num_points: Number of points used to interpolate
|
codesearchnet
|
def build_bird_configuration(config):
bird_configuration = {}
if config.getboolean('daemon', 'ipv4'):
if os.path.islink(config.get('daemon', 'bird_conf')):
config_file = os.path.realpath(config.get('daemon', 'bird_conf'))
print("'bird_conf' is set to a symbolic link ({s} -> {d}, but we will use the canonical path of that link".format(s=config.get('daemon', 'bird_conf'), d=config_file))
else:
config_file = config.get('daemon', 'bird_conf')
dummy_ip_prefix = config.get('daemon', 'dummy_ip_prefix')
if (not valid_ip_prefix(dummy_ip_prefix)):
raise ValueError('invalid dummy IPv4 prefix: {i}'.format(i=dummy_ip_prefix))
bird_configuration[4] = {'config_file': config_file, 'variable_name': config.get('daemon', 'bird_variable'), 'dummy_ip_prefix': dummy_ip_prefix, 'reconfigure_cmd': config.get('daemon', 'bird_reconfigure_cmd'), 'keep_changes': config.getboolean('daemon', 'bird_keep_changes'), 'changes_counter': config.getint('daemon', 'bird_changes_counter')}
if config.getboolean('daemon', 'ipv6'):
if os.path.islink(config.get('daemon', 'bird6_conf')):
config_file = os.path.realpath(config.get('daemon', 'bird6_conf'))
print("'bird6_conf' is set to a symbolic link ({s} -> {d}, but we will use the canonical path of that link".format(s=config.get('daemon', 'bird6_conf'), d=config_file))
else:
config_file = config.get('daemon', 'bird6_conf')
dummy_ip_prefix = config.get('daemon', 'dummy_ip6_prefix')
if (not valid_ip_prefix(dummy_ip_prefix)):
raise ValueError('invalid dummy IPv6 prefix: {i}'.format(i=dummy_ip_prefix))
bird_configuration[6] = {'config_file': config_file, 'variable_name': config.get('daemon', 'bird6_variable'), 'dummy_ip_prefix': dummy_ip_prefix, 'reconfigure_cmd': config.get('daemon', 'bird6_reconfigure_cmd'), 'keep_changes': config.getboolean('daemon', 'bird6_keep_changes'), 'changes_counter': config.getint('daemon', 'bird6_changes_counter')}
return bird_configuration
|
Build bird configuration structure.
First it performs a sanity check against bird settings and then builds a
dictionary structure with bird configuration per IP version.
Arguments:
config (obj): A configparser object which holds our configuration.
Returns:
A dictionary
Raises:
ValueError if sanity check fails.
|
codesearchnet
|
def add_object(self, file_path, file_object, error_fct=None):
error_fct = (error_fct or self.raise_os_error)
if (not file_path):
target_directory = self.root
else:
target_directory = self.resolve(file_path)
if (not S_ISDIR(target_directory.st_mode)):
error = (errno.ENOENT if self.is_windows_fs else errno.ENOTDIR)
error_fct(error, file_path)
target_directory.add_entry(file_object)
|
Add a fake file or directory into the filesystem at file_path.
Args:
file_path: The path to the file to be added relative to self.
file_object: File or directory to add.
error_fct: the function used to report an error if file_path does
not correspond to a directory (defaults to raise_os_error; used
internally).
Raises:
IOError or OSError: if file_path does not correspond to a
directory.
|
codesearchnet
|
def inverse_guass(self, mu: float, sigma: float) -> float:
return float(
lib.TCOD_random_get_gaussian_double_inv(self.random_c, mu, sigma)
)
|
Return a random Gaussian number using the Box-Muller transform.
Args:
mu (float): The median returned value.
sigma (float): The standard deviation.
Returns:
float: A random float.
|
juraj-google-style
|
def get_max_instability(self, min_voltage=None, max_voltage=None):
data = []
for pair in self._select_in_voltage_range(min_voltage, max_voltage):
if (pair.decomp_e_charge is not None):
data.append(pair.decomp_e_charge)
if (pair.decomp_e_discharge is not None):
data.append(pair.decomp_e_discharge)
return (max(data) if (len(data) > 0) else None)
|
The maximum instability along a path for a specific voltage range.
Args:
min_voltage: The minimum allowable voltage.
max_voltage: The maximum allowable voltage.
Returns:
Maximum decomposition energy of all compounds along the insertion
path (a subset of the path can be chosen by the optional arguments)
|
codesearchnet
|
def add_adsorbate_atom(self, indices, specie, distance):
center = np.sum([self[i].coords for i in indices], axis=0) / len(
indices)
coords = center + self.normal * distance / np.linalg.norm(self.normal)
self.append(specie, coords, coords_are_cartesian=True)
|
Gets the structure of single atom adsorption.
Assumes a slab structure from the Slab class (surface normal along [0, 0, 1]).
Args:
indices ([int]): Indices of sites on which to put the absorbate.
Absorbed atom will be displaced relative to the center of
these sites.
specie (Specie/Element/str): adsorbed atom species
distance (float): between centers of the adsorbed atom and the
given site in Angstroms.
|
juraj-google-style
|
def set_property_filter(filter_proto, name, op, value):
filter_proto.Clear()
pf = filter_proto.property_filter
pf.property.name = name
pf.op = op
set_value(pf.value, value)
return filter_proto
|
Set property filter constraint in the given datastore.Filter proto message.
Args:
filter_proto: datastore.Filter proto message
name: property name
op: datastore.PropertyFilter.Operation
value: property value
Returns:
the same datastore.Filter.
Usage:
>>> set_property_filter(filter_proto, 'foo',
... datastore.PropertyFilter.EQUAL, 'a') # WHERE 'foo' = 'a'
|
juraj-google-style
|
def from_hyperplane(basis, origin, point, internal = True):
basis = np.array(basis)
assert basis.shape[0] + 1 == basis.shape[1]
big_basis = np.zeros((basis.shape[1], basis.shape[1]))
big_basis[:basis.shape[0],:basis.shape[1]] = basis
u, s, vh = np.linalg.svd(big_basis)
null_mask = (s <= 1e-8)
normal = np.compress(null_mask, vh, axis=0)[0]
if np.inner(np.array(point)-np.array(origin), normal) > 0:
if internal:
normal *= -1
else:
if not internal:
normal *= -1
offset = -np.dot(origin, normal)
return Halfspace(normal, offset)
|
Returns a Halfspace defined by a list of vectors parallel to the
bounding hyperplane.
Args:
basis: basis for the hyperplane (array with vector rows)
origin: point on the hyperplane
point: point not on the hyperplane
internal: whether point is inside the halfspace
|
juraj-google-style
|
def get_decor(self, c, match_only=None):
if isinstance(c, Component):
if c:
if match_only:
c = Component({k: getattr(c, k, None) for k in match_only})
for decor in self.__list:
try:
if c == decor.component:
return decor
except AttributeError:
continue
else:
for decor in self.__list:
try:
if getattr(c, 'mnemonic').lower() == decor.curve.mnemonic:
return decor
except AttributeError:
continue
return Decor({'colour': '
|
Get the decor for a component.
Args:
c (component): The component to look up.
match_only (list of str): The component attributes to include in the
comparison. Default: All of them.
Returns:
Decor. The matching Decor from the Legend, or None if not found.
|
juraj-google-style
|
def __init__(self, system_time_tuple=None):
super(Systemtime, self).__init__()
self._number_of_seconds = None
self._precision = definitions.PRECISION_1_MILLISECOND
self.day_of_month = None
self.day_of_week = None
self.hours = None
self.milliseconds = None
self.minutes = None
self.month = None
self.seconds = None
self.year = None
if system_time_tuple:
if len(system_time_tuple) < 8:
raise ValueError('Invalid system time tuple 8 elements required.')
if system_time_tuple[0] < 1601 or system_time_tuple[0] > 30827:
raise ValueError('Year value out of bounds.')
if system_time_tuple[1] not in range(1, 13):
raise ValueError('Month value out of bounds.')
if system_time_tuple[2] not in range(0, 7):
raise ValueError('Day of week value out of bounds.')
days_per_month = self._GetDaysPerMonth(
system_time_tuple[0], system_time_tuple[1])
if system_time_tuple[3] < 1 or system_time_tuple[3] > days_per_month:
raise ValueError('Day of month value out of bounds.')
if system_time_tuple[4] not in range(0, 24):
raise ValueError('Hours value out of bounds.')
if system_time_tuple[5] not in range(0, 60):
raise ValueError('Minutes value out of bounds.')
if system_time_tuple[6] not in range(0, 60):
raise ValueError('Seconds value out of bounds.')
if system_time_tuple[7] < 0 or system_time_tuple[7] > 999:
raise ValueError('Milliseconds value out of bounds.')
self.day_of_month = system_time_tuple[3]
self.day_of_week = system_time_tuple[2]
self.hours = system_time_tuple[4]
self.milliseconds = system_time_tuple[7]
self.minutes = system_time_tuple[5]
self.month = system_time_tuple[1]
self.seconds = system_time_tuple[6]
self.year = system_time_tuple[0]
self._number_of_seconds = self._GetNumberOfSecondsFromElements(
self.year, self.month, self.day_of_month, self.hours, self.minutes,
self.seconds)
|
Initializes a SYSTEMTIME structure.
Args:
system_time_tuple
(Optional[tuple[int, int, int, int, int, int, int, int]]):
system time, contains year, month, day of week, day of month,
hours, minutes, seconds and milliseconds.
Raises:
ValueError: if the system time is invalid.
|
juraj-google-style
|
def get_product_value(self, value_name, wanted_type=None):
if (not self.__reg_products_handle):
return None
(subkey, search_value_name) = os.path.split(value_name)
try:
if subkey:
handle = win32api.RegOpenKeyEx(self.__reg_products_handle, subkey, 0, (win32con.KEY_READ | self.__reg_32bit_access))
(item_value, item_type) = self.__reg_query_value(handle, search_value_name)
win32api.RegCloseKey(handle)
else:
(item_value, item_type) = win32api.RegQueryValueEx(self.__reg_products_handle, value_name)
except pywintypes.error as exc:
if (exc.winerror == winerror.ERROR_FILE_NOT_FOUND):
return None
raise
if (wanted_type and (item_type not in self.__reg_types[wanted_type])):
item_value = None
return item_value
|
Return the value with the given name from the product section of the registry.
Args:
value_name (str): Registry value name.
wanted_type (str):
The type of value wanted; if the type does not match,
None is returned. wanted_type supported values are
``str`` ``int`` ``list`` ``bytes``.
Returns:
value: Value requested or ``None`` if not found.
|
codesearchnet
|
def union(self, other, recursive=True, overwrite=False):
if (not isinstance(other, composite)):
raise AssertionError('Cannot union composite and {} types'.format(type(other)))
if (self.meta_type != other.meta_type):
return composite([self, other])
if (self.meta_type == 'list'):
keep = []
for item in self._list:
keep.append(item)
for item in other._list:
if (item not in self._list):
keep.append(item)
return composite(keep)
elif (self.meta_type == 'dict'):
keep = {}
for key in list(set((list(self._dict.keys()) + list(other._dict.keys())))):
left = self._dict.get(key)
right = other._dict.get(key)
if (recursive and isinstance(left, composite) and isinstance(right, composite)):
keep[key] = left.union(right, recursive=recursive, overwrite=overwrite)
elif (left == right):
keep[key] = left
elif (left is None):
keep[key] = right
elif (right is None):
keep[key] = left
elif overwrite:
keep[key] = right
else:
keep[key] = composite([left, right])
return composite(keep)
return
|
Recursively compute union of data. For dictionaries, items
for specific keys will be combined into a list, depending on the
status of the overwrite= parameter. For lists, items will be appended
and reduced to unique items. This method is meant to be analogous
to set.union for composite objects.
Args:
other (composite): Other composite object to union with.
recursive (bool): Whether or not to perform the operation recursively,
for all nested composite objects.
overwrite (bool): Whether or not to overwrite entries with the same
key in a nested dictionary.
|
codesearchnet
|
def ParseFileObject(self, parser_mediator, file_object):
fixed_section_data_map = self._GetDataTypeMap(
'job_fixed_length_data_section')
try:
fixed_length_section, file_offset = self._ReadStructureFromFileObject(
file_object, 0, fixed_section_data_map)
except (ValueError, errors.ParseError) as exception:
raise errors.UnableToParseFile(
'Unable to parse fixed-length data section with error: {0!s}'.format(
exception))
if not fixed_length_section.product_version in self._PRODUCT_VERSIONS:
raise errors.UnableToParseFile(
'Unsupported product version in: 0x{0:04x}'.format(
fixed_length_section.product_version))
if not fixed_length_section.format_version == 1:
raise errors.UnableToParseFile(
'Unsupported format version in: {0:d}'.format(
fixed_length_section.format_version))
variable_section_data_map = self._GetDataTypeMap(
'job_variable_length_data_section')
try:
variable_length_section, data_size = self._ReadStructureFromFileObject(
file_object, file_offset, variable_section_data_map)
except (ValueError, errors.ParseError) as exception:
raise errors.UnableToParseFile((
'Unable to parse variable-length data section with error: '
'{0!s}').format(exception))
file_offset += data_size
event_data = self._ParseEventData(variable_length_section)
date_time = self._ParseLastRunTime(parser_mediator, fixed_length_section)
if date_time:
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_LAST_RUN)
parser_mediator.ProduceEventWithEventData(event, event_data)
trigger_data_map = self._GetDataTypeMap('job_trigger')
for trigger_index in range(0, variable_length_section.number_of_triggers):
try:
trigger, data_size = self._ReadStructureFromFileObject(
file_object, file_offset, trigger_data_map)
except (ValueError, errors.ParseError) as exception:
raise errors.UnableToParseFile((
          'Unable to parse trigger: {0:d} with error: {1!s}').format(
trigger_index, exception))
file_offset += data_size
event_data.trigger_type = trigger.trigger_type
date_time = self._ParseTriggerStartTime(parser_mediator, trigger)
if date_time:
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_SCHEDULED_TO_START,
time_zone=parser_mediator.timezone)
parser_mediator.ProduceEventWithEventData(event, event_data)
date_time = self._ParseTriggerEndTime(parser_mediator, trigger)
if date_time:
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_SCHEDULED_TO_END,
time_zone=parser_mediator.timezone)
parser_mediator.ProduceEventWithEventData(event, event_data)
|
Parses a Windows job file-like object.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): a file-like object.
Raises:
UnableToParseFile: when the file cannot be parsed.
|
juraj-google-style
|
def load(self, read_tuple_name):
self.prefix_width = 0
self.read_tuple_id_width = 0
self.genome_id_width = 0
self.chr_id_width = 0
self.coor_width = 0
parts = read_tuple_name.split('__')
self.prefix_width = len(parts[0])
self.read_tuple_id_width = len(parts[1])
segments = parts[2][1:(- 1)].split('),(')
for segment in segments:
int_widths = list(map(len, segment.split(',')))
self.genome_id_width = max(self.genome_id_width, int_widths[0])
self.chr_id_width = max(self.chr_id_width, int_widths[1])
self.coor_width = max(self.coor_width, int_widths[2], int_widths[3])
|
Load RNF values from a read tuple name.
Args:
read_tuple_name (str): Read tuple name which the values are taken from.
|
codesearchnet
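A small, runnable illustration of the name layout the parser above expects; the concrete read tuple name below is made up for demonstration and follows the prefix__id__(segment),(segment) pattern implied by the splitting logic:

read_tuple_name = 'sim__00042__(1,2,123456,123520),(1,2,123600,123660)__x'
parts = read_tuple_name.split('__')
segments = parts[2][1:-1].split('),(')
# -> ['1,2,123456,123520', '1,2,123600,123660']
widths = [list(map(len, s.split(','))) for s in segments]
# widths[0] == [1, 1, 6, 6]: genome id, chr id, and coordinate field widths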
|
def struct_member_error(err, sid, name, offset, size):
exception, msg = STRUCT_ERROR_MAP[err]
struct_name = idc.GetStrucName(sid)
return exception(('AddStructMember(struct="{}", member="{}", offset={}, size={}) '
'failed: {}').format(
struct_name,
name,
offset,
size,
msg
))
|
Create and format a struct member exception.
Args:
err: The error value returned from struct member creation
sid: The struct id
name: The member name
offset: Member offset
size: Member size
Returns:
A ``SarkErrorAddStructMemberFailed`` derivative exception, with an
informative message.
|
juraj-google-style
|
def _get_message(self, target_message, indices, pending, timeout, condition):
start_time = time.time()
target_id = self._get_message_id(target_message)
if (target_id not in indices):
for (i, incoming) in enumerate(self._incoming):
if (incoming.id > target_id):
indices[target_id] = i
break
else:
indices[target_id] = len(self._incoming)
future = self._client.loop.create_future()
last_idx = indices[target_id]
if (last_idx < len(self._incoming)):
incoming = self._incoming[last_idx]
if condition(incoming, target_id):
indices[target_id] += 1
future.set_result(incoming)
return future
pending[target_id] = future
return self._get_result(future, start_time, timeout)
|
Gets the next desired message under the desired condition.
Args:
target_message (`object`):
The target message for which we want to find another
response that applies based on `condition`.
indices (`dict`):
This dictionary remembers the last ID chosen for the
input `target_message`.
pending (`dict`):
This dictionary remembers {msg_id: Future} to be set
once `condition` is met.
timeout (`int`):
The timeout (in seconds) override to use for this operation.
condition (`callable`):
The condition callable that checks if an incoming
message is a valid response.
|
codesearchnet
|
def whois_domains_history(self, domains):
api_name = 'opendns-whois-domain-history'
fmt_url_path = u'whois/{0}/history'
return self._multi_get(api_name, fmt_url_path, domains)
|
Calls WHOIS domain history end point
Args:
domains: An enumerable of domains
Returns:
A dict of {domain: domain_history_result}
|
codesearchnet
|
def copy(source_file_names, destination_file_names):
if len(source_file_names) == 0:
return
filesystem = FileSystems.get_filesystem(source_file_names[0])
return filesystem.copy(source_file_names, destination_file_names)
|
Recursively copy the file list from the source to the destination
Args:
source_file_names: list of source file paths that need to be copied
destination_file_names: list of destination paths for the new objects
Raises:
``BeamIOError``: if any of the copy operations fail
|
github-repos
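A hedged usage sketch; the bucket and object names are illustrative, and this assumes the relevant filesystem (for example GCS) is registered with Beam:

copy(['gs://my-bucket/in/a.txt', 'gs://my-bucket/in/b.txt'],
     ['gs://my-bucket/out/a.txt', 'gs://my-bucket/out/b.txt'])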
|
def _is_in_targets(self, site, targets):
elems = self._get_elements(site)
for elem in elems:
if elem not in targets:
return False
return True
|
Test whether a site contains only elements from the target list.
Args:
site (Site): Site to assess.
targets ([Element]): List of target elements.
Returns:
(boolean) Whether every element at this site is in the target list.
|
juraj-google-style
|
def __init__(self, max_attempts, *args, **kwargs):
Exception.__init__(self, *args, **kwargs)
self.max_attempts = max_attempts
|
Initializer.
Args:
max_attempts: Maximum number of attempts to make for this task,
inclusive. So 2 means try two times and then retire the task.
*args, **kwargs: Optional Exception arguments.
|
juraj-google-style
|
def simple_layer_stack(include_encdec_attention, num_layers=6, d_ff=2048, num_heads=8, d_kv=128, dropout_rate=0.1):
ret = []
for _ in xrange(num_layers):
ret.append(transformer_layers.SelfAttention(num_heads=num_heads, key_value_size=d_kv, attention_kwargs={'dropout_rate': dropout_rate}))
if include_encdec_attention:
ret.append(transformer_layers.EncDecAttention(num_heads=num_heads, key_value_size=d_kv, attention_kwargs={'dropout_rate': dropout_rate}))
ret.append(transformer_layers.DenseReluDense(hidden_size=d_ff, dropout_rate=dropout_rate))
return transformer.LayerStack(ret)
|
Create a layer stack.
Args:
include_encdec_attention: a boolean
num_layers: an integer
d_ff: an integer
num_heads: an integer
d_kv: an integer
dropout_rate: a float
Returns:
a LayerStack
|
codesearchnet
|
def generate_index(fn, cols=None, names=None, sep=' '):
assert (cols is not None), "'cols' was not set"
assert (names is not None), "'names' was not set"
assert (len(cols) == len(names))
(bgzip, open_func) = get_open_func(fn, return_fmt=True)
data = pd.read_csv(fn, sep=sep, engine='c', usecols=cols, names=names, compression=('gzip' if bgzip else None))
f = open_func(fn, 'rb')
data['seek'] = np.fromiter(_seek_generator(f), dtype=np.uint)[:(- 1)]
f.close()
write_index(get_index_fn(fn), data)
return data
|
Build a index for the given file.
Args:
fn (str): the name of the file.
cols (list): a list containing column to keep (as int).
names (list): the name corresponding to the column to keep (as str).
sep (str): the field separator.
Returns:
pandas.DataFrame: the index.
|
codesearchnet
|
def split(self, path):
path = path.strip()
if not path.startswith(S3FileSystem.S3_PREFIX):
raise ValueError('Path %r must be S3 path.' % path)
prefix_len = len(S3FileSystem.S3_PREFIX)
last_sep = path[prefix_len:].rfind('/')
if last_sep >= 0:
last_sep += prefix_len
if last_sep > 0:
return (path[:last_sep], path[last_sep + 1:])
elif last_sep < 0:
return (path, '')
else:
raise ValueError('Invalid path: %s' % path)
|
Splits the given path into two parts.
Splits the path into a pair (head, tail) such that tail contains the last
component of the path and head contains everything up to that.
Head will include the S3 prefix ('s3://').
Args:
path: path as a string
Returns:
a pair of path components as strings.
|
github-repos
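The same head/tail logic, extracted into a standalone function so the behavior is easy to check without the class (a sketch; S3_PREFIX is inlined as 's3://'):

def s3_split(path, prefix='s3://'):
    path = path.strip()
    if not path.startswith(prefix):
        raise ValueError('Path %r must be S3 path.' % path)
    last_sep = path[len(prefix):].rfind('/')
    if last_sep >= 0:
        last_sep += len(prefix)
    if last_sep > 0:
        return (path[:last_sep], path[last_sep + 1:])
    elif last_sep < 0:
        return (path, '')
    raise ValueError('Invalid path: %s' % path)

assert s3_split('s3://bucket/obj') == ('s3://bucket', 'obj')
assert s3_split('s3://bucket') == ('s3://bucket', '')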
|
def baby_names(max_length=15):
names = []
lengths = []
targets = []
with open(os.path.join(os.path.dirname(sys.modules[__name__].__file__),
'baby_names.csv'), 'rb') as f:
first = True
for l in csv.reader(f, delimiter=','):
if first:
first = False
continue
assert len(l) == 4, l
name = l[0]
if max_length < len(name):
raise ValueError('Max length is too small: %d > %d' %
(max_length, len(name)))
chars = [convert_to_int(c) for c in name]
names.append(chars + ([EOS] * (max_length - len(chars))))
lengths.append([len(name)])
values = [float(l[2]), float(l[3])]
if abs(sum(values) - 1) > 0.001:
raise ValueError('Each row must sum to 1: %s' % l)
targets.append(values)
return np.array(names), np.array(targets), np.array(lengths)
|
Opens the baby_names csv file and produces numpy array.
Args:
max_length: The maximum length, 15 was the longest name when this was
written. Short entries will be padded with the EOS marker.
Returns:
A numpy array of the names converted to ascii codes, the labels and an
array of lengths.
Raises:
ValueError: if max_length is too small.
|
juraj-google-style
|
def name(self, name):
self._data['name'] = name
request = self._base_request
request['name'] = name
return self._tc_requests.update(request, owner=self.owner)
|
Updates the security label's name.
Args:
name (str): The new name for the security label.
juraj-google-style
|
def vocabulary_size(self):
return self._lookup_layer.vocabulary_size()
|
Gets the current size of the layer's vocabulary.
Returns:
The integer size of the vocabulary, including optional
mask and OOV indices.
|
github-repos
|
def closed_by(self, **kwargs):
path = '%s/%s/closed_by' % (self.manager.path, self.get_id())
return self.manager.gitlab.http_get(path, **kwargs)
|
List merge requests that will close the issue when merged.
Args:
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabGetError: If the merge requests could not be retrieved
Returns:
list: The list of merge requests.
|
juraj-google-style
|
def modify_user_power_levels(self, users=None, users_default=None):
try:
content = self.client.api.get_power_levels(self.room_id)
if users_default:
content["users_default"] = users_default
if users:
if "users" in content:
content["users"].update(users)
else:
content["users"] = users
for user, power_level in list(content["users"].items()):
if power_level is None:
del content["users"][user]
self.client.api.set_power_levels(self.room_id, content)
return True
except MatrixRequestError:
return False
|
Modify the power level for a subset of users
Args:
users(dict): Power levels to assign to specific users, in the form
{"@name0:host0": 10, "@name1:host1": 100, "@name3:host3", None}
A level of None causes the user to revert to the default level
as specified by users_default.
users_default(int): Default power level for users in the room
Returns:
True if successful, False if not
|
juraj-google-style
|
def parse_clnsig(acc, sig, revstat, transcripts):
clnsig_accessions = []
if acc:
try:
acc = int(acc)
except ValueError:
pass
if isinstance(acc, int):
revstat_groups = []
if revstat:
revstat_groups = [rev.lstrip('_') for rev in revstat.split(',')]
sig_groups = []
if sig:
for significance in sig.split('/'):
splitted_word = significance.split('_')
sig_groups.append(' '.join(splitted_word[:2]))
for sign_term in sig_groups:
clnsig_accessions.append({
'value': sign_term,
'accession': int(acc),
'revstat': ', '.join(revstat_groups),
})
else:
acc_groups = acc.split('|')
sig_groups = sig.split('|')
revstat_groups = revstat.split('|')
for acc_group, sig_group, revstat_group in zip(acc_groups, sig_groups, revstat_groups):
accessions = acc_group.split(',')
significances = sig_group.split(',')
revstats = revstat_group.split(',')
for accession, significance, revstat in zip(accessions, significances, revstats):
clnsig_accessions.append({
'value': int(significance),
'accession': accession,
'revstat': revstat,
})
elif transcripts:
clnsig = set()
for transcript in transcripts:
for annotation in transcript.get('clinsig', []):
clnsig.add(annotation)
for annotation in clnsig:
clnsig_accessions.append({'value': annotation})
return clnsig_accessions
|
Get the clnsig information
Args:
acc(str): The clnsig accession number, raw from vcf
sig(str): The clnsig significance score, raw from vcf
revstat(str): The clnsig revstat, raw from vcf
transcripts(iterable(dict))
Returns:
clnsig_accessions(list): A list with clnsig accessions
|
juraj-google-style
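Two illustrative calls (field values invented for demonstration): a modern single-accession record, and an old pipe-delimited multi-accession record that takes the string branch:

parse_clnsig(acc='265436',
             sig='Pathogenic/Likely_pathogenic',
             revstat='criteria_provided,_single_submitter',
             transcripts=None)
# -> [{'value': 'Pathogenic', 'accession': 265436,
#      'revstat': 'criteria_provided, single_submitter'},
#     {'value': 'Likely pathogenic', 'accession': 265436, ...}]

parse_clnsig(acc='RCV000012345|RCV000054321', sig='5|4',
             revstat='single|single', transcripts=None)
# -> [{'value': 5, 'accession': 'RCV000012345', 'revstat': 'single'},
#     {'value': 4, 'accession': 'RCV000054321', 'revstat': 'single'}]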
|
def angle(x, y):
dot = np.dot(x, y)
x_mod = np.linalg.norm(x)
y_mod = np.linalg.norm(y)
cos_angle = (dot / (x_mod * y_mod))
return np.degrees(np.arccos(cos_angle))
|
Calculate the angle between two vectors, in degrees.
Args:
x (np.array): one vector.
y (np.array): the other vector.
Returns:
(float): the angle between x and y in degrees.
|
codesearchnet
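A quick check (assumes numpy is imported as np, as in the function body):

import numpy as np

angle(np.array([1.0, 0.0]), np.array([0.0, 1.0]))   # -> 90.0
angle(np.array([1.0, 0.0]), np.array([1.0, 1.0]))   # -> 45.0 (approximately)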
|
def get_extended_attention_mask(self, attention_mask: Tensor, input_shape: Tuple[int], device: device, is_decoder: bool) -> Tensor:
if attention_mask.dim() == 3:
extended_attention_mask = attention_mask[:, None, :, :]
elif attention_mask.dim() == 2:
if is_decoder:
batch_size, seq_length = input_shape
seq_ids = torch.arange(seq_length, device=device)
causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None]
causal_mask = causal_mask.to(attention_mask.dtype)
if causal_mask.shape[1] < attention_mask.shape[1]:
prefix_seq_len = attention_mask.shape[1] - causal_mask.shape[1]
causal_mask = torch.cat([torch.ones((batch_size, seq_length, prefix_seq_len), device=device, dtype=causal_mask.dtype), causal_mask], axis=-1)
extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :]
else:
extended_attention_mask = attention_mask[:, None, None, :]
else:
raise ValueError('Wrong shape for input_ids (shape {}) or attention_mask (shape {})'.format(input_shape, attention_mask.shape))
extended_attention_mask = extended_attention_mask.to(dtype=self.dtype)
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
return extended_attention_mask
|
Makes broadcastable attention and causal masks so that future and masked tokens are ignored.
Arguments:
attention_mask (`torch.Tensor`):
Mask with ones indicating tokens to attend to, zeros for tokens to ignore.
input_shape (`Tuple[int]`):
The shape of the input to the model.
device (`torch.device`):
The device of the input to the model.
Returns:
`torch.Tensor` The extended attention mask, with the same dtype as `attention_mask.dtype`.
|
github-repos
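A standalone look at the causal-mask construction used in the decoder branch, for a sequence of length 4 (plain torch, no model needed):

import torch

seq_ids = torch.arange(4)
causal_mask = (seq_ids[None, None, :].repeat(1, 4, 1) <= seq_ids[None, :, None]).long()
# causal_mask[0]:
# tensor([[1, 0, 0, 0],
#         [1, 1, 0, 0],
#         [1, 1, 1, 0],
#         [1, 1, 1, 1]])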
|
def load(file_path, parse_line_fn):
vocabulary = []
embeddings = []
embeddings_dim = None
for line in tf.gfile.GFile(file_path):
token, embedding = parse_line_fn(line)
if not embeddings_dim:
embeddings_dim = len(embedding)
elif embeddings_dim != len(embedding):
raise ValueError(
"Inconsistent embedding dimension detected, %d != %d for token %s" %
(embeddings_dim, len(embedding), token))
vocabulary.append(token)
embeddings.append(embedding)
return vocabulary, np.array(embeddings)
|
Loads a text embedding into memory as a numpy matrix.
Args:
file_path: Path to the text embedding file.
parse_line_fn: callback function to parse each file line.
Returns:
A tuple of (list of vocabulary tokens, numpy matrix of embedding vectors).
Raises:
ValueError: if the data in the embedding file is inconsistent.
|
juraj-google-style
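A plausible parse_line_fn for the common whitespace-separated text format (token followed by float components); the exact file format is an assumption here:

def parse_line_fn(line):
    # Assumes "token v1 v2 ... vN" lines, as in GloVe-style text embeddings.
    parts = line.rstrip().split(' ')
    return parts[0], [float(v) for v in parts[1:]]

parse_line_fn('cat 0.1 0.2 0.3')   # -> ('cat', [0.1, 0.2, 0.3])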
|
def from_sr_code(code):
code = str(code)
proj4 = utils.crscode_to_string('sr-org', code, 'proj4')
crs = from_proj4(proj4)
return crs
|
Load crs object from sr-org code, via spatialreference.org.
Parses based on the proj4 representation.
Arguments:
- *code*: The SR-ORG code as an integer.
Returns:
- A CRS instance of the indicated type.
|
codesearchnet
|
def CompileFilter(self, filter_expression):
filter_parser = pfilter.BaseParser(filter_expression).Parse()
matcher = filter_parser.Compile(pfilter.PlasoAttributeFilterImplementation)
self._filter_expression = filter_expression
self._matcher = matcher
|
Compiles the filter expression.
The filter expression contains an object filter expression.
Args:
filter_expression (str): filter expression.
Raises:
ParseError: if the filter expression cannot be parsed.
|
juraj-google-style
|
def clear(self, color: Tuple[int, int, int]) -> None:
lib.TCOD_image_clear(self.image_c, color)
|
Fill this entire Image with color.
Args:
color (Union[Tuple[int, int, int], Sequence[int]]):
An (r, g, b) sequence or Color instance.
|
juraj-google-style
|
def _channel_flatten_input(x, data_format):
graph = ops.get_default_graph()
cache_key = (graph, x.ref(), data_format)
if cache_key not in _channel_flatten_input_cache:
x_shape = array_ops.shape(x)
neg_ones = constant_op.constant([-1], dtype=x_shape.dtype)
if data_format == b'NCHW':
order = [1, 0, 2, 3, 4]
shape = array_ops.concat([x_shape[1:2], neg_ones, x_shape[3:]], axis=0)
reverse_order = order
else:
order = [1, 2, 3, 0, 4]
shape = array_ops.concat([x_shape[1:4], neg_ones], axis=0)
reverse_order = [3, 0, 1, 2, 4]
x = array_ops.transpose(x, order)
reverse_shape = array_ops.shape(x)
x = array_ops.reshape(x, shape)
outputs = (x, reverse_order, reverse_shape)
_channel_flatten_input_cache[cache_key] = outputs
else:
outputs = _channel_flatten_input_cache[cache_key]
return outputs
|
Merge the stack dimension with the channel dimension.
If S is pfor's stacking dimension, then,
- for SNCHW, we transpose to NSCHW. If N dimension has size 1, the transpose
should be cheap.
- for SNHWC, we transpose to NHWSC.
We then merge the S and C dimension.
Args:
x: tensor_lib.Tensor to transform.
data_format: "NCHW" or "NHWC".
Returns:
A 3-element tuple with the transformed value, along with the shape for
reshape and order for transpose required to transform back.
|
github-repos
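The same transpose-then-merge, shown with numpy for the SNHWC case so the shape bookkeeping is visible (sizes are arbitrary):

import numpy as np

x = np.zeros((2, 1, 4, 4, 3))          # (S, N, H, W, C)
y = np.transpose(x, (1, 2, 3, 0, 4))   # -> (N, H, W, S, C)
y = y.reshape(1, 4, 4, -1)             # merge S and C -> (1, 4, 4, 6)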
|
def __extract_file(self, path, fileinfo, destination):
if 'offset' not in fileinfo:
self.__copy_extracted(path, destination)
return
self.asarfile.seek(
self.__absolute_offset(fileinfo['offset'])
)
contents = self.asarfile.read(
self.__absolute_offset(fileinfo['size'])
)
destination_path = os.path.join(destination, path)
with open(destination_path, 'wb') as fp:
fp.write(contents)
LOGGER.debug('Extracted %s to %s', path, destination_path)
|
Extracts the specified file to the specified destination.
Args:
path (str):
Relative (to the root of the archive) path of the
file to extract.
fileinfo (dict):
Dictionary containing the offset and size of the file
(Extracted from the header).
destination (str):
Directory to extract the archive to.
|
juraj-google-style
|
def get_help_usage(command):
if (not command):
doc = get_primary_command_usage()
elif (command in ('-a', '--all')):
subcommands = [k for k in settings.subcommands if (k is not None)]
available_commands = (subcommands + ['help'])
command_doc = '\nAvailable commands:\n{}\n'.format('\n'.join((' {}'.format(c) for c in sorted(available_commands))))
doc = get_primary_command_usage(command_doc)
elif command.startswith('-'):
raise ValueError("Unrecognized option '{}'.".format(command))
elif (command in settings.subcommands):
subcommand = settings.subcommands[command]
doc = format_usage(subcommand.__doc__)
docopt.docopt(doc, argv=('--help',))
|
Print out a help message and exit the program.
Args:
command: If a command value is supplied then print the help message for
the command module if available. If the command is '-a' or '--all',
then print the standard help message but with a full list of
available commands.
Raises:
ValueError: Raised if the help message is requested for an invalid
command or an unrecognized option is passed to help.
|
codesearchnet
|
def _buildTraitCovar(self, trait_covar_type='freeform', rank=1, fixed_trait_covar=None, jitter=0.0001):
assert (trait_covar_type in ['freeform', 'diag', 'lowrank', 'lowrank_id', 'lowrank_diag', 'block', 'block_id', 'block_diag', 'fixed']), 'VarianceDecomposition:: trait_covar_type not valid'
if (trait_covar_type == 'freeform'):
cov = FreeFormCov(self.P, jitter=jitter)
elif (trait_covar_type == 'fixed'):
assert (fixed_trait_covar is not None), 'VarianceDecomposition:: set fixed_trait_covar'
assert (fixed_trait_covar.shape[0] == self.P), 'VarianceDecomposition:: Incompatible shape for fixed_trait_covar'
assert (fixed_trait_covar.shape[1] == self.P), 'VarianceDecomposition:: Incompatible shape for fixed_trait_covar'
cov = FixedCov(fixed_trait_covar)
elif (trait_covar_type == 'diag'):
cov = DiagonalCov(self.P)
elif (trait_covar_type == 'lowrank'):
cov = LowRankCov(self.P, rank=rank)
elif (trait_covar_type == 'lowrank_id'):
cov = SumCov(LowRankCov(self.P, rank=rank), FixedCov(sp.eye(self.P)))
elif (trait_covar_type == 'lowrank_diag'):
cov = SumCov(LowRankCov(self.P, rank=rank), DiagonalCov(self.P))
elif (trait_covar_type == 'block'):
cov = FixedCov(sp.ones([self.P, self.P]))
elif (trait_covar_type == 'block_id'):
cov1 = FixedCov(sp.ones([self.P, self.P]))
cov2 = FixedCov(sp.eye(self.P))
cov = SumCov(cov1, cov2)
elif (trait_covar_type == 'block_diag'):
cov1 = FixedCov(sp.ones([self.P, self.P]))
cov2 = FixedCov(sp.eye(self.P))
cov = SumCov(cov1, cov2)
return cov
|
Internal functions that builds the trait covariance matrix using the LIMIX framework
Args:
trait_covar_type: type of covariance to use. Default 'freeform'. Possible values are 'freeform', 'fixed', 'diag', 'lowrank', 'lowrank_id', 'lowrank_diag', 'block', 'block_id', 'block_diag'.
rank: rank of a possible lowrank component (default 1)
fixed_trait_covar: PxP matrix for the (predefined) trait-to-trait covariance matrix if fixed type is used
jitter: diagonal contribution added to freeform covariance matrices for regularization
Returns:
LIMIX::Covariance for Trait covariance matrix
|
codesearchnet
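For intuition, the 'lowrank_diag' option parameterizes a PxP covariance as a rank-r outer product plus a diagonal; a numpy sketch of that structure (not LIMIX code):

import numpy as np

P, rank = 4, 1
L = np.random.randn(P, rank)
d = np.abs(np.random.randn(P))
K = L @ L.T + np.diag(d)   # 'lowrank_diag'-style trait covariance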
|
def point_consensus(self, consensus_type):
if ('mean' in consensus_type):
consensus_data = np.mean(self.data, axis=0)
elif ('std' in consensus_type):
consensus_data = np.std(self.data, axis=0)
elif ('median' in consensus_type):
consensus_data = np.median(self.data, axis=0)
elif ('max' in consensus_type):
consensus_data = np.max(self.data, axis=0)
elif ('percentile' in consensus_type):
percentile = int(consensus_type.split('_')[1])
consensus_data = np.percentile(self.data, percentile, axis=0)
else:
consensus_data = np.zeros(self.data.shape[1:])
consensus = EnsembleConsensus(consensus_data, consensus_type, self.ensemble_name, self.run_date, self.variable, self.start_date, self.end_date, self.units)
return consensus
|
Calculate grid-point statistics across ensemble members.
Args:
consensus_type: mean, std, median, max, or percentile_nn
Returns:
EnsembleConsensus containing point statistic
|
codesearchnet
|