| code | docstring | source |
|---|---|---|
async def update_notifications(self, on_match_open: bool = None, on_tournament_end: bool = None):
params = {}
if on_match_open is not None:
params['notify_users_when_matches_open'] = on_match_open
if on_tournament_end is not None:
params['notify_users_when_the_tournament_ends'] = on_tournament_end
assert_or_raise(len(params) > 0, ValueError, 'At least one of the notifications must be given')
await self.update(**params)
|
update participants' notifications for this tournament
|methcoro|
Args:
on_match_open: Email registered Challonge participants when matches open up for them
on_tournament_end: Email registered Challonge participants the results when this tournament ends
Raises:
APIException
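Example:
A hedged usage sketch; ``tournament`` stands for any object exposing this method:
>>> await tournament.update_notifications(on_match_open=True, on_tournament_end=True)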
|
juraj-google-style
|
def roll_to_business_day(self, date_tensor, roll_convention):
if roll_convention == constants.BusinessDayConvention.NONE:
return date_tensor
ordinals = dt.convert_to_date_tensor(date_tensor).ordinal()
biz_days, is_bizday = self._to_biz_space(ordinals)
biz_days_rolled = self._apply_roll_biz_space(date_tensor, biz_days, is_bizday, roll_convention)
return dt.from_ordinals(self._from_biz_space(biz_days_rolled))
|
Rolls the given dates to business dates according to given convention.
Args:
date_tensor: `DateTensor` of dates to roll from.
roll_convention: BusinessDayConvention. Determines how to roll a date that
falls on a holiday.
Returns:
The resulting `DateTensor`.
|
github-repos
|
async def getTempCoreCmdr(mods=None, outp=None):
acm = genTempCoreProxy(mods)
prox = await acm.__aenter__()
cmdrcore = await CmdrCore.anit(prox, outp=outp)
cmdrcore.acm = acm
return cmdrcore
|
Get a CmdrCore instance which is backed by a temporary Cortex.
Args:
mods (list): A list of additional CoreModules to load in the Cortex.
outp: An output helper. Will be used for the Cmdr instance.
Notes:
The CmdrCore returned by this should be fini()'d to tear down the temporary Cortex.
Returns:
CmdrCore: A CmdrCore instance.
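Example:
A sketch of the fini() pattern described in the Notes; ``runCmdLine`` is assumed from the Cmdr interface and is not shown in this row:
>>> cmdrcore = await getTempCoreCmdr()
>>> try:
...     await cmdrcore.runCmdLine('help')
... finally:
...     await cmdrcore.fini()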
|
juraj-google-style
|
def _file_io_read_test_preprocessor(test_spec: dict, expected: List[str], env: TestEnvironment):
if (pipeline := test_spec.get('pipeline', None)):
for transform in pipeline.get('transforms', []):
if transform.get('type', '').startswith('ReadFrom'):
file_name = transform['config']['path'].split('/')[-1]
return replace_recursive(test_spec, transform['type'], 'path', env.input_file(file_name, INPUT_FILES[file_name]))
return test_spec
|
This preprocessor replaces any file IO ReadFrom transform with a Create
transform that reads from a predefined in-memory dictionary. This allows
the test to verify the pipeline's correctness without relying on external
files.
Args:
test_spec: The dictionary representation of the YAML pipeline specification.
expected: A list of strings representing the expected output of the
pipeline.
env: The TestEnvironment object providing utilities for creating temporary
files.
Returns:
The modified test_spec dictionary with ReadFrom transforms replaced.
|
github-repos
|
def insert_at_frontier(self, operations: ops.OP_TREE, start: int, frontier: Dict[(ops.Qid, int)]=None) -> Dict[(ops.Qid, int)]:
if (frontier is None):
frontier = defaultdict((lambda : 0))
operations = tuple(ops.flatten_op_tree(operations))
if (not operations):
return frontier
qubits = set((q for op in operations for q in op.qubits))
if any(((frontier[q] > start) for q in qubits)):
raise ValueError('The frontier for qubits on which the operations to insert act cannot be after start.')
next_moments = self.next_moments_operating_on(qubits, start)
(insertion_indices, _) = self._pick_inserted_ops_moment_indices(operations, start, frontier)
self._push_frontier(frontier, next_moments)
self._insert_operations(operations, insertion_indices)
return frontier
|
Inserts operations inline at frontier.
Args:
operations: the operations to insert
start: the moment at which to start inserting the operations
frontier: frontier[q] is the earliest moment in which an operation
acting on qubit q can be placed.
|
codesearchnet
|
def rename(script, label='blank', layer_num=None):
filter_xml = ''.join([' <filter name="Rename Current Mesh">\n', ' <Param name="newName" ', 'value="{}" '.format(label), 'description="New Label" ', 'type="RichString" ', '/>\n', ' </filter>\n'])
if isinstance(script, mlx.FilterScript):
if ((layer_num is None) or (layer_num == script.current_layer())):
util.write_filter(script, filter_xml)
script.layer_stack[script.current_layer()] = label
else:
cur_layer = script.current_layer()
change(script, layer_num)
util.write_filter(script, filter_xml)
change(script, cur_layer)
script.layer_stack[layer_num] = label
else:
util.write_filter(script, filter_xml)
return None
|
Rename layer label
Can be useful for outputting mlp files, as the output file names use
the labels.
Args:
script: the mlx.FilterScript object or script filename to write
the filter to.
label (str): new label for the mesh layer
layer_num (int): layer number to rename. Default is the
current layer. Not supported on the file base API.
Layer stack:
Renames a layer
MeshLab versions:
2016.12
1.3.4BETA
|
codesearchnet
|
def range(self, dim, data_range=True, dimension_range=True):
iskdim = self.get_dimension(dim) not in self.vdims
return super(StatisticsElement, self).range(dim, iskdim, dimension_range)
|
Return the lower and upper bounds of values along dimension.
Args:
dimension: The dimension to compute the range on.
data_range (bool): Compute range from data values
dimension_range (bool): Include Dimension ranges
Whether to include Dimension range and soft_range
in range calculation
Returns:
Tuple containing the lower and upper bound
|
juraj-google-style
|
def _read_range(self, start, end=0):
if start >= self._size:
return bytes()
with _handle_oss_error():
response = self._bucket.get_object(key=self._key, headers=dict(
Range=self._http_range(
start, end if end <= self._size else self._size)))
return response.read()
|
Read a range of bytes in stream.
Args:
start (int): Start stream position.
end (int): End stream position.
0 To not specify end.
Returns:
bytes: The bytes read from the specified range.
|
juraj-google-style
|
def add(self, username, user_api, filename=None):
keys = API.__get_keys(filename)
user = user_api.find(username)[0]
distinguished_name = user.entry_dn
if 'ldapPublicKey' not in user.objectClass:
raise ldap3.core.exceptions.LDAPNoSuchAttributeResult(
'LDAP Public Key Object Class not found. ' +
'Please ensure user was created correctly.')
else:
for key in list(set(keys)):
print(key)
try:
SSHKey(key).parse()
except Exception as err:
raise err from None
else:
operation = {'sshPublicKey': [(ldap3.MODIFY_ADD, [key])]}
self.client.modify(distinguished_name, operation)
|
Add SSH public key to a user's profile.
Args:
username: Username to attach SSH public key to
user_api: User API instance used to look up the user's LDAP entry
filename: Filename containing keys to add (optional)
Raises:
ldap3.core.exceptions.LDAPNoSuchAttributeResult:
ldapPublicKey isn't attached to objectClass
|
juraj-google-style
|
def conversations_invite(
self, *, channel: str, users: List[str], **kwargs
) -> SlackResponse:
self._validate_xoxp_token()
kwargs.update({"channel": channel, "users": users})
return self.api_call("conversations.invite", json=kwargs)
|
Invites users to a channel.
Args:
channel (str): The channel id. e.g. 'C1234567890'
users (list): A list of user IDs to invite. e.g. ['U2345678901', 'U3456789012']
|
juraj-google-style
|
def set_weather_from_metar(
metar: typing.Union[Metar.Metar, str],
in_file: typing.Union[str, Path],
out_file: typing.Union[str, Path] = None
) -> typing.Tuple[typing.Union[str, None], typing.Union[str, None]]:
error, metar = custom_metar.CustomMetar.get_metar(metar)
if error:
return error, None
if metar:
LOGGER.debug('METAR: %s', metar.code)
in_file = elib.path.ensure_file(in_file)
if out_file is None:
out_file = in_file
else:
out_file = elib.path.ensure_file(out_file, must_exist=False)
LOGGER.debug('applying metar: %s -> %s', in_file, out_file)
try:
LOGGER.debug('building MissionWeather')
_mission_weather = mission_weather.MissionWeather(metar)
with Miz(str(in_file)) as miz:
_mission_weather.apply_to_miz(miz)
miz.zip(str(out_file))
return None, f'successfully applied METAR to {in_file}'
except ValueError:
error = f'Unable to apply METAR string to the mission.\n' \
f'This is most likely due to a freak value, this feature is still experimental.\n' \
f'I will fix it ASAP !'
return error, None
|
Applies the weather from a METAR object to a MIZ file
Args:
metar: metar object
in_file: path to MIZ file
out_file: path to output MIZ file (will default to in_file)
Returns: tuple of error, success
|
juraj-google-style
|
def split(self, file):
with open(file, 'rb') as f:
for record in sagemaker.amazon.common.read_recordio(f):
yield record
|
Split a file into records using a specific strategy
This RecordIOSplitter splits the data into individual RecordIO records.
Args:
file (str): path to the file to split
Returns: generator for the individual records that were split from the file
|
juraj-google-style
|
def siblings(self, as_resources=False):
siblings = set()
for parent in self.parents(as_resources=True):
for sibling in parent.children(as_resources=as_resources):
siblings.add(sibling)
if as_resources:
siblings.remove(self)
if not as_resources:
siblings.remove(self.uri)
return list(siblings)
|
method to return hierarchical siblings of this resource.
Args:
as_resources (bool): if True, opens each as the appropriate resource type instead of returning the URI only
Returns:
(list): list of resources
|
juraj-google-style
|
class InstructBlipForConditionalGenerationModelOutput(ModelOutput):
loss: Optional[Tuple[torch.FloatTensor]] = None
logits: Optional[Tuple[torch.FloatTensor]] = None
vision_outputs: Optional[torch.FloatTensor] = None
qformer_outputs: Optional[Tuple[torch.FloatTensor]] = None
language_model_outputs: Optional[Tuple[torch.FloatTensor]] = None
def to_tuple(self) -> Tuple[Any]:
return tuple((self[k] if k not in ['vision_outputs', 'qformer_outputs', 'language_model_outputs'] else getattr(self, k).to_tuple() for k in self.keys()))
|
Class defining the outputs of [`InstructBlipForConditionalGeneration`].
Args:
loss (`torch.FloatTensor`, *optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`):
Language modeling loss from the language model.
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head of the language model.
vision_outputs (`BaseModelOutputWithPooling`):
Outputs of the vision encoder.
qformer_outputs (`BaseModelOutputWithPoolingAndCrossAttentions`):
Outputs of the Q-Former (Querying Transformer).
language_model_outputs (`CausalLMOutputWithPast` or `Seq2SeqLMOutput`):
Outputs of the language model.
|
github-repos
|
def from_serializable(cls, obj):
if (obj.get('version', {'bqm_schema': '1.0.0'})['bqm_schema'] != '2.0.0'):
return cls._from_serializable_v1(obj)
variables = [(tuple(v) if isinstance(v, list) else v) for v in obj['variable_labels']]
if obj['use_bytes']:
ldata = bytes2array(obj['linear_biases'])
qdata = bytes2array(obj['quadratic_biases'])
irow = bytes2array(obj['quadratic_head'])
icol = bytes2array(obj['quadratic_tail'])
else:
ldata = obj['linear_biases']
qdata = obj['quadratic_biases']
irow = obj['quadratic_head']
icol = obj['quadratic_tail']
offset = obj['offset']
vartype = obj['variable_type']
bqm = cls.from_numpy_vectors(ldata, (irow, icol, qdata), offset, str(vartype), variable_order=variables)
bqm.info.update(obj['info'])
return bqm
|
Deserialize a binary quadratic model.
Args:
obj (dict):
A binary quadratic model serialized by :meth:`~.BinaryQuadraticModel.to_serializable`.
Returns:
:obj:`.BinaryQuadraticModel`
Examples:
Encode and decode using JSON
>>> import dimod
>>> import json
...
>>> bqm = dimod.BinaryQuadraticModel({'a': -1.0, 'b': 1.0}, {('a', 'b'): -1.0}, 0.0, dimod.SPIN)
>>> s = json.dumps(bqm.to_serializable())
>>> new_bqm = dimod.BinaryQuadraticModel.from_serializable(json.loads(s))
See also:
:meth:`~.BinaryQuadraticModel.to_serializable`
:func:`json.loads`, :func:`json.load` JSON deserialization functions
|
codesearchnet
|
def _parse_string(self, xml):
if (not isinstance(xml, HTMLElement)):
xml = dhtmlparser.parseString(str(xml))
record = xml.find('record')
if (not record):
raise ValueError('There is no <record> in your MARC XML document!')
record = record[0]
self.oai_marc = (len(record.find('oai_marc')) > 0)
if (not self.oai_marc):
leader = record.find('leader')
if (len(leader) >= 1):
self.leader = leader[0].getContent()
if self.oai_marc:
self._parse_control_fields(record.find('fixfield'), 'id')
self._parse_data_fields(record.find('varfield'), 'id', 'label')
else:
self._parse_control_fields(record.find('controlfield'), 'tag')
self._parse_data_fields(record.find('datafield'), 'tag', 'code')
if (self.oai_marc and ('LDR' in self.controlfields)):
self.leader = self.controlfields['LDR']
|
Parse MARC XML document to dicts, which are contained in
self.controlfields and self.datafields.
Args:
xml (str or HTMLElement): input data
Also detect if this is oai marc format or not (see self.oai_marc).
|
codesearchnet
|
def column_type(self, agencyId: int, advertiserId: int, column: str) -> str:
if column not in self.columns:
for saved_column in API_SearchAds(self.config, self.auth, iterate=True).savedcolumns().list(agencyId=agencyId, advertiserId=advertiserId).execute():
self.columns[saved_column['savedColumnName']] = SA_TYPES.get(saved_column['type'], 'STRING')
return self.columns.get(column, 'STRING')
|
Return the column type for the given column name.
Intended mostly as an internal helper function but left open for convenience.
Leverages both saved columns and standard columns.
Does not distinguish saved from standard, will this be a problem?
Args:
agencyId - required only for saved columns, usually derived from report
advertiserId - required only for saved columns, usually derived from report
column - name of the column whose type to look up
Returns:
Column type as defined by BigQuery. Defaults to STRING if not found.
|
github-repos
|
def force_in_A_to_force_in_B(force_A, torque_A, pose_A_in_B):
pos_A_in_B = pose_A_in_B[:3, 3]
rot_A_in_B = pose_A_in_B[:3, :3]
skew_symm = _skew_symmetric_translation(pos_A_in_B)
force_B = rot_A_in_B.T.dot(force_A)
torque_B = -rot_A_in_B.T.dot(skew_symm.dot(force_A)) + rot_A_in_B.T.dot(torque_A)
return force_B, torque_B
|
Converts linear and rotational force at a point in frame A to the equivalent in frame B.
Args:
force_A: 3-dim iterable for linear force in A
torque_A: 3-dim iterable for rotational force (moment) in A
pose_A_in_B: numpy array of shape (4,4) corresponding to the pose of A in frame B
Returns:
force_B, torque_B: two numpy arrays of shape (3,) for the forces in B
|
juraj-google-style
|
def member_del(self, member_id, reconfig=True):
server_id = self._servers.host_to_server_id(
self.member_id_to_host(member_id))
if reconfig and member_id in [member['_id'] for member in self.members()]:
config = self.config
config['members'].pop(member_id)
self.repl_update(config)
self._servers.remove(server_id)
return True
|
remove member from replica set
Args:
member_id - member index
reconfig - whether the replica set needs to be reconfigured
Returns True if the operation succeeds, otherwise False
|
juraj-google-style
|
def verify_tree_consistency(self, old_tree_size: int, new_tree_size: int, old_root: bytes, new_root: bytes, proof: Sequence[bytes]):
old_size = old_tree_size
new_size = new_tree_size
if ((old_size < 0) or (new_size < 0)):
raise ValueError('Negative tree size')
if (old_size > new_size):
raise ValueError(('Older tree has bigger size (%d vs %d), did you supply inputs in the wrong order?' % (old_size, new_size)))
if (old_size == new_size):
if (old_root == new_root):
if proof:
logging.debug('Trees are identical, ignoring proof')
return True
else:
raise error.ConsistencyError('Inconsistency: different root hashes for the same tree size')
if (old_size == 0):
if proof:
logging.debug('Ignoring non-empty consistency proof for empty tree.')
return True
node = (old_size - 1)
last_node = (new_size - 1)
while (node % 2):
node //= 2
last_node //= 2
p = iter(proof)
try:
if node:
new_hash = old_hash = next(p)
else:
new_hash = old_hash = old_root
while node:
if (node % 2):
next_node = next(p)
old_hash = self.hasher.hash_children(next_node, old_hash)
new_hash = self.hasher.hash_children(next_node, new_hash)
elif (node < last_node):
new_hash = self.hasher.hash_children(new_hash, next(p))
node //= 2
last_node //= 2
while last_node:
n = next(p)
new_hash = self.hasher.hash_children(new_hash, n)
last_node //= 2
if (new_hash != new_root):
raise error.ProofError(('Bad Merkle proof: second root hash does not match. Expected hash: %s , computed hash: %s' % (hexlify(new_root).strip(), hexlify(new_hash).strip())))
elif (old_hash != old_root):
raise error.ConsistencyError(('Inconsistency: first root hash does not match. Expected hash: %s, computed hash: %s' % (hexlify(old_root).strip(), hexlify(old_hash).strip())))
except StopIteration:
raise error.ProofError('Merkle proof is too short')
try:
next(p)
except StopIteration:
pass
else:
logging.debug('Proof has extra nodes')
return True
|
Verify the consistency between two root hashes.
old_tree_size must be <= new_tree_size.
Args:
old_tree_size: size of the older tree.
new_tree_size: size of the newer_tree.
old_root: the root hash of the older tree.
new_root: the root hash of the newer tree.
proof: the consistency proof.
Returns:
True. The return value is enforced by a decorator and need not be
checked by the caller.
Raises:
ConsistencyError: the proof indicates an inconsistency
(this is usually really serious!).
ProofError: the proof is invalid.
ValueError: supplied tree sizes are invalid.
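Example:
A minimal sketch of the interior-node hash that ``hasher.hash_children`` is assumed to compute (RFC 6962 convention; the actual hasher is not shown in this row):
>>> import hashlib
>>> def hash_children(left, right):
...     return hashlib.sha256(b'\x01' + left + right).digest()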
|
codesearchnet
|
def _ParseFilterOptions(self, options):
names = ['artifact_filters', 'date_filters', 'filter_file']
helpers_manager.ArgumentHelperManager.ParseOptions(
options, self, names=names)
extensions_string = self.ParseStringOption(options, 'extensions_string')
self._ParseExtensionsString(extensions_string)
names_string = getattr(options, 'names_string', None)
self._ParseNamesString(names_string)
signature_identifiers = getattr(options, 'signature_identifiers', None)
try:
self._ParseSignatureIdentifiers(
self._data_location, signature_identifiers)
except (IOError, ValueError) as exception:
raise errors.BadConfigOption(exception)
if self._artifact_filters or self._filter_file:
self.has_filters = True
else:
self.has_filters = self._filter_collection.HasFilters()
|
Parses the filter options.
Args:
options (argparse.Namespace): command line arguments.
Raises:
BadConfigOption: if the options are invalid.
|
juraj-google-style
|
def __init__(self, xid=None, flags=ConfigFlag.OFPC_FRAG_NORMAL,
miss_send_len=ControllerMaxLen.OFPCML_NO_BUFFER):
super().__init__(xid)
self.flags = flags
self.miss_send_len = miss_send_len
|
Create a SwitchConfig with the optional parameters below.
Args:
xid (int): xid to be used on the message header.
flags (ConfigFlag): OFPC_* flags.
miss_send_len (int): UBInt16 max bytes of new flow that the
datapath should send to the controller.
|
juraj-google-style
|
def __init__(self, root):
self._root_ref = root if isinstance(root, weakref.ref) else weakref.ref(root)
|
Configure the trackable view.
Args:
root: A `Trackable` object whose variables (including the variables of
dependencies, recursively) should be saved. May be a weak reference.
|
github-repos
|
def __call__(self, shape, dtype=None, **kwargs):
_validate_kwargs(self.__class__.__name__, kwargs)
dtype = _assert_float_dtype(_get_dtype(dtype))
scale = self.scale
fan_in, fan_out = _compute_fans(shape)
if _PARTITION_SHAPE in kwargs:
shape = kwargs[_PARTITION_SHAPE]
if self.mode == 'fan_in':
scale /= max(1.0, fan_in)
elif self.mode == 'fan_out':
scale /= max(1.0, fan_out)
else:
scale /= max(1.0, (fan_in + fan_out) / 2.0)
if self.distribution == 'truncated_normal':
stddev = math.sqrt(scale) / 0.8796256610342398
return self._random_generator.truncated_normal(shape, 0.0, stddev, dtype)
elif self.distribution == 'untruncated_normal':
stddev = math.sqrt(scale)
return self._random_generator.random_normal(shape, 0.0, stddev, dtype)
else:
limit = math.sqrt(3.0 * scale)
return self._random_generator.random_uniform(shape, -limit, limit, dtype)
|
Returns a tensor object initialized as specified by the initializer.
Args:
shape: Shape of the tensor.
dtype: Optional dtype of the tensor. Only floating point types are
supported. If not specified, `tf.keras.backend.floatx()` is used, which
default to `float32` unless you configured it otherwise (via
`tf.keras.backend.set_floatx(float_dtype)`)
**kwargs: Additional keyword arguments.
|
github-repos
|
def ParseFileTransfer(
self, parser_mediator, query, row, cache=None, database=None,
**unused_kwargs):
query_hash = hash(query)
source_dict = cache.GetResults('source')
if not source_dict:
results = database.Query(self.QUERY_SOURCE_FROM_TRANSFER)
cache.CacheQueryResults(
results, 'source', 'pk_id', ('skypeid', 'skypename'))
source_dict = cache.GetResults('source')
dest_dict = cache.GetResults('destination')
if not dest_dict:
results = database.Query(self.QUERY_DEST_FROM_TRANSFER)
cache.CacheQueryResults(
results, 'destination', 'parent_id', ('skypeid', 'skypename'))
dest_dict = cache.GetResults('destination')
source = 'Unknown'
destination = 'Unknown'
parent_id = self._GetRowValue(query_hash, row, 'parent_id')
partner_dispname = self._GetRowValue(query_hash, row, 'partner_dispname')
partner_handle = self._GetRowValue(query_hash, row, 'partner_handle')
if parent_id:
destination = '{0:s} <{1:s}>'.format(partner_handle, partner_dispname)
skype_id, skype_name = source_dict.get(parent_id, [None, None])
if skype_name:
source = '{0:s} <{1:s}>'.format(skype_id, skype_name)
else:
source = '{0:s} <{1:s}>'.format(partner_handle, partner_dispname)
pk_id = self._GetRowValue(query_hash, row, 'pk_id')
if pk_id:
skype_id, skype_name = dest_dict.get(pk_id, [None, None])
if skype_name:
destination = '{0:s} <{1:s}>'.format(skype_id, skype_name)
filename = self._GetRowValue(query_hash, row, 'filename')
filesize = self._GetRowValue(query_hash, row, 'filesize')
try:
file_size = int(filesize, 10)
except (ValueError, TypeError):
parser_mediator.ProduceExtractionWarning(
'unable to convert file size: {0!s} of file: {1:s}'.format(
filesize, filename))
file_size = 0
event_data = SkypeTransferFileEventData()
event_data.destination = destination
event_data.offset = self._GetRowValue(query_hash, row, 'id')
event_data.query = query
event_data.source = source
event_data.transferred_filename = filename
event_data.transferred_filepath = self._GetRowValue(
query_hash, row, 'filepath')
event_data.transferred_filesize = file_size
status = self._GetRowValue(query_hash, row, 'status')
starttime = self._GetRowValue(query_hash, row, 'starttime')
if status == 2:
if starttime:
event_data.action_type = 'SENDSOLICITUDE'
date_time = dfdatetime_posix_time.PosixTime(timestamp=starttime)
event = time_events.DateTimeValuesEvent(
date_time, 'File transfer from Skype')
parser_mediator.ProduceEventWithEventData(event, event_data)
elif status == 8:
if starttime:
event_data.action_type = 'GETSOLICITUDE'
date_time = dfdatetime_posix_time.PosixTime(timestamp=starttime)
event = time_events.DateTimeValuesEvent(
date_time, 'File transfer from Skype')
parser_mediator.ProduceEventWithEventData(event, event_data)
accepttime = self._GetRowValue(query_hash, row, 'accepttime')
if accepttime:
event_data.action_type = 'ACCEPTED'
date_time = dfdatetime_posix_time.PosixTime(timestamp=accepttime)
event = time_events.DateTimeValuesEvent(
date_time, 'File transfer from Skype')
parser_mediator.ProduceEventWithEventData(event, event_data)
finishtime = self._GetRowValue(query_hash, row, 'finishtime')
if finishtime:
event_data.action_type = 'FINISHED'
date_time = dfdatetime_posix_time.PosixTime(timestamp=finishtime)
event = time_events.DateTimeValuesEvent(
date_time, 'File transfer from Skype')
parser_mediator.ProduceEventWithEventData(event, event_data)
|
Parses a file transfer.
There is no direct relationship between who sends the file and
who accepts the file.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
query (str): query that created the row.
row (sqlite3.Row): row resulting from query.
cache (Optional[SQLiteCache]): cache.
database (Optional[SQLiteDatabase]): database.
|
juraj-google-style
|
def dzip(items1, items2, cls=dict):
try:
len(items1)
except TypeError:
items1 = list(items1)
try:
len(items2)
except TypeError:
items2 = list(items2)
if ((len(items1) == 0) and (len(items2) == 1)):
items2 = []
if ((len(items2) == 1) and (len(items1) > 1)):
items2 = (items2 * len(items1))
if (len(items1) != len(items2)):
raise ValueError(('out of alignment len(items1)=%r, len(items2)=%r' % (len(items1), len(items2))))
return cls(zip(items1, items2))
|
Zips elementwise pairs between items1 and items2 into a dictionary. Values
from items2 can be broadcast onto items1.
Args:
items1 (Iterable): full sequence
items2 (Iterable): can either be a sequence of one item or a sequence
of equal length to `items1`
cls (Type[dict]): dictionary type to use. Defaults to dict, but could
be ordered dict instead.
Returns:
dict: similar to dict(zip(items1, items2))
Example:
>>> assert dzip([1, 2, 3], [4]) == {1: 4, 2: 4, 3: 4}
>>> assert dzip([1, 2, 3], [4, 4, 4]) == {1: 4, 2: 4, 3: 4}
>>> assert dzip([], [4]) == {}
|
codesearchnet
|
def sample(self, num_rows):
sampled_values = []
for i in range(num_rows):
sampled_values.append(self._sample_row())
return pd.DataFrame(sampled_values, columns=self.columns)
|
Sample new rows.
Args:
num_rows(int): Number of rows to sample
Returns:
pandas.DataFrame
|
juraj-google-style
|
class BundleFactory(object):
def __init__(self, stacked: bool) -> None:
self._stacked = stacked
def create_bundle(self, output_pcollection: Union[pvalue.PBegin, pvalue.PCollection]) -> '_Bundle':
return _Bundle(output_pcollection, self._stacked)
def create_empty_committed_bundle(self, output_pcollection: Union[pvalue.PBegin, pvalue.PCollection]) -> '_Bundle':
bundle = self.create_bundle(output_pcollection)
bundle.commit(None)
return bundle
|
For internal use only; no backwards-compatibility guarantees.
BundleFactory creates output bundles to be used by transform evaluators.
Args:
stacked: whether or not to stack the WindowedValues within the bundle
in case consecutive ones share the same timestamp and windows.
DirectRunnerOptions.direct_runner_use_stacked_bundle controls this option.
|
github-repos
|
def compute_output_shape(self, input_shape):
input_shape = tf.TensorShape(input_shape)
input_shape = input_shape.with_rank_at_least(2)
if tf.compat.dimension_value(input_shape[-1]) is None:
raise ValueError(
'The innermost dimension of `input_shape` must be defined, '
'but saw: {}'.format(input_shape))
return input_shape[:-1].concatenate(self.units)
|
Computes the output shape of the layer.
Args:
input_shape: Shape tuple (tuple of integers) or list of shape tuples
(one per output tensor of the layer). Shape tuples can include None for
free dimensions, instead of an integer.
Returns:
output_shape: A tuple representing the output shape.
Raises:
ValueError: If innermost dimension of `input_shape` is not defined.
|
juraj-google-style
|
def experimental_local_results(self, value):
return super(OneDeviceStrategy, self).experimental_local_results(value)
|
Returns the list of all local per-replica values contained in `value`.
In `OneDeviceStrategy`, the `value` is always expected to be a single
value, so the result is just the value in a tuple.
Args:
value: A value returned by `experimental_run()`, `run()`,
`extended.call_for_each_replica()`, or a variable created in `scope`.
Returns:
A tuple of values contained in `value`. If `value` represents a single
value, this returns `(value,).`
|
github-repos
|
def list(self, pattern='*'):
if (self._group_dict is None):
self._group_dict = collections.OrderedDict(((group.id, group) for group in self._client.list_groups()))
return [group for group in self._group_dict.values() if fnmatch.fnmatch(group.display_name, pattern)]
|
Returns a list of groups that match the filters.
Args:
pattern: An optional pattern to filter the groups based on their display
name. This can include Unix shell-style wildcards. E.g.
``"Production*"``.
Returns:
A list of Group objects that match the filters.
|
codesearchnet
|
def one_step(self, current_state, previous_kernel_results):
with tf.compat.v1.name_scope(name=mcmc_util.make_name(self.name, 'slice', 'one_step'), values=[self.step_size, self.max_doublings, self._seed_stream, current_state, previous_kernel_results.target_log_prob]):
with tf.compat.v1.name_scope('initialize'):
[current_state_parts, step_sizes, current_target_log_prob] = _prepare_args(self.target_log_prob_fn, current_state, self.step_size, previous_kernel_results.target_log_prob, maybe_expand=True)
max_doublings = tf.convert_to_tensor(value=self.max_doublings, dtype=tf.int32, name='max_doublings')
independent_chain_ndims = distribution_util.prefer_static_rank(current_target_log_prob)
[next_state_parts, next_target_log_prob, bounds_satisfied, direction, upper_bounds, lower_bounds] = _sample_next(self.target_log_prob_fn, current_state_parts, step_sizes, max_doublings, current_target_log_prob, independent_chain_ndims, seed=self._seed_stream())
def maybe_flatten(x):
return (x if mcmc_util.is_list_like(current_state) else x[0])
return [maybe_flatten(next_state_parts), SliceSamplerKernelResults(target_log_prob=next_target_log_prob, bounds_satisfied=bounds_satisfied, direction=direction, upper_bounds=upper_bounds, lower_bounds=lower_bounds)]
|
Runs one iteration of Slice Sampler.
Args:
current_state: `Tensor` or Python `list` of `Tensor`s representing the
current state(s) of the Markov chain(s). The first `r` dimensions
index independent chains,
`r = tf.rank(target_log_prob_fn(*current_state))`.
previous_kernel_results: `collections.namedtuple` containing `Tensor`s
representing values from previous calls to this function (or from the
`bootstrap_results` function.)
Returns:
next_state: Tensor or Python list of `Tensor`s representing the state(s)
of the Markov chain(s) after taking exactly one step. Has same type and
shape as `current_state`.
kernel_results: `collections.namedtuple` of internal calculations used to
advance the chain.
Raises:
ValueError: if there isn't one `step_size` or a list with same length as
`current_state`.
TypeError: if `not target_log_prob.dtype.is_floating`.
|
codesearchnet
|
def _base_query(self, session):
return session.query(ORMTargetMarker).filter((ORMTargetMarker.name == self.name)).filter((ORMTargetMarker.params == self.params))
|
Base query for a target.
Args:
session: database session to query in
|
codesearchnet
|
def get_scaled_loss(self, loss):
if callable(loss):
def new_loss():
loss_val = loss()
return loss_val * math_ops.cast(self.loss_scale, loss_val.dtype)
return new_loss
else:
return loss * math_ops.cast(self.loss_scale, loss.dtype)
|
Scales the loss by the loss scale.
This method is only needed if you compute gradients manually, e.g. with
`tf.GradientTape`. In that case, call this method to scale the loss before
passing the loss to `tf.GradientTape`. If you use
`LossScaleOptimizer.minimize` or `LossScaleOptimizer.get_gradients`, loss
scaling is automatically applied and this method is unneeded.
If this method is called, `get_unscaled_gradients` should also be called.
See the `tf.keras.mixed_precision.LossScaleOptimizer` doc for
an example.
Args:
loss: The loss, which will be multiplied by the loss scale. Can either be
a tensor or a callable returning a tensor.
Returns:
`loss` multiplied by `LossScaleOptimizer.loss_scale`.
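Example:
A minimal sketch of the manual-gradient pattern described above; ``opt``, ``loss_fn`` and ``model`` are assumed placeholders:
>>> with tf.GradientTape() as tape:
...     scaled_loss = opt.get_scaled_loss(loss_fn())
>>> scaled_grads = tape.gradient(scaled_loss, model.trainable_variables)
>>> grads = opt.get_unscaled_gradients(scaled_grads)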
|
github-repos
|
def _GenOpenApiSpecCallback(args, openapi_func=_GenOpenApiSpec):
openapi_paths = openapi_func(args.service, args.output,
hostname=args.hostname,
application_path=args.application,
x_google_api_name=args.x_google_api_name)
for openapi_path in openapi_paths:
print 'OpenAPI spec written to %s' % openapi_path
|
Generate OpenAPI (Swagger) specs to files.
Args:
args: An argparse.Namespace object to extract parameters from
openapi_func: A function that generates OpenAPI specs and stores them to
files, accepting a list of service names and an output directory.
|
juraj-google-style
|
def list_workers(config, *, filter_by_queues=None):
celery_app = create_app(config)
worker_stats = celery_app.control.inspect().stats()
queue_stats = celery_app.control.inspect().active_queues()
if (worker_stats is None):
return []
workers = []
for (name, w_stat) in worker_stats.items():
queues = [QueueStats.from_celery(q_stat) for q_stat in queue_stats[name]]
add_worker = (filter_by_queues is None)
if (not add_worker):
for queue in queues:
if (queue.name in filter_by_queues):
add_worker = True
break
if add_worker:
workers.append(WorkerStats.from_celery(name, w_stat, queues))
return workers
|
Return a list of all available workers.
Args:
config (Config): Reference to the configuration object from which the
settings are retrieved.
filter_by_queues (list): Restrict the returned workers to workers that listen to
at least one of the queue names in this list.
Returns:
list: A list of WorkerStats objects.
|
codesearchnet
|
def destroy(self):
if (not self._is_live()):
raise RuntimeError('A unit must be submitted to fleet before it can be destroyed.')
return self._client.destroy_unit(self.name)
|
Remove a unit from the fleet cluster
Returns:
True: The unit was removed
Raises:
fleet.v1.errors.APIError: Fleet returned a response code >= 400
|
codesearchnet
|
def _tp__get_typed_properties(self):
try:
return tuple((getattr(self, p) for p in self._tp__typed_properties))
except AttributeError:
raise NotImplementedError
|
Return a tuple of typed attrs that can be used for comparisons.
Raises:
NotImplementedError: Raised if this class was mixed into a class
that was not created by _AnnotatedObjectMeta.
|
codesearchnet
|
def create(self, master_course_id, coach_email, max_students_allowed, title, modules=None):
payload = {'master_course_id': master_course_id, 'coach_email': coach_email, 'max_students_allowed': max_students_allowed, 'display_name': title}
if (modules is not None):
payload['course_modules'] = modules
resp = self.requester.post(parse.urljoin(self.base_url, '/api/ccx/v0/ccx/'), json=payload)
try:
resp.raise_for_status()
except:
log.error(resp.json())
raise
return resp.json()['ccx_course_id']
|
Creates a CCX
Args:
master_course_id (str): edx course id of the master course
coach_email (str): email of the user to make a coach. This user must exist on edx.
max_students_allowed (int): Maximum number of students to allow in this ccx.
title (str): Title of the CCX to be created
modules (optional list): A list of locator_ids (str) for the modules to enable.
Returns:
ccx_id (str): The ID of the ccx.
|
codesearchnet
|
def parse_name(name):
bucket = None
obj = None
m = re.match(_STORAGE_NAME, name)
if m:
bucket = m.group(1)
obj = m.group(2)
if obj is not None:
obj = obj[1:]
else:
m = re.match('(' + _OBJECT_NAME + ')', name)
if m:
obj = m.group(1)
return bucket, obj
|
Parse a gs:// URL into the bucket and object names.
Args:
name: a GCS URL of the form gs://bucket or gs://bucket/object
Returns:
The bucket name (with no gs:// prefix), and the object name if present. If the name
could not be parsed returns None for both.
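Example:
Assuming the module-level regexes match the gs:// forms described above:
>>> parse_name('gs://my-bucket/path/to/obj')
('my-bucket', 'path/to/obj')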
|
juraj-google-style
|
def start_listing(self, request: Request) -> ListingResponse:
if (self._session_state != SessionState.ready):
raise RuntimeError('Session not ready')
response = ListingResponse()
(yield from self._prepare_fetch(request, response))
(yield from self._open_data_stream())
mlsd_command = Command('MLSD', self._request.file_path)
list_command = Command('LIST', self._request.file_path)
try:
(yield from self._begin_stream(mlsd_command))
self._listing_type = 'mlsd'
except FTPServerError as error:
if (error.reply_code in (ReplyCodes.syntax_error_command_unrecognized, ReplyCodes.command_not_implemented)):
self._listing_type = None
else:
raise
if (not self._listing_type):
(yield from self._begin_stream(list_command))
self._listing_type = 'list'
_logger.debug('Listing type is %s', self._listing_type)
self._session_state = SessionState.directory_request_sent
return response
|
Fetch a file listing.
Args:
request: Request.
Returns:
A listing response populated with the initial data connection
reply.
Once the response is received, call :meth:`download_listing`.
Coroutine.
|
codesearchnet
|
def get_version_string(version):
version_len = len(version)
if version_len == 3:
version_string = '%d.%d.%d' % version
elif version_len == 4:
version_string = '%d.%d.%d-%s' % version
else:
raise Exception(
'Version tuple is non-semver-compliant {} length!'.format(version_len)
)
return version_string
|
Translate a version tuple into a string.
Specify the __version__ as a tuple for more precise comparisons, and
translate it to __version_string__ for when that's needed.
This function exists primarily for easier unit testing.
Args:
version (Tuple[int, int, int, str]): three ints and an optional string.
Returns:
version_string (str): the tuple translated into a string per semver.org
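Examples:
Both tuple shapes, per the formats above:
>>> get_version_string((1, 2, 3))
'1.2.3'
>>> get_version_string((1, 2, 3, 'rc1'))
'1.2.3-rc1'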
|
juraj-google-style
|
def _summary(self, name, tensor):
if tensor.shape.ndims == 0:
return tf.summary.scalar(name, tensor)
else:
return tf.summary.histogram(name, tensor)
|
Create a scalar or histogram summary matching the rank of the tensor.
Args:
name: Name for the summary.
tensor: Tensor to summarize.
Returns:
Summary tensor.
|
juraj-google-style
|
def __init__(self, debug=False):
self.debug = debug
facility = logging.handlers.SysLogHandler.LOG_DAEMON
self.logger = logger.Logger(
name='instance-setup', debug=self.debug, facility=facility)
self.watcher = metadata_watcher.MetadataWatcher(logger=self.logger)
self.metadata_dict = None
self.instance_config = instance_config.InstanceConfig(logger=self.logger)
if self.instance_config.GetOptionBool('InstanceSetup', 'network_enabled'):
self.metadata_dict = self.watcher.GetMetadata()
instance_config_metadata = self._GetInstanceConfig()
self.instance_config = instance_config.InstanceConfig(
logger=self.logger, instance_config_metadata=instance_config_metadata)
if self.instance_config.GetOptionBool('InstanceSetup', 'set_host_keys'):
host_key_types = self.instance_config.GetOptionString(
'InstanceSetup', 'host_key_types')
self._SetSshHostKeys(host_key_types=host_key_types)
if self.instance_config.GetOptionBool('InstanceSetup', 'set_boto_config'):
self._SetupBotoConfig()
if self.instance_config.GetOptionBool(
'InstanceSetup', 'optimize_local_ssd'):
self._RunScript('google_optimize_local_ssd')
if self.instance_config.GetOptionBool('InstanceSetup', 'set_multiqueue'):
self._RunScript('google_set_multiqueue')
try:
self.instance_config.WriteConfig()
except (IOError, OSError) as e:
self.logger.warning(str(e))
|
Constructor.
Args:
debug: bool, True if debug output should write to the console.
|
juraj-google-style
|
def _normalize_angle(angle, range, step):
while (angle <= range[0]):
angle += step
while (angle >= range[1]):
angle -= step
return angle
|
Finds an angle that matches the given one modulo step.
Increments and decrements the given value with a given step.
Args:
angle: the angle to normalize.
range: a 2-tuple of min and max target values.
step: tuning step.
Returns:
Normalized value within a given range.
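Example:
Folding 370 into the range (-180, 180) with a 360-degree step:
>>> _normalize_angle(370, (-180, 180), 360)
10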
|
codesearchnet
|
def __rfloordiv__(self, other):
other = as_dimension(other)
if self._value is None or other.value is None:
return Dimension(None)
else:
return Dimension(other.value // self._value)
|
Returns the quotient of `other` and `self` rounded down.
Args:
other: Another Dimension, or a value accepted by `as_dimension`.
Returns:
A `Dimension` whose value is the integer quotient of `other` and `self`.
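Example:
Dividing a plain integer by a Dimension invokes this method:
>>> 16 // Dimension(5)
Dimension(3)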
|
juraj-google-style
|
def to_tensor(pic):
if (not (_is_pil_image(pic) or _is_numpy_image(pic))):
raise TypeError('pic should be PIL Image or ndarray. Got {}'.format(type(pic)))
if isinstance(pic, np.ndarray):
if (pic.ndim == 2):
pic = pic[:, :, None]
img = torch.from_numpy(pic.transpose((2, 0, 1)))
if isinstance(img, torch.ByteTensor):
return img.float().div(255)
else:
return img
if ((accimage is not None) and isinstance(pic, accimage.Image)):
nppic = np.zeros([pic.channels, pic.height, pic.width], dtype=np.float32)
pic.copyto(nppic)
return torch.from_numpy(nppic)
if (pic.mode == 'I'):
img = torch.from_numpy(np.array(pic, np.int32, copy=False))
elif (pic.mode == 'I;16'):
img = torch.from_numpy(np.array(pic, np.int16, copy=False))
elif (pic.mode == 'F'):
img = torch.from_numpy(np.array(pic, np.float32, copy=False))
elif (pic.mode == '1'):
img = (255 * torch.from_numpy(np.array(pic, np.uint8, copy=False)))
else:
img = torch.ByteTensor(torch.ByteStorage.from_buffer(pic.tobytes()))
if (pic.mode == 'YCbCr'):
nchannel = 3
elif (pic.mode == 'I;16'):
nchannel = 1
else:
nchannel = len(pic.mode)
img = img.view(pic.size[1], pic.size[0], nchannel)
img = img.transpose(0, 1).transpose(0, 2).contiguous()
if isinstance(img, torch.ByteTensor):
return img.float().div(255)
else:
return img
|
Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor.
See ``ToTensor`` for more details.
Args:
pic (PIL Image or numpy.ndarray): Image to be converted to tensor.
Returns:
Tensor: Converted image.
|
codesearchnet
|
def residual_block(cnn, depth, stride, pre_activation):
input_layer = cnn.top_layer
in_size = cnn.top_size
if (in_size != depth):
shortcut = cnn.apool(1, 1, stride, stride, input_layer=input_layer, num_channels_in=in_size)
padding = ((depth - in_size) // 2)
if (cnn.channel_pos == 'channels_last'):
shortcut = tf.pad(shortcut, [[0, 0], [0, 0], [0, 0], [padding, padding]])
else:
shortcut = tf.pad(shortcut, [[0, 0], [padding, padding], [0, 0], [0, 0]])
else:
shortcut = input_layer
if pre_activation:
res = cnn.batch_norm(input_layer)
res = tf.nn.relu(res)
else:
res = input_layer
cnn.conv(depth, 3, 3, stride, stride, input_layer=res, num_channels_in=in_size, use_batch_norm=True, bias=None)
if pre_activation:
res = cnn.conv(depth, 3, 3, 1, 1, activation=None, use_batch_norm=False, bias=None)
output = (shortcut + res)
else:
res = cnn.conv(depth, 3, 3, 1, 1, activation=None, use_batch_norm=True, bias=None)
output = tf.nn.relu((shortcut + res))
cnn.top_layer = output
cnn.top_size = depth
|
Residual block with identity short-cut.
Args:
cnn: the network to append residual blocks.
depth: the number of output filters for this residual block.
stride: Stride used in the first layer of the residual block.
pre_activation: use pre_activation structure or not.
|
codesearchnet
|
def __init__(self, key_uri_supplier):
self._key_uri_supplier = key_uri_supplier
self._jwks_cache = cache.make_region().configure(
u"dogpile.cache.memory", expiration_time=datetime.timedelta(minutes=5))
|
Constructs an instance of JwksSupplier.
Args:
key_uri_supplier: a KeyUriSupplier instance that returns the `jwks_uri`
based on the given issuer.
|
juraj-google-style
|
def MakePmfFromDict(d, name=''):
pmf = Pmf(d, name)
pmf.Normalize()
return pmf
|
Makes a PMF from a map from values to probabilities.
Args:
d: dictionary that maps values to probabilities
name: string name for this PMF
Returns:
Pmf object
|
codesearchnet
|
def __init__(self, env, actions):
super(BinarySpaceToDiscreteSpaceEnv, self).__init__(env)
self.action_space = gym.spaces.Discrete(len(actions))
self._action_map = {}
self._action_meanings = {}
for action, button_list in enumerate(actions):
byte_action = 0
for button in button_list:
byte_action |= self._button_map[button]
self._action_map[action] = byte_action
self._action_meanings[action] = ' '.join(button_list)
|
Initialize a new binary to discrete action space wrapper.
Args:
env (gym.Env): the environment to wrap
actions (list): an ordered list of actions (as lists of buttons).
The index of each button list is its discrete coded value
Returns:
None
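Example:
A sketch with hypothetical button names; the valid names come from the wrapped environment's ``_button_map``:
>>> actions = [['NOOP'], ['right'], ['right', 'A']]
>>> env = BinarySpaceToDiscreteSpaceEnv(raw_env, actions)
>>> env.action_space
Discrete(3)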
|
juraj-google-style
|
def relative_probability(self, l1, l2, c1, c2):
if self.site_energies:
site_delta_E = (self.site_energies[l2] - self.site_energies[l1])
else:
site_delta_E = 0.0
if self.nn_energy:
delta_nn = ((c2 - c1) - 1)
site_delta_E += (delta_nn * self.nn_energy)
return metropolis(site_delta_E)
|
The relative probability for a jump between two sites with specific site types and coordination numbers.
Args:
l1 (Str): Site label for the initial site.
l2 (Str): Site label for the final site.
c1 (Int): Coordination number for the initial site.
c2 (Int): Coordination number for the final site.
Returns:
(Float): The relative probability of this jump occurring.
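Notes:
``metropolis`` is not shown in this row; a common definition, assuming site_delta_E is expressed in units of kT, is:
>>> import math
>>> def metropolis(delta_E):
...     return min(1.0, math.exp(-delta_E))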
|
codesearchnet
|
def total_purge_developed_repo(repodir):
assert (repodir is not None)
import utool as ut
import os
repo = ut.util_git.Repo(dpath=repodir)
user = os.environ['USER']
fmtdict = dict(user=user, modname=repo.modname, reponame=repo.reponame, dpath=repo.dpath, global_site_pkgs=ut.get_global_dist_packages_dir(), local_site_pkgs=ut.get_local_dist_packages_dir(), venv_site_pkgs=ut.get_site_packages_dir())
commands = [_.format(**fmtdict) for _ in ['pip uninstall {modname}', 'sudo -H pip uninstall {modname}', 'sudo pip uninstall {modname}', 'easy_install -m {modname}', 'cd {dpath} && python setup.py develop --uninstall', 'sudo chown -R {user}:{user} {dpath}']]
print('Normal uninstall commands')
print('\n'.join(commands))
possible_link_paths = [_.format(**fmtdict) for _ in ['{dpath}/{modname}.egg-info', '{dpath}/build', '{venv_site_pkgs}/{reponame}.egg-info', '{local_site_pkgs}/{reponame}.egg-info', '{venv_site_pkgs}/{reponame}.egg-info']]
from os.path import exists, basename
existing_link_paths = [path for path in possible_link_paths]
print('# Remove existing link paths')
for path in existing_link_paths:
if exists(path):
if (ut.get_file_info(path)['owner'] != user):
print('sudo /bin/rm -rf {path}'.format(path=path))
else:
print('/bin/rm -rf {path}'.format(path=path))
print('# Check easy-install.pth files')
easyinstall_paths = [_.format(**fmtdict) for _ in ['{venv_site_pkgs}/easy-install.pth', '{local_site_pkgs}/easy-install.pth', '{venv_site_pkgs}/easy-install.pth']]
for path in easyinstall_paths:
if exists(path):
easy_install_list = ut.readfrom(path, verbose=False).strip().split('\n')
easy_install_list_ = [basename(p) for p in easy_install_list]
index1 = ut.listfind(easy_install_list_, repo.reponame)
index2 = ut.listfind(easy_install_list_, repo.modname)
if ((index1 is not None) or (index2 is not None)):
print(('Found at index1=%r, index=%r' % (index1, index2)))
if (ut.get_file_info(path)['owner'] != user):
print('sudo gvim {path}'.format(path=path))
else:
print('gvim {path}'.format(path=path))
checkcmds = [_.format(**fmtdict) for _ in ['python -c "import {modname}; print({modname}.__file__)"']]
import sys
assert (repo.modname not in sys.modules)
print('# Verify the module is no longer importable')
for cmd in checkcmds:
print(cmd)
|
r"""
Outputs commands to help purge a repo
Args:
repodir (str): path to developed repository
CommandLine:
python -m utool.util_sysreq total_purge_installed_repo --show
Ignore:
repodir = ut.truepath('~/code/Lasagne')
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_sysreq import * # NOQA
>>> import utool as ut
>>> repodir = ut.get_argval('--repodir', default=None)
>>> result = total_purge_developed_repo(repodir)
|
codesearchnet
|
def update_subscription(self, *, subscription_id, credit_card_token):
payload = {'creditCardToken': credit_card_token}
fmt = 'subscriptions/{}'.format(subscription_id)
return self.client._put((self.url + fmt), json=payload, headers=self.get_headers())
|
Update information associated with the specified subscription. At the moment it is only possible
to update the token of the credit card to which the charge of the subscription is made.
Args:
subscription_id: Identification of the subscription.
credit_card_token:
Returns:
|
codesearchnet
|
def add_event(self, event):
if not self._closed:
event_pb = event.SerializeToString()
self._session.run(self._add_event_op, feed_dict={self._event_placeholder: event_pb})
|
Adds an event to the event file.
Args:
event: An `Event` protocol buffer.
|
github-repos
|
def get_canonical_import(import_set):
import_list = sorted(import_set, key=lambda imp_and_priority: (-imp_and_priority[1], imp_and_priority[0]))
return import_list[0][0]
|
Obtain one single import from a set of possible sources of a symbol.
One symbol might come from multiple places as it is being imported and
reexported. To simplify API changes, we always use the same import for the
same module, and give preference based on higher priority and alphabetical
ordering.
Args:
import_set: (set) Imports providing the same symbol. This is a set of tuples
in the form (import, priority). We want to pick an import with highest
priority.
Returns:
A module name to import
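Example:
Higher priority wins, with alphabetical order breaking ties:
>>> get_canonical_import({('tf.compat.v1.foo', 1), ('tf.foo', 2)})
'tf.foo'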
|
github-repos
|
def run_query_series(queries, conn):
results = []
for item in queries:
qry = item
kwargs = {}
if isinstance(item, tuple):
qry = item[0]
kwargs = item[1]
result = conn.update_query(qry, **kwargs)
results.append(result)
return results
|
Iterates through a list of queries and runs them through the connection
Args:
-----
queries: list of strings or tuples containing (query_string, kwargs)
conn: the triplestore connection to use
|
juraj-google-style
|
def GetMapLocation(self):
raise NotImplementedError('%s must implement this method!' % self.__class__.__name__)
|
Return the location of the Map in this cache.
This is used by automount maps so far, and must be implemented in the
child class only if it is to support automount maps.
Raises:
NotImplementedError: We should have been implemented by child.
|
github-repos
|
def get_sequence_sliding_window_properties(self, scale, window, representative_only=True):
if representative_only:
if not self.representative_sequence:
log.warning('{}: no representative sequence set, cannot get sequence properties'.format(self.id))
return
if not self.representative_sequence.seq:
log.warning('{}: representative sequence {} set, but no sequence stored. '
'Cannot get sequence properties.'.format(self.id, self.representative_sequence.id))
return
self.representative_sequence.get_sliding_window_properties(scale=scale, window=window)
if not representative_only:
for s in self.sequences:
if not s.seq:
log.warning('{}: no sequence stored. '
'Cannot get sequence properties.'.format(s.id))
continue
else:
s.get_sliding_window_properties(scale=scale, window=window)
|
Run Biopython ProteinAnalysis with a sliding window to calculate a given property.
Results are stored in the protein's respective SeqProp objects at ``.letter_annotations``
Args:
scale (str): Scale name
window (int): Sliding window size
representative_only (bool): If analysis should only be run on the representative sequence
|
juraj-google-style
|
def reserve_ids(self, token, channel, quantity):
quantity = str(quantity)
url = self.url("{}/{}/reserve/{}/".format(token, channel, quantity))
req = self.remote_utils.get_url(url)
if req.status_code != 200:
raise RemoteDataNotFoundError('Invalid req: ' + str(req.status_code))
out = req.json()
return [out[0] + i for i in range(out[1])]
|
Requests a list of next-available-IDs from the server.
Arguments:
token (str): Project token containing the channel
channel (str): Channel to reserve IDs in
quantity (int): The number of IDs to reserve
Returns:
int[quantity]: List of IDs you've been granted
|
juraj-google-style
|
def match_urls_to_resources(self, url_values):
valid_values = {}
for resource in self.Meta.related_resources:
for k, v in url_values.items():
resource_url = resource.get_resource_url(
resource, resource.Meta.base_url)
if isinstance(v, list):
if all([resource_url in i for i in v]):
self.set_related_method(resource, v)
valid_values[k] = v
elif resource_url in v:
self.set_related_method(resource, v)
valid_values[k] = v
return valid_values
|
For the list of valid URLs, try and match them up
to resources in the related_resources attribute.
Args:
url_values: A dictionary of keys and URL strings that
could be related resources.
Returns:
valid_values: The values that are valid
|
juraj-google-style
|
def Get(self, request, global_params=None):
config = self.GetMethodConfig('Get')
return self._RunMethod(config, request, global_params=global_params)
|
Gets the specified routine resource by routine ID.
Args:
request: (BigqueryRoutinesGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Routine) The response message.
|
github-repos
|
def SetUseSSL(self, use_ssl):
self._use_ssl = use_ssl
logger.debug('Elasticsearch use_ssl: {0!s}'.format(use_ssl))
|
Sets the use of ssl.
Args:
use_ssl (bool): enforces use of ssl.
|
juraj-google-style
|
def set_flat(self, new_weights):
self._check_sess()
shapes = [v.get_shape().as_list() for v in self.variables.values()]
arrays = unflatten(new_weights, shapes)
placeholders = [
self.placeholders[k] for k, v in self.variables.items()
]
self.sess.run(
list(self.assignment_nodes.values()),
feed_dict=dict(zip(placeholders, arrays)))
|
Sets the weights to new_weights, converting from a flat array.
Note:
You can only set all weights in the network using this function,
i.e., the length of the array must match get_flat_size.
Args:
new_weights (np.ndarray): Flat array containing weights.
|
juraj-google-style
|
def check_required_fields(self, ignore_fields=list(), allow_no_resources=False):
if self.is_requestable():
self._check_required_fields('dataset-requestable', ignore_fields)
else:
self._check_required_fields('dataset', ignore_fields)
if ((len(self.resources) == 0) and (not allow_no_resources)):
raise HDXError('There are no resources! Please add at least one resource!')
for resource in self.resources:
ignore_fields = ['package_id']
resource.check_required_fields(ignore_fields=ignore_fields)
|
Check that metadata for dataset and its resources is complete. The parameter ignore_fields
should be set if required to any fields that should be ignored for the particular operation.
Args:
ignore_fields (List[str]): Fields to ignore. Default is [].
allow_no_resources (bool): Whether to allow no resources. Defaults to False.
Returns:
None
|
codesearchnet
|
def get_colour_handler(extranames: List[str]=None, with_process_id: bool=False, with_thread_id: bool=False, stream: TextIO=None) -> logging.StreamHandler:
fmt = '%(white)s%(asctime)s.%(msecs)03d'
if (with_process_id or with_thread_id):
procinfo = []
if with_process_id:
procinfo.append('p%(process)d')
if with_thread_id:
procinfo.append('t%(thread)d')
fmt += ' [{}]'.format('.'.join(procinfo))
extras = ((':' + ':'.join(extranames)) if extranames else '')
fmt += ' %(name)s{extras}:%(levelname)s: '.format(extras=extras)
fmt += '%(reset)s%(log_color)s%(message)s'
cf = ColoredFormatter(fmt, datefmt=LOG_DATEFMT, reset=True, log_colors=LOG_COLORS, secondary_log_colors={}, style='%')
ch = logging.StreamHandler(stream)
ch.setFormatter(cf)
return ch
|
Gets a colour log handler using a standard format.
Args:
extranames: additional names to append to the logger's name
with_process_id: include the process ID in the logger's name?
with_thread_id: include the thread ID in the logger's name?
stream: ``TextIO`` stream to send log output to
Returns:
the :class:`logging.StreamHandler`
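Example:
Attaching the handler to a named logger:
>>> import logging
>>> handler = get_colour_handler(extranames=['worker'], with_process_id=True)
>>> logging.getLogger('myapp').addHandler(handler)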
|
codesearchnet
|
def grid_destroy_from_name(job_name):
jobs = grid_reload_from_name(job_name)
for job in jobs:
job.delete()
logger.info(('Killing the job (%s, %s)' % (job.site, job.uid)))
|
Destroy all the jobs with a given name.
Args:
job_name (str): the job name
|
codesearchnet
|
def __init__(self, message, error_list, launched_job):
super(JobError, self).__init__(message)
self.message = message
self.error_list = error_list
self.launched_job = launched_job
|
Create a JobError to indicate something went wrong.
Args:
message: user-friendly message
error_list: what went wrong
launched_job: if the job is launched, but has errors in
"--wait"ing on the tasks.
|
juraj-google-style
|
def handle_worker_messages(self, timeout):
msgs = self.messaging_backend.popn(self.incoming_mailbox, n=20)
for msg in msgs:
self.handle_single_message(msg)
|
Read messages that are placed in self.incoming_mailbox,
and then update the job states corresponding to each message.
Args:
timeout: How long to wait for an incoming message, if the mailbox is empty right now.
Returns: None
|
juraj-google-style
|
def condensed(network, state):
result = []
covered_nodes = set()
for c in reversed(sorted(complexes(network, state))):
if not any(n in covered_nodes for n in c.subsystem.node_indices):
result.append(c)
covered_nodes = covered_nodes | set(c.subsystem.node_indices)
return result
|
Return a list of maximal non-overlapping complexes.
Args:
network (Network): The |Network| of interest.
state (tuple[int]): The state of the network (a binary tuple).
Returns:
list[SystemIrreducibilityAnalysis]: A list of |SIA| for non-overlapping
complexes with maximal |big_phi| values.
|
juraj-google-style
|
def enable(self, key_id, **kwargs):
path = ('%s/%s/enable' % (self.path, key_id))
self.gitlab.http_post(path, **kwargs)
|
Enable a deploy key for a project.
Args:
key_id (int): The ID of the key to enable
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabProjectDeployKeyError: If the key could not be enabled
|
codesearchnet
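A hedged usage sketch with python-gitlab; the URL, token, and IDs below are placeholders:

import gitlab

gl = gitlab.Gitlab("https://gitlab.example.com", private_token="<token>")
project = gl.projects.get(42)
# Enable an existing deploy key (ID 123) for this project.
project.keys.enable(123)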
|
def create_error_response(code, message, status=status.BAD_REQUEST):
errors = dict(code=code, message=message)
return Response(errors=errors, status=status)
|
Create a fail response.
Args:
code (str): the code of the error. The title should be lowercase and
underscore separated.
message (dict, list, str): the message of the error.
This can be a list, dictionary or simple string.
status (int): the status code. Defaults to 400.
Returns:
Response: the response with the error. The format of the error is the
following: code and message. The code could be `user_error` or
`internal_error`. The message contains either a string, or a list
or a dictionary. If not specified, the status will be 400.
|
codesearchnet
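For instance (assuming the surrounding framework's Response class is importable):

resp = create_error_response("user_error", "email address is invalid")
# -> Response with errors={'code': 'user_error', 'message': 'email address is invalid'} and HTTP 400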
|
def __init__(self, use_variable):
super().__init__()
w_val = np.random.randn(128, 32).astype('f4')
if use_variable:
self.w = variables.Variable(w_val)
else:
self.w = w_val
|
Initializes a GatherModel.
Args:
use_variable: If True, creates a variable for weight.
|
github-repos
|
def get_scene(self, label: str) -> Scene:
return self._get_resource(label, self._scenes, "scene")
|
Gets a scene by label
Args:
label (str): The label for the scene to fetch
Returns:
Scene instance
|
juraj-google-style
|
def default_compute_objective(metrics: dict[str, float]) -> float:
metrics = copy.deepcopy(metrics)
loss = metrics.pop('eval_loss', None)
_ = metrics.pop('epoch', None)
speed_metrics = [m for m in metrics.keys() if m.endswith('_runtime') or m.endswith('_per_second') or m.endswith('_compilation_time')]
for sm in speed_metrics:
_ = metrics.pop(sm, None)
return loss if len(metrics) == 0 else sum(metrics.values())
|
The default objective to maximize/minimize when doing a hyperparameter search. It is the evaluation loss if no
metrics are provided to the [`Trainer`], the sum of all metrics otherwise.
Args:
metrics (`Dict[str, float]`): The metrics returned by the evaluate method.
Return:
`float`: The objective to minimize or maximize
|
github-repos
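A worked example: the epoch and speed metrics are stripped first, so if only eval_loss remains the loss itself is returned; otherwise the sum of the remaining metrics is:

metrics = {"eval_loss": 0.25, "epoch": 3.0, "eval_runtime": 12.1,
           "eval_samples_per_second": 84.0}
print(default_compute_objective(metrics))  # 0.25

# With extra metrics present, their sum becomes the objective instead:
print(default_compute_objective({"eval_loss": 0.25, "eval_accuracy": 0.9}))  # 0.9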
|
def stop(self, wait=True):
for context in self._applications.values():
context.run_unload_hook()
self._stats_job.stop()
if (self._mem_job is not None):
self._mem_job.stop()
self._cleanup_job.stop()
if (self._ping_job is not None):
self._ping_job.stop()
self._clients.clear()
|
Stop the Bokeh Server application.
Args:
wait (bool): whether to wait for orderly cleanup (default: True)
Returns:
None
|
codesearchnet
|
def __init__(self, host: str, port: int, command: Optional[str]=None, batch_size: int=100):
self._host = host
self._port = port
self._command = command
self._batch_size = batch_size
|
Args:
host (str): The redis host
port (int): The redis port
command (str): command to be executed with redis client
batch_size(int): Number of key, values pairs to write at once
Returns:
:class:`~apache_beam.transforms.ptransform.PTransform`
|
github-repos
|
def _update_service_current_state(service: ServiceState):
LOG.debug('Setting current state from target state for %s', service.id)
service.update_current_state(service.target_state)
|
Update the current state of a service.
Updates the current state of services after their target state has changed.
Args:
service (ServiceState): Service state object to update
|
codesearchnet
|
def _collect_leaf_level_keys(cross):
leaf_level_keys = []
for k in cross.keys:
if isinstance(k, CrossedColumn):
leaf_level_keys.extend(_collect_leaf_level_keys(k))
else:
leaf_level_keys.append(k)
return leaf_level_keys
|
Collects base keys by expanding all nested crosses.
Args:
cross: A `CrossedColumn`.
Returns:
A list of strings or `CategoricalColumn` instances.
|
github-repos
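The recursion flattens arbitrarily nested crosses into their base keys. A self-contained sketch using a minimal stand-in class (hypothetical, for illustration only):

class CrossedColumn:
    def __init__(self, keys):
        self.keys = keys

def _collect_leaf_level_keys(cross):
    leaf_level_keys = []
    for k in cross.keys:
        if isinstance(k, CrossedColumn):
            leaf_level_keys.extend(_collect_leaf_level_keys(k))
        else:
            leaf_level_keys.append(k)
    return leaf_level_keys

nested = CrossedColumn(["a", CrossedColumn(["b", CrossedColumn(["c", "d"])])])
print(_collect_leaf_level_keys(nested))  # ['a', 'b', 'c', 'd']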
|
def _get_or_make_slot(self, var, val, slot_name, op_name):
named_slots = self._slot_dict(slot_name)
if _var_key(var) not in named_slots:
new_slot_variable = slot_creator.create_slot(var, val, op_name, copy_xla_sharding=True)
self._restore_slot_variable(slot_name=slot_name, variable=var, slot_variable=new_slot_variable)
named_slots[_var_key(var)] = new_slot_variable
return named_slots[_var_key(var)]
|
Find or create a slot for a variable.
Args:
var: A `Variable` object.
val: A `Tensor`. The initial value of the slot.
slot_name: Name for the slot.
op_name: Name to use when scoping the Variable that
needs to be created for the slot.
Returns:
A `Variable` object.
|
github-repos
|
def parse_responses(self):
response_dict = {'multicast_ids': [], 'success': 0, 'failure': 0, 'canonical_ids': 0, 'results': [], 'topic_message_id': None}
for response in self.send_request_responses:
if (response.status_code == 200):
if (('content-length' in response.headers) and (int(response.headers['content-length']) <= 0)):
raise FCMServerError('FCM server connection error, the response is empty')
else:
parsed_response = response.json()
multicast_id = parsed_response.get('multicast_id', None)
success = parsed_response.get('success', 0)
failure = parsed_response.get('failure', 0)
canonical_ids = parsed_response.get('canonical_ids', 0)
results = parsed_response.get('results', [])
message_id = parsed_response.get('message_id', None)
if message_id:
success = 1
if multicast_id:
response_dict['multicast_ids'].append(multicast_id)
response_dict['success'] += success
response_dict['failure'] += failure
response_dict['canonical_ids'] += canonical_ids
response_dict['results'].extend(results)
response_dict['topic_message_id'] = message_id
elif (response.status_code == 401):
raise AuthenticationError('There was an error authenticating the sender account')
elif (response.status_code == 400):
raise InvalidDataError(response.text)
else:
raise FCMServerError('FCM server is temporarily unavailable')
return response_dict
|
Parses the JSON response sent back by the server and extracts the important return variables
Returns:
dict: multicast_ids (list), success (int), failure (int), canonical_ids (int),
results (list) and optional topic_message_id (str but None by default)
Raises:
FCMServerError: FCM is temporarily unavailable
AuthenticationError: error authenticating the sender account
InvalidDataError: data passed to FCM was incorrectly structured
|
codesearchnet
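For reference, a sketch of a typical 200-status payload from the legacy FCM downstream API and the aggregate dict this method builds from it (illustrative values):

# One parsed FCM response body:
# {"multicast_id": 216, "success": 1, "failure": 0, "canonical_ids": 0,
#  "results": [{"message_id": "0:1500..."}]}
#
# Aggregated over all request responses, parse_responses returns:
# {"multicast_ids": [216], "success": 1, "failure": 0, "canonical_ids": 0,
#  "results": [{"message_id": "0:1500..."}], "topic_message_id": None}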
|
def infer_location(self, location_query, max_distance, google_key, foursquare_client_id, foursquare_client_secret, limit):
self.location_from = infer_location(self.points[0], location_query, max_distance, google_key, foursquare_client_id, foursquare_client_secret, limit)
self.location_to = infer_location(self.points[(- 1)], location_query, max_distance, google_key, foursquare_client_id, foursquare_client_secret, limit)
return self
|
In-place location inferring
See infer_location function
Args:
Returns:
:obj:`Segment`: self
|
codesearchnet
|
def ParseOptions(cls, options, configuration_object):
if not isinstance(configuration_object, tools.CLITool):
raise errors.BadConfigObject(
'Configuration object is not an instance of CLITool')
use_zeromq = getattr(options, 'use_zeromq', True)
setattr(configuration_object, '_use_zeromq', use_zeromq)
|
Parses and validates options.
Args:
options (argparse.Namespace): parser options.
configuration_object (CLITool): object to be configured by the argument
helper.
Raises:
BadConfigObject: when the configuration object is of the wrong type.
|
juraj-google-style
|
def is_possible_workdir(path):
res = False
trails = ['initialized', 'uuid']
try:
res = all(
os.path.isfile(os.path.join(path, 'current', trail))
for trail in trails
)
    except Exception:
        pass
return res
|
A quick method to suggest if the path is a possible workdir.
This does not guarantee that the workdir is not malformed, only that by
simple heuristics it might be one.
For a full check use :func:`is_workdir`.
Args:
path(str): Path
Returns:
bool: True if ``path`` might be a work dir.
|
juraj-google-style
|
def copy(x):
if any_symbolic_tensors((x,)):
return Copy().symbolic_call(x)
return backend.numpy.copy(x)
|
Returns a copy of `x`.
Args:
x: Input tensor.
Returns:
A copy of `x`.
|
github-repos
|
def get_userid_from_botid(self, botid):
botinfo = self.slack_client.api_call('bots.info', bot=botid)
if botinfo['ok'] is True:
return botinfo['bot'].get('user_id')
else:
return botid
|
Perform a lookup of bots.info to resolve a botid to a userid
Args:
botid (string): Slack botid to lookup.
Returns:
string: userid value
|
juraj-google-style
|
def set_monitor_timeout(timeout, power='ac', scheme=None):
return _set_powercfg_value(scheme=scheme, sub_group='SUB_VIDEO', setting_guid='VIDEOIDLE', power=power, value=timeout)
|
Set the monitor timeout in minutes for the given power scheme
Args:
timeout (int):
The amount of time in minutes before the monitor times out
power (str):
Set the value for AC or DC power. Default is ``ac``. Valid options
are:
- ``ac`` (AC Power)
- ``dc`` (Battery)
scheme (str):
The scheme to use, leave as ``None`` to use the current. Default is
``None``. This can be the GUID or the Alias for the Scheme. Known
Aliases are:
- ``SCHEME_BALANCED`` - Balanced
- ``SCHEME_MAX`` - Power saver
- ``SCHEME_MIN`` - High performance
Returns:
bool: ``True`` if successful, otherwise ``False``
CLI Example:
.. code-block:: bash
# Sets the monitor timeout to 30 minutes
salt '*' powercfg.set_monitor_timeout 30
|
codesearchnet
|
def list(self):
resp = self.client.api.plugins()
return [self.prepare_model(r) for r in resp]
|
List plugins installed on the server.
Returns:
(list of :py:class:`Plugin`): The plugins.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
|
codesearchnet
|
def export_aliases(export_path=None, exclusions=None):
if (not export_path):
export_path = os.path.abspath(ALIAS_FILE_NAME)
alias_table = get_alias_table()
for exclusion in (exclusions or []):
if (exclusion not in alias_table.sections()):
raise CLIError(ALIAS_NOT_FOUND_ERROR.format(exclusion))
alias_table.remove_section(exclusion)
_commit_change(alias_table, export_path=export_path, post_commit=False)
logger.warning(POST_EXPORT_ALIAS_MSG, export_path)
|
Export all registered aliases to a given path, as an INI configuration file.
Args:
export_path: The path of the alias configuration file to export to.
exclusions: Space-separated aliases excluded from export.
|
codesearchnet
|
def _ParsePage(self, parser_mediator, file_offset, page_data):
page_header_map = self._GetDataTypeMap('binarycookies_page_header')
try:
page_header = self._ReadStructureFromByteStream(page_data, file_offset, page_header_map)
except (ValueError, errors.ParseError) as exception:
raise errors.ParseError('Unable to map page header data at offset: 0x{0:08x} with error: {1!s}'.format(file_offset, exception))
for record_offset in page_header.offsets:
if parser_mediator.abort:
break
self._ParseRecord(parser_mediator, page_data, record_offset)
|
Parses a page.
Args:
parser_mediator (ParserMediator): parser mediator.
file_offset (int): offset of the data relative from the start of
the file-like object.
page_data (bytes): page data.
Raises:
ParseError: when the page cannot be parsed.
|
codesearchnet
|
def create_executable_script(filepath, body, program=None):
program = program or "python"
if callable(body):
from rez.utils.sourcecode import SourceCode
code = SourceCode(func=body)
body = code.source
if not body.endswith('\n'):
body += '\n'
with open(filepath, 'w') as f:
        f.write("#!/usr/bin/env %s\n" % program)
f.write(body)
if os.name == "posix":
os.chmod(filepath, stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH
| stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
|
Create an executable script.
Args:
filepath (str): File to create.
body (str or callable): Contents of the script. If a callable, its code
is used as the script body.
program (str): Name of program to launch the script, 'python' if None
|
juraj-google-style
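A usage sketch covering both body forms; the file paths are placeholders:

# String body:
create_executable_script("/tmp/hello", 'print("hello")\n')

# Callable body -- per the docstring, the function's code is used as the script body:
def greet():
    print("hello from a callable body")

create_executable_script("/tmp/hello2", greet, program="python3")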
|
def _split_into_groups(n, max_group_size, mesh_dim_size):
if ((n % mesh_dim_size) != 0):
raise ValueError(('n=%d is not a multiple of mesh_dim_size=%d' % (n, mesh_dim_size)))
    num_groups = max(1, (n // max_group_size))
while (((num_groups % mesh_dim_size) != 0) or ((n % num_groups) != 0)):
num_groups += 1
    group_size = (n // num_groups)
tf.logging.info(('_split_into_groups(n=%d, max_group_size=%d, mesh_dim_size=%d) = (num_groups=%d group_size=%d)' % (n, max_group_size, mesh_dim_size, num_groups, group_size)))
return (num_groups, group_size)
|
Helper function for figuring out how to split a dimension into groups.
We have a dimension with size n and we want to split it into
two dimensions: n = num_groups * group_size
group_size should be the largest possible value meeting the constraints:
group_size <= max_group_size
(num_groups = n/group_size) is a multiple of mesh_dim_size
Args:
n: an integer
max_group_size: an integer
mesh_dim_size: an integer
Returns:
num_groups: an integer
group_size: an integer
Raises:
ValueError: if n is not a multiple of mesh_dim_size
|
codesearchnet
|
class DepthAnythingFeatureFusionLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.projection = nn.Conv2d(config.fusion_hidden_size, config.fusion_hidden_size, kernel_size=1, bias=True)
self.residual_layer1 = DepthAnythingPreActResidualLayer(config)
self.residual_layer2 = DepthAnythingPreActResidualLayer(config)
def forward(self, hidden_state, residual=None, size=None):
if residual is not None:
if hidden_state.shape != residual.shape:
residual = nn.functional.interpolate(residual, size=(hidden_state.shape[2], hidden_state.shape[3]), mode='bilinear', align_corners=False)
hidden_state = hidden_state + self.residual_layer1(residual)
hidden_state = self.residual_layer2(hidden_state)
modifier = {'scale_factor': 2} if size is None else {'size': size}
hidden_state = nn.functional.interpolate(hidden_state, **modifier, mode='bilinear', align_corners=True)
hidden_state = self.projection(hidden_state)
return hidden_state
|
Feature fusion layer, merges feature maps from different stages.
Args:
config (`[DepthAnythingConfig]`):
Model configuration class defining the model architecture.
|
github-repos
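A shape-level sketch, assuming transformers exposes DepthAnythingConfig and its default fusion_hidden_size of 64:

import torch
from transformers import DepthAnythingConfig

config = DepthAnythingConfig()
layer = DepthAnythingFeatureFusionLayer(config)
x = torch.randn(1, config.fusion_hidden_size, 24, 24)
out = layer(x)            # with size=None, a bilinear 2x upsample is applied
print(out.shape)          # torch.Size([1, 64, 48, 48])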
|
class LogElements(PTransform):
class _LoggingFn(DoFn):
def __init__(self, prefix='', with_timestamp=False, with_window=False, level=None):
super().__init__()
self.prefix = prefix
self.with_timestamp = with_timestamp
self.with_window = with_window
self.level = level
def process(self, element, timestamp=DoFn.TimestampParam, window=DoFn.WindowParam, **kwargs):
log_line = self.prefix + str(element)
if self.with_timestamp:
log_line += ', timestamp=' + repr(timestamp.to_rfc3339())
if self.with_window:
log_line += ', window(start=' + window.start.to_rfc3339()
log_line += ', end=' + window.end.to_rfc3339() + ')'
if self.level == logging.DEBUG:
logging.debug(log_line)
elif self.level == logging.INFO:
logging.info(log_line)
elif self.level == logging.WARNING:
logging.warning(log_line)
elif self.level == logging.ERROR:
logging.error(log_line)
elif self.level == logging.CRITICAL:
logging.critical(log_line)
else:
print(log_line)
yield element
def __init__(self, label=None, prefix='', with_timestamp=False, with_window=False, level=None):
super().__init__(label)
self.prefix = prefix
self.with_timestamp = with_timestamp
self.with_window = with_window
self.level = level
def expand(self, input):
return input | ParDo(self._LoggingFn(self.prefix, self.with_timestamp, self.with_window, self.level))
|
PTransform for printing the elements of a PCollection.
Args:
label (str): (optional) A custom label for the transform.
prefix (str): (optional) A prefix string to prepend to each logged element.
with_timestamp (bool): (optional) Whether to include element's timestamp.
with_window (bool): (optional) Whether to include element's window.
level: (optional) The logging level for the output (e.g. `logging.DEBUG`,
`logging.INFO`, `logging.WARNING`, `logging.ERROR`). If not specified,
the log is printed to stdout.
|
github-repos
|
def create_timer(cb: Callable[([float], None)], interval: float, delay_policy: TimerDelayPolicy=TimerDelayPolicy.DEFAULT, loop: Optional[asyncio.BaseEventLoop]=None) -> asyncio.Task:
if (not loop):
loop = asyncio.get_event_loop()
async def _timer():
fired_tasks = []
try:
while True:
if (delay_policy == TimerDelayPolicy.CANCEL):
for t in fired_tasks:
if (not t.done()):
t.cancel()
(await t)
fired_tasks.clear()
else:
fired_tasks[:] = [t for t in fired_tasks if (not t.done())]
t = loop.create_task(cb(interval=interval))
fired_tasks.append(t)
(await asyncio.sleep(interval))
except asyncio.CancelledError:
for t in fired_tasks:
t.cancel()
(await asyncio.gather(*fired_tasks))
return loop.create_task(_timer())
|
Schedule a timer with the given callable and the interval in seconds.
The interval value is also passed to the callable.
If the callable takes longer than the timer interval, all accumulated
callback tasks will be cancelled when the timer is cancelled.
Args:
cb: a coroutine function scheduled on each tick; the interval is passed to it as a keyword argument
interval: the timer interval in seconds
delay_policy: how to treat callback tasks still running at the next tick (see TimerDelayPolicy)
loop: the event loop to use; defaults to the current event loop
Returns:
The timer task. You can stop the timer by cancelling the returned task.
|
codesearchnet
|
def build_from_items(self, items: list[_ItemType] | None) -> imports_map.ImportsMap | None:
if not items:
return None
imports_multimap = self._build_multimap(items)
assert imports_multimap is not None
return self._finalize(imports_multimap)
|
Create a file mapping from a list of (short path, path) tuples.
Builds a dict of short_path to full name
(e.g. "path/to/file.py" =>
"$GENDIR/rulename~~pytype-gen/path_to_file.py~~pytype"
Args:
items: A list of (short_path, full_path) tuples.
Returns:
Dict of .py short_path to list of .pytd path or None if no items
|
github-repos
|
def mg_refractive(m, mix):
if (len(m) == 2):
cF = (((float(mix[1]) / (mix[0] + mix[1])) * ((m[1] ** 2) - (m[0] ** 2))) / ((m[1] ** 2) + (2 * (m[0] ** 2))))
er = (((m[0] ** 2) * (1.0 + (2.0 * cF))) / (1.0 - cF))
m = np.sqrt(er)
else:
m_last = mg_refractive(m[(- 2):], mix[(- 2):])
mix_last = (mix[(- 2)] + mix[(- 1)])
m = mg_refractive((m[:(- 2)] + (m_last,)), (mix[:(- 2)] + (mix_last,)))
return m
|
Maxwell-Garnett EMA for the refractive index.
Args:
m: Tuple of the complex refractive indices of the media.
mix: Tuple of the volume fractions of the media, len(mix)==len(m)
(if sum(mix)!=1, these are taken relative to sum(mix))
Returns:
The Maxwell-Garnett approximation for the complex refractive index of
the effective medium
If len(m)==2, the first element is taken as the matrix and the second as
the inclusion. If len(m)>2, the media are mixed recursively so that the
last element is used as the inclusion and the second to last as the
matrix, then this mixture is used as the last element on the next
iteration, and so on.
|
codesearchnet
|
def pretty_str(something, indent=0):
if isinstance(something, CodeEntity):
return something.pretty_str(indent=indent)
else:
return (' ' * indent) + repr(something)
|
Return a human-readable string representation of an object.
Uses `pretty_str` if the given value is an instance of
`CodeEntity` and `repr` otherwise.
Args:
something: Some value to convert.
Kwargs:
indent (int): The amount of spaces to use as indentation.
|
juraj-google-style
|
def plot_series(filename, plot_kwargs=None):
import matplotlib.pyplot as plt
if (plot_kwargs is None):
plot_kwargs = {}
data = np.genfromtxt(filename, dtype='i8,f4', names=['k', 'v'])
index = data['k']
values = data['v']
plt.plot(index, values, **plot_kwargs)
|
Plot series data from MonitorSeries output text file.
Args:
filename (str): Path to *.series.txt file produced by :obj:`~nnabla.MonitorSeries` class.
plot_kwargs (dict, optional):
Keyword arguments passed to :func:`matplotlib.pyplot.plot`.
Note:
matplotlib package is required.
|
codesearchnet
|
def empty_like(array, dtype=None, keepmeta=True):
if keepmeta:
return dc.empty(array.shape, dtype, tcoords=array.dca.tcoords, chcoords=array.dca.chcoords, scalarcoords=array.dca.scalarcoords, attrs=array.attrs, name=array.name)
else:
return dc.empty(array.shape, dtype)
|
Create an array of empty with the same shape and type as the input array.
Args:
array (xarray.DataArray): The shape and data-type of it define
these same attributes of the output array.
dtype (data-type, optional): If specified, this function overrides
the data-type of the output array.
keepmeta (bool, optional): Whether *coords, attrs, and name of the input
array are kept in the output one. Default is True.
Returns:
array (decode.array): Decode array without initializing entries.
|
codesearchnet
|