| code | docstring | source |
|---|---|---|
def add_op_callback(self, callback):
if callback not in self._thread_local_data.op_callbacks:
self._thread_local_data.op_callbacks.append(callback)
|
Add a post-op callback to the context.
A post-op callback is invoked immediately after an eager operation or
function has finished execution or after an op has been added to a graph,
providing access to the op's type, name, input and output tensors. Multiple
op callbacks can be added, in which case the callbacks will be invoked in
the order in which they are added.
Args:
callback: a callable of the signature `f(op_type, inputs, attrs, outputs,
op_name=None, graph=None)`. See doc strings in `op_callbacks.py` for
details on the function signature and its semantics.
|
github-repos
|
def append_to_list(self, key, *value, pipeline=False):
if pipeline:
self._pipeline.rpush(key, *value)
else:
self._db.rpush(key, *value)
|
Append one or more elements to the end of the list stored at key.
Args:
key (str): Key where the list is stored
value: One or more values to append to the list
pipeline (bool): If True, queue the command in a transaction block. Defaults to False.
|
codesearchnet
|
def set_compare_estimator_and_feature_spec(self, estimator, feature_spec):
self.delete('compare_custom_predict_fn')
self.store('compare_estimator_and_spec', {'estimator': estimator, 'feature_spec': feature_spec})
self.set_compare_inference_address('estimator')
if (not self.has_compare_model_name()):
self.set_compare_model_name('2')
return self
|
Sets a second model for inference as a TF Estimator.
If you wish to compare the results of two models in WIT, use this method
to set up the details of the second model.
Instead of using TF Serving to host a model for WIT to query, WIT can
directly use a TF Estimator object as the model to query. In order to
accomplish this, a feature_spec must also be provided to parse the
example protos for input into the estimator.
Args:
estimator: The TF Estimator which will be used for model inference.
feature_spec: The feature_spec object which will be used for example
parsing.
Returns:
self, to enable method chaining.
|
codesearchnet
|
def unwrap_values(distribution_strategy, grouped_inputs, grouped_outputs, grouped_updates=None, grouped_session_args=None, with_loss_tensor=False):
all_inputs = flatten_per_replica_values(distribution_strategy, grouped_inputs)
all_outputs = unwrap_outputs(distribution_strategy, grouped_outputs, with_loss_tensor)
if grouped_updates:
all_updates = flatten_per_replica_values(distribution_strategy, grouped_updates)
else:
all_updates = None
all_session_args = {}
if grouped_session_args:
grouped_feed_dict = grouped_session_args.get('feed_dict')
if grouped_feed_dict:
all_session_args['feed_dict'] = flatten_per_replica_values(distribution_strategy, grouped_feed_dict)
grouped_fetches = grouped_session_args.get('fetches')
if grouped_fetches:
all_session_args['fetches'] = flatten_per_replica_values(distribution_strategy, grouped_fetches)
return (all_inputs, all_outputs, all_updates, all_session_args)
|
Unwrap the list of values contained in the PerReplica parameters.
This function calls `flatten_per_replica_values` to parse each of the input
parameters into a list of values on the different devices. If we set
`with_loss_tensor` to be True, we also call `reduce` on the list of losses on
the different devices to give us one loss tensor.
Args:
distribution_strategy: DistributionStrategy used to distribute training and
validation.
grouped_inputs: PerReplica inputs returned from the train or test function
that we ran on each device.
grouped_outputs: PerReplica outputs returned from the train or test function
that we ran on each device.
grouped_updates: PerReplica updates returned from the train or test function
that we ran on each device.
grouped_session_args: PerReplica session args returned from the train or
test function that we ran on each device.
with_loss_tensor: Boolean that indicates if we need to add the reduced loss
tensor as one of the outputs.
Returns:
Values of each of the PerReplica parameters.
|
github-repos
|
def to_pytd_type_of_instance(self, val: abstract.BaseValue) -> pytd.Type:
if val is self._ctx.consts.Any:
return pytd.AnythingType()
elif val is self._ctx.consts[None]:
return pytd.NamedType('builtins.NoneType')
elif isinstance(val, abstract.Union):
return pytd_utils.JoinTypes((self.to_pytd_type_of_instance(v) for v in val.options))
elif isinstance(val, abstract.SimpleClass):
return pytd.NamedType(val.name)
else:
raise NotImplementedError(f'to_pytd_type_of_instance() not implemented for {val.__class__.__name__}: {val}')
|
Returns the type of an instance of the abstract value, as a pytd node.
For example, if the abstract value is:
InterpreterClass(C)
then to_pytd_type_of_instance() produces:
pytd.NamedType(C)
Args:
val: The abstract value.
|
github-repos
|
def add_inputs(self, *args, **kwargs):
if 'names' in kwargs:
return [self._inputs.add(arg, name=name) for arg, name in zip(args, kwargs['names'])]
else:
return [self._inputs.add(arg) for arg in args]
|
Add a sequence of inputs to the function invocation.
Args:
*args: List of inputs to be converted (should be Tf.Tensor).
**kwargs: This allows 'names' which should be a list of names.
Returns:
Wrapped inputs (identity standins that have additional metadata). These
are also tf.Tensors.
|
github-repos
|
def _GetIntegerValue(self, row, value_name):
value = row.get(value_name, None)
try:
return int(value, 10)
except (TypeError, ValueError):
return None
|
Converts a specific value of the row to an integer.
Args:
row (dict[str, str]): fields of a single row, as specified in COLUMNS.
value_name (str): name of the value within the row.
Returns:
int: value or None if the value cannot be converted.
|
juraj-google-style
|
def on_epoch_begin(self, epoch, logs=None):
    pass  # no-op by default; subclasses override
|
Called at the start of an epoch.
Subclasses should override for any actions to run. This function should only
be called during TRAIN mode.
Args:
epoch: Integer, index of epoch.
logs: Dict. Currently no data is passed to this argument for this method
but that may change in the future.
|
github-repos
|
def detect_functions_called(contract):
result = []
for func in contract.all_functions_called:
for node in func.nodes:
for ir in node.irs:
if isinstance(ir, (InternalCall, SolidityCall)):
result.append(ir.function)
return result
|
Returns a list of InternalCall, SolidityCall
calls made in a function
Returns:
(list): List of all InternalCall, SolidityCall
|
codesearchnet
|
def make(cls, name: str, *, def_opcode: 'opcodes.Opcode', code: 'blocks.OrderedCode', f_locals: _instances.LazyConcreteDict, f_globals: _instances.LazyConcreteDict, defaults, kw_defaults, closure, annotations: 'dict[str, _base.BaseValue]', ctx: 'context.Context'):
annotations = annotations or {}
overloads = ctx.vm.frame.overloads[name]
if f_locals == ctx.convert.unsolvable:
local_members = {}
else:
local_members = f_locals.members
key = (name, code, _hash_all_dicts((f_globals.members, set(code.names)), (local_members, set(local_members) - set(code.varnames)), ({key: ctx.program.NewVariable([value], [], ctx.root_node) for key, value in annotations.items()}, None), (dict(enumerate((ctx.program.NewVariable([f], [], ctx.root_node) for f in overloads))), None), (dict(enumerate(defaults)), None), (dict(enumerate(closure or ())), None)))
if key not in ctx.function_cache:
ctx.function_cache[key] = cls(name, def_opcode, code, f_locals, f_globals, defaults, kw_defaults, closure, annotations, overloads, ctx)
elif closure:
ctx.function_cache[key].closure = closure
f = ctx.function_cache[key]
ctx.vm.frame.functions_created_in_frame[f.name.rsplit('.')[-1]].append(f)
return f
|
Get an InterpreterFunction.
Things like anonymous functions and generator expressions are created
every time the corresponding code executes. Caching them makes it easier
to detect when the environment hasn't changed and a function call can be
optimized away.
Arguments:
name: Function name.
def_opcode: The opcode for the def statement
code: A code object.
f_locals: The locals used for name resolution.
f_globals: The globals used for name resolution.
defaults: Default arguments.
kw_defaults: Default arguments for kwonly parameters.
closure: The free variables this closure binds to.
annotations: Function annotations. Dict of name -> BaseValue.
ctx: context.Context instance.
Returns:
An InterpreterFunction.
|
github-repos
|
def hex_is_dark(hexx, percent=50):
r, g, b = hex_to_rgb(hexx)
luma = (0.2126 * r + 0.7152 * g + 0.0722 * b) / 2.55
return (luma < percent)
|
Function to decide if a hex colour is dark.
Args:
hexx (str): A hexadecimal colour, starting with '#'.
percent (int or float): Brightness threshold as a percentage; defaults to 50.
Returns:
bool: The colour's brightness is less than the given percent.
|
juraj-google-style
|
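A quick usage sketch for hex_is_dark. The hex_to_rgb helper it calls is not shown in this entry, so a minimal stand-in is assumed here:

def hex_to_rgb(hexx):
    # Hypothetical stand-in for the library's helper: '#rrggbb' -> (r, g, b) ints 0-255.
    hexx = hexx.lstrip('#')
    return tuple(int(hexx[i:i + 2], 16) for i in (0, 2, 4))

print(hex_is_dark('#202020'))  # True: luma is roughly 12.5, below the default 50
print(hex_is_dark('#f0f0f0'))  # False: luma is roughly 94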
def debye_integral(y):
factor = 3. / y ** 3
if y < 155:
integral = quadrature(lambda x: x ** 3 / (np.exp(x) - 1.), 0, y)
return list(integral)[0] * factor
else:
return 6.493939 * factor
|
Debye integral. Eq(5) in doi.org/10.1016/j.comphy.2003.12.001
Args:
y (float): debye temperature/T, upper limit
Returns:
float: unitless
|
juraj-google-style
|
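A hedged usage sketch, assuming the imports the snippet relies on (numpy and scipy.integrate.quadrature):

import numpy as np
from scipy.integrate import quadrature

# Small y (high temperature): the normalized integral approaches 1.
print(debye_integral(0.5))
# y >= 155 (low temperature): the analytic limit 6.493939 * 3 / y**3 is returned instead.
print(debye_integral(200.0))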
def check_type(value, type_def):
if (type_def == 'integer'):
try:
int(value)
return True
except ValueError:
return (isinstance(value, six.integer_types) and (not isinstance(value, bool)))
elif (type_def == 'number'):
return (isinstance(value, (six.integer_types, float)) and (not isinstance(value, bool)))
elif (type_def == 'string'):
return isinstance(value, (six.text_type, six.string_types, datetime.datetime))
elif (type_def == 'boolean'):
return (isinstance(value, bool) or (isinstance(value, (six.text_type, six.string_types)) and (value.lower() in ['true', 'false'])))
else:
return False
|
Check if the value is in the type given in type_def.
Args:
value: the var to test.
type_def: string representing the type in swagger.
Returns:
True if the type is correct, False otherwise.
|
codesearchnet
|
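A few illustrative calls, assuming the six and datetime imports check_type depends on:

print(check_type('42', 'integer'))     # True: the string parses as a base-10 int
print(check_type(3.14, 'number'))      # True
print(check_type(True, 'number'))      # False: bools are explicitly excluded
print(check_type('FALSE', 'boolean'))  # True: case-insensitive string form is accepted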
def _arguments(code, module):
arg_parser = CommandParser.create('')
try:
builtins = {'source': _table, 'datestring': _datestring}
env = {}
env.update(builtins)
exec(code, env)
for key in env:
if key in builtins or key[0] == '_':
continue
val = env[key]
key = '--%s' % key
if isinstance(val, bool):
if val:
arg_parser.add_argument(key, default=val, action='store_true')
else:
arg_parser.add_argument(key, default=val, action='store_false')
elif isinstance(val, (basestring, int, float)):
arg_parser.add_argument(key, default=val)
elif isinstance(val, list):
arg_parser.add_argument(key, default=val, nargs='+')
elif isinstance(val, tuple):
arg_parser.add_argument(key, default=list(val), nargs='+')
elif isinstance(val, dict) and 'type' in val:
if val['type'] == 'datestring':
arg_parser.add_argument(key, default='',
type=_make_string_formatter(val['format'],
offset=val['offset']))
elif val['type'] == 'table':
if val['format'] is not None:
arg_parser.add_argument(key, default='',
type=_make_table_formatter(val['format'],
offset=val['offset']))
else:
arg_parser.add_argument(key, default=val['name'], type=_make_table)
else:
raise Exception('Cannot generate argument for %s of type %s' % (key, type(val)))
else:
raise Exception('Cannot generate argument for %s of type %s' % (key, type(val)))
except Exception as e:
print("%%sql arguments: %s from code '%s'" % (str(e), str(code)))
return arg_parser
|
Define pipeline arguments.
Args:
code: the Python code to execute that defines the arguments.
Returns:
The populated argument parser.
|
juraj-google-style
|
def __init__(self, wrapped: message.Message, unused_context: Context) -> None:
self.wrapped = wrapped
|
Initializes a new PrimitiveWrapper with wrapped.
Args:
wrapped: The primitive message to wrap.
|
github-repos
|
def TempDirPath(suffix='', prefix='tmp'):
precondition.AssertType(suffix, Text)
precondition.AssertType(prefix, Text)
return tempfile.mkdtemp(suffix=suffix, prefix=prefix, dir=_TempRootPath())
|
Creates a temporary directory based on the environment configuration.
The directory will be placed in folder as specified by the `TEST_TMPDIR`
environment variable if available or fallback to `Test.tmpdir` of the current
configuration if not.
Args:
suffix: A suffix to end the directory name with.
prefix: A prefix to begin the directory name with.
Returns:
An absolute path to the created directory.
|
codesearchnet
|
def read(name, default=None, allow_none=False, fallback=None):
raw_value = environ.get(name)
if raw_value is None and fallback is not None:
if not isinstance(fallback, builtins.list) and not isinstance(fallback, builtins.tuple):
fallback = [fallback]
for fall in fallback:
raw_value = environ.get(fall)
if raw_value is not None:
break
if raw_value or raw_value == '':
return raw_value
elif default is not None or allow_none:
return default
else:
raise KeyError('Set the "{0}" environment variable'.format(name))
|
Read the raw env value.
Read the raw environment variable or use the default. If the value is not
found and no default is set throw an exception.
Args:
name: The environment variable name
default: The default value to use if no environment variable is found
allow_none: If the return value can be `None` (i.e. optional)
fallback: A list of fallback env variables to try and read if the primary environment
variable is unavailable.
Raises:
KeyError: If no value is found, no default is given, and allow_none is False.
|
juraj-google-style
|
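A short usage sketch, assuming `from os import environ` as the snippet implies:

import os
os.environ['PRIMARY'] = 'one'

print(read('PRIMARY'))                      # 'one'
print(read('MISSING', default='fallback'))  # 'fallback'
print(read('MISSING', fallback='PRIMARY'))  # 'one', resolved via the fallback name
# read('MISSING') with no default would raise KeyError.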
def check_semidefinite_positiveness(A):
B = empty_like(A)
B[:] = A
B[diag_indices_from(B)] += sqrt(finfo(float).eps)
try:
cholesky(B)
except LinAlgError:
return False
return True
|
Check if ``A`` is a positive semi-definite matrix.
Args:
A (array_like): Matrix.
Returns:
bool: ``True`` if ``A`` is positive semi-definite; ``False`` otherwise.
|
codesearchnet
|
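A minimal sketch of how the snippet's bare imports might be satisfied with numpy; the original library may import them differently:

import numpy as np
from numpy import empty_like, diag_indices_from, sqrt, finfo
from numpy.linalg import cholesky, LinAlgError

A = np.array([[2.0, 1.0], [1.0, 2.0]])  # eigenvalues 3 and 1: positive definite
B = np.array([[1.0, 2.0], [2.0, 1.0]])  # eigenvalues 3 and -1: indefinite
print(check_semidefinite_positiveness(A))  # True
print(check_semidefinite_positiveness(B))  # False, cholesky raises LinAlgError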
def WriteEventBody(self, event):
for field_name in self._fields:
if field_name == 'datetime':
output_value = self._FormatDateTime(event)
else:
output_value = self._dynamic_fields_helper.GetFormattedField(
event, field_name)
output_value = self._RemoveIllegalXMLCharacters(output_value)
column_index = self._fields.index(field_name)
self._column_widths.setdefault(column_index, 0)
if field_name == 'datetime':
column_width = min(
self._MAX_COLUMN_WIDTH, len(self._timestamp_format) + 2)
else:
column_width = min(self._MAX_COLUMN_WIDTH, len(output_value) + 2)
self._column_widths[column_index] = max(
self._MIN_COLUMN_WIDTH, self._column_widths[column_index],
column_width)
self._sheet.set_column(
column_index, column_index, self._column_widths[column_index])
if (field_name == 'datetime'
and isinstance(output_value, datetime.datetime)):
self._sheet.write_datetime(
self._current_row, column_index, output_value)
else:
self._sheet.write(self._current_row, column_index, output_value)
self._current_row += 1
|
Writes the body of an event object to the spreadsheet.
Args:
event (EventObject): event.
|
juraj-google-style
|
def CredibleInterval(pmf, percentage=90):
cdf = pmf.MakeCdf()
prob = (1 - percentage / 100.0) / 2
interval = cdf.Value(prob), cdf.Value(1 - prob)
return interval
|
Computes a credible interval for a given distribution.
If percentage=90, computes the 90% CI.
Args:
pmf: Pmf object representing a posterior distribution
percentage: float between 0 and 100
Returns:
sequence of two floats, low and high
|
juraj-google-style
|
def __init__(self, enum_values, case_sensitive=True):
if not enum_values:
raise ValueError(
'enum_values cannot be empty, found "{}"'.format(enum_values))
super(EnumParser, self).__init__()
self.enum_values = enum_values
self.case_sensitive = case_sensitive
|
Initializes EnumParser.
Args:
enum_values: [str], a non-empty list of string values in the enum.
case_sensitive: bool, whether or not the enum is to be case-sensitive.
Raises:
ValueError: When enum_values is empty.
|
juraj-google-style
|
def parse_date(value):
if not value:
return None
if isinstance(value, datetime.date):
return value
return parse_datetime(value).date()
|
Attempts to parse `value` into an instance of ``datetime.date``. If
`value` is ``None``, this function will return ``None``.
Args:
value: A timestamp. This can be a string, datetime.date, or
datetime.datetime value.
|
juraj-google-style
|
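Usage sketch; note that string parsing is delegated to a parse_datetime helper that is not shown in this entry:

import datetime

print(parse_date(None))                       # None
print(parse_date(datetime.date(2020, 1, 2)))  # datetime.date(2020, 1, 2), passed through
# parse_date('2020-01-02') would call parse_datetime('2020-01-02').date()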
def can_match(cls, pattern: Pattern) -> bool:
if not isinstance(pattern.expression, Operation) or isinstance(pattern.expression, CommutativeOperation):
return False
if op_len(pattern.expression) < 3:
return False
first, *_, last = op_iter(pattern.expression)
try:
cls._check_wildcard_and_get_name(first)
cls._check_wildcard_and_get_name(last)
except ValueError:
return False
return True
|
Check if a pattern can be matched with a sequence matcher.
Args:
pattern:
The pattern to check.
Returns:
True, iff the pattern can be matched with a sequence matcher.
|
juraj-google-style
|
def write(self, output='jsonstat'):
if output == 'jsonstat':
return json.dumps(self)
elif output == 'dataframe_list':
df_list = []
unnest_collection(self, df_list)
return df_list
else:
raise ValueError(
"Allowed arguments are 'jsonstat' or 'dataframe_list'")
|
Writes data from a Collection object to JSONstat or a list of
Pandas DataFrames.
Args:
output(string): can accept 'jsonstat' or 'dataframe_list'
Returns:
Serialized JSONstat or a list of Pandas DataFrames, depending on
the 'output' parameter.
|
juraj-google-style
|
def map_tensor_fn(self, fn: Callable[[torch.Tensor], torch.Tensor]) -> Rigid:
new_rots = self._rots.map_tensor_fn(fn)
new_trans = torch.stack(list(map(fn, torch.unbind(self._trans, dim=-1))), dim=-1)
return Rigid(new_rots, new_trans)
|
Apply a Tensor -> Tensor function to underlying translation and rotation tensors, mapping over the
translation/rotation dimensions respectively.
Args:
fn:
A Tensor -> Tensor function to be mapped over the Rigid
Returns:
The transformed Rigid object
|
github-repos
|
def __init__(self, underlying_runner=None, render_option=None, skip_display=True, force_compute=True, blocking=True):
self._underlying_runner = underlying_runner or direct_runner.DirectRunner()
self._render_option = render_option
self._in_session = False
self._skip_display = skip_display
self._force_compute = force_compute
self._blocking = blocking
|
Constructor of InteractiveRunner.
Args:
underlying_runner: (runner.PipelineRunner)
render_option: (str) this parameter decides how the pipeline graph is
rendered. See display.pipeline_graph_renderer for available options.
skip_display: (bool) whether to skip display operations when running the
pipeline. Useful if running large pipelines when display is not
needed.
force_compute: (bool) whether sequential pipeline runs can use cached data
of PCollections computed from the previous runs including show API
invocation from interactive_beam module. If True, always run the whole
pipeline and compute data for PCollections forcefully. If False, use
available data and run minimum pipeline fragment to only compute data
not available.
blocking: (bool) whether the pipeline run should be blocking or not.
|
github-repos
|
def item_status(self, **kwargs):
path = self._get_id_path('item_status')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
|
Check to see if a movie id is already added to a list.
Args:
movie_id: The id of the movie.
Returns:
A dict representation of the JSON returned from the API.
|
codesearchnet
|
def _FormatArgToken(self, token_data):
return {
'string': token_data.argument_value.rstrip('\x00'),
'num_arg': token_data.argument_index,
'is': token_data.argument_name}
|
Formats an argument token as a dictionary of values.
Args:
token_data (bsm_token_data_arg32|bsm_token_data_arg64): AUT_ARG32 or
AUT_ARG64 token data.
Returns:
dict[str, str]: token values.
|
juraj-google-style
|
def create(self, key, value, lease='1h'):
return self._client.write(key, value, lease=lease)
|
Create key/value pair in Vault.
Args:
key (string): The data key.
value (string): The data value.
lease (string): The lease duration.
|
juraj-google-style
|
def single_qubit_matrix_to_gates(mat: np.ndarray, tolerance: float=0) -> List[ops.SingleQubitGate]:
rotations = single_qubit_matrix_to_pauli_rotations(mat, tolerance)
return [(cast(ops.SingleQubitGate, pauli) ** ht) for (pauli, ht) in rotations]
|
Implements a single-qubit operation with few gates.
Args:
mat: The 2x2 unitary matrix of the operation to implement.
tolerance: A limit on the amount of error introduced by the
construction.
Returns:
A list of gates that, when applied in order, perform the desired
operation.
|
codesearchnet
|
def _send_rpc(self, client, uuid, address, rpc, payload, timeout, key):
conn_id = self._validate_connection('send_rpc', uuid, key)
if (conn_id is None):
return
conn_data = self._connections[uuid]
conn_data['last_touch'] = monotonic()
slug = self._build_device_slug(uuid)
try:
resp = (yield self._manager.send_rpc(conn_id, address, (rpc >> 8), (rpc & 255), bytes(payload), timeout))
except Exception as exc:
self._logger.error(('Error in manager send rpc: %s' % str(exc)))
resp = {'success': False, 'reason': ('Internal error: %s' % str(exc))}
payload = {'client': client, 'type': 'response', 'operation': 'rpc'}
payload['success'] = resp['success']
if (resp['success'] is False):
payload['failure_reason'] = resp['reason']
else:
payload['status'] = resp['status']
payload['payload'] = binascii.hexlify(resp['payload'])
self._publish_response(slug, payload)
|
Send an RPC to a connected device
Args:
client (string): The client that sent the rpc request
uuid (int): The id of the device we're opening the interface on
address (int): The address of the tile that we want to send the RPC to
rpc (int): The id of the rpc that we want to send.
payload (bytearray): The payload of arguments that we want to send
timeout (float): The number of seconds to wait for the response
key (string): The key to authenticate the caller
|
codesearchnet
|
def build_url(self, data):
query_part_one = []
query_part_two = []
keys_to_be_removed = []
for key, value in data.items():
if key not in ['version', 'restApi', 'resourcePath']:
if key == 'mapArea':
query_part_one.append(','.join(str(val) for val in value))
keys_to_be_removed.append(key)
elif key == 'includeLocationCodes':
query_part_one.append(value)
keys_to_be_removed.append(key)
else:
if isinstance(value, list):
value = ','.join(str(val) for val in value)
query_part_two.append('{0}={1}'.format(key, value))
keys_to_be_removed.append(key)
for k in keys_to_be_removed:
del data[k]
data['query'] = '{0}?{1}'.format('/'.join(query_part_one),
'&'.join(query_part_two))
return data
|
This method occurs after dumping the data into the class.
Args:
data (dict): dictionary of all the query values
Returns:
data (dict): ordered dict of all the values
|
juraj-google-style
|
def history(self, hash):
txs = self._t.get(hash, max_transactions=10000)['transactions']
tree = defaultdict(list)
number_editions = 0
for tx in txs:
_tx = self._t.get(tx['txid'])
txid = _tx['txid']
verb_str = BlockchainSpider.check_script(_tx['vouts'])
verb = Spoolverb.from_verb(verb_str)
from_address, to_address, piece_address = BlockchainSpider._get_addresses(_tx)
timestamp_utc = _tx['time']
action = verb.action
edition_number = 0
if action != 'EDITIONS':
edition_number = verb.edition_number
else:
number_editions = verb.num_editions
tree[edition_number].append({'txid': txid,
'verb': verb_str,
'from_address': from_address,
'to_address': to_address,
'piece_address': piece_address,
'timestamp_utc': timestamp_utc,
'action': action,
'number_editions': number_editions,
'edition_number': edition_number})
for edition, chain in tree.items():
[d.update({'number_editions': number_editions}) for d in chain]
return dict(tree)
|
Retrieve the ownership tree of all editions of a piece given the hash.
Args:
hash (str): Hash of the file to check. Can be created with the
:class:`File` class
Returns:
dict: Ownership tree of all editions of a piece.
.. note:: For now we only support searching the blockchain by
the piece hash.
|
juraj-google-style
|
def track(self, event_key, user_id, attributes=None, event_tags=None):
if not self.is_valid:
self.logger.error(enums.Errors.INVALID_DATAFILE.format('track'))
return
if not validator.is_non_empty_string(event_key):
self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('event_key'))
return
if not isinstance(user_id, string_types):
self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('user_id'))
return
if not self._validate_user_inputs(attributes, event_tags):
return
event = self.config.get_event(event_key)
if not event:
self.logger.info('Not tracking user "%s" for event "%s".' % (user_id, event_key))
return
conversion_event = self.event_builder.create_conversion_event(event_key, user_id, attributes, event_tags)
self.logger.info('Tracking event "%s" for user "%s".' % (event_key, user_id))
self.logger.debug('Dispatching conversion event to URL %s with params %s.' % (
conversion_event.url,
conversion_event.params
))
try:
self.event_dispatcher.dispatch_event(conversion_event)
except:
self.logger.exception('Unable to dispatch conversion event!')
self.notification_center.send_notifications(enums.NotificationTypes.TRACK, event_key, user_id,
attributes, event_tags, conversion_event)
|
Send conversion event to Optimizely.
Args:
event_key: Event key representing the event which needs to be recorded.
user_id: ID for user.
attributes: Dict representing visitor attributes and values which need to be recorded.
event_tags: Dict representing metadata associated with the event.
|
juraj-google-style
|
def _sync_content_metadata(self, serialized_data, http_method):
try:
(status_code, response_body) = getattr(self, ('_' + http_method))(urljoin(self.enterprise_configuration.degreed_base_url, self.global_degreed_config.course_api_path), serialized_data, self.CONTENT_PROVIDER_SCOPE)
except requests.exceptions.RequestException as exc:
raise ClientError('DegreedAPIClient request failed: {error} {message}'.format(error=exc.__class__.__name__, message=str(exc)))
if (status_code >= 400):
raise ClientError('DegreedAPIClient request failed with status {status_code}: {message}'.format(status_code=status_code, message=response_body))
|
Synchronize content metadata using the Degreed course content API.
Args:
serialized_data: JSON-encoded object containing content metadata.
http_method: The HTTP method to use for the API request.
Raises:
ClientError: If Degreed API request fails.
|
codesearchnet
|
def ParseOptions(cls, options, configuration_object):
if not isinstance(configuration_object, tools.CLITool):
raise errors.BadConfigObject(
'Configuration object is not an instance of CLITool')
storage_format = cls._ParseStringOption(options, 'storage_format')
if not storage_format:
raise errors.BadConfigOption('Unable to determine storage format.')
if storage_format not in definitions.STORAGE_FORMATS:
raise errors.BadConfigOption(
'Unsupported storage format: {0:s}'.format(storage_format))
setattr(configuration_object, '_storage_format', storage_format)
|
Parses and validates options.
Args:
options (argparse.Namespace): parser options.
configuration_object (CLITool): object to be configured by the argument
helper.
Raises:
BadConfigObject: when the configuration object is of the wrong type.
BadConfigOption: if the storage format is not defined or supported.
|
juraj-google-style
|
def occupations( self, site_label ):
return sum( atom.site.label == site_label for atom in self.atoms )
|
Number of these atoms occupying a specific site type.
Args:
site_label (Str): Label for the site type being considered.
Returns:
(Int): Number of atoms occupying sites of type `site_label`.
|
juraj-google-style
|
def InventoryReceived(self, inventory):
if inventory.Hash.ToBytes() in self._MissedBlocks:
self._MissedBlocks.remove(inventory.Hash.ToBytes())
if type(inventory) is MinerTransaction:
return False
if type(inventory) is Block:
if BC.Default() is None:
return False
if BC.Default().ContainsBlock(inventory.Index):
return False
if not BC.Default().AddBlock(inventory):
return False
else:
    if not inventory.Verify(self.MemPool.values()):
        return False
return True
|
Process a received inventory.
Args:
inventory (neo.Network.Inventory): expect a Block type.
Returns:
bool: True if processed and verified. False otherwise.
|
juraj-google-style
|
def extractDays(self, inp):
inp = self._preprocess(inp)
def extractDayOfWeek(dayMatch):
if dayMatch.group(5) in self.__daysOfWeek__:
return self.__daysOfWeek__.index(dayMatch.group(5))
elif dayMatch.group(6) in self.__daysOfWeek__:
return self.__daysOfWeek__.index(dayMatch.group(6))
def extractMonth(dayMatch):
if dayMatch.group(7) in self.__months__:
return self.__months__.index(dayMatch.group(7)) + 1
elif dayMatch.group(7) in self.__shortMonths__:
return self.__shortMonths__.index(dayMatch.group(7)) + 1
def extractDay(dayMatch):
combined = dayMatch.group(8) + dayMatch.group(9)
if combined in self.__dateDescriptors__:
return self.__dateDescriptors__[combined]
elif dayMatch.group(8) in self.__dateDescriptors__:
return self.__dateDescriptors__[dayMatch.group(8)]
elif int(dayMatch.group(8)) in self.__dateDescriptors__.values():
return int(dayMatch.group(8))
def extractDaysFrom(dayMatch):
if not dayMatch.group(1):
return 0
def numericalPrefix(dayMatch):
prefix = inp.split(dayMatch.group(1))[0].strip().split(' ')
prefix.reverse()
prefix = list(filter(lambda s: s != 'and', prefix))
service = NumberService()
num = prefix[0]
if service.isValid(num):
for n in prefix[1:]:
inc = n + " " + num
if service.isValid(inc):
num = inc
else:
break
return service.parse(num)
return 1
factor = numericalPrefix(dayMatch)
if dayMatch.group(2) == 'week':
return factor * 7
elif dayMatch.group(2) == 'day':
return factor * 1
def handleMatch(dayMatch):
def safe(exp):
try:
return exp()
except:
return False
days_from = safe(lambda: extractDaysFrom(dayMatch))
today = safe(lambda: dayMatch.group(3) in self.__todayMatches__)
tomorrow = safe(lambda: dayMatch.group(3)
in self.__tomorrowMatches__)
next_week = safe(lambda: dayMatch.group(4) == 'next')
day_of_week = safe(lambda: extractDayOfWeek(dayMatch))
month = safe(lambda: extractMonth(dayMatch))
day = safe(lambda: extractDay(dayMatch))
if not dayMatch:
return None
elif today:
d = self.now
elif tomorrow:
d = self.now + datetime.timedelta(days=1)
elif type(day_of_week) == int:
current_day_of_week = self.now.weekday()
num_days_away = (day_of_week - current_day_of_week) % 7
if next_week:
num_days_away += 7
d = self.now + \
datetime.timedelta(days=num_days_away)
elif month and day:
d = datetime.datetime(
self.now.year, month, day,
self.now.hour, self.now.minute)
if days_from:
d += datetime.timedelta(days=days_from)
return d
matches = self._dayRegex.finditer(inp)
return [handleMatch(dayMatch) for dayMatch in matches]
|
Extracts all day-related information from an input string.
Ignores any information related to the specific time-of-day.
Args:
inp (str): Input string to be parsed.
Returns:
A list of datetime objects containing the extracted date from the
input snippet, or an empty list if none found.
|
juraj-google-style
|
def download_listing(self, file: Optional[IO],
duration_timeout: Optional[float]=None) -> \
ListingResponse:
if self._session_state != SessionState.directory_request_sent:
raise RuntimeError('File request not sent')
self._session_state = SessionState.file_request_sent
yield from self.download(file=file, rewind=False,
duration_timeout=duration_timeout)
try:
if self._response.body.tell() == 0:
listings = ()
elif self._listing_type == 'mlsd':
self._response.body.seek(0)
machine_listings = wpull.protocol.ftp.util.parse_machine_listing(
self._response.body.read().decode('utf-8',
errors='surrogateescape'),
convert=True, strict=False
)
listings = list(
wpull.protocol.ftp.util.machine_listings_to_file_entries(
machine_listings
))
else:
self._response.body.seek(0)
file = io.TextIOWrapper(self._response.body, encoding='utf-8',
errors='surrogateescape')
listing_parser = ListingParser(file=file)
listings = list(listing_parser.parse_input())
_logger.debug('Listing detected as %s', listing_parser.type)
file.detach()
except (ListingError, ValueError) as error:
raise ProtocolError(*error.args) from error
self._response.files = listings
self._response.body.seek(0)
self._session_state = SessionState.response_received
return self._response
|
Read file listings.
Args:
file: A file object or asyncio stream.
duration_timeout: Maximum time in seconds of which the
entire file must be read.
Returns:
A Response populated with the file listings
Be sure to call :meth:`start_file_listing` first.
Coroutine.
|
juraj-google-style
|
def _print(self, *args):
def _format(name, arr):
title = '### ALL %s ###' % str(name)  # assumed reconstruction; the original literal was truncated at '#'
tlen = len(title)
print('-' * tlen)
print(title)
print('-' * tlen)
print(' Total # of %s: %d\n' % (name, len(arr)))  # assumed reconstruction of a truncated line
if arr:
for item in arr:
detail = ''
if isinstance(item[1], list):
for itm in item[1]:
detail += str(itm) + ', '
detail = detail[:-2]
else:
detail = str(item[1])
print(" %s ('%s')\n" % (str(item[0]), detail))
else:
print(' No %s' % name)
print('\n')
for p_item in args:
if p_item == 'failures':
_format('Failures', self.failures)
elif p_item == 'successes':
_format('Successes', self.successes)
elif p_item == 'failure_msgs':
_format('Failure Messages', self.error_msg)
elif p_item == 'warning_msgs':
_format('Warning Messages', self.warning_msg)
else:
raise Exception('[Error] Wrong input provided for %s.' % _get_func_name())
|
Prints compatibility check status and failure or warning messages.
Prints to console without using `logging`.
Args:
*args: String(s) that is one of:
[`failures`, # all failures
`successes`, # all successes
`failure_msgs`, # failure message(s) recorded upon failure(s)
`warning_msgs`] # warning message(s) recorded upon warning(s)
Raises:
Exception: If *args not in:
[`failures`, `successes`, `failure_msgs`, `warning_msgs`]
|
github-repos
|
def make_reply(self):
return Message(to=str(self.sender), sender=str(self.to), body=self.body, thread=self.thread, metadata=self.metadata)
|
Creates a copy of the message, exchanging sender and receiver
Returns:
spade.message.Message: a new message with exchanged sender and receiver
|
codesearchnet
|
def has_shell_command(self, command) -> bool:
try:
output = self.shell(['command', '-v', command]).decode('utf-8').strip()
return command in output
except AdbError:
return False
|
Checks to see if a given command exists on the device.
Args:
command: A string that is the name of the command to check.
Returns:
A boolean that is True if the command exists and False otherwise.
|
github-repos
|
def update_fetch_positions(self, partitions):
for tp in partitions:
if not self._subscriptions.is_assigned(tp):
log.warning("partition %s is not assigned - skipping offset"
" update", tp)
continue
elif self._subscriptions.is_fetchable(tp):
log.warning("partition %s is still fetchable -- skipping offset"
" update", tp)
continue
if self._subscriptions.is_offset_reset_needed(tp):
self._reset_offset(tp)
elif self._subscriptions.assignment[tp].committed is None:
self._subscriptions.need_offset_reset(tp)
self._reset_offset(tp)
else:
committed = self._subscriptions.assignment[tp].committed
log.debug("Resetting offset for partition %s to the committed"
" offset %s", tp, committed)
self._subscriptions.seek(tp, committed)
|
Update the fetch positions for the provided partitions.
Arguments:
partitions (list of TopicPartitions): partitions to update
Raises:
NoOffsetForPartitionError: if no offset is stored for a given
partition and no reset policy is available
|
juraj-google-style
|
def _load_submissions_from_datastore_dir(self, dir_suffix, id_pattern):
submissions = self._storage_client.list_blobs(
prefix=os.path.join(self._round_name, dir_suffix))
return {
id_pattern.format(idx): SubmissionDescriptor(
path=s, participant_id=participant_from_submission_path(s))
for idx, s in enumerate(submissions)
}
|
Loads list of submissions from the directory.
Args:
dir_suffix: suffix of the directory where submissions are stored,
one of the following constants: ATTACK_SUBDIR, TARGETED_ATTACK_SUBDIR
or DEFENSE_SUBDIR.
id_pattern: pattern which is used to generate (internal) IDs
for submissions. One of the following constants: ATTACK_ID_PATTERN,
TARGETED_ATTACK_ID_PATTERN or DEFENSE_ID_PATTERN.
Returns:
dictionary with all found submissions
|
juraj-google-style
|
def create(self, validated_data):
email_query = models.EmailAddress.objects.filter(email=self.validated_data['email'])
if email_query.exists():
email = email_query.get()
email.send_duplicate_notification()
else:
email = super(EmailSerializer, self).create(validated_data)
email.send_confirmation()
user = validated_data.get('user')
query = models.EmailAddress.objects.filter(is_primary=True, user=user)
if (not query.exists()):
email.set_primary()
return email
|
Create a new email and send a confirmation to it.
Returns:
The newly created ``EmailAddress`` instance.
|
codesearchnet
|
def execute_by_options(args):
if (args['subcommand'] == 'sphinx'):
s = Sphinx(proj_info)
if args['quickstart']:
s.quickstart()
elif args['gen_code_api']:
s.gen_code_api()
elif args['rst2html']:
s.rst2html()
elif (args['subcommand'] == 'offline_dist'):
pod = PyOfflineDist()
if args['freeze_deps']:
pod.freeze_deps()
elif args['download_deps']:
pod.download_deps()
elif args['install_deps']:
pod.install_deps()
elif args['clean_deps']:
pod.clean_deps()
elif args['mkbinary']:
pod.pyinstaller_mkbinary(args['mkbinary'])
elif args['clean_binary']:
pod.clean_binary()
|
Execute the appropriate action based on the argument dictionary.
Args:
args (dict): command line argument dictionary
|
codesearchnet
|
def valueReadPreprocessor(valueString, replaceParamsFile=None):
if type(valueString) is bool:
log.warning("Only numerical variable types can be handled by the valueReadPreprocessor function.")
return valueString
processedValue = valueString
if replaceParamsFile is not None and valueString is not None:
if '[' in valueString or ']' in valueString:
processedValue = '{0}'.format(REPLACE_NO_VALUE)
for targetParam in replaceParamsFile.targetParameters:
if targetParam.targetVariable == valueString:
processedValue = '{0}'.format(-1 * targetParam.id)
break
return processedValue
|
Apply global pre-processing to values during reading throughout the project.
Args:
valueString (str): String representing the value to be preprocessed.
replaceParamsFile (gsshapy.orm.ReplaceParamFile, optional): Instance of the replace param file. Required if
replacement variables are included in the project.
Returns:
str: Processed value as a string
|
juraj-google-style
|
def plot_series(filename, plot_kwargs=None):
import matplotlib.pyplot as plt
if plot_kwargs is None:
plot_kwargs = {}
data = np.genfromtxt(filename, dtype='i8,f4', names=['k', 'v'])
index = data['k']
values = data['v']
plt.plot(index, values, **plot_kwargs)
|
Plot series data from MonitorSeries output text file.
Args:
filename (str): Path to *.series.txt file produced by :obj:`~nnabla.MonitorSeries` class.
plot_kwargs (dict, optional):
Keyword arguments passed to :func:`matplotlib.pyplot.plot`.
Note:
matplotlib package is required.
|
juraj-google-style
|
def console_wait_for_keypress(flush: bool) -> Key:
key = Key()
lib.TCOD_console_wait_for_keypress_wrapper(key.key_p, flush)
return key
|
Block until the user presses a key, then returns a new Key.
Args:
flush (bool): If True then the event queue is cleared before waiting
for the next event.
Returns:
Key: A new Key instance.
.. deprecated:: 9.3
Use the :any:`tcod.event.wait` function to wait for events.
|
codesearchnet
|
def resname_in_proximity(resname, model, chains, resnums, threshold=5):
residues = [r for r in model.get_residues() if (r.get_resname() == resname)]
chains = ssbio.utils.force_list(chains)
resnums = ssbio.utils.force_list(resnums)
for chain in chains:
for resnum in resnums:
my_residue_last_atom = model[chain][resnum].child_list[(- 1)]
for rz in residues:
distance = (rz.child_list[(- 1)] - my_residue_last_atom)
if (distance < threshold):
return True
return False
|
Search within the proximity of a defined list of residue numbers and their chains for any specified residue name.
Args:
resname (str): Residue name to search for in proximity of specified chains + resnums
model: Biopython Model object
chains (str, list): Chain ID or IDs to check
resnums (int, list): Residue numbers within the chain to check
threshold (float): Cutoff in Angstroms for returning True if a RESNAME is near
Returns:
bool: True if a RESNAME is within the threshold cutoff
|
codesearchnet
|
def front(self, n):
new_dtypes = (
self._dtype_cache if self._dtype_cache is None else self._dtype_cache[:n]
)
if self._is_transposed:
result = self.__constructor__(
self.data.transpose().take(0, n).transpose(),
self.index,
self.columns[:n],
new_dtypes,
)
result._is_transposed = True
else:
result = self.__constructor__(
self.data.take(1, n), self.index, self.columns[:n], new_dtypes
)
return result
|
Returns the first n columns.
Args:
n: Integer containing the number of columns to return.
Returns:
DataManager containing the first n columns of the original DataManager.
|
juraj-google-style
|
def proba2onehot(proba: [list, np.ndarray], confident_threshold: float, classes: [list, np.ndarray]) -> np.ndarray:
return labels2onehot(proba2labels(proba, confident_threshold, classes), classes)
|
Convert vectors of probabilities to one-hot representations using confident threshold
Args:
proba: samples where each sample is a vector of probabilities of belonging to the given classes
confident_threshold: probability threshold above which a sample is assigned to a class
classes: array of classes' names
Returns:
2d array with one-hot representation of given samples
|
codesearchnet
|
def send(self, message):
body = {
'notificationType': self._notification_type,
'priority': self._priority,
'isOrganization': self._is_organization,
'message': message,
}
if self._recipients:
body['recipients'] = self._recipients
self._tcex.log.debug('notification body: {}'.format(json.dumps(body)))
resource = self._tcex.resource('Notification')
resource.http_method = 'POST'
resource.body = json.dumps(body)
results = resource.request()
if results.get('response').status_code == 200:
response = results.get('response').json()
elif results.get('response').status_code == 400:
err = 'Failed to send notification ({})'.format(results.get('response').text)
self._tcex.log.error(err)
response = results.get('response').json()
else:
err = 'Failed to send notification ({})'.format(results.get('response').text)
self._tcex.log.error(err)
raise RuntimeError(err)
return response
|
Send our message
Args:
message (str): The message to be sent.
Returns:
requests.models.Response: The response from the request.
|
juraj-google-style
|
def tabledata_insert_all(self, table_name, rows):
url = ((Api._ENDPOINT + (Api._TABLES_PATH % table_name)) + '/insertAll')
data = {'kind': 'bigquery#tableDataInsertAllRequest', 'rows': rows}
return datalab.utils.Http.request(url, data=data, credentials=self._credentials)
|
Issues a request to insert data into a table.
Args:
table_name: the name of the table as a tuple of components.
rows: the data to populate the table, as a list of dictionaries.
Returns:
A parsed result object.
Raises:
Exception if there is an error performing the operation.
|
codesearchnet
|
def format_arguments(*args):
positional_args = []
kwargs = {}
split_key = None
for arg in args:
if arg.startswith('--'):
arg = arg[2:]
if '=' in arg:
key, value = arg.split('=', 1)
kwargs[key.replace('-', '_')] = value
else:
split_key = arg.replace('-', '_')
elif split_key:
kwargs[split_key] = arg
split_key = None
else:
positional_args.append(arg)
return positional_args, kwargs
|
Converts a list of arguments from the command line into a list of
positional arguments and a dictionary of keyword arguments.
Handled formats for keyword arguments are:
* --argument=value
* --argument value
Args:
*args (list): a list of arguments
Returns:
([positional_args], {kwargs})
|
juraj-google-style
|
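Both keyword formats in action; the function itself has no external dependencies:

args, kwargs = format_arguments('build', '--out-dir=dist', '--dry-run', 'true', 'extra')
print(args)    # ['build', 'extra']
print(kwargs)  # {'out_dir': 'dist', 'dry_run': 'true'}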
def retry_api_check(exception):
if isinstance(exception, apiclient.errors.HttpError):
if exception.resp.status in TRANSIENT_HTTP_ERROR_CODES:
_print_error('Retrying...')
return True
if isinstance(exception, socket.error):
if exception.errno in TRANSIENT_SOCKET_ERROR_CODES:
_print_error('Retrying...')
return True
if isinstance(exception, oauth2client.client.AccessTokenRefreshError):
_print_error('Retrying...')
return True
if isinstance(exception, SSLError):
_print_error('Retrying...')
return True
if isinstance(exception, ServerNotFoundError):
_print_error('Retrying...')
return True
return False
|
Return True if we should retry. False otherwise.
Args:
exception: An exception to test for transience.
Returns:
True if we should retry. False otherwise.
|
juraj-google-style
|
def process_gatt_service(services, event):
length = (len(event.payload) - 5)
(handle, start, end, uuid) = unpack(('<BHH%ds' % length), event.payload)
uuid = process_uuid(uuid)
services[uuid] = {'uuid_raw': uuid, 'start_handle': start, 'end_handle': end}
|
Process a BGAPI event containing a GATT service description and add it to a dictionary
Args:
services (dict): A dictionary of discovered services that is updated with this event
event (BGAPIPacket): An event containing a GATT service
|
codesearchnet
|
def set_ignores(self, folder, *patterns):
if not patterns:
return {}
data = {'ignore': list(patterns)}
return self.post('ignores', params={'folder': folder}, data=data)
|
Applies ``patterns`` to ``folder``'s ``.stignore`` file.
Args:
folder (str): ID of the folder whose ``.stignore`` file is updated.
patterns (str): One or more ignore patterns to apply.
Returns:
dict
|
juraj-google-style
|
def ParseTextToDicts(self, *args, **kwargs):
result_lists = self.ParseText(*args, **kwargs)
result_dicts = []
for row in result_lists:
result_dicts.append(dict(zip(self.header, row)))
return result_dicts
|
Calls ParseText and turns the result into list of dicts.
List items are dicts of rows, dict key is column header and value is column
value.
Args:
text: (str), Text to parse with embedded newlines.
eof: (boolean), Set to False if we are parsing only part of the file.
Suppresses triggering EOF state.
Raises:
TextFSMError: An error occurred within the FSM.
Returns:
List of dicts.
|
codesearchnet
|
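A usage sketch assuming this method belongs to the standard textfsm package:

import io
import textfsm

template = io.StringIO(
    'Value Interface (\\S+)\n'
    'Value Status (up|down)\n'
    '\n'
    'Start\n'
    '  ^${Interface} ${Status} -> Record\n'
)
fsm = textfsm.TextFSM(template)
print(fsm.ParseTextToDicts('eth0 up\neth1 down\n'))
# [{'Interface': 'eth0', 'Status': 'up'}, {'Interface': 'eth1', 'Status': 'down'}]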
def set_shape(self, shape):
raise NotImplementedError
|
Overrides the shape for this variable.
Args:
shape: the `TensorShape` representing the overridden shape.
|
github-repos
|
def send_email_message(self, recipient, subject, html_message, text_message, sender_email, sender_name):
sender = (('"%s" <%s>' % (sender_name, sender_email)) if sender_name else sender_email)
if (not current_app.testing):
try:
from flask_mail import Message
message = Message(subject, sender=sender, recipients=[recipient], html=html_message, body=text_message)
self.mail.send(message)
except (socket.gaierror, socket.error) as e:
raise EmailError('SMTP Connection error: Check your MAIL_SERVER and MAIL_PORT settings.')
except smtplib.SMTPAuthenticationError:
raise EmailError('SMTP Authentication error: Check your MAIL_USERNAME and MAIL_PASSWORD settings.')
|
Send email message via Flask-Mail.
Args:
recipient: Email address or tuple of (Name, Email-address).
subject: Subject line.
html_message: The message body in HTML.
text_message: The message body in plain text.
sender_email: The sender's email address.
sender_name: An optional display name for the sender.
|
codesearchnet
|
def _generate_state(self, trans):
state = PDAState()
state.id = self.nextstate()
state.type = 2
state.sym = state.id
state.trans = trans.copy()
self.toadd.append(state)
return state.id
|
Creates a new POP state (type - 2) with the same transitions.
The POPed symbol is the unique number of the state.
Args:
trans (dict): Transition dictionary
Returns:
Int: The state identifier
|
juraj-google-style
|
def __init__(self, scope, parent, paren=False):
CodeLiteral.__init__(self, scope, parent, None, 'null', paren)
|
Constructor for null literals.
Args:
scope (CodeEntity): The program scope where this object belongs.
parent (CodeEntity): This object's parent in the program tree.
Kwargs:
paren (bool): Whether the null literal is enclosed in parentheses.
|
juraj-google-style
|
def affine_coupling(name, x, mid_channels=512, activation="relu",
reverse=False, dropout=0.0):
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
x_shape = common_layers.shape_list(x)
x1, x2 = tf.split(x, num_or_size_splits=2, axis=-1)
z1 = x1
log_scale_and_shift = conv_stack(
"nn", x1, mid_channels, x_shape[-1], activation=activation,
dropout=dropout)
shift = log_scale_and_shift[:, :, :, 0::2]
scale = tf.nn.sigmoid(log_scale_and_shift[:, :, :, 1::2] + 2.0)
if not reverse:
z2 = (x2 + shift) * scale
else:
z2 = x2 / scale - shift
objective = tf.reduce_sum(tf.log(scale), axis=[1, 2, 3])
if reverse:
objective *= -1
return tf.concat([z1, z2], axis=3), objective
|
Reversible affine coupling layer.
Args:
name: variable scope.
x: 4-D Tensor.
mid_channels: number of channels in the coupling layer.
activation: Can be either "relu" or "gatu".
reverse: Forward or reverse operation.
dropout: default, 0.0
Returns:
output: x shifted and scaled by an affine transformation.
objective: log-determinant of the jacobian
|
juraj-google-style
|
def remove_attribute(self, attr):
update = [fapi._attr_rem(attr)]
r = fapi.update_workspace_attributes(self.namespace, self.name,
update, self.api_url)
self.data["workspace"]["attributes"].pop(attr, None)
fapi._check_response_code(r, 200)
|
Remove attribute from a workspace.
Args:
attr (str): attribute name
|
juraj-google-style
|
def ReadPathInfoHistory(self, client_id, path_type, components):
histories = self.ReadPathInfosHistories(client_id, path_type, [components])
return histories[components]
|
Reads a collection of hash and stat entry for given path.
Args:
client_id: An identifier string for a client.
path_type: A type of a path to retrieve path history for.
components: A tuple of path components corresponding to path to retrieve
information for.
Returns:
A list of `rdf_objects.PathInfo` ordered by timestamp in ascending order.
|
juraj-google-style
|
def _format_batch_statuses(statuses, batch_ids, tracker):
proto_statuses = []
for batch_id in batch_ids:
if statuses[batch_id] == \
client_batch_submit_pb2.ClientBatchStatus.INVALID:
invalid_txns = tracker.get_invalid_txn_info(batch_id)
for txn_info in invalid_txns:
try:
txn_info['transaction_id'] = txn_info.pop('id')
except KeyError as e:
LOGGER.debug(e)
else:
invalid_txns = None
proto_statuses.append(
client_batch_submit_pb2.ClientBatchStatus(
batch_id=batch_id,
status=statuses[batch_id],
invalid_transactions=invalid_txns))
return proto_statuses
|
Takes a statuses dict and formats it for transmission with Protobuf and
ZMQ.
Args:
statuses (dict of int): Dict with batch ids as the key, status as value
batch_ids (list of str): The batch ids in their original order
tracker (BatchTracker): A batch tracker with access to invalid info
Returns:
list of ClientBatchStatus: one formatted status per batch id
|
juraj-google-style
|
def easeInOutQuart(n):
_checkRange(n)
n = (2 * n)
if (n < 1):
return (0.5 * (n ** 4))
else:
n = (n - 2)
return ((- 0.5) * ((n ** 4) - 2))
|
A quartic tween function that accelerates, reaches the midpoint, and then decelerates.
Args:
n (float): The time progress, starting at 0.0 and ending at 1.0.
Returns:
(float) The line progress, starting at 0.0 and ending at 1.0. Suitable for passing to getPointOnLine().
|
codesearchnet
|
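A few worked points, assuming _checkRange merely validates 0.0 <= n <= 1.0:

print(easeInOutQuart(0.0))   # 0.0
print(easeInOutQuart(0.25))  # 0.5 * 0.5**4 = 0.03125 (slow start)
print(easeInOutQuart(0.5))   # 0.5 (midpoint)
print(easeInOutQuart(0.75))  # -0.5 * ((-0.5)**4 - 2) = 0.96875 (slow finish)
print(easeInOutQuart(1.0))   # 1.0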
def cmd_ssh(options):
import os
import subprocess
from os.path import expanduser
options.inst_state = 'running'
(i_info, param_str) = gather_data(options)
(tar_inst, tar_idx) = determine_inst(i_info, param_str, options.command)
home_dir = expanduser('~')
if (options.user is None):
tar_aminame = awsc.get_one_aminame(i_info[tar_idx]['ami'])
options.user = cmd_ssh_user(tar_aminame, i_info[tar_idx]['tag']['Name'])
else:
debg.dprint('LoginUser set by user: ', options.user)
os_spec = {'nt': ['powershell plink', '\\', 'ppk']}
c_itm = os_spec.get(os.name, ['ssh', '/', 'pem'])
cmd_ssh_run = c_itm[0]
if (not options.nopem):
cmd_ssh_run += ' -i {0}{1}.aws{1}{2}.{3}'.format(home_dir, c_itm[1], i_info[tar_idx]['ssh_key'], c_itm[2])
else:
debg.dprint('Connect string: ', 'ssh {}@{}'.format(options.user, i_info[tar_idx]['pub_dns_name']))
cmd_ssh_run += ' {0}@{1}'.format(options.user, i_info[tar_idx]['pub_dns_name'])
print(cmd_ssh_run)
subprocess.call(cmd_ssh_run, shell=True)
|
Connect to the specified instance via ssh.
Finds instances that match the user specified args that are also
in the 'running' state. The target instance is determined, the
required connection information is retrieved (IP, key and ssh
user-name), then an 'ssh' connection is made to the instance.
Args:
options (object): contains args and data from parser
|
codesearchnet
|
def _render_node_traceback(self, node_name):
lines = [RL(''), RL(''), RL('Traceback of node construction:', 'bold')]
try:
node_stack = self._debug_dump.node_traceback(node_name)
for depth, (file_path, line, function_name, text) in enumerate(node_stack):
lines.append('%d: %s' % (depth, file_path))
attribute = debugger_cli_common.MenuItem('', 'ps %s -b %d' % (file_path, line)) if text else None
line_number_line = RL(' ')
line_number_line += RL('Line: %d' % line, attribute)
lines.append(line_number_line)
lines.append(' Function: %s' % function_name)
lines.append(' Text: ' + ('"%s"' % text if text else 'None'))
lines.append('')
except KeyError:
lines.append('(Node unavailable in the loaded Python graph)')
except LookupError:
lines.append('(Unavailable because no Python graph has been loaded)')
return debugger_cli_common.rich_text_lines_from_rich_line_list(lines)
|
Render traceback of a node's creation in Python, if available.
Args:
node_name: (str) name of the node.
Returns:
A RichTextLines object containing the stack trace of the node's
construction.
|
github-repos
|
def fill_with_past_key_values_(self, inputs_or_outputs: Mapping[str, Mapping[int, str]], direction: str, inverted_values_shape: bool=False):
if direction not in ['inputs', 'outputs']:
raise ValueError(f'direction must either be "inputs" or "outputs", but {direction} was given')
name = 'past_key_values' if direction == 'inputs' else 'present'
for i in range(self.num_layers):
inputs_or_outputs[f'{name}.{i}.key'] = {0: 'batch', 2: 'past_sequence + sequence'}
if inverted_values_shape:
inputs_or_outputs[f'{name}.{i}.value'] = {0: 'batch', 1: 'past_sequence + sequence'}
else:
inputs_or_outputs[f'{name}.{i}.value'] = {0: 'batch', 2: 'past_sequence + sequence'}
|
Fill the inputs_or_outputs mapping with past_key_values dynamic axes.
Args:
inputs_or_outputs: The mapping to fill.
direction: either "inputs" or "outputs", it specifies whether inputs_or_outputs is the input mapping or the
output mapping, this is important for axes naming.
inverted_values_shape:
If `True`, store values on dynamic axis 1, else on axis 2.
|
github-repos
|
def all_subnets_longer_prefix(ip_net, cidr):
subnets_list = list()
while int(cidr) <= 32:
try:
subnets_list.append('%s/%s' % (whole_subnet_maker(ip_net, cidr), cidr))
except Exception as e:
LOGGER.critical('Function all_subnets_longer_prefix {item}'.format(item=e))
pass
cidr = str(int(cidr) + 1)
return subnets_list
|
Function to return every subnet an IP can belong to with a longer prefix
Args:
ip_net: Unicast or Multicast IP address or subnet in the following format 192.168.1.1, 239.1.1.1
cidr: CIDR value of 0 to 32
Returns: returns a list of subnets
|
juraj-google-style
|
def _parse_trunk_allowed_vlans(self, config):
match = re.search(r'switchport trunk allowed vlan (.+)$', config, re.M)
return dict(trunk_allowed_vlans=match.group(1))
|
Scans the specified config and parse the trunk allowed vlans value
Args:
config (str): The interface configuration block to scan
Returns:
dict: A Python dict object with the value of switchport trunk
allowed vlans value. The dict returned is intended to be
merged into the resource dict
|
juraj-google-style
|
def layout(self, dimensions=None, **kwargs):
dimensions = self._valid_dimensions(dimensions)
if len(dimensions) == self.ndims:
with item_check(False):
return NdLayout(self, **kwargs).reindex(dimensions)
return self.groupby(dimensions, container_type=NdLayout, **kwargs)
|
Group by supplied dimension(s) and lay out groups
Groups data by supplied dimension(s) laying the groups along
the dimension(s) out in a NdLayout.
Args:
dimensions: Dimension(s) to group by
Returns:
NdLayout with supplied dimensions
|
juraj-google-style
|
def clipping_params(ts, capacity=100):
ts_sorted = ts.order(ascending=False)
i, t0, t1, integral = 1, None, None, 0
while integral <= capacity and i+1 < len(ts):
i += 1
t0_within_capacity = t0
t1_within_capacity = t1
t0 = min(ts_sorted.index[:i])
t1 = max(ts_sorted.index[:i])
integral = integrated_change(ts[t0:t1])
print(i, t0, ts[t0], t1, ts[t1], integral)
if t0_within_capacity and t1_within_capacity:
return t0_within_capacity, t1_within_capacity
|
Start and end index that clips the price/value of a time series the most
Assumes that the integrated maximum includes the peak (instantaneous maximum).
Arguments:
ts (TimeSeries): Time series to attempt to clip to as low a max value as possible
capacity (float): Total "funds" or "energy" available for clipping (integrated area under time series)
Returns:
2-tuple: Timestamp of the start and end of the period of the maximum clipped integrated increase
|
juraj-google-style
|
def _GetVisitSource(self, visit_identifier, cache, database):
sync_cache_results = cache.GetResults('sync')
if not sync_cache_results:
result_set = database.Query(self._SYNC_CACHE_QUERY)
cache.CacheQueryResults(result_set, 'sync', 'id', ('source',))
sync_cache_results = cache.GetResults('sync')
if sync_cache_results and visit_identifier:
results = sync_cache_results.get(visit_identifier, None)
if results:
return results[0]
return None
|
Retrieves a visit source type based on the identifier.
Args:
visit_identifier (str): identifier from the visits table for the
particular record.
cache (SQLiteCache): cache which contains cached results from querying
the visit_source table.
database (SQLiteDatabase): database.
Returns:
int: visit source type or None if no visit source type was found for
the identifier.
|
juraj-google-style
|
def export_template(access_token, subscription_id, rgname):
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourcegroups/', rgname,
'/exportTemplate',
'?api-version=', RESOURCE_API])
rg_body = {'options':'IncludeParameterDefaultValue', 'resources':['*']}
body = json.dumps(rg_body)
return do_post(endpoint, body, access_token)
|
Capture the specified resource group as a template
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
rgname (str): Azure resource group name.
Returns:
HTTP response. JSON body.
|
juraj-google-style
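For illustration, the endpoint this helper assembles looks like the following; the values are placeholders, and `RESOURCE_API` is whatever API version the surrounding module defines:
```python
# Placeholder values only; get_rm_endpoint() normally returns the ARM base URL.
endpoint = ''.join(['https://management.azure.com',
                    '/subscriptions/', '<subscription-id>',
                    '/resourcegroups/', '<rg-name>',
                    '/exportTemplate',
                    '?api-version=', '<RESOURCE_API>'])
print(endpoint)
# https://management.azure.com/subscriptions/<subscription-id>/resourcegroups/<rg-name>/exportTemplate?api-version=<RESOURCE_API>
```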
|
def determine_encoding(path, default=None):
    # Check UTF-32 before UTF-16: the UTF-32-LE BOM (FF FE 00 00) begins with the UTF-16-LE BOM (FF FE).
    byte_order_marks = (('utf-8-sig', (codecs.BOM_UTF8,)), ('utf-32', (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE)), ('utf-16', (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE)))
try:
with open(path, 'rb') as infile:
raw = infile.read(4)
except IOError:
return default
for (encoding, boms) in byte_order_marks:
if any((raw.startswith(bom) for bom in boms)):
return encoding
return default
|
Determines the encoding of a file based on byte order marks.
Arguments:
path (str): The path to the file.
default (str, optional): The encoding to return if the byte-order-mark
lookup does not return an answer.
Returns:
str: The encoding of the file.
|
codesearchnet
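A self-contained check of the BOM lookup, writing a temporary UTF-8-with-BOM file:
```python
import codecs
import os
import tempfile

fd, path = tempfile.mkstemp()
os.close(fd)
with open(path, 'wb') as f:
    f.write(codecs.BOM_UTF8 + b'hello')
print(determine_encoding(path, default='ascii'))  # -> 'utf-8-sig'
os.remove(path)
```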
|
def set_presence(self, state=None, status=None, priority=None):
state = (state if (state is not None) else self.state)
status = (status if (status is not None) else self.status)
priority = (priority if (priority is not None) else self.priority)
self.presenceserver.set_presence(state, status, priority)
|
Change the presence broadcast by the client.
If the client is currently connected, the new presence is broadcast immediately.
Args:
state(aioxmpp.PresenceState, optional): New presence state to broadcast (Default value = None)
status(dict or str, optional): New status information to broadcast (Default value = None)
priority (int, optional): New priority for the resource (Default value = None)
|
codesearchnet
|
def __pad_value(value, pad_len_multiple, pad_char):
assert (pad_len_multiple > 0)
assert (len(pad_char) == 1)
padding_length = ((pad_len_multiple - (len(value) % pad_len_multiple)) % pad_len_multiple)
return (value + (pad_char * padding_length))
|
Add padding characters to the value if needed.
Args:
value: The string value to be padded.
pad_len_multiple: Pad the result so its length is a multiple
of pad_len_multiple.
pad_char: The character to use for padding.
Returns:
The string value with padding characters added.
|
codesearchnet
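For example, padding to a multiple of four, as base64 padding does:
```python
print(__pad_value('abc', 4, '='))   # -> 'abc='
print(__pad_value('abcd', 4, '='))  # -> 'abcd' (length already a multiple of 4)
```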
|
def ready(self, node_id, metadata_priority=True):
self.maybe_connect(node_id)
return self.is_ready(node_id, metadata_priority=metadata_priority)
|
Check whether a node is connected and ok to send more requests.
Arguments:
node_id (int): the id of the node to check
metadata_priority (bool): Mark node as not-ready if a metadata
refresh is required. Default: True
Returns:
bool: True if we are ready to send to the given node
|
juraj-google-style
|
def get_compatible_func(op, func):
op_signature = _remove_annotation(tf_inspect.signature(op))
func_signature = _remove_annotation(tf_inspect.signature(func))
if op_signature == func_signature:
return func
op_pos_names = _get_required_param_names(op_signature)
func_pos_names = _get_required_param_names(func_signature)
if op_pos_names != func_pos_names:
raise AssertionError(f"The decorated function's non-default arguments must be identical to that of the overridden op. func has {func_pos_names}. op has {op_pos_names}.")
func_missing_params = {}
for name in set(op_signature.parameters.keys()) - set(func_signature.parameters.keys()):
p = op_signature.parameters[name]
if p.default is p.empty:
raise AssertionError(f"The decorated function's signature must implement all of the non-default arguments of the overridden op. Argument `{name}` is unimplemented.")
func_missing_params[name] = p
def compatible_func(*args, **kwargs):
bound = op_signature.bind(*args, **kwargs)
for name, param in func_missing_params.items():
if name not in bound.arguments:
continue
value = bound.arguments.pop(name)
if value is not param.default:
raise AssertionError(f'Dispatched op is called with argument `{name}` set to a non-default value, which is not supported by the decorated function')
return func(*bound.args, **bound.kwargs)
return compatible_func
|
Returns a compatible function.
Args:
op: a callable with whose signature the returned function is compatible.
func: a callable which is called by the returned function.
Returns:
a compatible function, which conducts the actions of `func` but can
be called like `op`, given that:
- the list of required arguments in `func` and `op` are the same.
- there is no override of the default arguments of `op` that are not
supported by `func`.
|
github-repos
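A hedged usage sketch, assuming `get_compatible_func` and its private helpers are importable: `op` carries an extra defaulted argument that `func` does not implement, so calling the wrapper with the default value is tolerated while overriding it raises:
```python
def op(x, y, name=None):
    ...

def func(x, y):
    return x + y

compat = get_compatible_func(op, func)
print(compat(2, 3))             # -> 5
print(compat(2, 3, name=None))  # default value is tolerated and dropped -> 5
compat(2, 3, name='n')          # raises AssertionError (non-default override)
```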
|
def generate_chrome_trace_format(self, show_dataflow: bool=True, show_memory: bool=False, op_time: str='schedule') -> str:
step_stats_analysis = self.analyze_step_stats(show_dataflow=show_dataflow, show_memory=show_memory, op_time=op_time)
return step_stats_analysis.chrome_trace.format_to_string(pretty=True)
|
Produces a trace in Chrome Trace Format.
Args:
show_dataflow: (Optional.) If True, add flow events to the trace
connecting producers and consumers of tensors.
show_memory: (Optional.) If True, add object snapshot events to the trace
showing the sizes and lifetimes of tensors.
op_time: (Optional.) How the execution time of op is shown in timeline.
Possible values are "schedule", "gpu" and "all".
"schedule" will show op from the time it is scheduled to the end of
the scheduling.
Notice by the end of its scheduling its async kernels may not start
yet. It is shown using the default value from step_stats.
"gpu" will show op with the execution time of its kernels on GPU.
"all" will show op from the start of its scheduling to the end of
its last kernel.
Returns:
A JSON formatted string in Chrome Trace format.
|
github-repos
|
def get_lock_state_transaction(self, transaction_id):
response = None
try:
response = requests.get(urls.get_lockstate_transaction(self._giid, transaction_id), headers={'Accept': 'application/json, text/javascript, */*; q=0.01', 'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text)
|
Get lock state transaction status
Args:
transaction_id: Transaction ID received from set_lock_state
|
codesearchnet
|
def __init__(self, validate_args=False, name="cholesky_outer_product"):
self._graph_parents = []
self._name = name
super(CholeskyOuterProduct, self).__init__(
forward_min_event_ndims=2,
validate_args=validate_args,
name=name)
|
Instantiates the `CholeskyOuterProduct` bijector.
Args:
validate_args: Python `bool` indicating whether arguments should be
checked for correctness.
name: Python `str` name given to ops managed by this object.
|
juraj-google-style
|
def _get_internal_slot(slot_key=None, filler_pipeline_key=None, slot_dict=None):
if (slot_dict is None):
slot_dict = {}
slot_record = slot_dict.get(slot_key)
if (slot_record is None):
raise PipelineStatusError(('Could not find data for output slot key "%s".' % slot_key))
output = {}
if (slot_record.status == _SlotRecord.FILLED):
output['status'] = 'filled'
output['fillTimeMs'] = _get_timestamp_ms(slot_record.fill_time)
output['value'] = slot_record.value
filler_pipeline_key = _SlotRecord.filler.get_value_for_datastore(slot_record)
else:
output['status'] = 'waiting'
if filler_pipeline_key:
output['fillerPipelineId'] = filler_pipeline_key.name()
return output
|
Gets information about a _SlotRecord for display in UI.
Args:
slot_key: The db.Key of the slot to fetch.
filler_pipeline_key: In the case the slot has not yet been filled, assume
that the given db.Key (for a _PipelineRecord) will be the filler of
the slot in the future.
slot_dict: The slot JSON dictionary.
Returns:
Dictionary with the keys:
status: Slot status: 'filled' or 'waiting'
fillTimeMs: Time in milliseconds since the epoch of when it was filled.
value: The current value of the slot, which is a slot's JSON dictionary.
fillerPipelineId: The pipeline ID of what stage has or should fill
this slot.
Raises:
PipelineStatusError if any input is bad.
|
codesearchnet
|
def __init__(self, name: str, path: str):
self._test_suite = self.create_test_suite(name, path)
|
Initializes the YamlExamplesTestSuite.
Args:
name: The name of the test suite. This will be used as the class name
for the dynamically generated test suite.
path: A string representing the path or glob pattern to search for
YAML example files.
|
github-repos
|
def __call__(self, **kwargs):
assert self._last_report_time is not None, (
"StatusReporter._start() must be called before the first "
"report __call__ is made to ensure correct runtime metrics.")
report_time = time.time()
if TIME_THIS_ITER_S not in kwargs:
kwargs[TIME_THIS_ITER_S] = report_time - self._last_report_time
self._last_report_time = report_time
self._queue.put(kwargs.copy(), block=True)
self._continue_semaphore.acquire()
|
Report updated training status.
Pass in `done=True` when the training job is completed.
Args:
kwargs: Latest training result status.
Example:
>>> reporter(mean_accuracy=1, training_iteration=4)
>>> reporter(mean_accuracy=1, training_iteration=4, done=True)
Raises:
StopIteration: A StopIteration exception is raised if the trial has
been signaled to stop.
|
juraj-google-style
|
def ProduceAnalysisReport(self, plugin):
analysis_report = plugin.CompileReport(self)
if (not analysis_report):
return
analysis_report.time_compiled = timelib.Timestamp.GetNow()
plugin_name = getattr(analysis_report, 'plugin_name', plugin.plugin_name)
if plugin_name:
analysis_report.plugin_name = plugin_name
if self._event_filter_expression:
analysis_report.filter_string = self._event_filter_expression
self._storage_writer.AddAnalysisReport(analysis_report)
self.number_of_produced_analysis_reports += 1
self.number_of_produced_event_tags = self._storage_writer.number_of_event_tags
self.last_activity_timestamp = time.time()
|
Produces an analysis report.
Args:
plugin (AnalysisPlugin): plugin.
|
codesearchnet
|
def _CreateCampaign(client, budget):
campaign_service = client.GetService('CampaignService')
operations = [{
'operator': 'ADD',
'operand': {
            'name': 'Interplanetary Cruise',
'status': 'PAUSED',
'advertisingChannelType': 'SEARCH',
'biddingStrategyConfiguration': {
'biddingStrategyType': 'MANUAL_CPC',
},
'budget': budget,
'settings': [{
'xsi_type': 'DynamicSearchAdsSetting',
'domainName': 'example.com',
'languageCode': 'en'
}],
'startDate': (datetime.datetime.now() +
datetime.timedelta(1)).strftime('%Y%m%d'),
'endDate': (datetime.datetime.now() +
datetime.timedelta(365)).strftime('%Y%m%d'),
}
}]
campaign = campaign_service.mutate(operations)['value'][0]
campaign_id = campaign['id']
    print('Campaign with ID "%d" and name "%s" was added.' % (
        campaign_id, campaign['name']))
return campaign_id
|
Creates the campaign.
Args:
client: an AdWordsClient instance.
budget: a suds.sudsobject.Object representation of a created budget.
Returns:
An integer campaign ID.
|
juraj-google-style
|
def _ReadN(self, n):
ret = ""
while True:
chunk = self._read_file.read(n - len(ret))
ret += chunk
if len(ret) == n or not chunk:
return ret
|
Reads n characters from the input stream, or until EOF.
This is equivalent to the current CPython implementation of read(n), but
not guaranteed by the docs.
Args:
n: int
Returns:
string
|
juraj-google-style
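The read-exactly-n-or-EOF loop can be demonstrated standalone with an in-memory stream:
```python
import io

def read_n(stream, n):
    ret = b''
    while True:
        chunk = stream.read(n - len(ret))
        ret += chunk
        if len(ret) == n or not chunk:
            return ret

print(read_n(io.BytesIO(b'abcdef'), 4))  # b'abcd'
print(read_n(io.BytesIO(b'ab'), 4))      # b'ab' (EOF reached first)
```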
|
def _binding_to_coroutine(state, b, bad_bindings, ret, top, ctx):
if b not in bad_bindings:
ret.PasteBinding(b)
return state
if ctx.matcher(state.node).match_var_against_type(b.variable, ctx.convert.generator_type, {}, {b.variable: b}) is not None:
ret_param = b.data.get_instance_type_parameter(abstract_utils.V)
coroutine = abstract.Coroutine(ctx, ret_param, state.node)
ret.AddBinding(coroutine, [b], state.node)
return state
if not top:
ret.PasteBinding(b)
return state
_, await_method = ctx.attribute_handler.get_attribute(state.node, b.data, '__await__', b)
if await_method is None or not await_method.bindings:
ret.PasteBinding(b)
return state
state, await_obj = ctx.vm.call_function_with_state(state, await_method, ())
state, subret = to_coroutine(state, await_obj, False, ctx)
ret.PasteVariable(subret)
return state
|
Helper for _to_coroutine.
Args:
state: The current state.
b: A cfg.Binding.
bad_bindings: Bindings that are not coroutines.
ret: A return variable that this helper will add to.
top: Whether this is the top-level recursive call.
ctx: The current context.
Returns:
The state.
|
github-repos
|
def encode(self, input_ids: jnp.ndarray, attention_mask: Optional[jnp.ndarray]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, train: bool=False, params: Optional[dict]=None, dropout_rng: PRNGKey=None):
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.return_dict
if attention_mask is None:
attention_mask = jnp.ones_like(input_ids)
rngs = {}
if dropout_rng is not None:
rngs['dropout'] = dropout_rng
def _encoder_forward(module, input_ids, attention_mask, **kwargs):
encode_module = module._get_encoder_module()
return encode_module(input_ids, attention_mask, **kwargs)
return self.module.apply({'params': params or self.params}, input_ids=jnp.array(input_ids, dtype='i4'), attention_mask=jnp.array(attention_mask, dtype='i4'), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs, method=_encoder_forward)
|
Returns:
Example:
```python
>>> from transformers import AutoTokenizer, FlaxT5ForConditionalGeneration
>>> tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-small")
>>> model = FlaxT5ForConditionalGeneration.from_pretrained("google-t5/t5-small")
>>> text = "My friends are cool but they eat too many carbs."
>>> inputs = tokenizer(text, return_tensors="np")
>>> encoder_outputs = model.encode(**inputs)
```
|
github-repos
|
def save_to_mat_file(self, parameter_space, result_parsing_function, filename, runs):
for key in parameter_space:
if (not isinstance(parameter_space[key], list)):
parameter_space[key] = [parameter_space[key]]
dimension_labels = ([{key: str(parameter_space[key])} for key in parameter_space.keys() if (len(parameter_space[key]) > 1)] + [{'runs': range(runs)}])
return savemat(filename, {'results': self.get_results_as_numpy_array(parameter_space, result_parsing_function, runs=runs), 'dimension_labels': dimension_labels})
|
Save the results relative to the desired parameter space to a .mat file.
Args:
parameter_space (dict): dictionary containing
parameter/list-of-values pairs.
result_parsing_function (function): user-defined function, taking a
result dictionary as argument, that can be used to parse the
result files and return a list of values.
filename (path): name of output .mat file.
runs (int): number of runs to gather for each parameter
combination.
|
codesearchnet
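A hypothetical usage sketch; the object exposing save_to_mat_file, the parameter names, and the result keys are all assumptions:
```python
campaign.save_to_mat_file(
    parameter_space={'rate': [1, 2, 4], 'seed': 0},  # scalar values are wrapped into lists
    result_parsing_function=lambda result: [result['throughput']],
    filename='results.mat',
    runs=5)
```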
|
def find_pad_index(self, array):
try:
return list(array).index(self.pad_value)
except ValueError:
return len(array)
|
Find padding index.
Args:
array (list): integer list.
Returns:
idx: padding index.
Examples:
>>> array = [1, 2, 0]
>>> self.find_pad_index(array)
2
|
codesearchnet
|
def display(port=None, height=None):
_display(port=port, height=height, print_message=True, display_handle=None)
|
Display a TensorBoard instance already running on this machine.
Args:
port: The port on which the TensorBoard server is listening, as an
`int`, or `None` to automatically select the most recently
launched TensorBoard.
height: The height of the frame into which to render the TensorBoard
UI, as an `int` number of pixels, or `None` to use a default value
(currently 800).
|
juraj-google-style
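A hedged usage sketch for a notebook cell, assuming a TensorBoard instance is already listening on port 6006:
```python
display(port=6006, height=600)  # render the running TensorBoard in a 600px frame
```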
|
async def _notify_event_internal(self, conn_string, name, event):
try:
self._currently_notifying = True
conn_id = self._get_conn_id(conn_string)
event_maps = self._monitors.get(conn_string, {})
wildcard_maps = self._monitors.get(None, {})
wildcard_handlers = wildcard_maps.get(name, {})
event_handlers = event_maps.get(name, {})
for (handler, func) in itertools.chain(event_handlers.items(), wildcard_handlers.items()):
try:
result = func(conn_string, conn_id, name, event)
if inspect.isawaitable(result):
(await result)
except:
self._logger.warning('Error calling notification callback id=%s, func=%s', handler, func, exc_info=True)
finally:
for action in self._deferred_adjustments:
self._adjust_monitor_internal(*action)
self._deferred_adjustments = []
self._currently_notifying = False
|
Notify that an event has occurred.
This method will send a notification and ensure that all callbacks
registered for it have completed by the time it returns. In
particular, if the callbacks are awaitable, this method will await
them before returning. The order in which the callbacks are called
is undefined.
This is a low level method that is not intended to be called directly.
You should use the high level public notify_* methods for each of the
types of events to ensure consistency in how the event objects are
created.
Args:
conn_string (str): The connection string for the device that the
event is associated with.
name (str): The name of the event. Must be in SUPPORTED_EVENTS.
event (object): The event object. The type of this object will
depend on what is being notified.
|
codesearchnet
|
def to_csv(self, filename=None, as_text=True, use_descriptions=False, dlm=',', header=True):
    if filename is None:
        if not as_text:
            raise StriplogError('You must provide a filename or set as_text to True.')
    else:
        as_text = False
if as_text:
output = StringIO()
else:
output = open(filename, 'w')
fieldnames = ['Top', 'Base', 'Component']
writer = csv.DictWriter(output, delimiter=dlm, fieldnames=fieldnames, quoting=csv.QUOTE_MINIMAL)
if header:
writer.writeheader()
for i in self.__list:
if (use_descriptions and i.description):
text = i.description
elif i.primary:
text = i.primary.summary()
else:
text = ''
data = {j: k for (j, k) in zip(fieldnames, [i.top.z, i.base.z, text])}
writer.writerow(data)
if as_text:
return output.getvalue()
else:
        output.close()
return None
|
Returns a CSV string built from the summaries of the Intervals.
Args:
    filename (str): A filename to write to. If None (the default),
        the CSV is returned as a string and as_text must be True.
    as_text (bool): Whether to return the CSV as a string rather
        than writing it to filename.
    use_descriptions (bool): Whether to use descriptions instead
        of summaries, if available.
    dlm (str): The delimiter.
    header (bool): Whether to form a header row.
Returns:
    str: A string of comma-separated values, or None if written to a file.
|
codesearchnet
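A hypothetical usage sketch, assuming `strip` is a Striplog instance:
```python
csv_text = strip.to_csv()                # CSV returned as a string
strip.to_csv(filename='intervals.csv')   # written to a file instead
```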
|
def get_vnet(access_token, subscription_id, resource_group, vnet_name):
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourceGroups/', resource_group,
'/providers/Microsoft.Network/virtualNetworks/', vnet_name,
'?api-version=', NETWORK_API])
return do_get(endpoint, access_token)
|
Get details about the named virtual network.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
vnet_name (str): Name of the VNet.
Returns:
HTTP response. VNet JSON body.
|
juraj-google-style
|