code stringlengths 20 4.93k | docstring stringlengths 33 1.27k | source stringclasses 3
values |
|---|---|---|
def enable_imu_streaming(self, enabled_imus, enabled_sensors=SENSOR_ALL):
imus_enabled = 0
for imu in enabled_imus:
imus_enabled |= (1 << imu)
if (enabled_sensors == 0):
logger.warn('Not enabling IMUs, no sensors enabled!')
return False
if (not self.dongle._enable_imu_streaming(s... | Configures and enables IMU sensor data streaming.
NOTE: only one streaming mode can be active at any time, so e.g. if you
want to stream IMU data, you must disable SK8-ExtAna streaming first.
Args:
enabled_imus (list): a list of distinct ints in the range `0`-`4`
inclusive identifying the IMU. `0` is the SK8 itself, ... | codesearchnet |
def Begin(self, function_name):
    """Start analyzing a function body.

    Args:
        function_name: The name of the function being tracked.
    """
    self.current_function = function_name
    self.lines_in_function = 0
    self.in_a_function = True
Args:
function_name: The name of the function being tracked. | codesearchnet |
def input(self):
    """Retrieve the input tensor(s) of the layer.

    Only applicable if the layer has exactly one input, i.e. if it is
    connected to one incoming layer.

    Returns:
        Input tensor or list of input tensors.

    Raises:
        AttributeError: If no inbound nodes are found.
    """
    if self._inbound_nodes:
        return self._get_node_attribute_at_index(0, 'input_tensors', 'input')
    raise AttributeError('Layer ' + self.name + ' is not connected, no input to return.')
Only applicable if the layer has exactly one input,
i.e. if it is connected to one incoming layer.
Returns:
Input tensor or list of input tensors.
Raises:
RuntimeError: If called in Eager mode.
AttributeError: If no inbound nodes are found. | github-repos |
def get_i_name(self, num, is_oai=None):
    """Return the current name of the ``i1``/``ind1`` indicator parameter.

    This is used mainly internally, but can be handy when working with a
    raw MARC XML object without the getters.

    Args:
        num (int): Which indicator you need (1/2).
        is_oai (bool/None): If None, :attr:`.oai_marc` is used.

    Returns:
        str: ``i1``/``i2`` for OAI MARC, ``ind1``/``ind2`` otherwise.

    Raises:
        ValueError: if ``num`` is not 1 or 2.
    """
    if num != 1 and num != 2:
        raise ValueError('`num` parameter have to be 1 or 2!')
    use_oai = self.oai_marc if is_oai is None else is_oai
    prefix = 'i' if use_oai else 'ind'
    return prefix + str(num)
with raw MARC XML object and not using getters. | codesearchnet |
Args:
num (int): Which indicator you need (1/2).
is_oai (bool/None): If None, :attr:`.oai_marc` is
used.
Returns:
str: current name of ``i1``/``ind1`` parameter based on \
:attr:`oai_marc` prope... | codesearchnet |
async def get_in_tree_template(link):
context = link.context
source_url = get_source_url(link)
if (not source_url.endswith(('.yml', '.yaml'))):
raise CoTError("{} source url {} doesn't end in .yml or .yaml!".format(link.name, source_url))
tmpl = (await load_json_or_yaml_from_url(context, source_... | Get the in-tree json-e template for a given link.
By convention, this template is SOURCE_REPO/.taskcluster.yml.
Args:
link (LinkOfTrust): the parent link to get the source url from.
Raises:
CoTError: on non-yaml `source_url`
KeyError: on non-well-formed source template
Returns:
dict: the first task in the template. | codesearchnet |
def assert_shape_match(shape1, shape2):
shape1 = tf.TensorShape(shape1)
shape2 = tf.TensorShape(shape2)
if ((shape1.ndims is None) or (shape2.ndims is None)):
raise ValueError(('Shapes must have known rank. Got %s and %s.' % (shape1.ndims, shape2.ndims)))
shape1.assert_same_rank(shape2)
shap... | Ensure the shape1 match the pattern given by shape2.
Ex:
assert_shape_match((64, 64, 3), (None, None, 3))
Args:
shape1 (tuple): Static shape
shape2 (tuple): Dynamic shape (can contain None) | codesearchnet |
def plot_densities(self, ax=None, **kwargs):
ax, fig, plt = get_ax_fig_plt(ax)
ax.grid(True)
ax.set_xlabel('r [Bohr]')
for i, den_name in enumerate(["ae_core_density", "pseudo_core_density"]):
rden = getattr(self, den_name)
label = "$n_c$" if i... | Plot the PAW densities.
Args:
ax: matplotlib :class:`Axes` or None if a new figure should be created.
Returns:
`matplotlib` figure | juraj-google-style |
def forward_pass(self, vector, layer_index, is_transpose=False, is_abs=False):
if(layer_index < 0 or layer_index > self.num_hidden_layers):
raise ValueError('Invalid layer index')
layer_type = self.layer_types[layer_index]
weight = self.weights[layer_index]
if is_abs:
weight = tf.abs(w... | Performs forward pass through the layer weights at layer_index.
Args:
vector: vector that has to be passed through in forward pass
layer_index: index of the layer
is_transpose: whether the weights of the layer have to be transposed
is_abs: whether to take the absolute value of the weights
Returns:
tensor that corresp... | juraj-google-style |
def compress_artifact_if_supported(artifact_path):
(content_type, encoding) = guess_content_type_and_encoding(artifact_path)
log.debug('"{}" is encoded with "{}" and has mime/type "{}"'.format(artifact_path, encoding, content_type))
if ((encoding is None) and (content_type in _GZIP_SUPPORTED_CONTENT_TYPE)):... | Compress artifacts with GZip if they're known to be supported.
This replaces the artifact given by a gzip binary.
Args:
artifact_path (str): the path to compress
Returns:
content_type, content_encoding (tuple): Type and encoding of the file. Encoding equals 'gzip' if compressed. | codesearchnet |
def _version_from_file(path_to_version, default_version=DEFAULT_VERSION):
version_filepath = os.path.join(path_to_version, 'version.txt')
if (not os.path.isfile(version_filepath)):
warnings.warn('Unable to resolve current version', exceptions.ProsperDefaultVersionWarning)
return default_version
... | for PyPI installed versions, just get data from file
Args:
path_to_version (str): abspath to dir where version.txt exists
default_version (str): fallback version in case of error
Returns:
str: current working version | codesearchnet |
def DeregisterPathSpec(cls, path_spec_type):
type_indicator = path_spec_type.TYPE_INDICATOR
if (type_indicator not in cls._path_spec_types):
raise KeyError('Path specification type: {0:s} not set.'.format(type_indicator))
del cls._path_spec_types[type_indicator]
if (type_indicator in cls._system... | Deregisters a path specification.
Args:
path_spec_type (type): path specification type.
Raises:
KeyError: if path specification is not registered. | codesearchnet |
def __init__(self, macs=[], bt_device=''):
self._run_flag = RunFlag()
self._subjects = []
m = Manager()
q = m.Queue()
self._shared_data = m.dict()
self._shared_data['run_flag'] = True
notify_thread = Thread(target=RuuviTagReactive._d... | Start background process for get_datas and async task for notifying all subscribed observers
Args:
macs (list): MAC addresses
bt_device (string): Bluetooth device id | juraj-google-style |
def member_create(self, params, member_id):
member_config = params.get('rsParams', {})
server_id = params.pop('server_id', None)
version = params.pop('version', self._version)
proc_params = {'replSet': self.repl_id}
proc_params.update(params.get('procParams', {}))
... | start new mongod instances as part of replica set
Args:
params - member params
member_id - member index
return member config | juraj-google-style |
def __init__(self, words=None):
    """Build the ``word_id`` and ``id_word`` lookup tables from input.

    Args:
        words (list): list of words sorted by frequency.
    """
    words = self.sanitize_words(words)
    # Map each word to its rank and build the inverse mapping.
    # Note: the original used six's Python-2-only `iteritems`; the
    # Python 3 `dict.items` view is the direct equivalent.
    self.word_id = {w: i for i, w in enumerate(words)}
    self.id_word = {i: w for w, i in self.word_id.items()}
Args:
words (list): list of sorted words according to frequency. | juraj-google-style |
def cast_to_common_dtype(tensors):
highest_float = None
highest_float_size = -1
for x in tensors:
dtype = backend.standardize_dtype(x.dtype)
if is_float(dtype):
if highest_float is None or dtype_size(dtype) > highest_float_size:
highest_float = dtype
... | Cast a list of tensors to a common dtype.
If any tensor is floating-point, they will all be casted to the most-precise
floating-point dtype. Otherwise the tensors are not casted.
Args:
tensors: A list of tensors.
Returns:
Same list, casted to a common dtype. | github-repos |
def create_game(self, map_name, bot_difficulty=sc_pb.VeryEasy, bot_race=sc_common.Random, bot_first=False):
self._controller.ping()
map_inst = maps.get(map_name)
map_data = map_inst.data(self._run_config)
if (map_name not in self._saved_maps):
self._controller.save_map(map_inst.path, map_data)
... | Create a game, one remote agent vs the specified bot.
Args:
map_name: The map to use.
bot_difficulty: The difficulty of the bot to play against.
bot_race: The race for the bot.
bot_first: Whether the bot should be player 1 (else is player 2). | codesearchnet |
def _commit_change(alias_table, export_path=None, post_commit=True):
with open(export_path or GLOBAL_ALIAS_PATH, 'w+') as alias_config_file:
alias_table.write(alias_config_file)
if post_commit:
alias_config_file.seek(0)
alias_config_hash = hashlib.sha1(alias_config_file.... | Record changes to the alias table.
Also write new alias config hash and collided alias, if any.
Args:
alias_table: The alias table to commit.
export_path: The path to export the aliases to. Default: GLOBAL_ALIAS_PATH.
post_commit: True if we want to perform some extra actions after writing alias to file. | juraj-google-style |
def SetIndexName(self, index_name):
    """Sets the Elasticsearch index name.

    Args:
        index_name (str): name of the index.
    """
    self._index_name = index_name
    # Log the new name so misconfigured index targets are traceable.
    logger.debug('Elasticsearch index name: {0:s}'.format(index_name))
Args:
index_name (str): name of the index. | juraj-google-style |
def value(self):
    """Returns the last snapshot of this variable.

    You usually do not need to call this method directly; ops that need
    the variable's value obtain it automatically through a
    `convert_to_tensor()` call.

    Returns:
        A `Tensor` holding the variable's value. A new value cannot be
        assigned to this tensor, as it is not a reference.
    """
    return self._snapshot
You usually do not need to call this method as all ops that need the value
of the variable call it automatically through a `convert_to_tensor()` call.
Returns a `Tensor` which holds the value of the variable. You can not
assign a new value to this tensor as it is not a ref... | github-repos |
def __init__(self, cache_address):
super(CacheAddress, self).__init__()
self.block_number = None
self.block_offset = None
self.block_size = None
self.filename = None
self.value = cache_address
if cache_address & 0x80000000:
self.is_initialized = 'True'
else:
self.is_ini... | Initializes a cache address.
Args:
cache_address (int): cache address. | juraj-google-style |
def VerifyStructure(self, parser_mediator, line):
self._last_month = 0
self._year_use = parser_mediator.GetEstimatedYear()
try:
structure = self.SECURITYD_LINE.parseString(line)
except pyparsing.ParseException:
logger.debug('Not a MacOS securityd log file')
return False
time... | Verify that this file is a securityd log file.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
line (str): line from a text file.
Returns:
bool: True if the line is in the expected format, False if not. | juraj-google-style |
def get_next_as_list(self, name=None):
    """Get the next element from the underlying iterator.

    Runs the iterator's get_next() within the worker's device scope.
    NOTE(review): per the surrounding docs this raises EOFError if any
    device runs out of data — confirm against the iterator contract.

    Args:
        name: not used.

    Returns:
        A list consisting of the next data for each device.
    """
    del name  # unused; kept for signature compatibility
    with ops.device(self._worker):
        return self._format_data_list_with_options(self._iterator.get_next())
Runs the iterator get_next() within a device scope. Since this doesn't use
get_next_as_optional(), it is considerably faster than get_next_as_list(),
but it raises EOFError if any of the device doesn't get any data.
Args:
name: not used.
Returns:
A list consisting of th... | github-repos |
def delete_media_service_rg(access_token, subscription_id, rgname, msname):
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourceGroups/', rgname,
'/providers/microsoft.media/mediaservices/', msname,
... | Delete a media service.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
rgname (str): Azure resource group name.
msname (str): Media service name.
Returns:
HTTP response. | juraj-google-style |
def _format_parameter_error_message(name: str, sig: Signature, num_params: int) -> str:
if (num_params == 0):
plural = 's'
missing = 2
arguments = "'slack' and 'event'"
else:
plural = ''
missing = 1
arguments = "'event'"
return f'{name}{sig} missing {missing} ... | Format an error message for missing positional arguments.
Args:
name: The function name.
sig: The function's signature.
num_params: The number of function parameters.
Returns:
str: A formatted error message. | codesearchnet |
def UpdateLease(self, duration):
    """Updates the lease and flushes the object.

    The lease is set to expire after ``duration`` from the present
    moment. Use this when an operation that requires locking may run
    longer than the lease time specified in OpenWithLock().

    Args:
        duration: how long the renewed lease should last.

    Raises:
        LockError: if the object is not locked, or the current lease has
            already expired.
    """
    if (not self.locked):
        raise LockError(('Object must be locked to update the lease: %s.' % self.urn))
    if (self.CheckLease() == 0):
        # Zero remaining lease time means it expired while we held it.
        self._RaiseLockError('UpdateLease')
    self.transaction.UpdateLease(duration)
The lease is set to expire after the "duration" time from the present
moment.
This method is supposed to be used when operation that requires locking
may run for a time that exceeds the lease time specified in OpenWithLock().
See flows/hunts locking for an example.
Args:
dura... | codesearchnet |
def get_timing_signal(length, min_timescale=1, max_timescale=10000.0, num_timescales=16):
positions = to_float(tf.range(length))
log_timescale_increment = (math.log((max_timescale / min_timescale)) / (num_timescales - 1))
inv_timescales = (min_timescale * tf.exp((to_float(tf.range(num_timescales)) * (- log_... | Create Tensor of sinusoids of different frequencies.
Args:
length: Length of the Tensor to create, i.e. Number of steps.
min_timescale: a float
max_timescale: a float
num_timescales: an int
Returns:
Tensor of shape (length, 2*num_timescales) | codesearchnet |
def relevant_connections(n, _from, to):
    """Construct an |n x n| connectivity matrix.

    Args:
        n (int): The dimensions of the matrix.
        _from (tuple[int]): Nodes with outgoing connections to ``to``.
        to (tuple[int]): Nodes with incoming connections from ``_from``.

    Returns:
        np.ndarray: A matrix whose (i, j) entry is 1 if ``i`` is in
        ``_from`` and ``j`` is in ``to``, and 0 otherwise.
    """
    matrix = np.zeros((n, n))
    if _from and to:
        # Set the cross product of the two index sets in one shot.
        matrix[np.ix_(_from, to)] = 1
    return matrix
Args:
n (int): The dimensions of the matrix
_from (tuple[int]): Nodes with outgoing connections to ``to``
to (tuple[int]): Nodes with incoming connections from ``_from``
Returns:
np.ndarray: An |n x n| connectivity matrix with the |i,jth| entry is
``1`` if |i| is in ``_from`` and |j| ... | juraj-google-style |
def add_virtual_loss(self, up_to):
    """Propagate a virtual loss from this node up toward the root.

    Args:
        up_to: The node to propagate until. (Keep track of this! You'll
            need it to reverse the virtual loss later.)
    """
    self.losses_applied += 1
    # The loss credited at this node is whichever player is to move here.
    self.W += self.position.to_play
    stop_here = (self is up_to) or (self.parent is None)
    if not stop_here:
        self.parent.add_virtual_loss(up_to)
Args:
up_to: The node to propagate until. (Keep track of this! You'll
need it to reverse the virtual loss later.) | codesearchnet |
def calc_stats(prices):
    """Calculates performance stats of a given object.

    If the object is a Series, a PerformanceStats object is returned.
    If the object is a DataFrame, a GroupStats object is returned.

    Args:
        prices (Series, DataFrame): Set of prices.

    Raises:
        NotImplementedError: if ``prices`` is neither a Series nor a
            DataFrame.
    """
    if isinstance(prices, pd.Series):
        return PerformanceStats(prices)
    if isinstance(prices, pd.DataFrame):
        columns = (prices[col] for col in prices.columns)
        return GroupStats(*columns)
    raise NotImplementedError('Unsupported type')
If object is Series, a PerformanceStats object is
returned. If object is DataFrame, a GroupStats object
is returned.
Args:
* prices (Series, DataFrame): Set of prices | juraj-google-style |
def _project_THn(self, hist: Hist) -> Any:
projection_axes = [axis.axis_type.value for axis in self.projection_axes]
if (len(projection_axes) == 2):
projection_axes.reverse()
args = (projection_axes + ['E'])
logger.debug(f'hist: {hist.GetName()} args: {args}')
if (len(projection_axes) > 3):
... | Perform the actual THn -> THn or TH1 projection.
This projection could be to 1D, 2D, 3D, or ND.
Args:
hist (ROOT.THnBase): Histogram from which the projections should be performed.
Returns:
ROOT.THnBase or ROOT.TH1: The projected histogram. | codesearchnet |
def lineReceived(self, line):
while self._in_header:
if line:
self._headers.append(line)
else:
(http, status, message) = self._headers[0].split(' ', 2)
status = int(status)
if (status == 200):
self.factory.get_stream().connected()
... | Callback issued by twisted when new line arrives.
Args:
line (str): Incoming line | codesearchnet |
def depth_may_average_ground_temperature(self, value=None):
if (value is not None):
try:
value = float(value)
except ValueError:
raise ValueError('value {} need to be of type float for field `depth_may_average_ground_temperature`'.format(value))
self._depth_may_average_gr... | Corresponds to IDD Field `depth_may_average_ground_temperature`
Args:
value (float): value for IDD Field `depth_may_average_ground_temperature`
Unit: C
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value | codesearchnet |
def forward(self, hidden_states: torch.Tensor):
    """Apply the pre-LayerNorm feed-forward sub-block with a residual add.

    Args:
        hidden_states (`torch.Tensor` of shape `(batch, len_seq, dim_model)`):
            Hidden states before the feed forward layer.

    Returns:
        `torch.Tensor`: ``hidden_states + dropout(ffn(layernorm(hidden_states)))``.
    """
    residual = hidden_states
    ffn_output = self.ffn(self.layernorm_before_ffn(hidden_states))
    if self.dropout is not None:
        ffn_output = self.dropout(ffn_output)
    return residual + ffn_output
hidden_states (`torch.Tensor` of shape `(batch, len_seq, dim_model)`):
Hidden states before feed forward layer. | github-repos |
def get_filtered_normalized_events(self):
user_image = google_v2_operations.get_action_image(self._op, _ACTION_USER_COMMAND)
need_ok = google_v2_operations.is_success(self._op)
events = {}
for event in google_v2_operations.get_events(self._op):
if self._filter(event):
continue
... | Filter the granular v2 events down to events of interest.
Filter through the large number of granular events returned by the
pipelines API, and extract only those that are interesting to a user. This
is implemented by filtering out events which are known to be uninteresting
(i.e. the default actions run for every job)... | codesearchnet |
def tokens(self):
    """Access the tokens contained within this line.

    The caller must not modify the returned list.

    Returns:
        List of tokens in this line.
    """
    return self._tokens
The caller must not modify the tokens list returned by this method.
Returns:
List of tokens in this line. | github-repos |
def info(self, show_defaults=False):
    """Prints a repr of the object including any applied options.

    Args:
        show_defaults (bool): Whether to include default options.
    """
    pprinter = PrettyPrinter(show_options=True, show_defaults=show_defaults)
    print(pprinter.pprint(self._obj))
Args:
show_defaults: Whether to include default options | juraj-google-style |
def get_all(self, seq_set: SequenceSet) -> Sequence[Tuple[(int, CachedMessage)]]:
if seq_set.uid:
all_uids = (seq_set.flatten(self.max_uid) & self._uids)
return [(seq, self._cache[uid]) for (seq, uid) in enumerate(self._sorted, 1) if (uid in all_uids)]
else:
all_seqs = seq_set.flatten(se... | Return the cached messages, and their sequence numbers, for the
given sequence set.
Args:
seq_set: The message sequence set. | codesearchnet |
def _yellowfin(self):
yellowfin_ops = []
curv_range_ops = self._curvature_range()
yellowfin_ops += curv_range_ops
grad_var_ops = self._grad_variance()
yellowfin_ops += grad_var_ops
dist_to_opt_ops = self._dist_to_opt()
yellowfin_ops += dist_to_opt_ops
self._mu = tf.identity(tf.cond(self.... | YellowFin auto-tuning optimizer based on momentum SGD.
Returns:
YF ops
(Curvature range,
Grad_variance,
Dist_to_opt,
Single-Step,
Auto-Tuning) | codesearchnet |
def __init__(self, app=None):
    """Initialize a new instance of the extension.

    Args:
        app (flask.Flask): the Flask application for which to initialize
            the extension. If None, ``init_app`` must be called later.
    """
    self._key = None
    self._endpoint_uri = None
    self._channel = None
    self._requests_middleware = None
    self._trace_log_handler = None
    self._exception_telemetry_client = None
    if app:
        # Support the common Flask pattern of binding to the app at
        # construction time.
        self.init_app(app)
Args:
app (flask.Flask). the Flask application for which to initialize the extension. | juraj-google-style |
def convert_slow_tokenizer(transformer_tokenizer, from_tiktoken=False) -> Tokenizer:
tokenizer_class_name = transformer_tokenizer.__class__.__name__
if tokenizer_class_name in SLOW_TO_FAST_CONVERTERS and (not from_tiktoken):
converter_class = SLOW_TO_FAST_CONVERTERS[tokenizer_class_name]
return ... | Utilities to convert a slow tokenizer instance in a fast tokenizer instance.
Args:
transformer_tokenizer ([`~tokenization_utils_base.PreTrainedTokenizer`]):
Instance of a slow tokenizer to convert in the backend tokenizer for
[`~tokenization_utils_base.PreTrainedTokenizerFast`].
from_tiktoken (bool, optional): Whether... | github-repos |
def convert_predictions_to_image_summaries(hook_args):
decode_hparams = hook_args.decode_hparams
if (not decode_hparams.display_decoded_images):
return []
predictions = hook_args.predictions[0]
all_summaries = []
rand_predictions = np.random.choice(predictions, size=10)
for (ind, predict... | Optionally converts images from hooks_args to image summaries.
Args:
hook_args: DecodeHookArgs namedtuple
Returns:
summaries: list of tf.Summary values if hook_args.decode_hpara | codesearchnet |
def __add__(self, other):
if (isinstance(other, LazyAllreduceSum) and
self.mesh_impl == other.mesh_impl and
self.mesh_axes == other.mesh_axes):
return LazyAllreduceSum(
self.mesh_impl,
self.mesh_impl.slicewise(
tf.add, self.laid_out_input, other.laid_out_... | Add to another LazyAllreduceSum.
Args:
other: a LazyAllreduceSum or a LaidOutTensor
Returns:
a LazyAllreduceSum or a LaidOutTensor | juraj-google-style |
def cudnn_bi_lstm(units, n_hidden, seq_lengths=None, n_layers=1, trainable_initial_states=False, name='cudnn_bi_gru', reuse=False):
with tf.variable_scope(name, reuse=reuse):
if (seq_lengths is None):
seq_lengths = (tf.ones([tf.shape(units)[0]], dtype=tf.int32) * tf.shape(units)[1])
with... | Fast CuDNN Bi-LSTM implementation
Args:
units: tf.Tensor with dimensions [B x T x F], where
B - batch size
T - number of tokens
F - features
n_hidden: dimensionality of hidden state
seq_lengths: number of tokens in each sample in the batch
n_layers: number of layers
trainable_initial_states: whether to create a specia... | codesearchnet |
def findall(self, title=None):
    """Fetch and return a list of spreadsheets with the given title.

    Args:
        title (str): title/name of the spreadsheets to return, or
            ``None`` for all.

    Returns:
        list: list of new SpreadSheet instances (possibly empty).
    """
    if title is None:
        return list(self)
    files = backend.iterfiles(self._drive, name=title)
    return [self[id] for id, _ in files]
Args:
title(str): title/name of the spreadsheets to return, or ``None`` for all
Returns:
list: list of new SpreadSheet instances (possibly empty) | juraj-google-style |
def get_reconstructed_band_structure(list_bs, efermi=None):
if (efermi is None):
efermi = (sum([b.efermi for b in list_bs]) / len(list_bs))
kpoints = []
labels_dict = {}
rec_lattice = list_bs[0].lattice_rec
nb_bands = min([list_bs[i].nb_bands for i in range(len(list_bs))])
kpoints = np.c... | This method takes a list of band structures and reconstructs
one band structure object from all of them.
This is typically very useful when you split non self consistent
band structure runs in several independent jobs and want to merge back
the results
Args:
list_bs: A list of BandStructure or BandStructureSymmLine o... | codesearchnet |
def deprecated_internal_set_learning_phase(value):
global _GRAPH_LEARNING_PHASES
if value not in {0, 1}:
raise ValueError('Expected learning phase to be 0 or 1.')
with ops.init_scope():
if context.executing_eagerly():
_DUMMY_EAGER_GRAPH.learning_phase_is_set = True
_G... | A deprecated internal implementation of set_learning_phase.
This method is an internal-only version of `set_learning_phase` that
does not raise a deprecation error. It is required because
saved_model needs to keep working with user code that uses the deprecated
learning phase methods until those APIs are fully removed... | github-repos |
def ParseFileObject(self, parser_mediator, file_object):
file_offset = 0
try:
timestamp, event_data = self._ReadEntry(
parser_mediator, file_object, file_offset)
except errors.ParseError as exception:
raise errors.UnableToParseFile(
'Unable to parse first utmp entry wit... | Parses an utmp file-like object.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): a file-like object.
Raises:
UnableToParseFile: when the file cannot be parsed. | juraj-google-style |
def from_str(cls, input_string, fmt):
from pymatgen.io.xyz import XYZ
from pymatgen.io.gaussian import GaussianInput
if (fmt.lower() == 'xyz'):
m = XYZ.from_string(input_string).molecule
elif (fmt in ['gjf', 'g03', 'g09', 'com', 'inp']):
m = GaussianInput.from_string(input_string).molecu... | Reads the molecule from a string.
Args:
input_string (str): String to parse.
fmt (str): Format to output to. Defaults to JSON unless filename
is provided. If fmt is specifies, it overrides whatever the
filename is. Options include "xyz", "gjf", "g03", "json". If
you have OpenBabel installed, any of the formats support... | codesearchnet |
def steps(self, goal):
    """Yield the individual relations leading to the targeted node.

    Args:
        goal (str): Name of the targeted node.

    Yields:
        tuple: consecutive (node, next_node) pairs along the path.
    """
    path = self.path(goal)
    # Pair each node with its successor along the path.
    for current, following in zip(path, path[1:]):
        yield current, following
Args:
goal (str): Name of the targeted node
Return:
list of tuple of Node | juraj-google-style |
def apply_filter(self, expr, value):
if self.skip(value):
return expr
if (not self._valid_value(value)):
msg = 'Invalid value {value} passed to filter {name} - '.format(value=repr(value), name=self.name)
if (self.default is not None):
warn((msg + 'defaulting to {}'.format(sel... | Returns the given expression filtered by the given value.
Args:
expr (xpath.expression.AbstractExpression): The expression to filter.
value (object): The desired value with which the expression should be filtered.
Returns:
xpath.expression.AbstractExpression: The filtered expression. | codesearchnet |
def from_json_str(cls, json_str):
    """Convert json string representation into class instance.

    Args:
        json_str: json representation as string.

    Returns:
        New instance of the class with data loaded from json string.
    """
    return cls.from_json(json.loads(json_str, cls=JsonDecoder))
Args:
json_str: json representation as string.
Returns:
New instance of the class with data loaded from json string. | codesearchnet |
def new(namespace, name, protected=False, attributes=None, api_url=fapi.PROD_API_ROOT):
    """Create a new FireCloud workspace.

    Args:
        namespace: namespace (billing project) for the new workspace.
        name: name of the new workspace.
        protected (bool): whether to create a protected workspace.
        attributes (dict): initial workspace attributes; defaults to an
            empty dict.
        api_url (str): FireCloud API root.

    Returns:
        Workspace: A new FireCloud workspace.

    Raises:
        FireCloudServerError: API call failed.
    """
    # Avoid a shared mutable default argument; build a fresh dict per call.
    if attributes is None:
        attributes = dict()
    r = fapi.create_workspace(namespace, name, protected, attributes, api_url)
    fapi._check_response_code(r, 201)
    return Workspace(namespace, name, api_url)
Returns:
Workspace: A new FireCloud workspace
Raises:
FireCloudServerError: API call failed. | codesearchnet |
async def _check_resolver_ans(self, dns_answer_list, record_name, record_data_list, record_ttl, record_type_code):
type_filtered_list = [ans for ans in dns_answer_list if (ans.qtype == record_type_code)]
if (len(type_filtered_list) != len(record_data_list)):
return False
for rec in type_filtered_lis... | Check if resolver answer is equal to record data.
Args:
dns_answer_list (list): DNS answer list contains record objects.
record_name (str): Record name.
record_data_list (list): List of data values for the record.
record_ttl (int): Record time-to-live info.
record_type_code (int): Record type code.
Returns:
boolean i... | codesearchnet |
def verify_repo_matches_url(repo, url):
repo_parts = urlparse(repo)
url_parts = urlparse(url)
errors = []
repo_path_parts = repo_parts.path.split('/')
url_path_parts = url_parts.path.split('/')
if (repo_parts.hostname != url_parts.hostname):
errors.append("verify_repo_matches_url: Hostna... | Verify ``url`` is a part of ``repo``.
We were using ``startswith()`` for a while, which isn't a good comparison.
This function allows us to ``urlparse`` and compare host and path.
Args:
repo (str): the repo url
url (str): the url to verify is part of the repo
Returns:
bool: ``True`` if the repo matches the url. | codesearchnet |
def PyParseJoinList(string, location, tokens):
    """Return a joined token from a list of tokens.

    A callback for pyparsing setParseAction that modifies the token list
    in-place, joining all elements into a single token.

    Args:
        string (str): original string.
        location (int): location in the string where the match was made.
        tokens (list[str]): tokens to join.
    """
    parts = []
    for token in tokens:
        try:
            parts.append(str(token))
        except UnicodeDecodeError:
            # Fall back to repr() for tokens that cannot be decoded.
            parts.append(repr(token))
    tokens[0] = ''.join(parts)
    del tokens[1:]
del tokens[1:] | Return a joined token from a list of tokens.
This is a callback method for pyparsing setParseAction that modifies
the returned token list to join all the elements in the list to a single
token.
Args:
string (str): original string.
location (int): location in the string where the match was made.
tokens (list[str]): ex... | codesearchnet |
def _Parse(self, template):
    """Parses template file for FSM structure.

    Args:
        template: Valid template file.

    Raises:
        TextFSMTemplateError: If template file syntax is invalid.
    """
    if (not template):
        raise TextFSMTemplateError('Null template.')
    self._ParseFSMVariables(template)
    # Consume state definitions until none remain.
    while self._ParseFSMState(template):
        pass
    self._ValidateFSM()
Args:
template: Valid template file.
Raises:
TextFSMTemplateError: If template file syntax is invalid. | codesearchnet |
def write_log(self, message):
    """Write a line to the VM instruction log file.

    The write is silently skipped when logging is disabled or the log
    file is absent/closed.

    Args:
        message (str): string message to write to file; a newline is
            appended.
    """
    log = self.log_file
    if self._is_write_log and log and not log.closed:
        log.write(message + '\n')
Args:
message (str): string message to write to file. | codesearchnet |
def transform_list_to_dict(list):
    """Transform a list into a dictionary, putting values as keys.

    Dict elements are merged in as-is; every other element becomes a
    stringified key mapped to True.

    Args:
        list: the list to transform.

    Returns:
        dict: dictionary built from the list.
    """
    result = {}
    for item in list:
        if isinstance(item, dict):
            result.update(item)
        else:
            result[str(item)] = True
    return result
Args:
id:
Returns:
dict: dictionary built | juraj-google-style |
def indent(s, n_spaces=2, initial=True):
    """Indent all new lines of ``s``.

    Args:
        s (str): text to indent.
        n_spaces (int): number of spaces to use for indentation.
        initial (bool): whether or not to start with an indent.

    Returns:
        str: the indented text.
    """
    prefix = ' ' * n_spaces
    indented = s.replace('\n', '\n' + prefix)
    return prefix + indented if initial else indented
Args:
n_spaces: number of spaces to use for indentation
initial: whether or not to start with an indent | juraj-google-style |
def to_maildir(self, flags: Iterable[Union[(bytes, Flag)]]) -> str:
codes = []
for flag in flags:
if isinstance(flag, bytes):
flag = Flag(flag)
from_sys = self._from_sys.get(flag)
if (from_sys is not None):
codes.append(from_sys)
else:
from_kwd... | Return the string of letter codes that are used to map to defined
IMAP flags and keywords.
Args:
flags: The flags and keywords to map. | codesearchnet |
def _is_flag(cls, arg):
    """Check if an argument is a flag.

    A flag starts with - or -- and the next character must be a letter.
    Currently we only check the alpha'ness of the first non-dash
    character to make sure we're not just looking at a negative number.

    Args:
        arg (str): the command-line argument to inspect.

    Returns:
        bool: Whether the argument is a flag.
    """
    if arg == '--':
        return False
    if not arg.startswith('-'):
        return False
    # Slicing (rather than indexing) avoids an IndexError on a bare '-':
    # it yields '' there, and ''.isalpha() is False.
    if arg.startswith('--'):
        first_char = arg[2:3]
    else:
        first_char = arg[1:2]
    if not first_char.isalpha():
        return False
    return True
A flag starts with - or -- and the next character must be a letter
followed by letters, numbers, - or _. Currently we only check the
alpha'ness of the first non-dash character to make sure we're not just
looking at a negative number.
Returns:
bool: Whether the argument is a flag. | codesearchnet |
def regexp(__string: str, __pattern: str, __repl: Union[Callable, str], *, count: int=0, flags: int=0) -> str:
    """Jinja filter for regexp replacements.

    See :func:`re.sub` for documentation.

    Args:
        __string: text to operate on.
        __pattern: regular expression to match.
        __repl: replacement string, or a callable receiving each match.
        count: maximum number of substitutions (0 means unlimited).
        flags: regex flags, e.g. :data:`re.IGNORECASE`.

    Returns:
        Text with substitutions applied.
    """
    # Pass count/flags by keyword: positional use in re.sub is deprecated
    # as of Python 3.13.
    return re.sub(__pattern, __repl, __string, count=count, flags=flags)
See :func:`re.sub` for documentation.
Returns:
Text with substitutions applied | codesearchnet |
def get_and_check_project(valid_vcs_rules, source_url):
    """Given vcs rules and a source_url, return the project.

    The project is the final component of the repo path, e.g.
    ``releases/mozilla-beta`` is the path; ``mozilla-beta`` is the
    project.

    Args:
        valid_vcs_rules (tuple of frozendicts): the valid vcs rules, per
            ``match_url_regex``.
        source_url (str): the source url to find the project for.

    Raises:
        ValueError: if the source url matches no known repo.

    Returns:
        str: the project name.
    """
    project_path = match_url_regex(valid_vcs_rules, source_url, match_url_path_callback)
    if project_path is None:
        raise ValueError('Unknown repo for source url {}!'.format(source_url))
    return project_path.rsplit('/', 1)[-1]
The project is in the path, but is the repo name.
`releases/mozilla-beta` is the path; `mozilla-beta` is the project.
Args:
valid_vcs_rules (tuple of frozendicts): the valid vcs rules, per
``match_url_regex``.
source_url (str): the source url to find the project f... | codesearchnet |
def get_image_size(image: np.ndarray, channel_dim: ChannelDimension=None) -> tuple[int, int]:
if channel_dim is None:
channel_dim = infer_channel_dimension_format(image)
if channel_dim == ChannelDimension.FIRST:
return (image.shape[-2], image.shape[-1])
elif channel_dim == ChannelDimension.L... | Returns the (height, width) dimensions of the image.
Args:
image (`np.ndarray`):
The image to get the dimensions of.
channel_dim (`ChannelDimension`, *optional*):
Which dimension the channel dimension is in. If `None`, will infer the channel dimension from the image.
Returns:
A tuple of the image's height and width. | github-repos |
def wait_until(what, times=-1):
while times:
logger.info('Waiting times left %d', times)
try:
if what() is True:
return True
except:
logger.exception('Wait failed')
else:
logger.warning('Trial[%d] failed', times)
times ... | Wait until `what` return True
Args:
what (Callable[bool]): Call `wait()` again and again until it returns True
times (int): Maximum times of trials before giving up
Returns:
True if success, False if times threshold reached | juraj-google-style |
def from_der(der):
d = get_bytes(der)
if (len(d) < 8):
raise ValueError('DER signature string is too short.')
if (len(d) > 72):
raise ValueError('DER signature string is too long.')
if (d[0] != 48):
raise ValueError('DER signature does not start with 0x30.')
if (d[1] != len(d... | Decodes a Signature that was DER-encoded.
Args:
der (bytes or str): The DER encoding to be decoded.
Returns:
Signature: The deserialized signature. | codesearchnet |
def __init__(self, label=None, edge_length=None):
    """``Node`` constructor.

    Args:
        ``label`` (``str``): Label of this ``Node``.
        ``edge_length`` (``float``): Length of the edge incident to this
            ``Node``.

    Returns:
        ``Node`` object
    """
    self.children = list()  # child Node objects, initially none
    self.parent = None  # parent Node; None until attached to a tree
    self.label = label
    self.edge_length = edge_length
Args:
``label`` (``str``): Label of this ``Node``
``edge_length`` (``float``): Length of the edge incident to this ``Node``
Returns:
``Node`` object | juraj-google-style |
def ack(self, items):
for item in items:
time_to_ack = item.time_to_ack
if (time_to_ack is not None):
self._manager.ack_histogram.add(time_to_ack)
ack_ids = [item.ack_id for item in items]
request = types.StreamingPullRequest(ack_ids=ack_ids)
self._manager.send(request)
s... | Acknowledge the given messages.
Args:
items(Sequence[AckRequest]): The items to acknowledge. | codesearchnet |
def lower_dict_keys(origin_dict):
    """Convert all string keys in a dict to lower case.

    Non-dict inputs (including ``None``) are returned unchanged.  Non-string
    keys are kept as-is instead of raising ``AttributeError`` (the original
    crashed on e.g. integer keys even though any mapping is accepted).

    Args:
        origin_dict (dict): mapping data structure.

    Returns:
        dict: a new mapping with all string keys lowered, or the original
        object if it is falsy or not a dict.

    Examples:
        >>> lower_dict_keys({"Name": "", "URL": ""})
        {'name': '', 'url': ''}
    """
    if not origin_dict or not isinstance(origin_dict, dict):
        return origin_dict
    return {
        key.lower() if isinstance(key, str) else key: value
        for key, value in origin_dict.items()
    }
Args:
origin_dict (dict): mapping data structure
Returns:
dict: mapping with all keys lowered.
Examples:
>>> origin_dict = {
"Name": "",
"Request": "",
"URL": "",
"METHOD": "",
"Headers": "",
"Data": ""
}
>>> lower_dict_keys(origin_dict)
{
"name": "",
"request": "",
"url": "",
"met... | codesearchnet |
def change_password(self, username, newpassword, raise_on_error=False):
response = self._put((self.rest_url + '/user/password'), data=json.dumps({'value': newpassword}), params={'username': username})
if response.ok:
return True
if raise_on_error:
raise RuntimeError(response.json()['message'... | Change new password for a user
Args:
username: The account username.
newpassword: The account new password.
raise_on_error: optional (default: False)
Returns:
True: Succeeded
False: If unsuccessful | codesearchnet |
def _unpack_zip(self, file_obj, path):
old_cwd = os.getcwd()
os.chdir(path)
zip_obj = zipfile.ZipFile(file_obj)
for cnt, zip_info in enumerate(zip_obj.infolist()):
zip_obj.extract(zip_info)
if cnt + 1 > self.max_zipfiles:
os.chdir(old_cw... | Unpack .zip archive in `file_obj` to given `path`. Make sure, that it
fits into limits (see :attr:`._max_zipfiles` for details).
Args:
file_obj (file): Opened file-like object.
path (str): Path into which the .zip will be unpacked.
Raises:
ValueError: If there is too many files in .zip archive. | juraj-google-style |
def yield_typed(obj_or_cls):
if not isinstance(obj_or_cls, type):
obj_or_cls = type(obj_or_cls)
for attrname in dir(obj_or_cls):
if hasattr(obj_or_cls, attrname):
attr = getattr(obj_or_cls, attrname)
if (isinstance(attr, property) and isinstance(attr.__d... | Generator that yields typed object names of the class (or object's class).
Args:
obj_or_cls (object): Class object or instance of class
Returns:
name (array): Names of class attributes that are strongly typed | juraj-google-style |
def is_alive(self, container: Container) -> bool:
    """Determine whether a given container is still alive.

    Returns:
        ``True`` if the underlying Docker container for the given BugZoo
        container is registered and reports a ``running`` status,
        otherwise ``False``.
    """
    uid = container.uid
    if uid not in self.__dockerc:
        return False
    return self.__dockerc[uid].status == 'running'
Returns:
`True` if the underlying Docker container for the given BugZoo
container is still alive, otherwise `False`. | codesearchnet |
def cast(cls, x, dtype):
    """Cast a tensor to a different dtype.

    Only called on a full array as provided by the user.

    Args:
        x: the tensor to cast.
        dtype: the target dtype.

    Returns:
        The cast tensor.
    """
    converted = x.astype(dtype)
    return converted
Only called on a full array as provided by the user.
Args:
x: the tensor to cast.
Returns: the cast tensor. | github-repos |
def monkey_patch(enabled=True):
    """Monkey patch the ``PIL.Image.open`` function.

    Args:
        enabled (bool): when ``True``, replace ``Image.open`` with
            ``imdirect_open``; when ``False``, restore the original
            PIL ``pil_open``.
    """
    Image.open = imdirect_open if enabled else pil_open
Args:
enabled (bool): If the monkey patch should be activated or deactivated. | codesearchnet |
def memory_zones(self):
    """Get all memory zones supported by the current target.

    Some targets support multiple memory zones; this returns them all so
    callers can use the memory-zone routing functions.

    Args:
        self (JLink): the ``JLink`` instance.

    Returns:
        list: all memory zones as ``JLinkMemoryZone`` structures (empty
        when the target reports none).

    Raises:
        JLinkException: if the DLL call reports an error (negative
        return value).
    """
    zone_count = self.num_memory_zones()
    if zone_count == 0:
        return []
    zones = (structs.JLinkMemoryZone * zone_count)()
    status = self._dll.JLINK_GetMemZones(zones, zone_count)
    if status < 0:
        raise errors.JLinkException(status)
    return list(zones)
Some targets support multiple memory zones. This function provides the
ability to get a list of all the memory zones to facilate using the
memory zone routing functions.
Args:
self (JLink): the ``JLink`` instance
Returns:
A list of all the memory zones as ``JLi... | codesearchnet |
def on_connected(self, connection):
    """AMQP connection callback: record connected state and open the input channel.

    Args:
        connection: The established AMQP connection.
            NOTE(review): this argument is ignored -- the channel is
            opened on ``self.connection``. Presumably both refer to the
            same object; confirm against the code that registers this
            callback.
    """
    log.info('PikaClient: connected to RabbitMQ')
    self.connected = True
    self.in_channel = self.connection.channel(self.on_channel_open)
Creates input channel.
Args:
connection: AMQP connection | juraj-google-style |
def walk_files_for(paths, supported_extensions):
for path in paths:
for (root, _, files) in os.walk(path):
if Application.ignore_path(root.replace(path, '')):
continue
for filename in files:
extension = os.path.splitext(filename)[1]
if ... | Iterating files for given extensions.
Args:
supported_extensions (list): supported file extensions for which to check loc and com.
Returns:
str: yield each full path and filename found. | codesearchnet |
def take_screenshot(webdriver, file_name):
    """Capture a screenshot into the project screenshot folder.

    Args:
        webdriver (WebDriver): Selenium webdriver to capture from.
        file_name (str): File name (without extension) to save the
            screenshot as; ``.png`` is appended.
    """
    screenshot_dir = os.path.join(ProjectUtils.get_project_root(),
                                  WebScreenShotUtil.SCREEN_SHOT_LOCATION)
    WebScreenShotUtil.__capture_screenshot(webdriver, screenshot_dir, file_name + '.png')
Args:
webdriver (WebDriver) - Selenium webdriver.
file_name (str) - File name to save screenshot as. | codesearchnet |
def create_var_in_main(name: str, value: Any, watch: bool=True) -> Tuple[str, Any]:
    """Declare a variable in the ``__main__`` module.

    Args:
        name: the variable name to create in the main module.
        value: the value to bind to the variable.
        watch: whether to also watch the variable in the interactive
            environment.

    Returns:
        A 2-entry tuple of the variable name and value.
    """
    main_module = importlib.import_module('__main__')
    setattr(main_module, name, value)
    if watch:
        # Imported lazily so the watch machinery is only loaded when used.
        from apache_beam.runners.interactive import interactive_environment as ie
        ie.current_env().watch({name: value})
    return name, value
Args:
name: the variable name in the main module.
value: the value of the variable.
watch: whether to watch it in the interactive environment.
Returns:
A 2-entry tuple of the variable name and value. | github-repos |
def remove(self, key):
    """Remove a key from the data store.

    Args:
        key (string): The key to remove.

    Raises:
        KeyError: if the key was not found.
    """
    contents = self._load_file()
    del contents[key]
    self._save_file(contents)
Args:
key (string): The key to remove
Raises:
KeyError: if the key was not found | juraj-google-style |
def _GetFlagsDefinedByModule(self, module):
    """Return the list of flags defined by a module.

    Args:
        module: A module object or a module name (a string).

    Returns:
        A new list of Flag objects. The caller may freely mutate the
        returned list; the internals of this FlagValues object are not
        affected.
    """
    module_name = module if isinstance(module, str) else module.__name__
    return list(self.FlagsByModuleDict().get(module_name, []))
Args:
module: A module object or a module name (a string).
Returns:
A new list of Flag objects. Caller may update this list as he
wishes: none of those changes will affect the internals of this
FlagValue object. | juraj-google-style |
def extract(self, text: str) -> List[Extraction]:
doc = self._tokenizer.tokenize_to_spacy_doc(text)
self._load_matcher()
matches = [x for x in self._matcher(doc) if x[1] != x[2]]
pos_filtered_matches = []
neg_filtered_matches = []
for idx, start, end in matches... | Extract from text
Args:
text (str): input str to be extracted.
Returns:
List[Extraction]: the list of extraction or the empty list if there are no matches. | juraj-google-style |
def DEFINE_float(flag_name, default_value, docstring, required=False):
    """Defines a command-line flag of type 'float'.

    Args:
        flag_name: The name of the flag as a string.
        default_value: The default value the flag should take, as a float.
        docstring: A helpful message explaining the use of the flag.
        required: Whether the flag must be supplied by the user.
    """
    _define_helper(flag_name, default_value, docstring, float, required)
Args:
flag_name: The name of the flag as a string.
default_value: The default value the flag should take as a float.
docstring: A helpful message explaining the use of the flag. | juraj-google-style |
def SpinTimes(spin, bias):
    """Multiply a bias by a spin value, with value checking.

    Defining our own multiplication keeps the logged expressions clean
    and rejects anything other than the two valid spin values.

    Args:
        spin (int): -1 or 1.
        bias (:class:`pysmt.shortcuts.Symbol`): The bias.

    Returns:
        ``spin * bias``: the bias itself for spin 1, the negated bias
        for spin -1.

    Raises:
        TypeError: if ``spin`` is not an int.
        ValueError: if ``spin`` is neither -1 nor 1.
    """
    if not isinstance(spin, int):
        raise TypeError('spin must be an int')
    if spin == 1:
        return bias
    if spin == -1:
        return Times(Real((-1, 1)), bias)
    raise ValueError('expected spins to be -1., or 1.')
cleaner log code as well as value checking.
Args:
spin (int): -1 or 1
bias (:class:`pysmt.shortcuts.Symbol`): The bias
Returns:
spins * bias | juraj-google-style |
async def build_task_dependencies(chain, task, name, my_task_id):
log.info('build_task_dependencies {} {}'.format(name, my_task_id))
if (name.count(':') > chain.context.config['max_chain_length']):
raise CoTError('Too deep recursion!\n{}'.format(name))
sorted_dependencies = find_sorted_task_dependen... | Recursively build the task dependencies of a task.
Args:
chain (ChainOfTrust): the chain of trust to add to.
task (dict): the task definition to operate on.
name (str): the name of the task to operate on.
my_task_id (str): the taskId of the task to operate on.
Raises:
CoTError: on failure. | codesearchnet |
def absnormpath(self, path):
path = self.normcase(path)
cwd = self._matching_string(path, self.cwd)
if (not path):
path = self.path_separator
elif (not self._starts_with_root_path(path)):
root_name = self._matching_string(path, self.root.name)
empty = self._matching_string(path, ... | Absolutize and minimalize the given path.
Forces all relative paths to be absolute, and normalizes the path to
eliminate dot and empty components.
Args:
path: Path to normalize.
Returns:
The normalized path relative to the current working directory,
or the root directory if path is empty. | codesearchnet |
def step(self, actions, step_mul=None):
if (self._state == environment.StepType.LAST):
return self.reset()
skip = (not self._ensure_available_actions)
self._parallel.run(((c.act, f.transform_action(o.observation, a, skip_available=skip)) for (c, f, o, a) in zip(self._controllers, self._features, sel... | Apply actions, step the world forward, and return observations.
Args:
actions: A list of actions meeting the action spec, one per agent.
step_mul: If specified, use this rather than the environment's default.
Returns:
A tuple of TimeStep namedtuples, one per agent. | codesearchnet |
def _LookupClassReferences(serializable_ast, module_map, self_name):
class_lookup = visitors.LookupExternalTypes(module_map, self_name=self_name)
raw_ast = serializable_ast.ast
decorators = {d.type.name for c in raw_ast.classes + raw_ast.functions for d in c.decorators}
for node in serializable_ast.clas... | Fills .cls references in serializable_ast.ast with ones from module_map.
Already filled references are not changed. References to the module self._name
are not filled. Setting self_name=None will fill all references.
Args:
serializable_ast: A SerializableAst instance.
module_map: Used to resolve ClassType.cls links t... | github-repos |
def read(self, istream, kmip_version=enums.KMIPVersion.KMIP_1_0):
super(RevocationReason, self).read(istream, kmip_version=kmip_version)
tstream = BytearrayStream(istream.read(self.length))
self.revocation_code = RevocationReasonCode()
self.revocation_code.read(tstream, kmip_version=kmip_version)
if... | Read the data encoding the RevocationReason object and decode it
into its constituent parts.
Args:
istream (Stream): A data stream containing encoded object data,
supporting a read method; usually a BytearrayStream object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
version with which the object will ... | codesearchnet |
def Patch(self, request, global_params=None):
    """Updates an existing `BitbucketServerConfig`. This API is experimental.

    Args:
        request: (CloudbuildProjectsLocationsBitbucketServerConfigsPatchRequest)
            input message.
        global_params: (StandardQueryParameters, default: None) global
            arguments.

    Returns:
        (Operation) The response message.
    """
    method_config = self.GetMethodConfig('Patch')
    return self._RunMethod(method_config, request, global_params=global_params)
Args:
request: (CloudbuildProjectsLocationsBitbucketServerConfigsPatchRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message. | github-repos |
def indent(self, node, dirty=True):
    """Indent an item beneath this one.

    Does nothing if the target itself already has subitems.

    Args:
        node (gkeepapi.node.ListItem): Item to indent.
        dirty (bool): Whether the indented node should be marked dirty.
    """
    if not node.subitems:
        self._subitems[node.id] = node
        node.parent_item = self
        node.super_list_item_id = self.id
        if dirty:
            node.touch(True)
Args:
node (gkeepapi.node.ListItem): Item to indent.
dirty (bool): Whether this node should be marked dirty. | codesearchnet |
def serialize_attrs(self, *args):
cls = type(self)
result = {}
for a in args:
if (hasattr(cls, a) and (a not in cls.attrs_forbidden_for_serialization())):
val = getattr(self, a)
if is_list_like(val):
result[a] = list(val)
else:
resu... | Converts and instance to a dictionary with only the specified
attributes as keys
Args:
*args (list): The attributes to serialize
Examples:
>>> customer = Customer.create(name="James Bond", email="007@mi.com",
phone="007", city="London")
>>> customer.serialize_attrs('name', 'email')
{'name': u'James Bond', 'email': u... | codesearchnet |
def bias_add(x, bias, data_format=None):
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
bias_shape = int_shape(bias)
if len(bias_shape) != 1 and len(bias_sha... | Adds a bias vector to a tensor.
Args:
x: Tensor or variable.
bias: Bias tensor to add.
data_format: string, `"channels_last"` or `"channels_first"`.
Returns:
Output tensor.
Raises:
ValueError: In one of the two cases below:
1. invalid `data_format` argument.
2. invalid bias shape.
the bias should be either a vector ... | github-repos |
def boolmask(indices, maxval=None):
    """Construct a list of booleans where an item is True iff its position
    is in `indices`.

    Args:
        indices (list): list of integer indices.
        maxval (int): length of the returned list. If not specified this
            is inferred from `indices` (an empty `indices` yields an
            empty mask; the original raised ``ValueError`` from ``max``).

    Note:
        In the future the arg `maxval` may change its name to `shape`.

    Returns:
        list: the boolean mask.
    """
    if maxval is None:
        # Materialize once: `indices` may be a generator and is consumed
        # both to find the max and to set the mask bits.
        indices = list(indices)
        maxval = max(indices) + 1 if indices else 0
    mask = [False] * maxval
    for index in indices:
        mask[index] = True
    return mask
`indices` otherwise it is False.
Args:
indices (list): list of integer indices
maxval (int): length of the returned list. If not specified
this is inferred from `indices`
Note:
In the future the arg `maxval` may change its name to `shape`
Ret... | codesearchnet |
def get_vpc_id(account, region):
url = '{0}/networks/aws'.format(API_URL)
response = requests.get(url, verify=GATE_CA_BUNDLE, cert=GATE_CLIENT_CERT)
if not response.ok:
raise SpinnakerVPCNotFound(response.text)
vpcs = response.json()
for vpc in vpcs:
LOG.debug('VPC: %(name)s,... | Get VPC ID configured for ``account`` in ``region``.
Args:
account (str): AWS account name.
region (str): Region name, e.g. us-east-1.
Returns:
str: VPC ID for the requested ``account`` in ``region``.
Raises:
:obj:`foremast.exceptions.SpinnakerVPCIDNotFound`: VPC ID not found for
``account`` in ``region``.
:obj:`for... | juraj-google-style |
def transform(self, target_type: Type[T], value: F, context: PipelineContext = None) -> T:
    """Transform ``value`` into an instance of ``target_type``.

    Abstract hook: this base implementation is a no-op (returns ``None``);
    subclasses are expected to override it.

    Args:
        target_type: The type to convert to.
        value: The object to be transformed.
        context: Mutable context of the transformation, if any.
    """
    pass
Args:
target_type: The type to be converted to.
value: The object to be transformed.
context: The context of the transformation (mutable). | juraj-google-style |
def make_mutant_tuples(example_protos, original_feature, index_to_mutate,
viz_params):
mutant_features = make_mutant_features(original_feature, index_to_mutate,
viz_params)
mutant_examples = []
for example_proto in example_protos:
for mutant_f... | Return a list of `MutantFeatureValue`s and a list of mutant Examples.
Args:
example_protos: The examples to mutate.
original_feature: A `OriginalFeatureList` that encapsulates the feature to
mutate.
index_to_mutate: The index of the int64_list or float_list to mutate.
viz_params: A `VizParams` object that contains the... | juraj-google-style |
def get_by_block(self, block_number):
blocklist_snapshot = self.db.prefixed_db(NotificationPrefix.PREFIX_BLOCK).snapshot()
block_bytes = block_number.to_bytes(4, 'little')
results = []
for val in blocklist_snapshot.iterator(prefix=block_bytes, include_key=False):
eve... | Look up notifications for a block
Args:
block_number (int): height of block to search for notifications
Returns:
list: a list of notifications | juraj-google-style |
def _convert_to_eval_metric(metric_fn):
    """Wrap a metric fn that returns scores and weights as an eval metric fn.

    The wrapped ``metric_fn`` produces per-batch values; the returned
    function aggregates them into a streaming weighted mean over all
    evaluated batches.

    Args:
        metric_fn: function returning ``(scores, weights)`` for the
            current batch's logits and predictions.

    Returns:
        A function usable as a TF eval metric.
    """
    def problem_metric_fn(*args):
        """Aggregate metric_fn's batch values into a streaming mean."""
        scores, weights = metric_fn(*args)
        return tf.metrics.mean(scores, weights)

    return problem_metric_fn
The input metric_fn returns values for the current batch. The wrapper
aggregates the return values collected over all of the batches evaluated.
Args:
metric_fn: function that returns scores and weights for the current batch's
logits and predicted ... | codesearchnet |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.