code stringlengths 20 4.93k | docstring stringlengths 33 1.27k | source stringclasses 3
values |
|---|---|---|
def compile(self,
container: Container,
verbose: bool = False
) -> CompilationOutcome:
bug = self.__installation.bugs[container.bug]
return bug.compiler.compile(self, container, verbose=verbose) | Attempts to compile the program inside a given container.
Params:
verbose: specifies whether to print the stdout and stderr produced
by the compilation command to the stdout. If `True`, then the
stdout and stderr will be printed.
Returns:
a summary of the outcome of the compilation attempt. | juraj-google-style |
def add(self, pattern_txt):
self.patterns[len(pattern_txt)] = pattern_txt
low = 0
high = (len(pattern_txt) - 1)
while (not pattern_txt[low]):
low += 1
while (not pattern_txt[high]):
high -= 1
min_pattern = pattern_txt[low:(high + 1)]
self.min_patterns[len(min_pattern)] = min_... | Add a pattern to the list.
Args:
pattern_txt (str list): the pattern, as a list of lines. | codesearchnet |
def quote_identifier(identifier: str,
mixed: Union[SQLCompiler, Engine, Dialect]) -> str:
return get_preparer(mixed).quote(identifier) | Converts an SQL identifier to a quoted version, via the SQL dialect in
use.
Args:
identifier: the identifier to be quoted
mixed: an SQLAlchemy :class:`SQLCompiler`, :class:`Engine`, or
:class:`Dialect` object
Returns:
the quoted identifier | juraj-google-style |
def _make_inputs_match(branch_graphs, branch_inputs):
assert len(branch_graphs) == len(branch_inputs)
added_inputs = set()
new_inputs = []
for branch_in in branch_inputs:
for tensor in branch_in:
tensor_id = ops.tensor_id(tensor)
if tensor_id not in added_inputs:
... | Modifies branch_graphs so they have the same input signature.
This method reorders and/or adds parameters to each graph in branch_graphs so
they have the same input signature, and updates the 'inputs' and 'captured'
fields of each graph accordingly. It uses the input tensors from the outer
graph to avoid duplicating s... | github-repos |
def create(self, data={}, **kwargs):
url = self.base_url
return self.post_url(url, data, **kwargs) | Create Virtual Account from given dict
Args:
Param for Creating Virtual Account
Returns:
Virtual Account dict | codesearchnet |
class MeanAbsolutePercentageError(reduction_metrics.MeanMetricWrapper):
def __init__(self, name='mean_absolute_percentage_error', dtype=None):
super().__init__(mean_absolute_percentage_error, name, dtype=dtype)
self._direction = 'down'
def get_config(self):
return {'name': self.name, '... | Computes mean absolute percentage error between `y_true` and `y_pred`.
Formula:
```python
loss = 100 * mean(abs((y_true - y_pred) / y_true))
```
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Examples:
>>> m = keras.metrics.MeanAbsolutePercentageError()
... | github-repos |
def anti_clobber_dir_path(dir_path, suffix='.d'):
dir_path = os.path.normpath(dir_path)
parts = dir_path.split(os.sep)
for index in range(len(parts)):
test_path = os.sep.join(parts[:index + 1])
if os.path.isfile(test_path):
parts[index] += suffix
return os.sep... | Return a directory path free of filenames.
Args:
dir_path (str): A directory path.
suffix (str): The suffix to append to the part of the path that is
a file.
Returns:
str | juraj-google-style |
def _try_recover(self, trial, error_msg):
try:
self.trial_executor.stop_trial(trial, error=(error_msg is not None), error_msg=error_msg, stop_logger=False)
trial.result_logger.flush()
if self.trial_executor.has_resources(trial.resources):
logger.info('Attempting to recover trial ... | Tries to recover trial.
Notifies SearchAlgorithm and Scheduler if failure to recover.
Args:
trial (Trial): Trial to recover.
error_msg (str): Error message from prior to invoking this method. | codesearchnet |
def GetRelativePath(self, path_spec):
location = getattr(path_spec, 'location', None)
if (location is None):
raise errors.PathSpecError('Path specification missing location.')
if path_spec_factory.Factory.IsSystemLevelTypeIndicator(self._file_system.type_indicator):
if (not location.startswi... | Returns the relative path based on a resolved path specification.
The relative path is the location of the upper most path specification.
The the location of the mount point is stripped off if relevant.
Args:
path_spec (PathSpec): path specification.
Returns:
str: corresponding relative path or None if the relative ... | codesearchnet |
def recipients(self, notification_type, recipients, priority='Low'):
self._notification_type = notification_type
self._recipients = recipients
self._priority = priority
self._is_organization = False | Set vars for the passed in data. Used for one or more recipient notification.
.. code-block:: javascript
{
"notificationType": notification_type,
"priority": priority
"isOrganization": false,
"recipients": recipients
}
Args:
notification_type (str): The type of notification being sent.
recipients (str): A comma deli... | codesearchnet |
def apply(self, data, path=None, applicator=None):
if applicator:
applicator.pset = self
else:
applicator = Applicator(self)
return applicator.apply(data, path=path) | Apply permissions in this set to the provided data, effectively
removing all keys from it are not permissioned to be viewed
Arguments:
data -- dict of data
Returns:
Cleaned data | juraj-google-style |
def _convert_metadata(data):
def compose(val, arguments=None):
if val is None:
return None
if not arguments:
return val
arguments["
return arguments
conspect = data.get("conspect", {})
author_name = data.get("author", {}).get("name")
... | Convert metadata from WA-KAT to Dublin core dictionary like structure,
which may be easily converted to xml using :mod:`xmltodict` module.
Args:
data (dict): Nested WA-KAT data. See tests for example.
Returns:
dict: Dict in dublin core format. | juraj-google-style |
def _InstallRpm(self, path):
pid = os.fork()
if pid == 0:
cmd = "/bin/rpm"
cmd_args = [cmd, "-U", "--replacepkgs", "--replacefiles", path]
env = os.environ.copy()
env.pop("LD_LIBRARY_PATH", None)
env.pop("PYTHON_PATH", None)
os.execve(cmd,... | Client update for rpm based distros.
Upgrading rpms is a bit more tricky than upgrading deb packages since there
is a preinstall script that kills the running GRR daemon and, thus, also
the installer process. We need to make sure we detach the child process
properly and therefore cannot use client_utils_common.Execute... | juraj-google-style |
def visit_membership(self, relation: _evaluation.MembershipRelationNode) -> _sql_data_types.Select:
lhs_result = self.visit(relation.left)
rhs_result = self.visit(relation.right)
in_lhs = lhs_result if isinstance(relation, _evaluation.InNode) else rhs_result
in_rhs = rhs_result if isinstance(relation, _... | Translates a FHIRPath membership relation to Spark SQL.
For the `IN` relation, the LHS operand is assumed to be a collection of a
single value. For 'CONTAINS', the RHS operand is assumed to be a collection
of a single value. Equality is handled in the visit_equality function.
Args:
relation: The FHIRPath AST `Members... | github-repos |
def is_coord_subset(subset, superset, atol=1e-8):
c1 = np.array(subset)
c2 = np.array(superset)
is_close = np.all(np.abs(c1[:, None, :] - c2[None, :, :]) < atol, axis=-1)
any_close = np.any(is_close, axis=-1)
return np.all(any_close) | Tests if all coords in subset are contained in superset.
Doesn't use periodic boundary conditions
Args:
subset, superset: List of coords
Returns:
True if all of subset is in superset. | juraj-google-style |
def pop(self):
if not self.layers:
raise TypeError('There are no layers in the model.')
layer = self._self_tracked_trackables.pop()
self._layer_call_argspecs.pop(layer)
if not self.layers:
self.outputs = None
self.inputs = None
self.built = False
self._inferred_in... | Removes the last layer in the model.
Raises:
TypeError: if there are no layers in the model. | github-repos |
def sg_queue_context(sess=None):
r
sess = tf.get_default_session() if sess is None else sess
coord = tf.train.Coordinator()
try:
threads = tf.train.start_queue_runners(sess, coord)
yield
finally:
coord.request_stop()
coord.join(t... | r"""Context helper for queue routines.
Args:
sess: A session to open queues. If not specified, a new session is created.
Returns:
None | juraj-google-style |
def from_string(cls, string_input):
directives = []
tasks = []
charge = None
spin_multiplicity = None
title = None
basis_set = None
basis_set_option = None
theory_directives = {}
geom_options = None
symmetry_options = None
... | Read an NwInput from a string. Currently tested to work with
files generated from this class itself.
Args:
string_input: string_input to parse.
Returns:
NwInput object | juraj-google-style |
def discretize(self, data):
ret = data.copy()
for feature in self.lambdas:
if len(data.shape) == 1:
ret[feature] = int(self.lambdas[feature](ret[feature]))
else:
ret[:, feature] = self.lambdas[feature](
ret[:, feature])... | Discretizes the data.
Args:
data: numpy 2d or 1d array
Returns:
numpy array of same dimension, discretized. | juraj-google-style |
def add_message(self, message_type):
name = self.__normalized_name(message_type)
if name not in self.__schemas:
self.__schemas[name] = None
schema = self.__message_to_schema(message_type)
self.__schemas[name] = schema
return name | Add a new message.
Args:
message_type: protorpc.message.Message class to be parsed.
Returns:
string, The JSON Schema id.
Raises:
KeyError if the Schema id for this message_type would collide with the
Schema id of a different message_type that was already added. | juraj-google-style |
def _CheckStorageMetadata(cls, metadata_values, check_readable_only=False):
format_version = metadata_values.get('format_version', None)
if (not format_version):
raise IOError('Missing format version.')
try:
format_version = int(format_version, 10)
except (TypeError, ValueError):
... | Checks the storage metadata.
Args:
metadata_values (dict[str, str]): metadata values per key.
check_readable_only (Optional[bool]): whether the store should only be
checked to see if it can be read. If False, the store will be checked
to see if it can be read and written to.
Raises:
IOError: if the format version or ... | codesearchnet |
def __write_to_hdf5_light(self, filename_out, *args, **kwargs):
block_size = 0
with h5py.File(filename_out, 'w') as h5:
h5.attrs[b'CLASS'] = b'FILTERBANK'
h5.attrs[b'VERSION'] = b'1.0'
if HAS_BITSHUFFLE:
bs_compression = bitshuffle.h5.H5FILTER
bs_compression_opts ... | Write data to HDF5 file in one go.
Args:
filename_out (str): Name of output file | codesearchnet |
def set_vocabulary(self, vocabulary, idf_weights=None):
if self.output_mode == 'tf_idf':
if idf_weights is None:
raise ValueError("`idf_weights` must be set if output_mode is 'tf_idf'.")
elif idf_weights is not None:
raise ValueError(f"`idf_weights` should only be set if output_mode ... | Sets vocabulary (and optionally document frequency) for this layer.
This method sets the vocabulary and idf weights for this layer directly,
instead of analyzing a dataset through `adapt`. It should be used
whenever the vocab (and optionally document frequency) information is
already known. If vocabulary data is alre... | github-repos |
def _set_default_attr(self, default_attr):
for attr, val in six.iteritems(default_attr):
if getattr(self, attr, None) is None:
setattr(self, attr, val) | Sets default attributes when None.
Args:
default_attr: dict. Key-val of attr, default-value. | juraj-google-style |
def convert(recursive=False, optional_features=None, user_requested=True, conversion_ctx=ag_ctx.NullCtx()):
def decorator(f):
def wrapper(*args, **kwargs):
options = converter.ConversionOptions(recursive=recursive, user_requested=user_requested, optional_features=optional... | Decorator that compiles a function to use TensorFlow ops.
The decorator is dynamic - it recompiles the target whenever the decorated
function is called. This means the parameter values are known at conversion.
It also means that repeated calls with different types of parameters will be
correctly processed.
Args:
recu... | github-repos |
def load_yaml_by_relpath(cls, directories, rel_path, log_debug=False):
for d in directories:
if (d.startswith(os.path.expanduser('~')) and (not os.path.exists(d))):
os.makedirs(d)
possible_path = os.path.join(d, rel_path)
if os.path.exists(possible_path):
loaded = cls... | Load a yaml file with path that is relative to one of given directories.
Args:
directories: list of directories to search
name: relative path of the yaml file to load
log_debug: log all messages as debug
Returns:
tuple (fullpath, loaded yaml structure) or None if not found | codesearchnet |
def format_tasks(tasks):
return ['%d : %s (%s)' % (task.key.id(),
task.description,
('done' if task.done
else 'created %s' % task.created))
for task in tasks] | Converts a list of tasks to a list of string representations.
Args:
tasks: A list of the tasks to convert.
Returns:
A list of string formatted tasks. | juraj-google-style |
def check_panels(adapter, panels, default_panels=None):
default_panels = default_panels or []
panels_exist = True
for panel in default_panels:
if panel not in panels:
log.warning("Default panels have to be defined in panels")
panels_exist = False
for panel in panels:... | Make sure that the gene panels exist in the database
Also check if the default panels are defined in gene panels
Args:
adapter(MongoAdapter)
panels(list(str)): A list with panel names
Returns:
panels_exists(bool) | juraj-google-style |
def call(self, hidden_states: tf.Tensor, attention_mask: tf.Tensor | None=None, encoder_hidden_states: tf.Tensor | None=None, encoder_attention_mask: tf.Tensor | None=None, layer_head_mask: tf.Tensor | None=None, cross_attn_layer_head_mask: tf.Tensor | None=None, past_key_value: Tuple[tf.Tensor] | None=None, training: ... | Args:
hidden_states (`tf.Tensor`): input to the layer of shape *(batch, seq_len, embed_dim)*
attention_mask (`tf.Tensor`): attention mask of size
*(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values.
encoder_hidden_states (`tf.Tensor`):
cross attention input to the layer of ... | github-repos |
def callsign(msg):
if common.typecode(msg) < 1 or common.typecode(msg) > 4:
raise RuntimeError("%s: Not a identification message" % msg)
chars = '
msgbin = common.hex2bin(msg)
csbin = msgbin[40:96]
cs = ''
cs += chars[common.bin2int(csbin[0:6])]
cs += chars[common.bin2int(csb... | Aircraft callsign
Args:
msg (string): 28 bytes hexadecimal message string
Returns:
string: callsign | juraj-google-style |
def save_yaml(dictionary, path, pretty=False, sortkeys=False):
if sortkeys:
dictionary = dict(dictionary)
with open(path, 'w') as f:
if pretty:
pyaml.dump(dictionary, f)
else:
yaml.dump(dictionary, f, default_flow_style=None, Dumper=yamlloader.ordereddic... | Save dictionary to YAML file preserving order if it is an OrderedDict
Args:
dictionary (Dict): Python dictionary to save
path (str): Path to YAML file
pretty (bool): Whether to pretty print. Defaults to False.
sortkeys (bool): Whether to sort dictionary keys. Defaults to False.
Returns:
None | juraj-google-style |
def pretty_print_counters(counters):
totals = collections.defaultdict(int)
for (name, val) in counters:
prefixes = ([name[:i] for i in xrange(len(name)) if (name[i] == '/')] + [name])
for p in prefixes:
totals[p] += val
parts = []
for (name, val) in sorted(six.iteritems(total... | print counters hierarchically.
Each counter is a pair of a string and a number.
The string can have slashes, meaning that the number also counts towards
each prefix. e.g. "parameters/trainable" counts towards both "parameters"
and "parameters/trainable".
Args:
counters: a list of (string, number) pairs
Returns:
a ... | codesearchnet |
def get_latex_figure_str(fpath_list, caption_str=None, label_str=None,
width_str=r'\textwidth', height_str=None, nCols=None,
dpath=None, colpos_sep=' ', nlsep='',
use_sublbls=None, use_frame=False):
r
import utool as ut
if nCols is ... | r"""
Args:
fpath_list (list):
dpath (str): directory relative to main tex file
Returns:
str: figure_str
CommandLine:
python -m utool.util_latex --test-get_latex_figure_str
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_latex import * # NOQA
>>> fpath_list = ['figures/foo.png']
>>> figure_str = get_latex_figure_... | juraj-google-style |
def send_msg_to_webhook(self, message):
payload = {'content': message}
header = {'Content-Type': 'application/json'}
try:
request = requests.post(self.api_url, headers=header, json=payload)
request.raise_for_status()
except Exception as error_msg:
warning_msg = (('EXCEPTION: UNAB... | separated Requests logic for easier testing
Args:
message (str): actual logging string to be passed to REST endpoint
Todo:
* Requests.text/json return for better testing options | codesearchnet |
def instantiate_resolver(self, name, args):
if name not in self._known_resolvers:
raise ArgumentError("Attempting to instantiate unknown dependency resolver", name=name)
return self._known_resolvers[name](args) | Directly instantiate a dependency resolver by name with the given arguments
Args:
name (string): The name of the class that we want to instantiate
args (dict): The arguments to pass to the resolver factory
Returns:
DependencyResolver | juraj-google-style |
def export_node(self, n) -> Dict[str, Union[str, List[str]]]:
node_dict = {
"name": n[0],
"units": _get_units(n[0]),
"dtype": _get_dtype(n[0]),
"arguments": list(self.predecessors(n[0])),
}
if not n[1].get("indicators") is None:
... | Return dict suitable for exporting to JSON.
Args:
n: A dict representing the data in a networkx AnalysisGraph node.
Returns:
The node dict with additional fields for name, units, dtype, and
arguments. | juraj-google-style |
def _CreateOutputFileHandles(self, output_type):
gzip_filehandle_parent = tempfile.NamedTemporaryFile(suffix=output_type)
gzip_filehandle = gzip.GzipFile(gzip_filehandle_parent.name, 'wb', self.GZIP_COMPRESSION_LEVEL, gzip_filehandle_parent)
self.temp_output_trackers[output_type] = TempOutputTracker(output_... | Creates a new gzipped output tempfile for the output type.
We write to JSON data to gzip_filehandle to get compressed data. We hold a
reference to the original filehandle (gzip_filehandle_parent) so we can pass
the gzip data to bigquery.
Args:
output_type: string of export type to be used in filename. e.g.
ExportedFi... | codesearchnet |
def __init__(self, match_type=MatchType.OFPMT_OXM, oxm_match_fields=None):
super().__init__()
self.match_type = match_type
self.oxm_match_fields = oxm_match_fields or OxmMatchFields()
self._update_match_length() | Describe the flow match header structure.
Args:
match_type (MatchType): One of OFPMT_* (MatchType) items.
length (int): Length of Match (excluding padding) followed by
Exactly (length - 4) (possibly 0) bytes containing
OXM TLVs, then exactly ((length + 7)/8*8 - length)
(between 0 and 7) bytes of all-zero bytes.
oxm_fi... | juraj-google-style |
class MaskFormerSwinBackbone(MaskFormerSwinPreTrainedModel, BackboneMixin):
def __init__(self, config: MaskFormerSwinConfig):
super().__init__(config)
super()._init_backbone(config)
self.model = MaskFormerSwinModel(config)
if 'stem' in self.out_features:
raise ValueError... | MaskFormerSwin backbone, designed especially for the MaskFormer framework.
This classes reshapes `hidden_states` from (`batch_size, sequence_length, hidden_size)` to (`batch_size,
num_channels, height, width)`). It also adds additional layernorms after each stage.
Args:
config (`MaskFormerSwinConfig`):
The configurat... | github-repos |
def normalize_full_name_false(decl):
if decl.cache.normalized_full_name_false is None:
decl.cache.normalized_full_name_false = normalize(
declaration_utils.full_name(decl, with_defaults=False))
return decl.cache.normalized_full_name_false | Cached variant of normalize
Args:
decl (declaration.declaration_t): the declaration
Returns:
str: normalized name | juraj-google-style |
def step(self, actions, step_mul=None):
if self._state == environment.StepType.LAST:
return self.reset()
skip = not self._ensure_available_actions
self._parallel.run(
(c.act, f.transform_action(o.observation, a, skip_available=skip))
for c, f, o, a in zip(
self._contr... | Apply actions, step the world forward, and return observations.
Args:
actions: A list of actions meeting the action spec, one per agent.
step_mul: If specified, use this rather than the environment's default.
Returns:
A tuple of TimeStep namedtuples, one per agent. | juraj-google-style |
def parse_result(line):
if line.startswith("Problem"):
raise RuntimeError("Login credentials seems to be wrong")
result = {
'p_value': None,
'gene_symbols': [],
'disease_nr': None,
'disease_source': None,
'description': None,
'raw_line': line
... | Parse the result line of a phenomizer request.
Arguments:
line (str): A raw output line from phenomizer
Returns:
result (dict): A dictionary with the phenomizer info:
{
'p_value': float,
'gene_symbols': list(str),
'disease_nr': int,
'disease_source': str,
'description': str,
'raw_line': str
} | juraj-google-style |
def screenshot(self, filename=None):
image = self.d.screenshot()
if self.rotation:
method = getattr(Image, 'ROTATE_{}'.format(self.rotation*90))
image = image.transpose(method)
if filename:
image.save(filename)
return image | Take ios screenshot
Args:
- filename(string): optional
Returns:
PIL.Image object | juraj-google-style |
def extend_webfont_settings(webfont_settings):
if (not webfont_settings.get('fontdir_path', False)):
raise IcomoonSettingsError("Webfont settings miss the required key item 'fontdir_path'")
if (not webfont_settings.get('csspart_path', False)):
webfont_settings['csspart_path'] = None
return w... | Validate a webfont settings and optionally fill missing ``csspart_path``
option.
Args:
webfont_settings (dict): Webfont settings (an item value from
``settings.ICOMOON_WEBFONTS``).
Returns:
dict: Webfont settings | codesearchnet |
def _get_break_loop_node(break_node):
loop_nodes = (astroid.For, astroid.While)
parent = break_node.parent
while ((not isinstance(parent, loop_nodes)) or (break_node in getattr(parent, 'orelse', []))):
break_node = parent
parent = parent.parent
if (parent is None):
break
... | Returns the loop node that holds the break node in arguments.
Args:
break_node (astroid.Break): the break node of interest.
Returns:
astroid.For or astroid.While: the loop node holding the break node. | codesearchnet |
def long_id(self, sample):
if self.grid == 'WAC':
lon = self.CENTER_LONGITUDE + (sample - self.SAMPLE_PROJECTION_OFFSET - 1)\
* self.MAP_SCALE * 1e-3 / (self.A_AXIS_RADIUS * np.cos(self.CENTER_LATITUDE * np.pi / 180.0))
return lon * 180 / np.pi
else:
... | Return the corresponding longitude
Args:
sample (int): sample number on a line
Returns:
Correponding longidude in degree | juraj-google-style |
def add_user_to_template(self, template_id, account_id=None, email_address=None):
return self._add_remove_user_template(self.TEMPLATE_ADD_USER_URL, template_id, account_id, email_address) | Gives the specified Account access to the specified Template
Args:
template_id (str): The id of the template to give the account access to
account_id (str): The id of the account to give access to the template. The account id prevails if both account_id and email_address are provided.
email_address (str)... | juraj-google-style |
def getlines(self, bufnr=None):
buf = self._vim.buffers[bufnr] if bufnr else self._vim.current.buffer
return buf[:] | Get all lines of a buffer as a list.
Args:
bufnr (Optional[int]): A Vim buffer number, current if ``None``.
Returns:
List[str] | juraj-google-style |
def convert_segmentation_to_rle(segmentation):
segment_ids = torch.unique(segmentation)
run_length_encodings = []
for idx in segment_ids:
mask = torch.where(segmentation == idx, 1, 0)
rle = binary_mask_to_rle(mask)
run_length_encodings.append(rle)
return run_length_encodings | Converts given segmentation map of shape `(height, width)` to the run-length encoding (RLE) format.
Args:
segmentation (`torch.Tensor` or `numpy.array`):
A segmentation map of shape `(height, width)` where each value denotes a segment or class id.
Returns:
`List[List]`: A list of lists, where each list is the run-leng... | github-repos |
def properties(self):
props = {}
for line in self.adb_shell(['getprop']).splitlines():
m = _PROP_PATTERN.match(line)
if m:
props[m.group('key')] = m.group('value')
return props | Android Properties, extracted from `adb shell getprop`
Returns:
dict of props, for
example:
{'ro.bluetooth.dun': 'true'} | codesearchnet |
def _get_audios_and_audio_lengths(self, audios: AudioInput) -> Sequence['torch.Tensor', Sequence[int]]:
requires_backends(self, ['torch'])
if isinstance(audios, np.ndarray):
audios = torch.from_numpy(audios)
elif isinstance(audios, Sequence) and isinstance(audios[0], np.ndarray):
audios = [t... | Coerces audio inputs to torch tensors and extracts audio lengths prior to stacking.
Args:
audios (`AudioInput`):
Audio sequence, numpy array, or torch tensor. | github-repos |
def get_neighbors(self, site, r):
nn = self.get_sites_in_sphere(site.coords, r)
return [(s, dist) for (s, dist) in nn if site != s] | Get all neighbors to a site within a sphere of radius r. Excludes the
site itself.
Args:
site (Site): Site at the center of the sphere.
r (float): Radius of sphere.
Returns:
[(site, dist) ...] since most of the time, subsequent processing
requires the distance. | juraj-google-style |
def get_message(routing_key, properties, body):
if (properties.headers is None):
_log.error('Message (body=%r) arrived without headers. A publisher is misbehaving!', body)
properties.headers = {}
try:
MessageClass = get_class(properties.headers['fedora_messaging_schema'])
except KeyE... | Construct a Message instance given the routing key, the properties and the
body received from the AMQP broker.
Args:
routing_key (str): The AMQP routing key (will become the message topic)
properties (pika.BasicProperties): the AMQP properties
body (bytes): The encoded message body
Raises:
ValidationError: If Message... | codesearchnet |
def Value(self, p):
if p < 0 or p > 1:
raise ValueError('Probability p must be in range [0, 1]')
if p == 0: return self.xs[0]
if p == 1: return self.xs[-1]
index = bisect.bisect(self.ps, p)
if p == self.ps[index - 1]:
return self.xs[index - 1]
... | Returns InverseCDF(p), the value that corresponds to probability p.
Args:
p: number in the range [0, 1]
Returns:
number value | juraj-google-style |
def compose(f, *fs):
rfs = list(chain([f], fs))
rfs.reverse()
def composed(*args, **kwargs):
return reduce((lambda result, fn: fn(result)), rfs[1:], rfs[0](*args, **kwargs))
return composed | Compose functions right to left.
compose(f, g, h)(x) -> f(g(h(x)))
Args:
f, *fs: The head and rest of a sequence of callables. The
rightmost function passed can accept any arguments and
the returned function will have the same signature as
this last provided function. All preceding functions
must be unary.
Returns:... | codesearchnet |
def change_kernel(self, kernel, return_dict=True):
if (type(kernel) != Kernel):
raise BadKernelObject('Use Kernel object')
return self._perform_action({'type': 'change_kernel', 'kernel': kernel.id}, return_dict) | Change the kernel to a new one
Args:
kernel : instance of digitalocean.Kernel.Kernel
Optional Args:
return_dict (bool): Return a dict when True (default),
otherwise return an Action.
Returns dict or Action | codesearchnet |
def disable(self):
self.client.api.disable_plugin(self.name)
self.reload() | Disable the plugin.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error. | codesearchnet |
def has_all_finite_radius_neurites(data_wrapper, threshold=0.0):
db = data_wrapper.data_block
neurite_ids = np.in1d(db[(:, COLS.TYPE)], POINT_TYPE.NEURITES)
zero_radius_ids = (db[(:, COLS.R)] <= threshold)
bad_pts = np.array(db[(neurite_ids & zero_radius_ids)][(:, COLS.ID)], dtype=int).tolist()
retu... | Check that all points with neurite type have a finite radius
Returns:
CheckResult with result and list of IDs of neurite points with zero radius | codesearchnet |
def build(X_df=None, y_df=None):
if X_df is None:
X_df, _ = load_data()
if y_df is None:
_, y_df = load_data()
features = get_contrib_features()
mapper_X = ballet.feature.make_mapper(features)
X = mapper_X.fit_transform(X_df)
encoder_y = get_target_encoder()
y = encode... | Build features and target
Args:
X_df (DataFrame): raw variables
y_df (DataFrame): raw target
Returns:
dict with keys X_df, features, mapper_X, X, y_df, encoder_y, y | juraj-google-style |
def find1(self, kw: YangIdentifier, arg: str = None,
pref: YangIdentifier = None,
required: bool = False) -> Optional["Statement"]:
for sub in self.substatements:
if (sub.keyword == kw and sub.prefix == pref and
(arg is None or sub.argument ==... | Return first substatement with the given parameters.
Args:
kw: Statement keyword (local part for extensions).
arg: Argument (all arguments will match if ``None``).
pref: Keyword prefix (``None`` for built-in statements).
required: Should an exception be raised on failure?
Raises:
StatementNotFound: If `required` is `... | juraj-google-style |
def _validate_first_message(cls, msg):
data = cls._unpack_message(msg)
logger.debug(data)
if (data != cls.RTM_HANDSHAKE):
raise SlackApiError('Unexpected response: {!r}'.format(data))
logger.info('Joined real-time messaging.') | Check the first message matches the expected handshake.
Note:
The handshake is provided as :py:attr:`RTM_HANDSHAKE`.
Arguments:
msg (:py:class:`aiohttp.Message`): The message to validate.
Raises:
:py:class:`SlackApiError`: If the data doesn't match the
expected handshake. | codesearchnet |
def attribute(self, attr_type, attr_value, displayed=False, source=None, unique=True, formatter=None):
attr = Attribute(attr_type, attr_value, displayed, source, formatter)
if (unique == 'Type'):
for attribute_data in self._attributes:
if (attribute_data.type == attr_type):
a... | Return instance of Attribute
unique:
* False - Attribute type:value can be duplicated.
* Type - Attribute type has to be unique (e.g., only 1 Description Attribute).
* True - Attribute type:value combo must be unique.
Args:
attr_type (str): The ThreatConnect defined attribute type.
attr_value (str): The value for thi... | codesearchnet |
def DownloadDir(aff4_path, output_dir, bufsize=8192, preserve_path=True):
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
fd = aff4.FACTORY.Open(aff4_path)
for child in fd.OpenChildren():
if preserve_path:
full_dir = utils.JoinPath(output_dir, child.urn.Path())
full_dir = o... | Take an aff4 path and download all files in it to output_dir.
Args:
aff4_path: Any aff4 path as a string
output_dir: A local directory to write to, will be created if not there.
bufsize: Buffer size to use.
preserve_path: If set all paths will be created. Note that this works for
collections as well. It will download... | juraj-google-style |
def get_enterprise_user_id(self, obj):
enterprise_learner = EnterpriseCustomerUser.objects.filter(user_id=obj.id).first()
return (enterprise_learner and enterprise_learner.id) | Get enterprise user id from user object.
Arguments:
obj (User): Django User object
Returns:
(int): Primary Key identifier for enterprise user object. | codesearchnet |
def get(issue_id, issue_type_id):
return db.Issue.find_one(
Issue.issue_id == issue_id,
Issue.issue_type_id == issue_type_id
) | Return issue by ID
Args:
issue_id (str): Unique Issue identifier
issue_type_id (str): Type of issue to get
Returns:
:obj:`Issue`: Returns Issue object if found, else None | juraj-google-style |
def which(self, cmd, parent_environ=None, fallback=False):
env = self.get_environ(parent_environ=parent_environ)
path = which(cmd, env=env)
if (fallback and (path is None)):
path = which(cmd)
return path | Find a program in the resolved environment.
Args:
cmd: String name of the program to find.
parent_environ: Environment to interpret the context within,
defaults to os.environ if None.
fallback: If True, and the program is not found in the context,
the current environment will then be searched.
Returns:
Path to the pr... | codesearchnet |
def bytes_to_readable_str(num_bytes, include_b=False):
if num_bytes is None:
return str(num_bytes)
if num_bytes < 1024:
result = "%d" % num_bytes
elif num_bytes < 1048576:
result = "%.2fk" % (num_bytes / float(1 << 10))
elif num_bytes < 1073741824:
result = "%.2fM" % (num_bytes / float(1 << ... | Generate a human-readable string representing number of bytes.
The units B, kB, MB and GB are used.
Args:
num_bytes: (`int` or None) Number of bytes.
include_b: (`bool`) Include the letter B at the end of the unit.
Returns:
(`str`) A string representing the number of bytes in a human-readable way,
including a unit a... | juraj-google-style |
def pack(value, nbits=None):
if (nbits is None):
nbits = (pack_size(value) * BITS_PER_BYTE)
elif (nbits <= 0):
raise ValueError('Given number of bits must be greater than 0.')
buf_size = int(math.ceil((nbits / float(BITS_PER_BYTE))))
buf = (ctypes.c_uint8 * buf_size)()
for (idx, _) i... | Packs a given value into an array of 8-bit unsigned integers.
If ``nbits`` is not present, calculates the minimal number of bits required
to represent the given ``value``. The result is little endian.
Args:
value (int): the integer value to pack
nbits (int): optional number of bits to use to represent the value
Ret... | codesearchnet |
def _ReadPropertySet(self, property_set):
for property_section in property_set.sections:
if (property_section.class_identifier != self._CLASS_IDENTIFIER):
continue
for property_value in property_section.properties:
property_name = self._PROPERTY_NAMES.get(property_value.ident... | Reads properties from a property set.
Args:
property_set (pyolecf.property_set): OLECF property set. | codesearchnet |
def _to_backend_layout(tensor_layout):
if tensor_layout.device_mesh is None:
raise ValueError('Cannot create sharding when device mesh is not set for TensorLayout.')
partition_spec = jax.sharding.PartitionSpec(*tensor_layout.axes)
jax_mesh = tensor_layout.device_mesh.backend_mesh
return jax.shar... | Convert the TensorLayout to JAX backend specific Sharding.
Args:
tensor_layout: TensorLayout instance to convert.
Returns:
A `jax.sharding.NamedSharding` instance. | github-repos |
def _GetScanner(self, specification_store, signature_identifiers):
if not specification_store:
return None
scanner_object = pysigscan.scanner()
for format_specification in specification_store.specifications:
if format_specification.identifier not in signature_identifiers:
continue... | Initializes the scanner form the specification store.
Args:
specification_store (FormatSpecificationStore): a specification store.
signature_identifiers (list[str]): signature identifiers.
Returns:
pysigscan.scanner: signature scanner or None. | juraj-google-style |
def ReadArtifactDefinitionValues(self, artifact_definition_values):
if not artifact_definition_values:
raise errors.FormatError('Missing artifact definition values.')
different_keys = (
set(artifact_definition_values) - definitions.TOP_LEVEL_KEYS)
if different_keys:
different_keys ... | Reads an artifact definition from a dictionary.
Args:
artifact_definition_values (dict[str, object]): artifact definition
values.
Returns:
ArtifactDefinition: an artifact definition.
Raises:
FormatError: if the format of the artifact definition is not set
or incorrect. | juraj-google-style |
def update_fitness(objective_function, particle):
fitness = objective_function(particle.position)
best_fitness = particle.best_fitness
cmp = comparator(fitness)
if best_fitness is None or cmp(fitness, best_fitness):
best_position = particle.position
return particle._replace(fitness=... | Calculates and updates the fitness and best_fitness of a particle.
Fitness is calculated using the 'problem.fitness' function.
Args:
problem: The optimization problem encapsulating the fitness function
and optimization type.
particle: cipy.algorithms.pso.Particle: Particle to update the fitness
for.
Returns:
cipy.al... | juraj-google-style |
def delete_storage_account(access_token, subscription_id, rgname, account_name):
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourcegroups/', rgname,
'/providers/Microsoft.Storage/storageAccounts/', accoun... | Delete a storage account in the specified resource group.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
rgname (str): Azure resource group name.
account_name (str): Name of the new storage account.
Returns:
HTTP response. | juraj-google-style |
def validate_split(query):
if query.order:
raise SplitNotPossibleError('Query cannot have any sort orders.')
if query.limit is not None:
raise SplitNotPossibleError('Query cannot have a limit set.')
for filter in query.filters:
if isinstance(filter[1], ValueProvider):
fil... | Verifies that the given query can be properly scattered.
Note that equality and ancestor filters are allowed, however they may result
in inefficient sharding.
Raises:
QuerySplitterError if split could not be performed owing to query
parameters. | github-repos |
def FormatTree(tree, style_config=None, lines=None):
style.SetGlobalStyle(style.CreateStyleFromConfig(style_config))
comment_splicer.SpliceComments(tree)
continuation_splicer.SpliceContinuations(tree)
subtype_assigner.AssignSubtypes(tree)
identify_container.IdentifyContainers(tree)
split_penalty... | Format a parsed lib2to3 pytree.
This provides an alternative entry point to YAPF.
Arguments:
tree: (pytree.Node) The root of the pytree to format.
style_config: (string) Either a style name or a path to a file that contains
formatting style settings. If None is specified, use the default style
as set in style.DEFAULT... | github-repos |
def merkleroot(hashes):
if not hashes:
return sha3_256(b'').hexdigest()
if len(hashes) == 1:
return hexlify(hashes[0]).decode()
if len(hashes) % 2 == 1:
hashes.append(hashes[-1])
parent_hashes = [
sha3_256(hashes[i] + hashes[i+1]).digest... | Computes the merkle root for a given list.
Args:
hashes (:obj:`list` of :obj:`bytes`): The leaves of the tree.
Returns:
str: Merkle root in hexadecimal form. | juraj-google-style |
def resnet18(pretrained=False, **kwargs):
model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))
return model | Constructs a ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet | codesearchnet |
def source_lines(self, host_name, file_path):
offset = self._host_name_file_path_to_offset[host_name, file_path]
return list(self._reader.read_source_files_event(offset).source_file.lines) | Read the line-by-line content of a source file.
Args:
host_name: Host name on which the source file is located.
file_path: File path at which the source file is located.
Returns:
Lines of the source file as a `list` of `str`s. | github-repos |
def update_acmg(self, institute_obj, case_obj, user_obj, link, variant_obj, acmg_str):
self.create_event(institute=institute_obj, case=case_obj, user=user_obj, link=link, category='variant', verb='acmg', variant=variant_obj, subject=variant_obj['display_name'])
LOG.info('Setting ACMG to {} for: {}'.format(acmg_... | Create an event for updating the ACMG classification of a variant.
Arguments:
institute_obj (dict): A Institute object
case_obj (dict): Case object
user_obj (dict): A User object
link (str): The url to be used in the event
variant_obj (dict): A variant object
acmg_str (str): The new ACMG classification string
Returns... | codesearchnet |
def _wrap_and_check_outputs(self, outputs, single_output_default_name, error_label=None):
if not isinstance(outputs, dict):
outputs = {single_output_default_name: outputs}
output_dict = {}
for key, value in outputs.items():
error_name = error_label or single_output_default_name
key =... | Wraps raw tensors as dicts and checks type.
Note that we create a new dict here so that we can overwrite the keys
if necessary.
Args:
outputs: A `Tensor` or a dict of string to `Tensor`.
single_output_default_name: A string key for use in the output dict
if the provided `outputs` is a raw tensor.
error_label: descrip... | github-repos |
def traverse_inorder(self, leaves=True, internal=True):
c = self; s = deque(); done = False
while not done:
if c is None:
if len(s) == 0:
done = True
else:
c = s.pop()
if (leaves and c.is_lea... | Perform an inorder traversal starting at this ``Node`` object
Args:
``leaves`` (``bool``): ``True`` to include leaves, otherwise ``False``
``internal`` (``bool``): ``True`` to include internal nodes, otherwise ``False`` | juraj-google-style |
def find_module_id_defining_flag(self, flagname, default=None):
registered_flag = self._flags().get(flagname)
if registered_flag is None:
return default
for module_id, flags in six.iteritems(self.flags_by_module_id_dict()):
for flag in flags:
if (flag.name... | Return the ID of the module defining this flag, or default.
Args:
flagname: str, name of the flag to lookup.
default: Value to return if flagname is not defined. Defaults
to None.
Returns:
The ID of the module which registered the flag with this name.
If no such module exists (i.e. no flag with this name exists),
we ... | juraj-google-style |
def exec_python(attr, src, executable="python"):
import subprocess
if isinstance(src, basestring):
src = [src]
p = popen([executable, "-c", "; ".join(src)],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
if p.returncode:
from rez.exce... | Runs a python subproc to calculate a package attribute.
Args:
attr (str): Name of package attribute being created.
src (list of str): Python code to execute, will be converted into
semicolon-delimited single line of code.
Returns:
str: Output of python process. | juraj-google-style |
class RunScoreAndLearn(beam.PTransform[beam.PCollection[NestedKeyedInputT], beam.PCollection[NestedKeyedOutputT]]):
def __init__(self, detector: AnomalyDetector):
self._detector = detector
def expand(self, input: beam.PCollection[NestedKeyedInputT]) -> beam.PCollection[NestedKeyedOutputT]:
ret... | Applies the _ScoreAndLearnDoFn to a PCollection of data.
This PTransform scores and learns from data points using an anomaly
detection model.
Args:
detector: The anomaly detection model to use. | github-repos |
def _attempt_shard_retry(self, shard_state, tstate):
shard_attempts = (shard_state.retries + 1)
if (shard_attempts >= parameters.config.SHARD_MAX_ATTEMPTS):
logging.warning('Shard attempt %s exceeded %s max attempts.', shard_attempts, parameters.config.SHARD_MAX_ATTEMPTS)
return self._TASK_DIREC... | Whether to retry shard.
This method may modify shard_state and tstate to prepare for retry or fail.
Args:
shard_state: model.ShardState for current shard.
tstate: model.TransientShardState for current shard.
Returns:
A _TASK_DIRECTIVE enum. RETRY_SHARD if shard should be retried.
FAIL_TASK otherwise. | codesearchnet |
def update(w: jax.Array, scores: jax.Array, rows: jax.Array, cols: jax.Array, Y: jax.Array) -> typing.Tuple[jax.Array, jax.Array, int, float]:
N = w.shape[0]
M = scores.shape[0]
res = w.dot(Y) - jax.ops.segment_sum((w * (2 * Y - 1)).take(rows), cols, M)
err = 0.5 - jnp.abs(res - 0.5)
best_feature_in... | Calculates the new weight vector and the contribution scores.
Args:
w (jax.Array): A weight vector.
scores (JAX array): Contribution scores of features.
rows (jax.Array): Row indices of True values in the input data.
cols (jax.Array): Column indices of True values in the input data.
Y (jax.Array): The target output.
... | github-repos |
def blocks(self, name):
b = self._blocks(name)
if b:
return b
return self._blocks(name.replace('?>?', ' ')) | Search for defined blocks recursively.
Allow '>' to be ignored. '.a .b' == '.a > .b'
Args:
name (string): Search term
Returns:
Block object OR False | juraj-google-style |
def __init__(self, z=None, x=None, label=None):
r
if label is not None:
a = Pauli.from_label(label)
self._z = a.z
self._x = a.x
else:
self._init_from_bool(z, x) | r"""Make the Pauli object.
Note that, for the qubit index:
- Order of z, x vectors is q_0 ... q_{n-1},
- Order of pauli label is q_{n-1} ... q_0
E.g.,
- z and x vectors: z = [z_0 ... z_{n-1}], x = [x_0 ... x_{n-1}]
- a pauli is $P_{n-1} \otimes ... \otimes P_0$
Args:
z (numpy.ndarray): boolean, z vector
x (numpy.nda... | juraj-google-style |
def DumpMany(objs):
precondition.AssertIterableType(objs, object)
text = yaml.safe_dump_all(objs, default_flow_style=False, allow_unicode=True)
if compatibility.PY2:
text = text.decode("utf-8")
return text | Stringifies a sequence of Python objects to a multi-document YAML.
Args:
objs: An iterable of Python objects to convert to YAML.
Returns:
A multi-document YAML representation of the given objects. | juraj-google-style |
def build(cls, value: object, binary: bool = False,
fallback: object = None) -> Union[Nil, 'String']:
if value is None:
if fallback is None:
return Nil()
else:
return cls.build(fallback, binary)
elif not value:
re... | Produce either a :class:`QuotedString` or :class:`LiteralString`
based on the contents of ``data``. This is useful to improve
readability of response data.
Args:
value: The string to serialize.
binary: True if the string should be transmitted as binary.
fallback: The default value to use if ``value`` is None. | juraj-google-style |
def token_network_connect(
self,
registry_address: PaymentNetworkID,
token_address: TokenAddress,
funds: TokenAmount,
initial_channel_target: int = 3,
joinable_funds_target: float = 0.4,
) -> None:
if not is_binary_address(regi... | Automatically maintain channels open for the given token network.
Args:
token_address: the ERC20 token network to connect to.
funds: the amount of funds that can be used by the ConnectionMananger.
initial_channel_target: number of channels to open proactively.
joinable_funds_target: fraction of the funds that will be ... | juraj-google-style |
def send_email_message(self, recipient, subject, html_message, text_message, sender_email, sender_name):
if not current_app.testing:
from flask_sendmail import Message
message = Message(
subject,
recipients=[recipient],
... | Send email message via Flask-Sendmail.
Args:
recipient: Email address or tuple of (Name, Email-address).
subject: Subject line.
html_message: The message body in HTML.
text_message: The message body in plain text. | juraj-google-style |
def save_scan_plot(self, filename='scan.pdf', img_format='pdf', coords=None):
plt = self.get_scan_plot(coords)
plt.savefig(filename, format=img_format) | Save matplotlib plot of the potential energy surface to a file.
Args:
filename: Filename to write to.
img_format: Image format to use. Defaults to EPS.
coords: internal coordinate name to use as abcissa. | codesearchnet |
def _set_textarea(el, value):
if isinstance(value, dict):
el.text = value["val"]
elif type(value) in [list, tuple]:
el.text = "\n\n".join(
"-- %s --\n%s" % (item["source"], item["val"])
for item in value
)
else:
... | Set content of given textarea element `el` to `value`.
Args:
el (obj): Reference to textarea element you wish to set.
value (obj/list): Value to which the `el` will be set. | juraj-google-style |
def testRaggedOneHotMatchesArrayOpsOneHot(self, indices_shape, depth, on_value=None, off_value=None, axis=None, dtype=None):
indices_shape = tensor_shape.as_shape(indices_shape)
indices = np.random.randint(depth + 1, size=indices_shape)
expected = array_ops.one_hot(indices, depth, on_value=on_value, off_val... | Tests that tf.one_hot gives the same result for ragged & uniform tensors.
Runs tf.one_hot with a uniform tensor, and compares the output with the
results of calling tf.one_hot with ragged version of that tensor with
varying ragged ranks.
Args:
indices_shape: Shape for `indices` arg to `tf.one_hot`
depth: `depth` arg ... | github-repos |
def __call__(self, request: beam.Row, *args, **kwargs):
if self.entity_row_fn:
entity_dict = self.entity_row_fn(request)
else:
request_dict = request._asdict()
entity_dict = {self.entity_id: request_dict[self.entity_id]}
feature_values = self.store.get_online_features(features=self.f... | Fetches feature values for an entity-id from the Feast feature store.
Args:
request: the input `beam.Row` to enrich. | github-repos |
def _PreprocessSources(self, extraction_engine):
logger.debug('Starting preprocessing.')
try:
artifacts_registry = engine.BaseEngine.BuildArtifactsRegistry(
self._artifact_definitions_path, self._custom_artifacts_path)
extraction_engine.PreprocessSources(
artifacts_registry... | Preprocesses the sources.
Args:
extraction_engine (BaseEngine): extraction engine to preprocess
the sources. | juraj-google-style |
def set_volume(percentage):
if ((percentage > 100) or (percentage < 0)):
raise ValueError('percentage must be an integer between 0 and 100')
if (system.get_name() == 'windows'):
pass
elif (system.get_name() == 'mac'):
volume_int = (percentage / 10)
sp.Popen(['osascript', '-e'... | Set the volume.
Sets the volume to a given percentage (integer between 0 and 100).
Args:
percentage (int): The percentage (as a 0 to 100 integer) to set the volume to.
Raises:
ValueError: if the percentage is >100 or <0. | codesearchnet |
def authenticate(self, user, password):
request = Request(AUTH_URL)
request.add_header('X-Simperium-API-Key', API_KEY)
if (sys.version_info < (3, 3)):
request.add_data(json.dumps({'username': user, 'password': password}))
else:
request.data = json.dumps({'username': user, 'password': pas... | Method to get simplenote auth token
Arguments:
- user (string): simplenote email address
- password (string): simplenote password
Returns:
Simplenote API token as string | codesearchnet |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.