| code | docstring | source |
|---|---|---|
def stack(x, axis=0):
if any_symbolic_tensors((x,)):
return Stack(axis=axis).symbolic_call(x)
return backend.numpy.stack(x, axis=axis) | Join a sequence of tensors along a new axis.
The `axis` parameter specifies the index of the new axis in the
dimensions of the result.
Args:
x: A sequence of tensors.
axis: Axis along which to stack. Defaults to `0`.
Returns:
The stacked tensor. | github-repos |
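For intuition, stacking joins inputs along a brand-new axis, unlike concatenation, which reuses an existing one. A minimal NumPy sketch of the same semantics, independent of the backend dispatch above:

```python
import numpy as np

a = np.zeros((3, 4))
b = np.ones((3, 4))

np.stack([a, b], axis=0).shape        # (2, 3, 4) — new leading axis
np.stack([a, b], axis=1).shape        # (3, 2, 4) — new axis inserted at index 1
np.concatenate([a, b], axis=0).shape  # (6, 4)    — for contrast, no new axis
```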
def _AddPathSegments(self, path, ignore_list):
path_segments = path.split(self._path_segment_separator)
for (path_segment_index, path_segment) in enumerate(path_segments):
if (path_segment_index not in self.path_segments_per_index):
self.path_segments_per_index[path_segment_index] = {}
... | Adds the path segments to the table.
Args:
path: a string containing the path.
ignore_list: a list of path segment indexes to ignore, where 0 is the
index of the first path segment relative from the root. | codesearchnet |
def labels_in_range(self, start, end, fully_included=False):
if fully_included:
intervals = self.label_tree.envelop(start, end)
else:
intervals = self.label_tree.overlap(start, end)
return [iv.data for iv in intervals] | Return a list of labels that are within the given range.
Labels that only partially overlap the range are included as well.
Args:
start(float): Start-time in seconds.
end(float): End-time in seconds.
fully_included(bool): If ``True``, only labels fully included
in the range are returned. Otherwise
also overlapping ones are returned.
(defau... | codesearchnet |
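Assuming `label_tree` is an `intervaltree.IntervalTree` (an assumption based on the `envelop`/`overlap` method names), the two query modes behave roughly like this sketch:

```python
from intervaltree import IntervalTree

tree = IntervalTree()
tree[0.0:2.0] = "label-a"  # interval [0.0, 2.0) carrying data "label-a"
tree[1.5:4.0] = "label-b"

# overlap: any interval touching the query range is returned
{iv.data for iv in tree.overlap(1.0, 3.0)}   # {'label-a', 'label-b'}

# envelop: only intervals fully contained in the query range
{iv.data for iv in tree.envelop(1.0, 4.5)}   # {'label-b'}
```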
def mel_spectrogram(self, sequence: np.ndarray):
mel_specs = []
for seq in sequence:
window = np.hanning(self.window_size + 1)[:-1]
mel_specs.append(spectrogram(waveform=seq, window=window, frame_length=self.window_size, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters))
m... | Generates MelSpectrogram.
Args:
sequence (`numpy.ndarray`):
The sequence of which the mel-spectrogram will be computed. | github-repos |
def _parse_services(self, service_config: dict, service_name: str,
service_list: dict) -> dict:
for key, value in service_list['services'][service_name].items():
service_config[key] = value
if 'command' in key:
key = "args"
... | Parse the docker compose file.
Args:
service_config (dict): Service configurations from the compose file
service_name (string): Name of the services
service_list (dict): Service configuration list
Returns:
dict, service specifications extracted from the compose file | juraj-google-style |
def _create_handler(self, config):
if (config is None):
raise ValueError('No handler config to create handler from.')
if ('name' not in config):
raise ValueError('Handler name is required.')
handler_name = config['name']
module_name = handler_name.rsplit('.', 1)[0]
class_name = handl... | Creates a handler from its config.
Params:
config: handler config
Returns:
handler instance | codesearchnet |
def download_items(cache_fn, start=None):
with SqliteDict(cache_fn) as db:
last_id = db.get("last_id", 0) if not start else start
_download_items(db, last_id)
db.commit() | Open the `cache_fn` as database and download all not-yet downloaded items.
Args:
cache_fn (str): Path to the sqlite database. If not exists, it will be
created.
start (int, default None): If set, start from this sysno. | juraj-google-style |
def _ParseEntryObjectOffsets(self, file_object, file_offset):
entry_array_object = self._ParseEntryArrayObject(file_object, file_offset)
entry_object_offsets = list(entry_array_object.entry_object_offsets)
while entry_array_object.next_entry_array_offset != 0:
entry_array_object = self._ParseEnt... | Parses entry array objects for the offset of the entry objects.
Args:
file_object (dfvfs.FileIO): a file-like object.
file_offset (int): offset of the first entry array object relative to
the start of the file-like object.
Returns:
list[int]: offsets of the entry objects. | juraj-google-style |
def create_heart(self, git_repo_url, max_commits=10, weeks_from_now=1):
self.weeks_from_now = weeks_from_now
self.end_date = self.get_end_date()
try:
self.repository_name = git_repo_url.split('/')[-1][:-4]
self.git_repo_url = git_repo_url
self.max_com... | Creates heart on the Summary.
Args:
git_repo_url: The url (ssh or https) of the Repository, used for cloning
max_commits: Maximum number of commits in a day
weeks_from_now: The number of week from this week the Heart's Right center boundary will be. | juraj-google-style |
def __init__(self, network, scope='network-baseline', summary_labels=()):
self.network = Network.from_spec(
spec=network,
kwargs=dict(summary_labels=summary_labels)
)
assert len(self.network.internals_spec()) == 0
self.linear = Linear(size=1, bias=0.0, s... | Network baseline.
Args:
network: Network specification dict | juraj-google-style
def iterator_full_type_from_spec(element_spec):
args = fulltypes_for_flat_tensors(element_spec)
return full_type_pb2.FullTypeDef(type_id=full_type_pb2.TFT_PRODUCT, args=[full_type_pb2.FullTypeDef(type_id=full_type_pb2.TFT_ITERATOR, args=[full_type_pb2.FullTypeDef(type_id=full_type_pb2.TFT_PRODUCT, args=args)])]... | Returns a FullTypeDef for an iterator for the elements.
Args:
element_spec: A nested structure of `tf.TypeSpec` objects representing the
element type specification.
Returns:
A FullTypeDef for an iterator for the element tensor representation. | github-repos |
def get(self, center, target, date):
if (center.index, target.index) in self.segments:
pos, vel = self.segments[center.index, target.index].compute_and_differentiate(date.jd)
sign = 1
else:
pos, vel = self.segments[targ... | Retrieve the position and velocity of a target with respect to a center
Args:
center (Target):
target (Target):
date (Date):
Return:
numpy.array: length-6 array position and velocity (in m and m/s) of the
target, with respect to the center | juraj-google-style |
def __sub__(self, other: 'TensorFluent') -> 'TensorFluent':
return self._binary_op(self, other, tf.subtract, tf.float32) | Returns a TensorFluent for the subtraction arithmetic operator.
Args:
self: The first operand.
other: The second operand.
Returns:
A TensorFluent wrapping the operator's output. | juraj-google-style |
def combine(path1, path2):
if not path1:
return path2.lstrip()
return "{}/{}".format(path1.rstrip("/"), path2.lstrip("/")) | Join two paths together.
This is faster than :func:`~fs.path.join`, but only works when the
second path is relative, and there are no back references in either
path.
Arguments:
path1 (str): A PyFilesystem path.
path2 (str): A PyFilesystem path.
Returns:
str: The joint path.
Example:
>>> combine("foo/bar", "baz")
'foo... | juraj-google-style |
def train(self, mode=True):
super().train(mode)
if mode:
mu.apply_leaf(self, mu.set_train_mode)
return self | r"""
Sets the module in training mode.
This has any effect only on certain modules. See documentations of
particular modules for details of their behaviors in training/evaluation
mode, if they are affected, e.g. :class:`Dropout`, :class:`BatchNorm`,
etc.
Returns:
Module: self | codesearchnet |
def exp(x):
if any_symbolic_tensors((x,)):
return Exp().symbolic_call(x)
return backend.numpy.exp(x) | Calculate the exponential of all elements in the input tensor.
Args:
x: Input tensor.
Returns:
Output tensor, element-wise exponential of `x`. | github-repos |
def BDEVolumeOpen(bde_volume, path_spec, file_object, key_chain):
password = key_chain.GetCredential(path_spec, 'password')
if password:
bde_volume.set_password(password)
recovery_password = key_chain.GetCredential(path_spec, 'recovery_password')
if recovery_password:
bde_volume.set_recovery_passwor... | Opens the BDE volume using the path specification.
Args:
bde_volume (pybde.volume): BDE volume.
path_spec (PathSpec): path specification.
file_object (FileIO): file-like object.
key_chain (KeyChain): key chain. | juraj-google-style |
def get_image_patches(self, image: np.array, grid_pinpoints, size: tuple, patch_size: int, resample: PILImageResampling, data_format: ChannelDimension, input_data_format: ChannelDimension) -> List[np.array]:
if not isinstance(grid_pinpoints, list):
raise TypeError('grid_pinpoints must be a list of possible ... | Process an image with variable resolutions by dividing it into patches.
Args:
image (np.array):
The input image to be processed.
grid_pinpoints (List):
A string representation of a list of possible resolutions.
size (`tuple`):
Size to resize the original image to.
patch_size (`int`):
Size of the patches to divide the ... | github-repos |
def create_creation_event(self):
event = self.create_audit_event(code='AUDIT_CREATE')
if self._meta.create_message:
event.body = self._meta.create_message['message']
event.code = self._meta.create_message['code']
event.meta = self.parse_meta(self._meta.create_message['meta'])
self.cr... | Parse the create message DSL to insert the data into the Event.
Returns:
fleaker.peewee.EventStorageMixin:
A new Event instance with data put in it | codesearchnet |
def ordered_repr(obj: object, attrlist: Iterable[str],
joiner: str = COMMA_SPACE) -> str:
return "<{classname}({kvp})>".format(
classname=type(obj).__name__,
kvp=joiner.join("{}={}".format(a, repr(getattr(obj, a)))
for a in attrlist)
) | Shortcut to make :func:`repr` functions ordered.
Define your :func:`__repr__` like this:
.. code-block:: python
def __repr__(self):
return ordered_repr(self, ["field1", "field2", "field3"])
Args:
obj: object to display
attrlist: iterable of attribute names
joiner: string with which to join the elements
Returns:
str... | juraj-google-style |
def init(library: typing.Union[str, types.ModuleType]) -> None:
if isinstance(library, types.ModuleType):
library = library.__name__
if library not in manager._handlers:
raise ValueError("Possible values are <{}>, not <{}>".format(manager._handlers.keys(),
... | Must be called at some point after import and before your event loop
is run.
Populates the asynclib instance of _AsyncLib with methods relevant to the
async library you are using.
The supported libraries at the moment are:
- curio
- trio
Args:
library (str or module): Either the module name as a string or the
import... | juraj-google-style |
def generate_query_key(self, serializer):
rewritten = []
last = len(self.field) - 1
s = serializer
field = None
for i, field_name in enumerate(self.field):
fields = s.fields
if field... | Get the key that can be passed to Django's filter method.
To account for serializer field name rewrites, this method
translates serializer field names to model field names
by inspecting `serializer`.
For example, a query like `filter{users.events}` would be
returned as `users__events`.
Arguments:
serializer: A DRF se... | juraj-google-style |
def delete(self, wait_for_deletion=True):
if self.exists():
try:
self._api.objects_delete(self._bucket, self._key)
except Exception as e:
raise e
if wait_for_deletion:
for _ in range(_MAX_POLL_ATTEMPTS):
objects = Objects(self._bucket, pref... | Deletes this object from its bucket.
Args:
wait_for_deletion: If True, we poll until this object no longer appears in
objects.list operations for this bucket before returning.
Raises:
Exception if there was an error deleting the object. | codesearchnet |
def _apply(self, ctx: ExtensionContext) -> AugmentedDict:
node_key, node_value = ctx.node
def process(pattern: Pattern[str], _str: str) -> Any:
_match = pattern.match(_str)
if _match is None:
return _str
placeholder,... | Replaces any {{var::*}} directives with its actual variable value or a default.
Args:
ctx: The processing context.
Returns:
Returns the altered node key and value. | juraj-google-style |
def get_public_ip(access_token, subscription_id, resource_group, ip_name):
endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourceGroups/', resource_group, '/providers/Microsoft.Network/', 'publicIPAddresses/', ip_name, '?api-version=', NETWORK_API])
return do_get(endpoint, access_... | Get details about the named public ip address.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
ip_name (str): Name of the public IP address resource.
Returns:
HTTP response. Public IP address JSON body. | codesearchnet |
def set_settings(self, settings):
for k, v in settings.items():
setattr(self, k, v) | Set each given setting as an object attribute.
Args:
settings (dict): Dictionary of settings. | juraj-google-style
def MaxBipartiteMatching(self, graph):
self.g = nx.Graph(graph)
self.left = set((n for n, d in self.g.nodes(data=True) if not d['bipartite']))
self.right = set(self.g) - self.left
self.num_matched = 0
self.s = set()
self.t = set()
self.matches = {}
self.slack = {}
self.slackx = {}
... | Find a maximum matching for a bipartite graph.
This is an O(n^3) implementation of the Hungarian method for complete bipartite
matching problems.
Args:
graph: A networkx graph object, assumed to be bipartite.
Returns:
A dictionary keyed on node names in left to node names in right. | github-repos |
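As a usage reference (not the Hungarian implementation above), networkx ships a built-in maximum bipartite matching based on Hopcroft–Karp for the unweighted case; a minimal sketch:

```python
import networkx as nx

g = nx.Graph()
g.add_nodes_from(["l1", "l2"], bipartite=0)
g.add_nodes_from(["r1", "r2"], bipartite=1)
g.add_edges_from([("l1", "r1"), ("l1", "r2"), ("l2", "r1")])

left = {n for n, d in g.nodes(data=True) if d["bipartite"] == 0}
matching = nx.bipartite.maximum_matching(g, top_nodes=left)
# e.g. {'l1': 'r2', 'l2': 'r1', 'r2': 'l1', 'r1': 'l2'} — both directions included
```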
def __init__(self, variables, name='ShardedVariable'):
super(ShardedVariableMixin, self).__init__()
self._variables = variables
self._name = name
if not isinstance(variables, Sequence) or not variables or any((not isinstance(v, variables_lib.Variable) for v in variables)):
raise TypeError(f'Argu... | Treats `variables` as shards of a larger Variable.
Example:
```
variables = [
tf.Variable(..., shape=(10, 100), dtype=tf.float32),
tf.Variable(..., shape=(15, 100), dtype=tf.float32),
tf.Variable(..., shape=(5, 100), dtype=tf.float32)
]
sharded_variable = ShardedVariableMixin(variables)
assert sharded_variable.shape.... | github-repos |
def partial_tile(tensor, tile_assignment, use_sharding_op=False, unspecified_dims=None):
return Sharding.partial_tile(tile_assignment).apply_to_tensor(tensor, use_sharding_op=use_sharding_op, unspecified_dims=unspecified_dims or []) | Returns a tensor that has tiled sharding.
Args:
tensor: A tf.Tensor to shard.
tile_assignment: An np.ndarray describing the topology of the tiling and
which device will compute which part of the topology. It must have one
more dimension than tensor, and the last dimension represents partially
replicated tiles.
use_sha... | github-repos |
def fit_transform(self, col):
if self.anonymize:
col = self.anonymize_column(col)
self._fit(col)
return self.transform(col) | Prepare the transformer and return processed data.
Args:
col(pandas.DataFrame): Data to transform.
Returns:
pandas.DataFrame | juraj-google-style |
def call_with_mapped_args(self, mapped_args: MappedArgs[FrameType]) -> _HasReturnT: | Calls this function with the given mapped arguments.
Args:
mapped_args: The function arguments mapped to parameter names.
Returns:
An object with information about the result of the function call, with a
get_return_value() method that retrieves the return value. | github-repos |
def __init__(self, path: utils.KeyPath, target: 'Symbolic', field: Optional[pg_typing.Field], old_value: Any, new_value: Any):
self.path = path
self.target = target
self.field = field
self.old_value = old_value
self.new_value = new_value | Constructor.
Args:
path: KeyPath of the field that is updated.
target: Parent of updated field.
field: Specification of the updated field.
old_value: Old value of the field.
new_value: New value of the field. | github-repos |
def padded_cross_entropy_loss(logits, labels, smoothing, vocab_size):
with tf.name_scope('loss', [logits, labels]):
(logits, labels) = _pad_tensors_to_same_length(logits, labels)
with tf.name_scope('smoothing_cross_entropy', [logits, labels]):
confidence = (1.0 - smoothing)
l... | Calculate cross entropy loss while ignoring padding.
Args:
logits: Tensor of size [batch_size, length_logits, vocab_size]
labels: Tensor of size [batch_size, length_labels]
smoothing: Label smoothing constant, used to determine the on and off values
vocab_size: int size of the vocabulary
Returns:
Returns a float32 ten... | codesearchnet |
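The smoothing constants work as in standard label smoothing: the true class gets `1 - smoothing` and the remaining mass is spread over the other classes. A small NumPy sketch of the smoothed target distribution (illustrative only):

```python
import numpy as np

vocab_size, smoothing = 5, 0.1
confidence = 1.0 - smoothing        # on-value for the true class
low = smoothing / (vocab_size - 1)  # off-value for every other class

targets = np.full(vocab_size, low)
targets[2] = confidence             # label id 2 as the true class
targets.sum()                       # 1.0 — still a valid distribution
```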
def select_by_value(self, value):
self._selected_key = None
self._selected_item = None
for k in self.children:
item = self.children[k]
if item.get_text() == value:
item.attributes['selected'] = 'selected'
self._selected_key = k
... | Selects a DropDownItem by means of the contained text.
Args:
value (str): Textual content of the DropDownItem that has to be selected. | juraj-google-style
def __init__(self, maximum_number_of_cached_values):
if maximum_number_of_cached_values <= 0:
raise ValueError(
'Invalid maximum number of cached objects value zero or less.')
super(ObjectsCache, self).__init__()
self._maximum_number_of_cached_values = maximum_number_of_cached_values
... | Initializes the resolver objects cache object.
Args:
maximum_number_of_cached_values (int): maximum number of cached values.
Raises:
ValueError: when the maximum number of cached objects is 0 or less. | juraj-google-style |
def get_storage(self, id_or_uri):
uri = self.URI + "/{}/storage".format(extract_id_from_uri(id_or_uri))
return self._client.get(uri) | Get storage details of an OS Volume.
Args:
id_or_uri: ID or URI of the OS Volume.
Returns:
dict: Storage details | juraj-google-style |
def process_exception_message(exception):
exception_message = str(exception)
for replace_char in ['\t', '\n', '\\n']:
exception_message = exception_message.replace(replace_char, ('' if (replace_char != '\t') else ' '))
return exception_message.replace('section', 'alias') | Process an exception message.
Args:
exception: The exception to process.
Returns:
A filtered string summarizing the exception. | codesearchnet |
def SetEncodedValue(env, name, value, encoding=None):
name = Encode(name, encoding=encoding)
if value is None:
env.pop(name, None)
return
env[name] = Encode(value, encoding=encoding) | Sets the value of name in env to an encoded value.
Args:
env: {str: str}, The env dict.
name: str, The env var name.
value: str or unicode, The value for name. If None then name is removed from
env.
encoding: str, The encoding to use or None to try to infer it. | github-repos |
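Usage is symmetric with deletion: passing `None` removes the variable. A small sketch against a plain dict standing in for `os.environ`:

```python
env = {}
SetEncodedValue(env, "TOKEN", "abc123")  # sets env["TOKEN"] = "abc123"
SetEncodedValue(env, "TOKEN", None)      # removes TOKEN from env entirely
"TOKEN" in env                           # False
```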
def Analyze(self, hashes):
hash_analyses = []
for digest in hashes:
json_response = self._QueryHash(digest)
hash_analysis = interface.HashAnalysis(digest, json_response)
hash_analyses.append(hash_analysis)
return hash_analyses | Looks up hashes in Viper using the Viper HTTP API.
Args:
hashes (list[str]): hashes to look up.
Returns:
list[HashAnalysis]: hash analysis.
Raises:
RuntimeError: If no host has been set for Viper. | codesearchnet |
def remove_block(self, block, index="-1"):
self[index]["__blocks__"].remove(block)
self[index]["__names__"].remove(block.raw()) | Remove block element from scope
Args:
block (Block): Block object | juraj-google-style |
def _find_classes(self, dir):
if (sys.version_info >= (3, 5)):
classes = [d.name for d in os.scandir(dir) if d.is_dir()]
else:
classes = [d for d in os.listdir(dir) if os.path.isdir(os.path.join(dir, d))]
classes.sort()
class_to_idx = {classes[i]: i for i in range(len(classes))}
retu... | Finds the class folders in a dataset.
Args:
dir (string): Root directory path.
Returns:
tuple: (classes, class_to_idx) where classes are relative to (dir), and class_to_idx is a dictionary.
Ensures:
No class is a subdirectory of another. | codesearchnet |
def GetPluginObjectByName(cls, plugin_name):
plugin_class = cls._plugin_classes.get(plugin_name, None)
if plugin_class:
return plugin_class()
return None | Retrieves a specific plugin object by its name.
Args:
plugin_name (str): name of the plugin.
Returns:
BasePlugin: a plugin object or None if not available. | codesearchnet |
def _SmallestColSize(self, text):
if (not text):
return 0
stripped = terminal.StripAnsiText(text)
return max((len(word) for word in stripped.split())) | Finds the largest indivisible word of a string.
...and thus the smallest possible column width that can contain that
word unsplit over rows.
Args:
text: A string of text potentially consisting of words.
Returns:
Integer size of the largest single word in the text. | codesearchnet |
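Stripped of the ANSI handling, the core computation is just the length of the longest whitespace-separated token; a standalone sketch:

```python
def smallest_col_size(text: str) -> int:
    """Width of the largest single word, i.e. the narrowest usable column."""
    return max((len(word) for word in text.split()), default=0)

smallest_col_size("fits in narrow columns")  # 7 ("columns")
smallest_col_size("")                        # 0
```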
def _wait_and_retry(provider, job_id, poll_interval, retries, job_descriptor):
while True:
tasks = provider.lookup_job_tasks({'*'}, job_ids=[job_id])
running_tasks = set()
completed_tasks = set()
canceled_tasks = set()
fully_failed_tasks = set()
task_fail_count = dict... | Wait for job and retry any tasks that fail.
Stops retrying an individual task when: it succeeds, is canceled, or has been
retried "retries" times.
This function exits when there are no tasks running and there are no tasks
eligible to be retried.
Args:
provider: job service provider
job_id: a single job ID (string) t... | codesearchnet |
def display_required_items(msg_type):
print(('Configure a profile for: ' + msg_type))
print('You will need the following information:')
for (k, v) in CONFIG[msg_type]['settings'].items():
print((' * ' + v))
print('Authorization/credentials required:')
for (k, v) in CONFIG[msg_type]['auth']... | Display the required items needed to configure a profile for the given
message type.
Args:
:msg_type: (str) message type to create config entry. | codesearchnet |
def path_get(p: tcod.path.AStar, idx: int) -> Tuple[int, int]:
x = ffi.new("int *")
y = ffi.new("int *")
lib.TCOD_path_get(p._path_c, idx, x, y)
return x[0], y[0] | Get a point on a path.
Args:
p (AStar): An AStar instance.
idx (int): Should be in range: 0 <= idx < :any:`path_size` | juraj-google-style
def _get_elements(self, site):
try:
if isinstance(site.specie, Element):
return [site.specie]
return [Element(site.specie)]
except:
return site.species.elements | Get the list of elements for a Site
Args:
site (Site): Site to assess
Returns:
[Element]: List of elements | juraj-google-style |
def proportions_from_distribution(table, label, sample_size, column_name='Random Sample'):
proportions = sample_proportions(sample_size, table.column(label))
return table.with_column('Random Sample', proportions) | Adds a column named ``column_name`` containing the proportions of a random
draw using the distribution in ``label``.
This method uses ``np.random.multinomial`` to draw ``sample_size`` samples
from the distribution in ``table.column(label)``, then divides by
``sample_size`` to create the resulting column of proportions... | codesearchnet |
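The underlying mechanism is a single multinomial draw divided by the sample size; a minimal NumPy sketch:

```python
import numpy as np

distribution = [0.5, 0.3, 0.2]
sample_size = 100

counts = np.random.multinomial(sample_size, distribution)  # e.g. [52, 29, 19]
proportions = counts / sample_size                          # e.g. [0.52, 0.29, 0.19]
```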
def FoldValue(self, value):
if ((value is False) and (self._data_type_definition.false_value is not None)):
return self._data_type_definition.false_value
if ((value is True) and (self._data_type_definition.true_value is not None)):
return self._data_type_definition.true_value
raise ValueErro... | Folds the data type into a value.
Args:
value (object): value.
Returns:
object: folded value.
Raises:
ValueError: if the data type definition cannot be folded into the value. | codesearchnet |
def fetch(self, url):
opener = self._urllib.build_opener()
opener.addheaders = self._requestHeaders.items()
response = opener.open(url)
headers = response.info()
raw = response.read()
raw = raw.decode('utf8')
if (not ('Content-Type' in headers)):
raise OEmbedError('Missing mime-type ... | Fetch url and create a response object according to the mime-type.
Args:
url: The url to fetch data from
Returns:
OEmbedResponse object according to data fetched | codesearchnet |
def aggr(array, op, initial_value, ty):
weld_obj = WeldObject(encoder_, decoder_)
array_var = weld_obj.update(array)
if isinstance(array, WeldObject):
array_var = array.obj_id
weld_obj.dependencies[array_var] = array
weld_template =
weld_obj.weld_code = weld_template % {
... | Computes the aggregate of elements in the array.
Args:
array (WeldObject / Numpy.ndarray): Input array to aggregate
op (str): Op string used to aggregate the array (+ / *)
initial_value (int): Initial value for aggregation
ty (WeldType): Type of each element in the input array
Returns:
A WeldObject representing this... | juraj-google-style |
def _list_samples(self, predicate=None):
cursor = self.database[self.sample_collection].find(predicate, {'_id':0, 'md5':1})
return [item['md5'] for item in cursor] | List all samples that meet the predicate or all if predicate is not specified.
Args:
predicate: Match samples against this predicate (or all if not specified)
Returns:
List of the md5s for the matching samples | juraj-google-style |
def _process_sum_prod(self, func, **kwargs):
axis = kwargs.get("axis", 0)
min_count = kwargs.get("min_count", 0)
def sum_prod_builder(df, **kwargs):
return func(df, **kwargs)
if min_count <= 1:
return self._full_reduce(axis, sum_prod_builder)
el... | Calculates the sum or product of the DataFrame.
Args:
func: Pandas func to apply to DataFrame.
ignore_axis: Whether to ignore axis when raising TypeError
Return:
A new QueryCompiler object with sum or prod of the object. | juraj-google-style |
def simple_layer_stack(include_encdec_attention,
num_layers=6,
d_ff=2048,
num_heads=8,
d_kv=128,
dropout_rate=0.1):
ret = []
for _ in xrange(num_layers):
ret.append(
transformer_layers.S... | Create a layer stack.
Args:
include_encdec_attention: a boolean
num_layers: an integer
d_ff: an integer
num_heads: an integer
d_kv: an integer
dropout_rate: a float
Returns:
a LayerStack | juraj-google-style |
def invert(self) -> Rotation:
if self._rot_mats is not None:
return Rotation(rot_mats=invert_rot_mat(self._rot_mats), quats=None)
elif self._quats is not None:
return Rotation(rot_mats=None, quats=invert_quat(self._quats), normalize_quats=False)
else:
raise ValueError('Both rotations... | Returns the inverse of the current Rotation.
Returns:
The inverse of the current Rotation | github-repos |
def get_vcf_entry(variant_obj, case_id=None):
if (variant_obj['category'] == 'snv'):
var_type = 'TYPE'
else:
var_type = 'SVTYPE'
info_field = ';'.join([('END=' + str(variant_obj['end'])), ((var_type + '=') + variant_obj['sub_category'].upper())])
variant_string = '{0}\t{1}\t{2}\t{3}\t{4}... | Get vcf entry from variant object
Args:
variant_obj(dict)
Returns:
variant_string(str): string representing variant in vcf format | codesearchnet |
def int(name, default=None, allow_none=False, fallback=None):
value = read(name, default, allow_none, fallback=fallback)
if isinstance(value, builtins.str):
value = value.strip()
if ((value is None) and allow_none):
return None
else:
return builtins.int(value) | Get an integer environment value or the default.
Args:
name: The environment variable name
default: The default value to use if no environment variable is found
allow_none: If the return value can be `None` (i.e. optional) | codesearchnet |
def to_representation(self, obj):
representation = {}
for (name, field) in self.fields.items():
if field.write_only:
continue
attribute = self.get_attribute(obj, (field.source or name))
if (attribute is None):
representation[name] = ([] if field.many else None)
... | Convert given internal object instance into representation dict.
Representation dict may be later serialized to the content-type
of choice in the resource HTTP method handler.
This loops over all fields and retrieves source keys/attributes as
field values with respect to optional field sources and converts each
one u... | codesearchnet |
def find_field_names(fields, model=DEFAULT_MODEL, app=DEFAULT_APP, score_cutoff=50, pad_with_none=False):
fields = util.listify(fields)
model = get_model(model, app)
available_field_names = model._meta.get_all_field_names()
matched_fields = []
for field_name in fields:
match = fuzzy.extractO... | Use fuzzy string matching to find similar model field names without consulting a synonyms list
Returns:
list: A list model field names (strings) sorted from most likely to least likely.
[] If no similar field names could be found in the indicated model
[None] If none found and and `pad_with_none` set
Examples:
>>> f... | codesearchnet |
def validate(item, namespace='accounts', version=2, context=None):
if namespace == 'accounts':
if version == 2:
schema = v2.AccountSchema(strict=True, context=context)
return schema.load(item).data
elif version == 1:
return v1.AccountSchema(strict=True).load(... | Validate item against version schema.
Args:
item: data object
namespace: backend namespace
version: schema version
context: schema context object | juraj-google-style |
def get_job(self):
return Job(self.rest_client.make_request(self.job), self.rest_client) | Get the Streams job that owns this view.
Returns:
Job: Streams Job owning this view. | codesearchnet |
class JavaJarExpansionService(object):
def __init__(self, path_to_jar, extra_args=None, classpath=None, append_args=None):
if extra_args and append_args:
raise ValueError('Only one of extra_args or append_args may be provided')
self.path_to_jar = path_to_jar
self._extra_args = e... | An expansion service based on an Java Jar file.
This can be passed into an ExternalTransform as the expansion_service
argument which will spawn a subprocess using this jar to expand the
transform.
Args:
path_to_jar: the path to a locally available executable jar file to be used
to start up the expansion service.
extr... | github-repos |
def assert_raises(expected_exception, extras=None, *args, **kwargs):
context = _AssertRaisesContext(expected_exception, extras=extras)
return context | Assert that an exception is raised when a function is called.
If no exception is raised, the test fails. If an exception is raised but not
of the expected type, the exception is let through.
This should only be used as a context manager:
with assert_raises(Exception):
func()
Args:
expected_exception: An exception class t... | github-repos |
def __init__(self, todo_tasklet, limit):
self._todo_tasklet = todo_tasklet
self._limit = limit
self._queues = {}
self._running = []
self._cache = {} | Init.
Args:
todo_tasklet: the tasklet that actually fires RPC and waits on a MultiRPC.
It should take a list of (future, arg) pairs and an "options" as
arguments. "options" are rpc options.
limit: max number of items to batch for each distinct value of "options". | juraj-google-style |
def render(self, program: moderngl.Program, mode=None, vertices=(- 1), first=0, instances=1):
vao = self.instance(program)
if (mode is None):
mode = self.mode
vao.render(mode, vertices=vertices, first=first, instances=instances) | Render the VAO.
Args:
program: The ``moderngl.Program``
Keyword Args:
mode: Override the draw mode (``TRIANGLES`` etc)
vertices (int): The number of vertices to transform
first (int): The index of the first vertex to start with
instances (int): The number of instances | codesearchnet |
def MultiDelete(self, urns, token=None):
urns = [rdfvalue.RDFURN(urn) for urn in urns]
if token is None:
token = data_store.default_token
for urn in urns:
if urn.Path() == "/":
raise ValueError("Can't delete root URN. Please enter a valid URN")
deletion_pool = DeletionPool(to... | Drop all the information about given objects.
DANGEROUS! This recursively deletes all objects contained within the
specified URN.
Args:
urns: Urns of objects to remove.
token: The Security Token to use for opening this item.
Raises:
ValueError: If one of the urns is too short. This is a safety check to
ensure the ro... | juraj-google-style |
def get(cls, issue_id):
res = Issue.get(issue_id, IssueType.get(cls.issue_type).issue_type_id)
return cls(res) if res else None | Returns the class object identified by `issue_id`
Args:
issue_id (str): Unique EC2 Instance ID to load from database
Returns:
EC2 Instance object if found, else None | juraj-google-style |
def payments(self, virtual_account_id, data={}, **kwargs):
url = "{}/{}/payments".format(self.base_url, virtual_account_id)
return self.get_url(url, data, **kwargs) | Fetch Payment for Virtual Account Id
Args:
virtual_account_id :
Id for which Virtual Account objects has to be retrieved
Returns:
Payment dict for given Virtual Account Id | juraj-google-style |
def convert_file_size_to_int(size: Union[int, str]):
if isinstance(size, int):
return size
if size.upper().endswith('GIB'):
return int(size[:-3]) * 2 ** 30
if size.upper().endswith('MIB'):
return int(size[:-3]) * 2 ** 20
if size.upper().endswith('KIB'):
return int(size[:-... | Converts a size expressed as a string with digits and a unit (like `"5MB"`) to an integer (in bytes).
Args:
size (`int` or `str`): The size to convert. Will be directly returned if an `int`.
Example:
```py
>>> convert_file_size_to_int("1MiB")
1048576
``` | github-repos |
def _ProcessUnknownMessages(message, encoded_message):
if (not encoded_message):
return message
decoded_message = json.loads(six.ensure_str(encoded_message))
message_fields = ([x.name for x in message.all_fields()] + list(message.all_unrecognized_fields()))
missing_fields = [x for x in decoded_m... | Store any remaining unknown fields as strings.
ProtoRPC currently ignores unknown values for which no type can be
determined (and logs a "No variant found" message). For the purposes
of reserializing, this is quite harmful (since it throws away
information). Here we simply add those as unknown fields of type
string (s... | codesearchnet |
def GetColocationGroups(self):
return tf_item.TF_GetColocationGroups(self.tf_item) | Return a list of hard colocation constraints.
All the nodes in a colocation tuple must be placed on the same device for
the model to work.
Returns:
A list of colocation tuples. | github-repos |
def market_normal(self, session, after_open, before_close) -> Session:
logger = logs.get_logger(self.market_normal)
if session not in self.exch: return SessNA
ss = self.exch[session]
s_time = shift_time(ss[0], int(after_open) + 1)
e_time = shift_time(ss[-1], -int(befor... | Time intervals between market open and close
Args:
session: [allday, day, am, pm, night]
after_open: mins after open
before_close: mins before close
Returns:
Session of start_time and end_time | juraj-google-style |
def Map(fn, *args, **kwargs):
if not callable(fn):
raise TypeError('Map can be used only with callable objects. Received %r instead.' % fn)
from apache_beam.transforms.util import fn_takes_side_inputs
if fn_takes_side_inputs(fn):
wrapper = lambda x, *args, **kwargs: [fn(x, *args, **kwargs)]
... | :func:`Map` is like :func:`FlatMap` except its callable returns only a
single element.
Args:
fn (callable): a callable object.
*args: positional arguments passed to the transform callable.
**kwargs: keyword arguments passed to the transform callable.
Returns:
~apache_beam.pvalue.PCollection:
A :class:`~apache_beam.pv... | github-repos |
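Typical usage inside a pipeline, where each element is transformed one-to-one (a minimal runnable sketch using the default DirectRunner):

```python
import apache_beam as beam

with beam.Pipeline() as pipeline:
    (
        pipeline
        | beam.Create([1, 2, 3])
        | beam.Map(lambda x: x * 2)  # exactly one output element per input element
        | beam.Map(print)            # 2, 4, 6
    )
```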
def for_new_graph(*args, **kwargs):
graph = tf.Graph()
with graph.as_default():
return for_default_graph(*args, **kwargs) | Creates a Bookkeeper for a new graph.
You must use `m.g.as_default()` to put the graph in scope:
m = Bookkeeper.for_new_graph()
with m.g.as_default():
...
Args:
*args: Arguments to pass into Bookkeeper's constructor.
**kwargs: Arguments to pass into Bookkeeper's constructor.
Returns:
A new Bookkeeper. | codesearchnet |
def MROMerge(input_seqs):
seqs = [Dedup(s) for s in input_seqs]
try:
return MergeSequences(seqs)
except ValueError as e:
raise MROError(input_seqs) from e | Merge a sequence of MROs into a single resulting MRO.
Args:
input_seqs: A sequence of MRO sequences.
Returns:
A single resulting MRO.
Raises:
MROError: If we discovered an illegal inheritance. | github-repos |
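The merge step is presumably a C3-style linearization (the "illegal inheritance" case corresponds to no valid head existing). A minimal sketch of that merge, assuming already-deduplicated input sequences:

```python
def c3_merge(seqs):
    """Merge MRO sequences: repeatedly take a head that appears in no tail."""
    seqs = [list(s) for s in seqs if s]
    result = []
    while seqs:
        for seq in seqs:
            head = seq[0]
            if not any(head in other[1:] for other in seqs):
                break  # head is a valid next entry
        else:
            raise ValueError("inconsistent hierarchy: no C3 merge exists")
        result.append(head)
        # head occurs in no tail, so this only strips it from head positions
        seqs = [[c for c in s if c != head] for s in seqs]
        seqs = [s for s in seqs if s]
    return result

c3_merge([["A", "O"], ["B", "O"], ["A", "B"]])  # ['A', 'B', 'O']
```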
def to_yaml(self, **kwargs):
raise RuntimeError('Method `model.to_yaml()` has been removed due to security risk of arbitrary code execution. Please use `model.to_json()` instead.') | Returns a yaml string containing the network configuration.
Note: Since TF 2.6, this method is no longer supported and will raise a
RuntimeError.
To load a network from a yaml save file, use
`keras.models.model_from_yaml(yaml_string, custom_objects={})`.
`custom_objects` should be a dictionary mapping
the names of c... | github-repos |
def unravel_sections(section_data):
sections = []
for (type, subsection_list) in section_data.items():
for section in subsection_list:
section['sectionType'] = type
sections.append(section)
return sections | Unravels section type dictionary into flat list of sections with
section type set as an attribute.
Args:
section_data(dict): Data return from py:method::get_sections
Returns:
list: Flat list of sections with ``sectionType`` set to
type (i.e. recitation, lecture, etc) | codesearchnet |
def create_all(cls, list_of_kwargs):
try:
return cls.add_all([
cls.new(**kwargs) if kwargs is not None else None for kwargs in list_of_kwargs])
except:
cls.session.rollback()
raise | Batch method for creating a list of instances
Args:
list_of_kwargs(list of dicts): A list of dicts where
each dict denotes the keyword args that you would pass
to the create method separately
Examples:
>>> Customer.create_all([
... {'name': 'Vicky', 'age': 34, 'user_id': 1},
... {'name': 'Ron', 'age': 40, 'user_... | juraj-google-style |
def create_index(self, model, waiting_models):
bucket_name = model._get_bucket_name()
bucket_type = client.bucket_type(settings.DEFAULT_BUCKET_TYPE)
index_name = ('%s_%s' % (settings.DEFAULT_BUCKET_TYPE, bucket_name))
bucket = bucket_type.bucket(bucket_name)
try:
client.get_search_index(inde... | Creates search indexes.
Args:
model: model to execute
waiting_models: if Riak can't return a response immediately, the model is added to a queue.
After the first execution pass, the method is run again with the waiting models,
ensuring that all given models are processed properly.
Returns: | codesearchnet |
def create_nsg_rule(access_token, subscription_id, resource_group, nsg_name, nsg_rule_name, description, protocol='Tcp', source_range='*', destination_range='*', source_prefix='*', destination_prefix='*', access='Allow', priority=100, direction='Inbound'):
endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', s... | Create network security group rule.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
nsg_name (str): Name of the Network Security Group.
nsg_rule_name (str): Name of the new rule.
description (str): Description.... | codesearchnet |
def register(self, token, regexp):
self._tokens.append((token, re.compile(regexp))) | Register a token.
Args:
token (Token): the token class to register
regexp (str): the regexp for that token | codesearchnet |
def get_pattern_additional_cycles(self, patternnumber):
_checkPatternNumber(patternnumber)
address = _calculateRegisterAddress('cycles', patternnumber)
return self.read_register(address) | Get the number of additional cycles for a given pattern.
Args:
patternnumber (integer): 0-7
Returns:
The number of additional cycles (int). | juraj-google-style |
def routerify(obj):
router = Router()
for info in get_routing_attributes(obj):
router.add_route(*info)
obj.__growler_router = router
return router | Scan through attributes of object parameter looking for any which
match a route signature.
A router will be created and added to the object with parameter.
Args:
obj (object): The object (with attributes) from which to
setup a router
Returns:
Router: The router created from attributes in the object. | juraj-google-style |
def _tf_predict(model_dir, input_csvlines):
with tf.Graph().as_default(), tf.Session() as sess:
(input_alias_map, output_alias_map) = _tf_load_model(sess, model_dir)
csv_tensor_name = list(input_alias_map.values())[0]
results = sess.run(fetches=output_alias_map, feed_dict={csv_tensor_name: i... | Prediction with a tf savedmodel.
Args:
model_dir: directory that contains a saved model
input_csvlines: list of csv strings
Returns:
Dict in the form tensor_name:prediction_list. Note that the value is always
a list, even if there was only 1 row in input_csvlines. | codesearchnet |
def prune(self, cutoff: int = 2):
for node_pair in tqdm(list(permutations(self.nodes(), 2))):
paths = [
list(pairwise(path))
for path in nx.all_simple_paths(self, *node_pair, cutoff)
]
if len(paths) > 1:
for p... | Prunes the CAG by removing redundant paths. If there are multiple
(directed) paths between two nodes, this function removes all but the
longest paths. Subsequently, it restricts the graph to the largest
connected component.
Args:
cutoff: The maximum path length to consider for finding redundant
paths. Higher values of... | juraj-google-style |
def str_to_mac(mac_string):
sp = mac_string.split(':')
mac_string = ''.join(sp)
return binascii.unhexlify(mac_string) | Convert a readable string to a MAC address
Args:
mac_string (str): a readable string (e.g. '01:02:03:04:05:06')
Returns:
str: a MAC address in hex form | codesearchnet |
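The inverse direction is a one-liner as well; a hypothetical companion helper for round-tripping:

```python
import binascii

def mac_to_str(mac_bytes: bytes) -> str:
    """Render packed MAC bytes back into colon-separated hex."""
    return ":".join("{:02x}".format(b) for b in mac_bytes)

packed = binascii.unhexlify("010203040506")
mac_to_str(packed)  # '01:02:03:04:05:06'
```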
def add(self, *dic):
dicList = list(flatten(dic))
for d in dicList:
di = []
for k in d:
di.append(Pair(k, IntegerSingle(d[k])))
dictSingle = DictSingle(di)
self._add([dictSingle], self.l) | add a config to StartCalendarInterval.
Args:
*dic (dict): dictionary with format {'Day': 12, 'Hour': 34} Available keys are Month, Day, Weekday, Hour, Minute. *Note the uppercase.* You can use gen(), genMix() to generate a complex config dictionary. | codesearchnet
def hill_climb(nsteps, start_node, get_next_node):
outputs = []
best_score = (- float('inf'))
for step in range(nsteps):
(next_node, score, output) = get_next_node(copy.deepcopy(start_node))
if (score > best_score):
start_node = copy.deepcopy(next_node)
best_score = s... | Modular hill climbing algorithm.
Example:
>>> def get_next_node(node):
... a, b = random.sample(range(len(node)), 2)
... node[a], node[b] = node[b], node[a]
... plaintext = decrypt(node, ciphertext)
... score = lantern.score(plaintext, *fitness_functions)
... return node, score, Decryption(plaintex... | codesearchnet |
def get_input(self, name, ds):
columns = self.inputs.get(name)
df = ds.get_dataframe()
for column in columns:
if column not in df.columns:
df[column] = self.defaults.get(column)
return df[columns] | Retrieves the content of an input given a DataSource. The input acts like a filter over the outputs of the DataSource.
Args:
name (str): The name of the input.
ds (openflow.DataSource): The DataSource that will feed the data.
Returns:
pandas.DataFrame: The content of the input. | juraj-google-style |
def print_projects(projects=None):
grouped_by = {}
if not projects:
print(
"Your selection didn't include any projects for this experiment.")
return
for name in projects:
prj = projects[name]
if prj.GROUP not in grouped_by:
grouped_by[prj.GROUP]... | Print a list of projects registered for that experiment.
Args:
projects: Mapping of project names to the projects to print. | juraj-google-style
def _is_of_type(self, path, st_flag, follow_symlinks=True):
path = make_string_path(path)
if path is None:
raise TypeError
try:
obj = self.resolve(path, follow_symlinks)
if obj:
self.raise_for_filepath_ending_with_separator(
... | Helper function to implement isdir(), islink(), etc.
See the stat(2) man page for valid stat.S_I* flag values
Args:
path: Path to file to stat and test
st_flag: The stat.S_I* flag checked for the file's st_mode
Returns:
(boolean) `True` if the st_flag is set in path's st_mode.
Raises:
TypeError: if path is None | juraj-google-style |
def apply_fixup_array(bin_view, fx_offset, fx_count, entry_size):
fx_array = bin_view[fx_offset:fx_offset+(2 * fx_count)]
fx_len = fx_count - 1
sector_size = int(entry_size / fx_len)
index = 1
position = (sector_size * index) - 2
while (position <= entry_size):
if bin_view... | This function reads the fixup array and applies the correct values
to the underlying binary stream. This function changes the bin_view
in memory.
Args:
bin_view (memoryview of bytearray) - The binary stream
fx_offset (int) - Offset to the fixup array
fx_count (int) - Number of elements in the fixup array
entry_size (int... | juraj-google-style |
def build_exon(exon_info, build='37'):
try:
chrom = exon_info['chrom']
except KeyError:
raise KeyError('Exons has to have a chromosome')
try:
start = int(exon_info['start'])
except KeyError:
raise KeyError('Exon has to have a start')
except TypeError:
raise Ty... | Build a Exon object object
Args:
exon_info(dict): Exon information
Returns:
exon_obj(Exon)
"exon_id": str, # str(chrom-start-end)
"chrom": str,
"start": int,
"end": int,
"transcript": str, # ENST ID
"hgnc_id": int, # HGNC_id
"rank": int, # Order of exon in transcript
"build": str, # Genome build | codesearchnet |
def to_representation(self, instance):
updated_course = copy.deepcopy(instance)
enterprise_customer_catalog = self.context['enterprise_customer_catalog']
updated_course['enrollment_url'] = enterprise_customer_catalog.get_course_enrollment_url(
updated_course['key']
)... | Return the updated course data dictionary.
Arguments:
instance (dict): The course data.
Returns:
dict: The updated course data. | juraj-google-style |
def FormatType(self, level_name, class_problist):
class_problist.sort()
output = []
for classname, problist in class_problist:
output.append('<h4 class="issueHeader"><a name="%s%s">%s</a></h4><ul>\n' %
(level_name, classname, UnCamelCase(classname)))
for e in problist.pr... | Write the HTML dumping all problems of one type.
Args:
level_name: string such as "Error" or "Warning"
class_problist: sequence of tuples (class name,
BoundedProblemList object)
Returns:
HTML in a string | juraj-google-style |
def restore(self, restored_tensors, unused_restored_shapes):
with ops.control_dependencies([self._create_op]):
return gen_boosted_trees_ops.boosted_trees_deserialize_ensemble(self.resource_handle, stamp_token=restored_tensors[0], tree_ensemble_serialized=restored_tensors[1]) | Restores the associated tree ensemble from 'restored_tensors'.
Args:
restored_tensors: the tensors that were loaded from a checkpoint.
unused_restored_shapes: the shapes this object should conform to after
restore. Not meaningful for trees.
Returns:
The operation that restores the state of the tree ensemble variable. | github-repos |
def evaluate_partition(self, direction, mechanism, purview, partition, repertoire=None):
if (repertoire is None):
repertoire = self.repertoire(direction, mechanism, purview)
partitioned_repertoire = self.partitioned_repertoire(direction, partition)
phi = repertoire_distance(direction, repertoire, pa... | Return the |small_phi| of a mechanism over a purview for the given
partition.
Args:
direction (Direction): |CAUSE| or |EFFECT|.
mechanism (tuple[int]): The nodes in the mechanism.
purview (tuple[int]): The nodes in the purview.
partition (Bipartition): The partition to evaluate.
Keyword Args:
repertoire (np.array): T... | codesearchnet |
def merge_leading_dims(array_or_tensor, n_dims=2):
tensor = tf.convert_to_tensor(array_or_tensor)
tensor_shape_static = tensor.get_shape()
if (tensor_shape_static.dims is None):
raise ValueError("Can't merge leading dimensions of tensor of unknown rank.")
tensor_shape_list = tensor_shape_static.... | Merge the first dimensions of a tensor.
Args:
array_or_tensor: Tensor to have its first dimensions merged. Can also
be an array or numerical value, which will be converted to a tensor
for batch application, if needed.
n_dims: Number of dimensions to merge.
Returns:
Either the input value converted to a Tensor, with t... | codesearchnet |
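The shape arithmetic is the same as a NumPy reshape that collapses the first `n_dims` axes into one; a small sketch:

```python
import numpy as np

x = np.zeros((4, 5, 6, 7))
n_dims = 2

merged = x.reshape((-1,) + x.shape[n_dims:])
merged.shape  # (20, 6, 7) — first two axes folded into one
```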
def VerifyStructure(self, parser_mediator, lines):
match_generator = self._VERIFICATION_GRAMMAR.scanString(lines, maxMatches=1)
return bool(list(match_generator)) | Verifies that this is a bash history file.
Args:
parser_mediator (ParserMediator): mediates interactions between
parsers and other components, such as storage and dfvfs.
lines (str): one or more lines from the text file.
Returns:
bool: True if this is the correct parser, False otherwise. | juraj-google-style |
def add(self, *value):
flattenedValueList = list(flatten(value))
return self._add(flattenedValueList, self.value) | convert value and add to self.value
Subclasses must override this method.
Subclasses are responsible for creating whatever single instances they need from ``add(*value)`` and for calling ``_add()`` to add them to ``self.value``
Args:
*value: the value to be added | juraj-google-style |