| code (string, 20 to 4.93k chars) | docstring (string, 33 to 1.27k chars) | source (3 classes) |
|---|---|---|
def delete_meta_features(self, path):
if os.path.exists(self.meta_features_path(path)):
os.remove(self.meta_features_path(path))
|
Deletes the meta-features of a base learner if they exist
Args:
path (str): Absolute/local path of xcessiv folder
|
juraj-google-style
|
def register(self, name, namespace):
if (name in self._NAMESPACES):
raise ValueError('Namespace {0} already exists.'.format(name))
if (not isinstance(namespace, ns.Namespace)):
raise TypeError('Namespaces must be of type Namespace.')
self._NAMESPACES[name] = namespace
|
Register a new namespace with the Configuration object.
Args:
name (str): The name of the section/namespace.
namespace (namespace.Namespace): The Namespace object to store.
Raises:
TypeError: If the namespace is not a Namespace object.
ValueError: If the namespace is already registered.
|
codesearchnet
|
def extract_objects_from_source(self, text, type_filter=None):
objects = parse_verilog(text)
if type_filter:
objects = [o for o in objects if isinstance(o, type_filter)]
return objects
|
Extract object declarations from a text buffer
Args:
text (str): Source code to parse
type_filter (class, optional): Object class to filter results
Returns:
List of parsed objects.
|
juraj-google-style
|
def _read_callback(self, data=None):
try:
if (data is not None):
self.__reader.feed(data)
while True:
reply = self.__reader.gets()
if (reply is not False):
try:
callback = self.__callback_queue.popleft()
callback(reply)
except IndexError:
self._reply_list.append(reply)
self._condition.notify_all()
else:
break
except hiredis.ProtocolError:
LOG.warning('corrupted stream => disconnect')
self.disconnect()
|
Callback called when some data is read on the socket.
The buffer is given to the hiredis parser. If a reply is complete,
we put the decoded reply on the reply queue.
Args:
data (str): string (buffer) read on the socket.
|
codesearchnet
|
def encode(self):
match_id = self.match_id
if (match_id is None):
match_id = ((1 << 11) - 1)
return (((self.match_type << 12) | DataStreamSelector.SpecifierEncodings[self.match_spec]) | match_id)
|
Encode this stream as a packed 16-bit unsigned integer.
Returns:
int: The packed encoded stream
|
codesearchnet
|
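A minimal sketch of the 16-bit layout produced by `encode` above; the match-type code and specifier encoding below are assumptions for illustration, not the library's actual enum values.
match_type = 2                # hypothetical 4-bit type code, occupies bits 12-15
specifier = 1 << 11           # hypothetical specifier encoding inside the low 12 bits
match_id = None               # None means "match every id"
if match_id is None:
    match_id = (1 << 11) - 1  # 0x7FF wildcard, as in the method above
encoded = (match_type << 12) | specifier | match_id
print(hex(encoded))           # 0x2fff with these assumed values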
def forward(self, outputs, targets):
outputs_without_aux = {k: v for k, v in outputs.items() if k not in ('auxiliary_outputs', 'enc_outputs')}
if self.assign_second_stage:
indices = self.stg2_assigner(outputs_without_aux, targets)
else:
indices = self.matcher(outputs_without_aux, targets)
num_boxes = sum((len(t['class_labels']) for t in targets))
num_boxes = torch.as_tensor([num_boxes], dtype=torch.float, device=next(iter(outputs.values())).device)
world_size = 1
if is_accelerate_available():
if PartialState._shared_state != {}:
num_boxes = reduce(num_boxes)
world_size = PartialState().num_processes
num_boxes = torch.clamp(num_boxes / world_size, min=1).item()
losses = {}
for loss in self.losses:
losses.update(self.get_loss(loss, outputs, targets, indices, num_boxes))
if 'auxiliary_outputs' in outputs:
for i, auxiliary_outputs in enumerate(outputs['auxiliary_outputs']):
if not self.assign_second_stage:
indices = self.matcher(auxiliary_outputs, targets)
for loss in self.losses:
l_dict = self.get_loss(loss, auxiliary_outputs, targets, indices, num_boxes)
l_dict = {k + f'_{i}': v for k, v in l_dict.items()}
losses.update(l_dict)
if 'enc_outputs' in outputs:
enc_outputs = outputs['enc_outputs']
bin_targets = copy.deepcopy(targets)
for bt in bin_targets:
bt['class_labels'] = torch.zeros_like(bt['class_labels'])
if self.assign_first_stage:
indices = self.stg1_assigner(enc_outputs, bin_targets)
else:
indices = self.matcher(enc_outputs, bin_targets)
for loss in self.losses:
l_dict = self.get_loss(loss, enc_outputs, bin_targets, indices, num_boxes)
l_dict = {k + '_enc': v for k, v in l_dict.items()}
losses.update(l_dict)
return losses
|
This performs the loss computation.
Args:
outputs (`dict`, *optional*):
Dictionary of tensors, see the output specification of the model for the format.
targets (`List[dict]`, *optional*):
List of dicts, such that `len(targets) == batch_size`. The expected keys in each dict depend on the
losses applied, see each loss' doc.
|
github-repos
|
def find_stream(cls, fileobj, max_bytes):
r = BitReader(fileobj)
stream = cls(r)
if stream.sync(max_bytes):
stream.offset = (r.get_position() - 12)
return stream
|
Returns a possibly valid _ADTSStream or None.
Args:
fileobj (file-like): object to read the stream from
max_bytes (int): maximum bytes to read
|
juraj-google-style
|
def block_start(self, previous_block):
previous_header_bytes = previous_block.header
previous_header = BlockHeader()
previous_header.ParseFromString(previous_header_bytes)
block_info = BlockInfo(
block_num=previous_header.block_num,
previous_block_id=previous_header.previous_block_id,
signer_public_key=previous_header.signer_public_key,
header_signature=previous_block.header_signature,
timestamp=int(time.time()))
return [self.create_batch(block_info)]
|
Returns an ordered list of batches to inject at the beginning of the
block. Can also return None if no batches should be injected.
Args:
previous_block (Block): The previous block.
Returns:
A list of batches to inject.
|
juraj-google-style
|
def classify_tables_by_dependency_type(
metadata: MetaData,
extra_dependencies: List[TableDependency] = None,
sort: bool = True) \
-> List[TableDependencyClassification]:
tables = list(metadata.tables.values())
all_deps = get_all_dependencies(metadata, extra_dependencies)
tdcmap = {}
for table in tables:
parents = [td.parent_table for td in all_deps
if td.child_table == table]
children = [td.child_table for td in all_deps
if td.parent_table == table]
tdcmap[table] = TableDependencyClassification(
table, parents=parents, children=children
)
def parents_contain(start: Table,
probe: Table) -> Tuple[bool, List[Table]]:
tdc_ = tdcmap[start]
if probe in tdc_.parents:
return True, [start, probe]
for parent in tdc_.parents:
contains_, chain_ = parents_contain(start=parent, probe=probe)
if contains_:
return True, [start] + chain_
return False, []
def children_contain(start: Table,
probe: Table) -> Tuple[bool, List[Table]]:
tdc_ = tdcmap[start]
if probe in tdc_.children:
return True, [start, probe]
for child in tdc_.children:
contains_, chain_ = children_contain(start=child, probe=probe)
if contains_:
return True, [start] + chain_
return False, []
for table in tables:
tdc = tdcmap[table]
contains, chain = parents_contain(start=table, probe=table)
if contains:
tdc.set_circular(contains, chain)
else:
contains, chain = children_contain(start=table, probe=table)
if contains:
tdc.set_circular(contains, chain)
else:
tdc.set_circular(False)
classifications = list(tdcmap.values())
if sort:
classifications.sort(key=lambda c: c.tablename)
return classifications
|
Inspects a metadata object (optionally adding other specified dependencies)
and returns a list of objects describing their dependencies.
Args:
metadata: the :class:`MetaData` to inspect
extra_dependencies: additional dependencies
sort: sort the results by table name?
Returns:
list of :class:`TableDependencyClassification` objects, one for each
table
|
juraj-google-style
|
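A usage sketch for the classifier above, assuming tables already declared or reflected into a SQLAlchemy MetaData (the database URL is hypothetical):
from sqlalchemy import MetaData, create_engine

engine = create_engine('sqlite:///example.db')
metadata = MetaData()
metadata.reflect(bind=engine)  # load table definitions from the database
for tdc in classify_tables_by_dependency_type(metadata):
    # each classification records the table's parents, children and any circular
    # dependency chain found by the recursive probes above (attribute names
    # assumed from the constructor arguments)
    print(tdc.tablename, tdc.parents, tdc.children)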
def dataframe_from_excel(path, sheetname=0, header=0, skiprows=None):
sheetname = sheetname or 0
if isinstance(sheetname, (basestring, float)):
try:
sheetname = int(sheetname)
except (TypeError, ValueError, OverflowError):
sheetname = str(sheetname)
wb = xlrd.open_workbook(path)
return pd.io.excel.read_excel(wb, sheetname=sheetname, header=header, skiprows=skiprows, engine='xlrd')
|
Thin wrapper for pandas.io.excel.read_excel() that accepts a file path and sheet index/name
Arguments:
path (str): path to the Excel file to read
sheetname (int or str): index or name of the worksheet to read (defaults to the first sheet)
header (int): row number to use as the column names
skiprows (int or list): rows to skip at the top of the sheet
Returns:
pandas.DataFrame: the parsed worksheet
|
juraj-google-style
|
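A brief usage sketch for the wrapper above (the file path is hypothetical); the sheet can be selected by index or by name:
df_first = dataframe_from_excel('report.xlsx')                   # first sheet
df_named = dataframe_from_excel('report.xlsx', sheetname='Q3')   # by name
df_third = dataframe_from_excel('report.xlsx', sheetname=2)      # by index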
def db_wb004(self, value=None):
if value is not None:
try:
value = float(value)
except ValueError:
raise ValueError('value {} need to be of type float '
'for field `db_wb004`'.format(value))
self._db_wb004 = value
|
Corresponds to IDD Field `db_wb004`
Mean coincident dry-bulb temperature corresponding to the
wet-bulb temperature at 0.4% annual cumulative frequency of occurrence
Args:
value (float): value for IDD Field `db_wb004`
Unit: C
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
juraj-google-style
|
def call(self, input_ids=None, inputs_embeds=None, training=False):
assert not (input_ids is None and inputs_embeds is None)
assert not (input_ids is not None and inputs_embeds is not None)
if input_ids is not None:
check_embeddings_within_bounds(input_ids, self.config.vocab_size)
inputs_embeds = tf.gather(self.weight, input_ids)
final_embeddings = self.LayerNorm(inputs=inputs_embeds)
final_embeddings = self.dropout(inputs=final_embeddings, training=training)
return final_embeddings
|
Applies embedding based on the input tensors.
Returns:
final_embeddings (`tf.Tensor`): output embedding tensor.
|
github-repos
|
def ConsumeByteString(self):
the_list = [self._ConsumeSingleByteString()]
while (self.token and (self.token[0] in _QUOTES)):
the_list.append(self._ConsumeSingleByteString())
return b''.join(the_list)
|
Consumes a byte array value.
Returns:
The array parsed (as a string).
Raises:
ParseError: If a byte array value couldn't be consumed.
|
codesearchnet
|
def do_IDENT(self, service_name: str, source: list, *args, **kwargs) -> None:
self.logger.info(' IDENT %s as %s', service_name, source)
self.messaging._address_map[service_name] = source
|
Record the binary (zmq) source address for a named service.
Args:
service_name: human readable name for service
source: zmq representation for the socket source
|
codesearchnet
|
def _PreparedData(self, order_by=()):
if (not order_by):
return self.__data
sorted_data = self.__data[:]
if (isinstance(order_by, six.string_types) or (isinstance(order_by, tuple) and (len(order_by) == 2) and (order_by[1].lower() in ['asc', 'desc']))):
order_by = (order_by,)
for key in reversed(order_by):
if isinstance(key, six.string_types):
sorted_data.sort(key=(lambda x: x[0].get(key)))
elif (isinstance(key, (list, tuple)) and (len(key) == 2) and (key[1].lower() in ('asc', 'desc'))):
key_func = (lambda x: x[0].get(key[0]))
sorted_data.sort(key=key_func, reverse=(key[1].lower() != 'asc'))
else:
raise DataTableException("Expected tuple with second value: 'asc' or 'desc'")
return sorted_data
|
Prepares the data for enumeration - sorting it by order_by.
Args:
order_by: Optional. Specifies the name of the column(s) to sort by, and
(optionally) which direction to sort in. Default sort direction
is asc. Following formats are accepted:
"string_col_name" -- For a single key in default (asc) order.
("string_col_name", "asc|desc") -- For a single key.
[("col_1","asc|desc"), ("col_2","asc|desc")] -- For more than
one column, an array of tuples of (col_name, "asc|desc").
Returns:
The data sorted by the keys given.
Raises:
DataTableException: Sort direction not in 'asc' or 'desc'
|
codesearchnet
|
def compute_jaccard_index(x_set, y_set):
if not x_set or not y_set:
return 0.0
intersection_cardinal = len(x_set & y_set)
union_cardinal = len(x_set | y_set)
return intersection_cardinal / float(union_cardinal)
|
Return the Jaccard similarity coefficient of 2 given sets.
Args:
x_set (set): first set.
y_set (set): second set.
Returns:
float: Jaccard similarity coefficient.
|
juraj-google-style
|
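A worked example: the two sets below share 2 of 4 distinct elements, so the coefficient is 2/4 = 0.5.
x = {'a', 'b', 'c'}
y = {'b', 'c', 'd'}
assert compute_jaccard_index(x, y) == 0.5
assert compute_jaccard_index(set(), y) == 0.0  # an empty set short-circuits to 0.0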
def set(self, key, samples, sampling_rate):
if not np.issubdtype(samples.dtype, np.floating):
raise ValueError('Samples are required as np.float32!')
if len(samples.shape) > 1:
raise ValueError('Only single channel supported!')
self.raise_error_if_not_open()
if key in self._file:
del self._file[key]
samples = (samples * MAX_INT16_VALUE).astype(np.int16)
dset = self._file.create_dataset(key, data=samples)
dset.attrs[SAMPLING_RATE_ATTR] = sampling_rate
|
Set the samples and sampling-rate for the given key.
Existing data will be overwritten.
The samples have to have ``np.float32`` datatype and values in
the range of -1.0 and 1.0.
Args:
key (str): A key to store the data for.
samples (numpy.ndarray): 1-D array of audio samples (np.float32).
sampling_rate (int): The sampling-rate of the audio samples.
Note:
The container has to be opened in advance.
|
juraj-google-style
|
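A usage sketch for `set` above, assuming an already-opened container instance (the variable name `container` and the key are hypothetical):
import numpy as np

samples = np.sin(np.linspace(0.0, 2.0 * np.pi, 16000)).astype(np.float32)
container.set('utterance-1', samples, 16000)
# internally the float samples in [-1.0, 1.0] are rescaled to int16 for storage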
def update(self, task_name, result):
with open(self.filepath, 'rb') as f:
existing_results = pickle.load(f)
if (task_name not in self.tasks):
self._add_task(task_name)
existing_results['tasks'].append(task_name)
existing_results['results'].append([])
task_name_idx = existing_results['tasks'].index(task_name)
results = existing_results['results'][task_name_idx]
results.append(result)
with open(self.filepath, 'wb') as f:
pickle.dump(existing_results, f)
|
Update the results file with new information.
Args:
task_name (str): Name of the currently running task. A previously unseen
``task_name`` will create a new entry in both :attr:`tasks`
and :attr:`results`.
result: This will be appended to the list in :attr:`results` which
corresponds to the ``task_name`` in ``task_name``:attr:`tasks`.
|
codesearchnet
|
def _get_index_points(self, index_points=None):
if ((self._index_points is None) and (index_points is None)):
raise ValueError("This GaussianProcess instance was not instantiated with a value for index_points. One must therefore be provided when calling sample, log_prob, and other such methods. In particular, one can't compute KL divergences to/from an instance of `GaussianProccess` with unspecified `index_points` directly. Instead, use the `get_marginal_distribution` function, which takes `index_points` as an argument and returns a `Normal` or `MultivariateNormalLinearOperator` instance, whose KL can be computed.")
return (index_points if (index_points is not None) else self._index_points)
|
Return `index_points` if not None, else `self._index_points`.
Args:
index_points: if given, this is what is returned; else,
`self._index_points`
Returns:
index_points: the given arg, if not None, else the class member
`self._index_points`.
Raises:
ValueError: if `index_points` and `self._index_points` are both `None`.
|
codesearchnet
|
def from_file(cls, filename):
filename = str(filename)
from pymatgen.io.gaussian import GaussianOutput
with zopen(filename) as f:
contents = f.read()
fname = filename.lower()
if fnmatch(fname, "*.xyz*"):
return cls.from_str(contents, fmt="xyz")
elif any([fnmatch(fname.lower(), "*.{}*".format(r))
for r in ["gjf", "g03", "g09", "com", "inp"]]):
return cls.from_str(contents, fmt="g09")
elif any([fnmatch(fname.lower(), "*.{}*".format(r))
for r in ["out", "lis", "log"]]):
return GaussianOutput(filename).final_structure
elif fnmatch(fname, "*.json*") or fnmatch(fname, "*.mson*"):
return cls.from_str(contents, fmt="json")
elif fnmatch(fname, "*.yaml*"):
return cls.from_str(contents, fmt="yaml")
else:
from pymatgen.io.babel import BabelMolAdaptor
m = re.search(r"\.(pdb|mol|mdl|sdf|sd|ml2|sy2|mol2|cml|mrv)",
filename.lower())
if m:
new = BabelMolAdaptor.from_file(filename,
m.group(1)).pymatgen_mol
new.__class__ = cls
return new
raise ValueError("Unrecognized file extension!")
|
Reads a molecule from a file. Supported formats include xyz,
Gaussian input (gjf|g03|g09|com|inp), Gaussian output (out|lis|log),
and pymatgen's JSON serialized molecules. Using openbabel,
many more extensions are supported, but this requires openbabel to be
installed.
Args:
filename (str): The filename to read from.
Returns:
Molecule
|
juraj-google-style
|
def from_string(string):
lines = list(clean_lines(string.splitlines()))
params = {}
for line in lines:
for sline in line.split(';'):
m = re.match('(\\w+)\\s*=\\s*(.*)', sline.strip())
if m:
key = m.group(1).strip()
val = m.group(2).strip()
val = Incar.proc_val(key, val)
params[key] = val
return Incar(params)
|
Reads an Incar object from a string.
Args:
string (str): Incar string
Returns:
Incar object
|
codesearchnet
|
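A brief usage sketch: semicolons and newlines both separate key = value pairs, and each value passes through `Incar.proc_val` (from the surrounding pymatgen context) so numeric strings come back typed.
incar = from_string("ALGO = Fast; ISMEAR = 0\nSIGMA = 0.05")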
def get_branch_length(self, age=None, pos=0):
if age is None:
age = self.age
return self.length * pow(self.branches[pos][0], age)
|
Get the length of a branch.
This method calculates the length of a branch at a specific age.
The formula used is: length * scale^age.
Args:
age (int): The age for which you want to know the branch length.
pos (int): Index of the branch whose scale factor is used.
Returns:
float: The length of the branch
juraj-google-style
|
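A worked instance of the formula length * scale^age, with assumed values:
length = 10.0  # hypothetical base length
scale = 0.75   # hypothetical self.branches[pos][0]
age = 3
print(length * pow(scale, age))  # 10 * 0.75**3 = 4.21875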
def generate_packer_filename(provider, region, builder):
filename = '{0}_{1}_{2}.json'.format(provider, region, builder)
return filename
|
Generate a filename to be used by packer.
Args:
provider (str): Name of Spinnaker provider.
region (str): Name of provider region to use.
builder (str): Name of builder process type.
Returns:
str: Generated filename based on parameters.
|
juraj-google-style
|
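For example (argument values are illustrative):
generate_packer_filename('aws', 'us-east-1', 'ebs')
# -> 'aws_us-east-1_ebs.json'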
def to_string(self, verbose=0, title=None, **kwargs):
from pprint import pformat
s = pformat(self, **kwargs)
if (title is not None):
return '\n'.join([marquee(title, mark='='), s])
return s
|
String representation. kwargs are passed to `pprint.pformat`.
Args:
verbose: Verbosity level
title: Title string.
|
codesearchnet
|
def WaitForFlow(flow_urn, token=None, timeout=DEFAULT_TIMEOUT, max_sleep_time=1, min_sleep_time=0.2, dampening_multiplier=0.9):
start_time = time.time()
sleep_time = max_sleep_time
while True:
with aff4.FACTORY.Open(flow_urn, token=token, aff4_type=flow.GRRFlow) as flow_obj:
if ((time.time() - start_time) > timeout):
logging.warning('Timed out after waiting %ss for %s!', timeout, flow_obj)
raise IOError('Timed out trying to access client! Is it connected?')
if (not flow_obj.GetRunner().IsRunning()):
break
sleep_time = max((sleep_time * dampening_multiplier), min_sleep_time)
time.sleep(sleep_time)
logging.debug('Waiting for %s, sleeping for %.3fs', flow_obj, sleep_time)
|
Waits for a flow to finish, polling while we wait.
Args:
flow_urn: The urn of the flow to wait for.
token: The datastore access token.
timeout: How long to wait before giving up, usually because the client has
gone away.
max_sleep_time: The initial and longest time to wait in between polls.
min_sleep_time: The final and shortest time to wait in between polls.
dampening_multiplier: The current sleep time is multiplied by this number on
each iteration. Controls how fast the polling reaches its minimum sleep
time. You probably want this to be less than 1, unless you want to wait an
increasing amount of time in between flows.
Raises:
IOError: If we time out while waiting for the client.
|
codesearchnet
|
def get_chempot_range_map(self, elements, referenced=True, joggle=True):
all_chempots = []
pd = self
facets = pd.facets
for facet in facets:
chempots = self._get_facet_chempots(facet)
all_chempots.append([chempots[el] for el in pd.elements])
inds = [pd.elements.index(el) for el in elements]
el_energies = {el: 0.0 for el in elements}
if referenced:
el_energies = {el: pd.el_refs[el].energy_per_atom for el in elements}
chempot_ranges = collections.defaultdict(list)
vertices = [list(range(len(self.elements)))]
if (len(all_chempots) > len(self.elements)):
vertices = get_facets(all_chempots, joggle=joggle)
for ufacet in vertices:
for combi in itertools.combinations(ufacet, 2):
data1 = facets[combi[0]]
data2 = facets[combi[1]]
common_ent_ind = set(data1).intersection(set(data2))
if (len(common_ent_ind) == len(elements)):
common_entries = [pd.qhull_entries[i] for i in common_ent_ind]
data = np.array([[(all_chempots[i][j] - el_energies[pd.elements[j]]) for j in inds] for i in combi])
sim = Simplex(data)
for entry in common_entries:
chempot_ranges[entry].append(sim)
return chempot_ranges
|
Returns a chemical potential range map for each stable entry.
Args:
elements: Sequence of elements to be considered as independent
variables. E.g., if you want to show the stability ranges
of all Li-Co-O phases with respect to uLi and uO, you will supply
[Element("Li"), Element("O")]
referenced: If True, gives the results with a reference being the
energy of the elemental phase. If False, gives absolute values.
joggle (boolean): Whether to joggle the input to avoid precision
errors.
Returns:
Returns a dict of the form {entry: [simplices]}. The list of
simplices are the sides of the N-1 dim polytope bounding the
allowable chemical potential range of each entry.
|
codesearchnet
|
def word_to_vector_list(self, word, numeric=False, xsampa=False):
if xsampa:
word = self.xsampa.convert(word)
tensor = list(map(self.segment_to_vector, self.segs(word)))
if numeric:
return self.tensor_to_numeric(tensor)
else:
return tensor
|
Return a list of feature vectors, given a Unicode IPA word.
Args:
word (unicode): string in IPA
numeric (bool): if True, return features as numeric values instead
of strings
xsampa (bool): if True, treat `word` as X-SAMPA and convert it to IPA first
Returns:
list: a list of lists of '+'/'-'/'0' or 1/-1/0
|
juraj-google-style
|
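A usage sketch, assuming a panphon-style feature table instance `ft` exposing the method above:
vectors = ft.word_to_vector_list(u'pa', numeric=True)
# one vector per segment: a list of 1/-1/0 feature values for /p/, then /a/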
def _padding_to_conv_op_padding(padding):
if not isinstance(padding, tuple):
raise ValueError("padding should be a tuple.")
if all(p == SAME for p in padding):
return SAME
else:
return VALID
|
Whether to use SAME or VALID for the underlying convolution op.
Args:
padding: A tuple of members of ALLOWED_PADDINGS, e.g. as returned from
`_fill_and_verify_padding`.
Returns:
One of CONV_OP_ALLOWED_PADDINGS, the padding method to use for the
underlying convolution op.
Raises:
ValueError: If padding is not a tuple.
|
juraj-google-style
|
def __init__(self, channel):
self.Analyze = channel.unary_unary(
'/pulumirpc.Analyzer/Analyze',
request_serializer=analyzer__pb2.AnalyzeRequest.SerializeToString,
response_deserializer=analyzer__pb2.AnalyzeResponse.FromString,
)
self.GetPluginInfo = channel.unary_unary(
'/pulumirpc.Analyzer/GetPluginInfo',
request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
response_deserializer=plugin__pb2.PluginInfo.FromString,
)
|
Constructor.
Args:
channel: A grpc.Channel.
|
juraj-google-style
|
def _SparseSoftmaxGrad(op: ops.Operation, grad):
indices, shape = (op.inputs[0], op.inputs[2])
out_vals = op.outputs[0]
sp_output = sparse_tensor.SparseTensor(indices, out_vals, shape)
sp_grad = sparse_tensor.SparseTensor(indices, grad, shape)
sp_product = sparse_tensor.SparseTensor(indices, sp_output.values * sp_grad.values, shape)
sum_reduced = -sparse_ops.sparse_reduce_sum(sp_product, [-1], keepdims=True)
sp_sum = sparse_ops.sparse_dense_cwise_add(sp_grad, sum_reduced)
grad_x = sp_sum.values * sp_output.values
return [None, grad_x, None]
|
Gradients for SparseSoftmax.
The calculation is the same as SoftmaxGrad:
grad_x = grad_softmax * softmax - sum(grad_softmax * softmax) * softmax
where we now only operate on the non-zero values present in the SparseTensors.
Args:
op: the SparseSoftmax op.
grad: the upstream gradient w.r.t. the non-zero SparseSoftmax output values.
Returns:
Gradients w.r.t. the input (sp_indices, sp_values, sp_shape).
|
github-repos
|
def _isValidQuery(self, query, mode="phonefy"):
try:
validator = self.modes[mode].get("query_validator")
if validator:
try:
compiledRegexp = re.compile(
"^{expr}$".format(
expr=validator
)
)
return compiledRegexp.match(query)
except AttributeError as e:
return True
except AttributeError as e:
compiledRegexp = re.compile("^{r}$".format(r=self.validQuery[mode]))
return compiledRegexp.match(query)
|
Method to verify if a given query is processable by the platform.
The query is matched against the validator regular expression defined for the given mode.
Args:
-----
query: The query to be launched.
mode: To be chosen amongst mailfy, phonefy, usufy, searchfy.
Return:
-------
True | False
|
juraj-google-style
|
def build_filter_stack(stack, options):
if options.get('keyword_case'):
stack.preprocess.append(
filters.KeywordCaseFilter(options['keyword_case']))
if options.get('identifier_case'):
stack.preprocess.append(
filters.IdentifierCaseFilter(options['identifier_case']))
if options.get('truncate_strings'):
stack.preprocess.append(filters.TruncateStringFilter(
width=options['truncate_strings'], char=options['truncate_char']))
if options.get('use_space_around_operators', False):
stack.enable_grouping()
stack.stmtprocess.append(filters.SpacesAroundOperatorsFilter())
if options.get('strip_comments'):
stack.enable_grouping()
stack.stmtprocess.append(filters.StripCommentsFilter())
if options.get('strip_whitespace') or options.get('reindent'):
stack.enable_grouping()
stack.stmtprocess.append(filters.StripWhitespaceFilter())
if options.get('reindent'):
stack.enable_grouping()
stack.stmtprocess.append(
filters.ReindentFilter(
char=options['indent_char'],
width=options['indent_width'],
indent_after_first=options['indent_after_first'],
indent_columns=options['indent_columns'],
wrap_after=options['wrap_after'],
comma_first=options['comma_first']))
if options.get('reindent_aligned', False):
stack.enable_grouping()
stack.stmtprocess.append(
filters.AlignedIndentFilter(char=options['indent_char']))
if options.get('right_margin'):
stack.enable_grouping()
stack.stmtprocess.append(
filters.RightMarginFilter(width=options['right_margin']))
if options.get('output_format'):
frmt = options['output_format']
if frmt.lower() == 'php':
fltr = filters.OutputPHPFilter()
elif frmt.lower() == 'python':
fltr = filters.OutputPythonFilter()
else:
fltr = None
if fltr is not None:
stack.postprocess.append(fltr)
return stack
|
Setup and return a filter stack.
Args:
stack: :class:`~sqlparse.filters.FilterStack` instance
options: Dictionary with options validated by validate_options.
|
juraj-google-style
|
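These options are usually reached through sqlparse's public `format` helper, which validates them and drives a filter stack like the one built above; a brief sketch:
import sqlparse

sql = 'select id, name from users where active = 1'
print(sqlparse.format(sql, keyword_case='upper', reindent=True))
# prints roughly:
# SELECT id,
#        name
# FROM users
# WHERE active = 1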
def bounds(self, thr=0):
min_lat = float('inf')
min_lon = float('inf')
max_lat = (- float('inf'))
max_lon = (- float('inf'))
for segment in self.segments:
(milat, milon, malat, malon) = segment.bounds(thr=thr)
min_lat = min(milat, min_lat)
min_lon = min(milon, min_lon)
max_lat = max(malat, max_lat)
max_lon = max(malon, max_lon)
return (min_lat, min_lon, max_lat, max_lon)
|
Gets the bounds of this track, taken over all of its segments
Args:
thr (int): Threshold passed through to each segment's bounds method
Returns:
(float, float, float, float): Bounds, with min latitude, min longitude,
max latitude and max longitude
|
codesearchnet
|
def RunPlugins(cls, artifacts_registry, file_system, mount_point, knowledge_base):
searcher = file_system_searcher.FileSystemSearcher(file_system, mount_point)
cls.CollectFromFileSystem(artifacts_registry, knowledge_base, searcher, file_system)
environment_variables = None
if knowledge_base:
environment_variables = knowledge_base.GetEnvironmentVariables()
registry_file_reader = FileSystemWinRegistryFileReader(file_system, mount_point, environment_variables=environment_variables)
win_registry = dfwinreg_registry.WinRegistry(registry_file_reader=registry_file_reader)
searcher = registry_searcher.WinRegistrySearcher(win_registry)
cls.CollectFromWindowsRegistry(artifacts_registry, knowledge_base, searcher)
cls.CollectFromKnowledgeBase(knowledge_base)
if (not knowledge_base.HasUserAccounts()):
logger.warning('Unable to find any user accounts on the system.')
|
Runs the preprocessing plugins.
Args:
artifacts_registry (artifacts.ArtifactDefinitionsRegistry): artifacts
definitions registry.
file_system (dfvfs.FileSystem): file system to be preprocessed.
mount_point (dfvfs.PathSpec): mount point path specification that refers
to the base location of the file system.
knowledge_base (KnowledgeBase): to fill with preprocessing information.
|
codesearchnet
|
def _CreateConfig(self, project_id):
project_id = project_id or self._GetNumericProjectId()
if not project_id:
return
self.boto_config_header %= (
self.boto_config_script, self.boto_config_template)
config = config_manager.ConfigManager(
config_file=self.boto_config_template,
config_header=self.boto_config_header)
boto_dir = os.path.dirname(self.boto_config_script)
config.SetOption('GSUtil', 'default_project_id', project_id)
config.SetOption('GSUtil', 'default_api_version', '2')
config.SetOption('GoogleCompute', 'service_account', 'default')
config.SetOption('Plugin', 'plugin_directory', boto_dir)
config.WriteConfig(config_file=self.boto_config)
|
Create the boto config to support standalone GSUtil.
Args:
project_id: string, the project ID to use in the config file.
|
juraj-google-style
|
def has_extana(self, cached=True):
if cached and self.hardware != -1:
return True if (self.hardware & EXT_HW_EXTANA) else False
result = self._check_hardware()
return True if (result & EXT_HW_EXTANA) != 0 else False
|
Can be used to check if an SK8-ExtAna device is currently connected.
NOTE: do not attempt to call while data streaming is active!
Args:
cached (bool): if True, use the cached value of the connected hardware
state rather than querying the device. Set to False to force a query.
Returns:
bool. True if the SK8 currently has an SK8-ExtAna device attached, False otherwise.
|
juraj-google-style
|
def find_exception_by_code(code):
errorName = None
for error in WebDriverError:
if (error.value.code == code):
errorName = error
break
return errorName
|
Find name of exception by WebDriver defined error code.
Args:
code(str): Error code defined in protocol.
Returns:
The error name defined in protocol.
|
codesearchnet
|
def dbmax_mean(self, value=None):
if value is not None:
try:
value = float(value)
except ValueError:
raise ValueError('value {} need to be of type float '
'for field `dbmax_mean`'.format(value))
self._dbmax_mean = value
|
Corresponds to IDD Field `dbmax_mean`
Mean of extreme annual maximum dry-bulb temperature
Args:
value (float): value for IDD Field `dbmax_mean`
Unit: C
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
juraj-google-style
|
def get_num_bytes(self, batch: Sequence[tf.Tensor]) -> int:
return sum((sys.getsizeof(element) for element in batch))
|
Returns:
The number of bytes of data for a batch of Tensors.
|
github-repos
|
def __init__(self, settings, room_id):
StreamProcess.__init__(self, settings, room_id)
self._reactor = self._connection.get_twisted_reactor()
self._protocol = None
|
Initialize.
Args:
settings (dict): Settings used to create a :class:`Connection` instance
room_id (int): Room ID
|
juraj-google-style
|
def _somethingFound(self, data, mode='phonefy'):
if data:
try:
for text in self.notFoundText[mode]:
if (text in data):
return False
return True
except AttributeError as e:
verifier = self.modes.get(mode)
if verifier:
if (verifier.get('not_found_text', '') in data):
return False
else:
return True
return False
|
Verifying if something was found.
Args:
-----
data: Data where the self.notFoundText will be searched.
mode: Mode to be executed.
Return:
-------
True if something was found.
|
codesearchnet
|
def consume(self, callback, queue):
self.consumers[queue] = callback
if self._client_ready.called:
return self.client.consume(callback, queue)
|
Register a new consumer.
This consumer will be configured for every protocol this factory
produces so it will be reconfigured on network failures. If a connection
is already active, the consumer will be added to it.
Args:
callback (callable): The callback to invoke when a message arrives.
queue (str): The name of the queue to consume from.
|
codesearchnet
|
def findAll(self, selfValue):
resultList = []
for element in selfValue:
if isinstance(element, Single):
resultList += element.findAll(element.value)
else:
resultList.append(element)
return resultList
|
Looks for all the non-single values (str, int) *recursively* and returns a list of them
Args:
selfValue: A list of single, str, int. Normally just ``self.value``
Returns:
list: A list contains only non singles(str, int).
|
juraj-google-style
|
def _subdivide_nodes(nodes, degree):
if (degree == 1):
nodes_a = _helpers.matrix_product(nodes, LINEAR_SUBDIVIDE_A)
nodes_b = _helpers.matrix_product(nodes, LINEAR_SUBDIVIDE_B)
nodes_c = _helpers.matrix_product(nodes, LINEAR_SUBDIVIDE_C)
nodes_d = _helpers.matrix_product(nodes, LINEAR_SUBDIVIDE_D)
elif (degree == 2):
nodes_a = _helpers.matrix_product(nodes, QUADRATIC_SUBDIVIDE_A)
nodes_b = _helpers.matrix_product(nodes, QUADRATIC_SUBDIVIDE_B)
nodes_c = _helpers.matrix_product(nodes, QUADRATIC_SUBDIVIDE_C)
nodes_d = _helpers.matrix_product(nodes, QUADRATIC_SUBDIVIDE_D)
elif (degree == 3):
nodes_a = _helpers.matrix_product(nodes, CUBIC_SUBDIVIDE_A)
nodes_b = _helpers.matrix_product(nodes, CUBIC_SUBDIVIDE_B)
nodes_c = _helpers.matrix_product(nodes, CUBIC_SUBDIVIDE_C)
nodes_d = _helpers.matrix_product(nodes, CUBIC_SUBDIVIDE_D)
elif (degree == 4):
nodes_a = _helpers.matrix_product(nodes, QUARTIC_SUBDIVIDE_A)
nodes_b = _helpers.matrix_product(nodes, QUARTIC_SUBDIVIDE_B)
nodes_c = _helpers.matrix_product(nodes, QUARTIC_SUBDIVIDE_C)
nodes_d = _helpers.matrix_product(nodes, QUARTIC_SUBDIVIDE_D)
else:
nodes_a = specialize_surface(nodes, degree, _WEIGHTS_SUBDIVIDE0, _WEIGHTS_SUBDIVIDE1, _WEIGHTS_SUBDIVIDE2)
nodes_b = specialize_surface(nodes, degree, _WEIGHTS_SUBDIVIDE3, _WEIGHTS_SUBDIVIDE2, _WEIGHTS_SUBDIVIDE1)
nodes_c = specialize_surface(nodes, degree, _WEIGHTS_SUBDIVIDE1, _WEIGHTS_SUBDIVIDE4, _WEIGHTS_SUBDIVIDE3)
nodes_d = specialize_surface(nodes, degree, _WEIGHTS_SUBDIVIDE2, _WEIGHTS_SUBDIVIDE3, _WEIGHTS_SUBDIVIDE5)
return (nodes_a, nodes_b, nodes_c, nodes_d)
|
Subdivide a surface into four sub-surfaces.
.. note::
There is also a Fortran implementation of this function, which
will be used if it can be built.
Does so by taking the unit triangle (i.e. the domain of the surface) and
splitting it into four sub-triangles by connecting the midpoints of each
side.
Args:
nodes (numpy.ndarray): Control points for a surface.
degree (int): The degree of the surface.
Returns:
Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray, numpy.ndarray]: The
nodes for the four sub-surfaces.
|
codesearchnet
|
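A standalone numpy sketch of the degree-1 case: connecting the edge midpoints of a triangle yields the four sub-triangles (the corner ordering and orientation here are illustrative; the library's subdivision matrices fix them precisely).
import numpy as np

nodes = np.asarray([[0.0, 1.0, 0.0],
                    [0.0, 0.0, 1.0]])        # columns are the triangle corners
mid01 = 0.5 * (nodes[:, 0] + nodes[:, 1])
mid02 = 0.5 * (nodes[:, 0] + nodes[:, 2])
mid12 = 0.5 * (nodes[:, 1] + nodes[:, 2])
sub_a = np.column_stack([nodes[:, 0], mid01, mid02])  # corner sub-triangle at vertex 0
sub_b = np.column_stack([mid12, mid02, mid01])        # the central sub-triangle
sub_c = np.column_stack([mid01, nodes[:, 1], mid12])  # corner sub-triangle at vertex 1
sub_d = np.column_stack([mid02, mid12, nodes[:, 2]])  # corner sub-triangle at vertex 2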
def setMinimum(self, minimum):
if not isinstance(minimum, int):
raise TypeError("Argument is not of type int or long")
self._minimum = minimum
|
Setter for _minimum.
Args:
minimum (int or long): new _minimum value.
Raises:
TypeError: If the given argument is not an integer.
|
juraj-google-style
|
def get_polling_override(self):
polling_override = self.get_characteristic_handle_from_uuid(UUID_POLLING_OVERRIDE)
if (polling_override is None):
logger.warn('Failed to find handle for polling override')
return None
override_ms = self.dongle._read_attribute(self.conn_handle, polling_override, True)
return (None if (override_ms is None) else ord(override_ms))
|
Get the current polling override value in milliseconds.
See :meth:`set_polling_override` for more information.
Returns:
None on error, otherwise the current override period in milliseconds
(0 = disabled).
|
codesearchnet
|
def _DescriptionSection(component, info):
if custom_descriptions.NeedsCustomDescription(component):
available_space = LINE_LENGTH - SECTION_INDENTATION
description = custom_descriptions.GetDescription(component, available_space, LINE_LENGTH)
summary = custom_descriptions.GetSummary(component, available_space, LINE_LENGTH)
else:
description = _GetDescription(info)
summary = _GetSummary(info)
text = description or summary or None
if text:
return ('DESCRIPTION', text)
else:
return None
|
The "Description" sections of the help string.
Args:
component: The component to produce the description section for.
info: The info dict for the component of interest.
Returns:
Returns the description if available. If not, returns the summary.
If neither are available, returns None.
|
github-repos
|
def get_snapshot(self, snapshot_id_or_uri, volume_id_or_uri=None):
uri = self.__build_volume_snapshot_uri(volume_id_or_uri, snapshot_id_or_uri)
return self._client.get(uri)
|
Gets a snapshot of a volume.
Args:
volume_id_or_uri:
Can be either the volume ID or the volume URI. It is optional if a snapshot URI is passed,
but required if a snapshot ID is passed.
snapshot_id_or_uri:
Can be either the snapshot ID or the snapshot URI.
Returns:
dict: The snapshot.
|
juraj-google-style
|
def dataset(self, mode, hparams=None, global_step=None, **kwargs):
datasets = [p.dataset(mode, **kwargs) for p in self.problems]
datasets = [
d.map(lambda x, i=j: self.normalize_example(
dict(x, problem_id=tf.constant([i])), hparams))
for j, d in enumerate(datasets)
]
if mode is problem.DatasetSplit.TRAIN:
if global_step is None:
global_step = tf.train.get_or_create_global_step()
pmf = get_schedule_distribution(self.schedule, global_step)
return get_multi_dataset(datasets, pmf)
elif self.only_eval_first_problem:
return datasets[0]
else:
datasets = [d.repeat() for d in datasets]
return tf.data.Dataset.zip(tuple(datasets)).flat_map(
lambda *x: functools.reduce(
tf.data.Dataset.concatenate,
map(tf.data.Dataset.from_tensors, x)))
|
Returns a dataset containing examples from multiple problems.
Args:
mode: A member of problem.DatasetSplit.
hparams: A tf.HParams object, the model hparams.
global_step: A scalar tensor used to compute the sampling distribution.
If global_step is None, we call tf.train.get_or_create_global_step by
default.
**kwargs: Keywords for problem.Problem.Dataset.
Returns:
A dataset containing examples from multiple problems.
|
juraj-google-style
|
def make_instance(cls, data):
schema = cls()
if (not hasattr(schema.Meta, 'model')):
raise AttributeError('In order to make an instance, a model for the schema must be defined in the Meta class.')
serialized_data = schema.load(data).data
return cls.Meta.model(**serialized_data)
|
Validate the data and create a model instance from the data.
Args:
data (dict): The unserialized data to insert into the new model
instance through its constructor.
Returns:
peewee.Model|sqlalchemy.Model: The model instance with it's data
inserted into it.
Raises:
AttributeError: This is raised if ``Meta.model`` isn't set on the
schema's definition.
|
codesearchnet
|
def merge(self, other_rel):
if ((other_rel.thresholds.size == self.thresholds.size) and np.all((other_rel.thresholds == self.thresholds))):
self.frequencies += other_rel.frequencies
else:
print('Input table thresholds do not match.')
|
Ingest another DistributedReliability and add its contents to the current object.
Args:
other_rel: another DistributedReliability object.
|
codesearchnet
|
def fetch_woeid(self, location):
rss = self._fetch_xml(WOEID_LOOKUP_URL.format(quote(location)))
try:
woeid = rss.find('results/Result/woeid').text
except AttributeError:
return None
return woeid
|
Fetch a location's corresponding WOEID.
Args:
location: (string) a location (e.g. 23454 or Berlin, Germany).
Returns:
a string containing the location's corresponding WOEID or None if
the WOEID could not be found.
Raises:
urllib.error.URLError: urllib.request could not open the URL
(Python 3).
urllib2.URLError: urllib2 could not open the URL (Python 2).
xml.etree.ElementTree.ParseError: xml.etree.ElementTree failed to
parse the XML document.
|
codesearchnet
|
def dim_reduce_data(data, d):
(genes, cells) = data.shape
distances = np.zeros((cells, cells))
for i in range(cells):
for j in range(cells):
distances[i, j] = poisson_dist(data[:, i], data[:, j])
proximity = (distances ** 2)
J = (np.eye(cells) - (1.0 / cells))
B = ((- 0.5) * np.dot(J, np.dot(proximity, J)))
(e_val, e_vec) = np.linalg.eigh(B)
lam = np.diag(e_val[(- d):])[::(- 1)]
E = e_vec[:, -d:][::-1]
X = np.dot(E, (lam ** 0.5))
return X
|
Does a MDS on the data directly, not on the means.
Args:
data (array): genes x cells
d (int): desired dimensionality
Returns:
X, a cells x d matrix
|
codesearchnet
|
def set_dft_grid(self, radical_points=128, angular_points=302,
grid_type="Lebedev"):
available_lebedev_angular_points = {6, 18, 26, 38, 50, 74, 86, 110, 146,
170, 194, 230, 266, 302, 350, 434,
590, 770, 974, 1202, 1454, 1730,
2030, 2354, 2702, 3074, 3470, 3890,
4334, 4802, 5294}
if grid_type.lower() == "sg-0":
self.params["rem"]["xc_grid"] = 0
elif grid_type.lower() == "sg-1":
self.params["rem"]["xc_grid"] = 1
elif grid_type.lower() == "lebedev":
if angular_points not in available_lebedev_angular_points:
raise ValueError(str(angular_points) + " is not a valid "
"Lebedev angular points number")
self.params["rem"]["xc_grid"] = "{rp:06d}{ap:06d}".format(
rp=radical_points, ap=angular_points)
elif grid_type.lower() == "gauss-legendre":
self.params["rem"]["xc_grid"] = "-{rp:06d}{ap:06d}".format(
rp=radical_points, ap=angular_points)
else:
raise ValueError("Grid type " + grid_type + " is not supported "
"currently")
|
Set the grid for DFT numerical integrations.
Args:
radical_points: Radical points. (Integer)
angular_points: Angular points. (Integer)
grid_type: The type of the grid. There are two standard grids:
SG-1 and SG-0. The other two supported grids are "Lebedev" and
"Gauss-Legendre"
|
juraj-google-style
|
def on_epoch_begin(self, epoch, logs=None):
    pass  # intentionally a no-op; subclasses override with per-epoch setup
|
Called at the start of an epoch.
Subclasses should override for any actions to run. This function should
only be called during TRAIN mode.
Args:
epoch: Integer, index of epoch.
logs: Dict. Currently no data is passed to this argument for this
method but that may change in the future.
|
github-repos
|
def dedent(text: str) -> str:
return textwrap.dedent(text).strip()
|
Wrapper around `textwrap.dedent` which also `strip()` the content.
Before:
```python
text = textwrap.dedent(
\"\"\"\\
A(
x=1,
)\"\"\"
)
```
After:
```python
text = epy.dedent(
\"\"\"
A(
x=1,
)
\"\"\"
)
```
Args:
text: The text to dedent
Returns:
The dedented text
|
github-repos
|
def get_desired():
public_members = get_public_members()
if public_members:
members = '\n :members: {}'.format(', '.join(public_members))
else:
members = ''
return DESIRED_TEMPLATE.format(members=members)
|
Populate ``DESIRED_TEMPLATE`` with public members.
If there are no public members, the ``:members:`` option is omitted.
Returns:
str: The "desired" contents of ``bezier.rst``.
|
codesearchnet
|
def get_evaluations(self, variant_obj):
query = dict(variant_id=variant_obj['variant_id'])
res = self.acmg_collection.find(query).sort([('created_at', pymongo.DESCENDING)])
return res
|
Return all evaluations for a certain variant.
Args:
variant_obj (dict): variant dict from the database
Returns:
pymongo.cursor: database cursor
|
codesearchnet
|
def _delete_gridfs_data(self, data):
if isinstance(data, ObjectId):
if self._gridfs.exists({'_id': data}):
self._gridfs.delete(data)
else:
raise DataStoreGridfsIdInvalid()
elif isinstance(data, list):
for item in data:
self._delete_gridfs_data(item)
elif isinstance(data, dict):
for (key, item) in data.items():
self._delete_gridfs_data(item)
|
Delete all GridFS data that is linked by fields in the specified data.
Args:
data: The data that is parsed for MongoDB ObjectIDs. The linked GridFs object
for any ObjectID is deleted.
|
codesearchnet
|
def convert_maxpool3(params, w_name, scope_name, inputs, layers, weights, names):
print('Converting pooling ...')
if names == 'short':
tf_name = 'P' + random_string(7)
elif names == 'keep':
tf_name = w_name
else:
tf_name = w_name + str(random.random())
if 'kernel_shape' in params:
height, width, depth = params['kernel_shape']
else:
height, width, depth = params['kernel_size']
if 'strides' in params:
stride_height, stride_width, stride_depth = params['strides']
else:
stride_height, stride_width, stride_depth = params['stride']
if 'pads' in params:
padding_h, padding_w, padding_d, _, _ = params['pads']
else:
padding_h, padding_w, padding_d = params['padding']
input_name = inputs[0]
if padding_h > 0 and padding_w > 0 and padding_d > 0:
padding_name = tf_name + '_pad'
padding_layer = keras.layers.ZeroPadding3D(
padding=(padding_h, padding_w, padding_d),
name=padding_name
)
layers[padding_name] = padding_layer(layers[inputs[0]])
input_name = padding_name
pooling = keras.layers.MaxPooling3D(
pool_size=(height, width, depth),
strides=(stride_height, stride_width, stride_depth),
padding='valid',
name=tf_name
)
layers[scope_name] = pooling(layers[input_name])
|
Convert 3d Max pooling.
Args:
params: dictionary with layer parameters
w_name: name prefix in state_dict
scope_name: pytorch scope name
inputs: pytorch node inputs
layers: dictionary with keras tensors
weights: pytorch state_dict
names: use short names for keras layers
|
juraj-google-style
|
def _add_arg(self, key, value, mask=False):
if (self.lang == 'python'):
self._add_arg_python(key, value, mask)
elif (self.lang == 'java'):
self._add_arg_java(key, value, mask)
|
Add CLI Arg for the correct language.
Args:
key (string): The CLI Args key (e.g., --name).
value (string): The CLI Args value (e.g., bob).
mask (boolean, default:False): Indicates whether the value should be masked.
|
codesearchnet
|
def van(first_enc, first_frame, current_enc, gt_image, reuse=False, scope_prefix='', hparams=None):
with tf.variable_scope((scope_prefix + 'van'), reuse=reuse):
output_shape = first_frame.get_shape().as_list()
output_shape[0] = (- 1)
first_depth = 64
(f_first_enc, _) = van_enc_2d(first_enc, first_depth)
(f_first_frame, image_enc_history) = van_image_enc_2d(first_frame, first_depth, hparams=hparams)
(f_current_enc, van_higher_level) = van_enc_2d(current_enc, first_depth, reuse=True)
(f_gt_image, _) = van_image_enc_2d(gt_image, first_depth, True, hparams=hparams)
analogy_t = analogy_computation_2d(f_first_enc, f_first_frame, f_current_enc, first_depth)
enc_img = (f_current_enc + analogy_t)
img = van_dec_2d(enc_img, image_enc_history, output_shape, first_depth, hparams=hparams)
batch_size = tf.to_float(tf.shape(first_enc)[0])
r_loss = (tf.nn.l2_loss(((f_gt_image - f_current_enc) - analogy_t)) / batch_size)
return (img, r_loss, van_higher_level)
|
Implements a VAN.
Args:
first_enc: The first encoding.
first_frame: The first ground truth frame.
current_enc: The encoding of the frame to generate.
gt_image: The ground truth image, only used for regularization.
reuse: To reuse in variable scope or not.
scope_prefix: The prefix before the scope name.
hparams: The python hparams.
Returns:
The generated image, the regularization loss, and the higher-level VAN encoding.
|
codesearchnet
|
def recipe_cm360_report_replicate(config, auth_read, recipe_name, auth_write, account, recipe_slug, report_id, report_name, delete, Aggregate):
drive(config, {'auth': 'user', 'copy': {'source': 'https:
dataset(config, {'auth': auth_write, 'dataset': recipe_slug})
cm_report_replicate(config, {'auth': auth_read, 'report': {'account': account, 'id': report_id, 'name': report_name, 'delete': delete}, 'replicate': {'sheets': {'sheet': recipe_name, 'tab': 'Accounts', 'range': ''}}, 'write': {'bigquery': {'dataset': recipe_slug, 'is_incremental_load': Aggregate}}})
|
Replicate a report across multiple networks and advertisers.
Args:
auth_read (authentication) - Credentials used for reading data.
recipe_name (string) - Sheet to read ids from.
auth_write (authentication) - Credentials used for writing data.
account (integer) - CM network id.
recipe_slug (string) - NA
report_id (integer) - CM template report id, for template
report_name (string) - CM template report name, empty if using id instead.
delete (boolean) - Use only to reset the reports if setup changes.
Aggregate (boolean) - Append report data to existing table, requires Date column.
|
github-repos
|
def __init__(self, fail_silently=False, aws_access_key_id=None,
aws_secret_access_key=None, **kwargs):
super(EmailBackend, self).__init__(fail_silently=fail_silently)
access_key_id = getattr(settings, 'AWS_ACCESS_KEY_ID', None)
secret_access_key = getattr(settings, 'AWS_SECRET_ACCESS_KEY', None)
region_name = getattr(settings, 'AWS_DEFAULT_REGION', 'us-east-1')
access_key_id = getattr(settings, 'AWS_SES_ACCESS_KEY_ID',
access_key_id)
secret_access_key = getattr(settings, 'AWS_SES_SECRET_ACCESS_KEY',
secret_access_key)
region_name = getattr(settings, 'AWS_SES_REGION', region_name)
if aws_access_key_id is not None and aws_secret_access_key is not None:
access_key_id = aws_access_key_id
secret_access_key = aws_secret_access_key
self.conn = boto3.client(
'ses',
aws_access_key_id=access_key_id,
aws_secret_access_key=secret_access_key,
region_name=region_name,
)
|
Creates a client for the Amazon SES API.
Args:
fail_silently: Flag that determines whether Amazon SES
client errors should throw an exception.
|
juraj-google-style
|
def config_file(self, filename):
if os.path.isfile(filename):
with open(filename, 'r') as fh:
self._config_data = json.load(fh)
else:
self.tcex.log.error('Could not load configuration file "{}".'.format(filename))
|
Load configuration data from the provided JSON file.
Args:
filename (str): The configuration file name.
|
juraj-google-style
|
def download(self, folder=None):
url = self.data.get('url', None)
if (not url):
raise HDXError('No URL to download!')
logger.debug(('Downloading %s' % url))
filename = self.data['name']
format = ('.%s' % self.data['format'])
if (format not in filename):
filename = ('%s%s' % (filename, format))
with Download(full_agent=self.configuration.get_user_agent()) as downloader:
path = downloader.download_file(url, folder, filename)
return (url, path)
|
Download the resource and store it in the provided folder, or in a temporary folder if none is supplied
Args:
folder (Optional[str]): Folder to download resource to. Defaults to None.
Returns:
Tuple[str, str]: (URL downloaded, Path to downloaded file)
|
codesearchnet
|
def get(cls, issue_type):
if isinstance(issue_type, str):
obj = getattr(db, cls.__name__).find_one((cls.issue_type == issue_type))
elif isinstance(issue_type, int):
obj = getattr(db, cls.__name__).find_one((cls.issue_type_id == issue_type))
elif isinstance(issue_type, cls):
return issue_type
else:
obj = None
if (not obj):
obj = cls()
obj.issue_type = issue_type
db.session.add(obj)
db.session.commit()
db.session.refresh(obj)
return obj
|
Returns the IssueType object for `issue_type`. If no existing object was found, a new type will
be created in the database and returned
Args:
issue_type (str,int,IssueType): Issue type name, id or class
Returns:
:obj:`IssueType`
|
codesearchnet
|
def acquire(self):
self._fd = open(self._path, mode='w+')
os.chmod(self._path, 432)  # 432 == 0o660 (rw-rw----)
fcntl.flock(self._fd, self._op)
|
Acquire the lock
Raises:
IOError: if the call to flock fails
|
codesearchnet
|
def get_user(self, user_id=None, user_name=None):
if user_id:
endpoint = '/api/user_id/{0}'.format(user_id)
elif user_name:
endpoint = '/api/user_name/{0}'.format(user_name)
else:
endpoint = '/api/user'
data = self._make_request(verb="GET", endpoint=endpoint)
try:
return User.NewFromJSON(data)
except Exception:  # fall back to returning the raw data if parsing fails
return data
|
Get a user object from the API. If no ``user_id`` or ``user_name``
is specified, it will return the User object for the currently
authenticated user.
Args:
user_id (int): User ID of the user for whom you want to get
information. [Optional]
user_name(str): Username for the user for whom you want to get
information. [Optional]
Returns:
A User object.
|
juraj-google-style
|
def refresh_db(**kwargs):
salt.utils.pkg.clear_rtag(__opts__)
retcodes = {100: True, 0: None, 1: False}
ret = True
check_update_ = kwargs.pop('check_update', True)
options = _get_options(**kwargs)
clean_cmd = ['--quiet', '--assumeyes', 'clean', 'expire-cache']
clean_cmd.extend(options)
_call_yum(clean_cmd, ignore_retcode=True)
if check_update_:
update_cmd = ['--quiet', '--assumeyes', 'check-update']
if ((__grains__.get('os_family') == 'RedHat') and (__grains__.get('osmajorrelease') == 7)):
update_cmd.append('--setopt=autocheck_running_kernel=false')
update_cmd.extend(options)
ret = retcodes.get(_call_yum(update_cmd, ignore_retcode=True)['retcode'], False)
return ret
|
Check the yum repos for updated packages
Returns:
- ``True``: Updates are available
- ``False``: An error occurred
- ``None``: No updates are available
repo
Refresh just the specified repo
disablerepo
Do not refresh the specified repo
enablerepo
Refresh a disabled repo using this option
branch
Add the specified branch when refreshing
disableexcludes
Disable the excludes defined in your config files. Takes one of three
options:
- ``all`` - disable all excludes
- ``main`` - disable excludes defined in [main] in yum.conf
- ``repoid`` - disable excludes defined for that repo
setopt
A comma-separated or Python list of key=value options. This list will
be expanded and ``--setopt`` prepended to each in the yum/dnf command
that is run.
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' pkg.refresh_db
|
codesearchnet
|
def Approve(self, request, global_params=None):
config = self.GetMethodConfig('Approve')
return self._RunMethod(config, request, global_params=global_params)
|
Approves or rejects a pending build. If approved, the returned LRO will be analogous to the LRO returned from a CreateBuild call. If rejected, the returned LRO will be immediately done.
Args:
request: (CloudbuildProjectsBuildsApproveRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
|
github-repos
|
def _wrap_any(cls, self, obj: Any):
if isinstance(obj, expressions.Builder):
return cls.from_fhir_path_builder(obj)
if isinstance(obj, list):
return [cls._wrap_any(self, item) for item in obj]
if isinstance(obj, tuple):
return tuple(cls._wrap_any(self, item) for item in obj)
if isinstance(obj, dict):
return {cls._wrap_any(self, key): cls._wrap_any(self, value) for key, value in obj.items()}
if callable(obj):
return cls._wrap_function(self, obj)
return obj
|
Wraps any object with the logic below.
Args:
self: self instance reference.
obj: any object.
Returns:
If the object is:
- an expressions.Builder: returns this class to wrap it;
- a list: returns a new list with each item in the list wrapped;
- a tuple: returns a new tuple with each item in the list wrapped;
- a dictionary: returns a new dictionary with each key/value pair in
the dictionary wrapped.
- a callable function: returns a wrapper function with the return result
of the function wrapped.
- anything else: returns the object itself.
|
github-repos
|
def from_json_value(cls, json_value: Optional[Any], primitive_cls: Type[message.Message], context: Context) -> 'PrimitiveWrapper':
if isinstance(json_value, (list, tuple)):
raise ValueError('Error, unable to wrap sequence.')
if json_value is None or isinstance(json_value, (dict,)):
return cls(no_value_primitive(primitive_cls), context)
if not isinstance(json_value, cast(Tuple[Type[Any], ...], cls._PARSABLE_TYPES)):
raise fhir_errors.InvalidFhirError(f'Unable to parse JSON. {type(json_value)} is invalid FHIR JSON.')
return cls.from_json_str(str(json_value), primitive_cls, context)
|
Parses json_value into an instance of primitive_cls and wraps.
Args:
json_value: The optional raw json_value to parse and wrap.
primitive_cls: The type of FHIR primitive message to create and validate.
context: Related primitive information to use for printing/parsing a
wrapped primitive.
Returns:
An instance of PrimitiveWrapper.
|
github-repos
|
def children(self, obj, save_type=base.SaveType.CHECKPOINT, **kwargs):
children = {}
for name, ref in self.list_children(obj, **kwargs):
children[name] = ref
return children
|
Returns all child trackables attached to obj.
Args:
obj: A `Trackable` object.
save_type: A string, can be 'savedmodel' or 'checkpoint'.
**kwargs: kwargs to use when retrieving the object's children.
Returns:
Dictionary of all children attached to the object with name to trackable.
|
github-repos
|
def _set_bearer_user_vars(allowed_client_ids, scopes):
all_scopes, sufficient_scopes = _process_scopes(scopes)
try:
authorized_scopes = oauth.get_authorized_scopes(sorted(all_scopes))
except oauth.Error:
_logger.debug('Unable to get authorized scopes.', exc_info=True)
return
if not _are_scopes_sufficient(authorized_scopes, sufficient_scopes):
_logger.warning('Authorized scopes did not satisfy scope requirements.')
return
client_id = oauth.get_client_id(authorized_scopes)
if (list(allowed_client_ids) != SKIP_CLIENT_ID_CHECK and
client_id not in allowed_client_ids):
_logger.warning('Client ID is not allowed: %s', client_id)
return
os.environ[_ENV_USE_OAUTH_SCOPE] = ' '.join(authorized_scopes)
_logger.debug('get_current_user() will return user from matched oauth_user.')
|
Validate the oauth bearer token and set endpoints auth user variables.
If the bearer token is valid, this sets ENDPOINTS_USE_OAUTH_SCOPE. This
provides enough information that our endpoints.get_current_user() function
can get the user.
Args:
allowed_client_ids: List of client IDs that are acceptable.
scopes: List of acceptable scopes.
|
juraj-google-style
|
def create_combination(list_of_sentences):
num_sentences = (len(list_of_sentences) - 1)
combinations = []
for (i, _) in enumerate(list_of_sentences):
if (i == num_sentences):
break
num_pairs = (num_sentences - i)
populated = (num_pairs * [list_of_sentences[i]])
zipped = list(zip(populated, list_of_sentences[(i + 1):]))
combinations += zipped
return combinations
|
Generates all possible pair combinations for the input list of sentences.
For example:
input = ["paraphrase1", "paraphrase2", "paraphrase3"]
output = [("paraphrase1", "paraphrase2"),
("paraphrase1", "paraphrase3"),
("paraphrase2", "paraphrase3")]
Args:
list_of_sentences: the list of input sentences.
Returns:
the list of all possible sentence pairs.
|
codesearchnet
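A quick sanity check of the pairing logic; pure Python, assuming create_combination above is in scope:
sentences = ['a', 'b', 'c']
pairs = create_combination(sentences)
# all unordered pairs, preserving input order
assert pairs == [('a', 'b'), ('a', 'c'), ('b', 'c')]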
|
def create_index(index_name, index_config, client):
client.create(index=index_name, body=index_config)
|
Creates an index with a given configuration
Args:
index_name (str): Name of the index you want to create
index_config (dict): Configuration for the index
client (Elasticsearch.IndicesClient): The Elasticsearch client
|
juraj-google-style
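A hedged usage sketch written against the classic elasticsearch-py client, where IndicesClient.create accepts a body; the host URL and mapping below are illustrative only:
from elasticsearch import Elasticsearch
es = Elasticsearch('http://localhost:9200')
index_config = {
    'settings': {'number_of_shards': 1},
    'mappings': {'properties': {'title': {'type': 'text'}}},
}
create_index('articles', index_config, es.indices)  # es.indices is the IndicesClient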
|
def rejection_resample(class_func, target_dist, initial_dist=None, seed=None):
def _apply_fn(dataset):
return dataset.rejection_resample(class_func=class_func, target_dist=target_dist, initial_dist=initial_dist, seed=seed)
return _apply_fn
|
A transformation that resamples a dataset to achieve a target distribution.
**NOTE** Resampling is performed via rejection sampling; some fraction
of the input values will be dropped.
Args:
class_func: A function mapping an element of the input dataset to a scalar
`tf.int32` tensor. Values should be in `[0, num_classes)`.
target_dist: A floating point type tensor, shaped `[num_classes]`.
initial_dist: (Optional.) A floating point type tensor, shaped
`[num_classes]`. If not provided, the true class distribution is
estimated live in a streaming fashion.
seed: (Optional.) Python integer seed for the resampler.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
|
github-repos
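An illustrative sketch that rebalances a skewed two-class dataset toward 50/50. Per the tf.data docs the transformation yields (class, example) pairs, so a final map strips the label:
import tensorflow as tf
ds = tf.data.Dataset.from_tensor_slices([0] * 90 + [1] * 10)
balanced = ds.apply(rejection_resample(lambda x: tf.cast(x, tf.int32), [0.5, 0.5]))
balanced = balanced.map(lambda label, example: example)  # drop the class label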
|
def _dist_to_opt(self):
dist_to_opt_ops = []
self._grad_norm = tf.sqrt(self._grad_norm_squared)
avg_op = self._moving_averager.apply([self._grad_norm])
dist_to_opt_ops.append(avg_op)
with tf.control_dependencies([avg_op]):
self._grad_norm_avg = self._moving_averager.average(self._grad_norm)
self._d_t = (self._grad_norm_avg / self._grad_norm_squared_avg)
avg_op = self._moving_averager.apply([self._d_t])
dist_to_opt_ops.append(avg_op)
with tf.control_dependencies([avg_op]):
self._dist_to_opt_avg = tf.identity(self._moving_averager.average(self._d_t))
if self._sparsity_debias:
self._dist_to_opt_avg /= tf.sqrt(self._sparsity_avg)
return dist_to_opt_ops
|
Distance to optimum, estimated as the ratio of the moving averages of ||grad|| and ||grad||^2 (the D_t statistic).
Returns:
D_t ops
|
codesearchnet
|
def explore(config, mutations, resample_probability, custom_explore_fn):
new_config = copy.deepcopy(config)
for (key, distribution) in mutations.items():
if isinstance(distribution, dict):
new_config.update({key: explore(config[key], mutations[key], resample_probability, None)})
elif isinstance(distribution, list):
if ((random.random() < resample_probability) or (config[key] not in distribution)):
new_config[key] = random.choice(distribution)
elif (random.random() > 0.5):
new_config[key] = distribution[max(0, (distribution.index(config[key]) - 1))]
else:
new_config[key] = distribution[min((len(distribution) - 1), (distribution.index(config[key]) + 1))]
else:
if (random.random() < resample_probability):
new_config[key] = distribution()
elif (random.random() > 0.5):
new_config[key] = (config[key] * 1.2)
else:
new_config[key] = (config[key] * 0.8)
if (type(config[key]) is int):
new_config[key] = int(new_config[key])
if custom_explore_fn:
new_config = custom_explore_fn(new_config)
assert (new_config is not None), 'Custom explore fn failed to return new config'
logger.info('[explore] perturbed config from {} -> {}'.format(config, new_config))
return new_config
|
Return a config perturbed as specified.
Args:
config (dict): Original hyperparameter configuration.
mutations (dict): Specification of mutations to perform as documented
in the PopulationBasedTraining scheduler.
resample_probability (float): Probability of allowing resampling of a
particular variable.
custom_explore_fn (func): Custom explore function applied after the
built-in config perturbations.
|
codesearchnet
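A minimal sketch of the perturbation behaviour on a toy search space; copy and logger are assumed to be imported/configured in explore's module:
import random
config = {'lr': 0.01, 'batch_size': 64}
mutations = {
    'lr': lambda: random.uniform(1e-4, 1e-1),  # continuous: resample or scale by 1.2 / 0.8
    'batch_size': [32, 64, 128],               # categorical: step along the list or resample
}
new_config = explore(config, mutations, resample_probability=0.25, custom_explore_fn=None)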
|
def fmt_partition(partition):
if not partition:
return ''
parts = [fmt_part(part, partition.node_labels).split('\n')
for part in partition]
times = (' ',
' {} '.format(MULTIPLY),
' ')
breaks = ('\n', '\n', '')
between = [times] * (len(parts) - 1) + [breaks]
elements = chain.from_iterable(zip(parts, between))
return ''.join(chain.from_iterable(zip(*elements)))
|
Format a |Bipartition|.
The returned string looks like::
0,1 ∅
─── ✕ ───
2 0,1
Args:
partition (Bipartition): The partition in question.
Returns:
str: A human-readable string representation of the partition.
|
juraj-google-style
|
def get_cases(variant_source, case_lines=None, case_type='ped', variant_type='snv', variant_mode='vcf'):
individuals = get_individuals(variant_source=variant_source, case_lines=case_lines, case_type=case_type, variant_mode=variant_mode)
case_objs = []
case_ids = set()
compressed = False
tabix_index = False
if variant_source.endswith('.gz'):
logger.debug('Found compressed variant source')
compressed = True
tabix_file = '.'.join([variant_source, 'tbi'])
if os.path.exists(tabix_file):
logger.debug('Found index file')
tabix_index = True
if (len(individuals) > 0):
for individual in individuals:
case_ids.add(individual.case_id)
else:
case_ids = [os.path.basename(variant_source)]
for case_id in case_ids:
logger.info('Found case {0}'.format(case_id))
case = Case(case_id=case_id, name=case_id, variant_source=variant_source, variant_type=variant_type, variant_mode=variant_mode, compressed=compressed, tabix_index=tabix_index)
for individual in individuals:
if (individual.case_id == case_id):
logger.info('Adding ind {0} to case {1}'.format(individual.name, individual.case_id))
case.add_individual(individual)
case_objs.append(case)
return case_objs
|
Create cases and populate them with individuals
Args:
variant_source (str): Path to vcf files
case_lines (Iterable): Ped like lines
case_type (str): Format of case lines
variant_type (str): Type of the variants, e.g. 'snv'
variant_mode (str): Mode of the variant source, e.g. 'vcf'
Returns:
case_objs (list(puzzle.models.Case))
|
codesearchnet
|
def _batch(self, batch_size) -> TypeSpec:
raise NotImplementedError(f'{type(self).__name__}._batch')
|
Returns a TypeSpec representing a batch of objects with this TypeSpec.
Args:
batch_size: An `int` representing the number of elements in a batch, or
`None` if the batch size may vary.
Returns:
A `TypeSpec` representing a batch of objects with this TypeSpec.
|
github-repos
|
def _get_events_list(object_key: str) -> List[str]:
return DB.get_list(_keys.events_list(object_key))
|
Get list of event ids for the object with the specified key.
Args:
object_key (str): Key of an object in the database.
Returns:
List[str]: Event ids recorded for the object.
|
juraj-google-style
|
def from_dlpack(dlcapsule):
context.context().ensure_initialized()
return pywrap_tfe.TFE_FromDlpackCapsule(dlcapsule, context.context()._handle)
|
Returns the TensorFlow eager tensor.
The returned tensor uses the memory shared by dlpack capsules from other
frameworks.
```python
a = tf.experimental.dlpack.from_dlpack(dlcapsule)
# `a` uses the memory shared by dlpack
```
Args:
dlcapsule: A PyCapsule named as dltensor
Returns:
A TensorFlow eager tensor
|
github-repos
|
def rand_ascii_str(length):
letters = [random.choice(ascii_letters_and_digits) for _ in range(length)]
return ''.join(letters)
|
Generates a random string of specified length, composed of ASCII letters
and digits.
Args:
length: The number of characters in the string.
Returns:
The random string generated.
|
juraj-google-style
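A self-contained demo; ascii_letters_and_digits is a module-level constant in the original, reconstructed here as an assumption:
import random
import string
ascii_letters_and_digits = string.ascii_letters + string.digits
token = rand_ascii_str(16)
assert len(token) == 16
assert all(c in ascii_letters_and_digits for c in token)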
|
def hash_file(fpath, algorithm='sha256', chunk_size=65535):
if isinstance(algorithm, str):
hasher = resolve_hasher(algorithm)
else:
hasher = algorithm
with open(fpath, 'rb') as fpath_file:
for chunk in iter(lambda: fpath_file.read(chunk_size), b''):
hasher.update(chunk)
return hasher.hexdigest()
|
Calculates a file sha256 or md5 hash.
Example:
>>> hash_file('/path/to/file.zip')
'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'
Args:
fpath: Path to the file being validated.
algorithm: Hash algorithm, one of `"auto"`, `"sha256"`, or `"md5"`.
The default `"auto"` detects the hash algorithm in use.
chunk_size: Bytes to read at a time, important for large files.
Returns:
The file hash.
|
github-repos
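A quick cross-check against hashlib; passing a hasher object directly sidesteps resolve_hasher, which lives in the surrounding module:
import hashlib
import tempfile
with tempfile.NamedTemporaryFile(delete=False) as tmp:
    tmp.write(b'hello world')
digest = hash_file(tmp.name, algorithm=hashlib.sha256())
assert digest == hashlib.sha256(b'hello world').hexdigest()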
|
def split_leading_dim(tensor, inputs, n_dims=2):
input_shape_static = inputs.get_shape()
input_shape_list = input_shape_static.as_list()
tensor_shape_static = tensor.get_shape()
tensor_shape_list = tensor_shape_static.as_list()
if (input_shape_static.is_fully_defined() and tensor_shape_static.is_fully_defined()):
new_shape = (input_shape_list[:n_dims] + tensor_shape_list[1:])
return tf.reshape(tensor, new_shape)
dims_after_first = tf.shape(tensor)[1:]
split_sizes = tf.shape(inputs)[:n_dims]
known_split_sizes = input_shape_list[:n_dims]
known_dims_after_first = tensor_shape_list[1:]
output_size = tf.concat([split_sizes, dims_after_first], 0)
result = tf.reshape(tensor, output_size)
result.set_shape((known_split_sizes + known_dims_after_first))
return result
|
Split the first dimension of a tensor.
Args:
tensor: Tensor to have its first dimension split.
inputs: Original reference input whose leading dimensions are used for the split.
n_dims: Number of dimensions to split.
Returns:
The input tensor, with its first dimension split.
|
codesearchnet
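An illustrative round trip: flatten the leading batch and time dimensions, then restore them from the reference input:
import tensorflow as tf
inputs = tf.zeros([4, 3, 8])          # [batch, time, features]
merged = tf.reshape(inputs, [12, 8])  # leading two dims flattened
restored = split_leading_dim(merged, inputs, n_dims=2)
assert restored.shape.as_list() == [4, 3, 8]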
|
def easeOutElastic(n, amplitude=1, period=0.3):
_checkRange(n)
if amplitude < 1:
amplitude = 1
s = period / 4
else:
s = period / (2 * math.pi) * math.asin(1 / amplitude)
return amplitude * 2**(-10*n) * math.sin((n-s)*(2*math.pi / period)) + 1
|
An elastic tween function that overshoots the destination and then "rubber bands" into the destination.
Args:
n (float): The time progress, starting at 0.0 and ending at 1.0.
amplitude (float): How far the curve overshoots its endpoints; values below 1 are clamped to 1.
period (float): The period of the damped oscillation.
Returns:
(float) The line progress, starting at 0.0 and ending at 1.0. Suitable for passing to getPointOnLine().
|
juraj-google-style
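A spot check of the curve, assuming _checkRange only validates 0.0 <= n <= 1.0; the 2**(-10*n) term leaves a residual of roughly 5e-4 at n = 1, hence the loose tolerance there:
assert abs(easeOutElastic(0.0)) < 1e-6        # starts at 0
assert abs(easeOutElastic(1.0) - 1.0) < 1e-3  # ends (approximately) at 1
curve = [round(easeOutElastic(i / 10), 3) for i in range(11)]  # sample the overshoot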
|
def swo_flush(self, num_bytes=None):
if (num_bytes is None):
num_bytes = self.swo_num_bytes()
buf = ctypes.c_uint32(num_bytes)
res = self._dll.JLINKARM_SWO_Control(enums.JLinkSWOCommands.FLUSH, ctypes.byref(buf))
if (res < 0):
raise errors.JLinkException(res)
return None
|
Flushes data from the SWO buffer.
After this method is called, the flushed part of the SWO buffer is
empty.
If ``num_bytes`` is not present, flushes all data currently in the SWO
buffer.
Args:
self (JLink): the ``JLink`` instance
num_bytes (int): the number of bytes to flush
Returns:
``None``
Raises:
JLinkException: on error
|
codesearchnet
|
def confirm_iam_role(self, account):
try:
iam = self.session.client('iam')
rolearn = iam.get_role(RoleName=self.role_name)['Role']['Arn']
return rolearn
except ClientError as e:
if e.response['Error']['Code'] == 'NoSuchEntity':
self.create_iam_role(account)
else:
raise
except Exception as e:
self.log.exception('Failed validating IAM role for VPC Flow Log Auditing: {}'.format(e))
|
Return the ARN of the IAM role on the provided account as a string, creating the role first if it does not exist.
Args:
account (:obj:`Account`): Account where to locate the role
Returns:
str: The ARN of the IAM role
|
juraj-google-style
|
def add_text(self, coords, text, color=(0, 0, 0)):
source = vtk.vtkVectorText()
source.SetText(text)
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(source.GetOutputPort())
follower = vtk.vtkFollower()
follower.SetMapper(mapper)
follower.GetProperty().SetColor(color)
follower.SetPosition(coords)
follower.SetScale(0.5)
self.ren.AddActor(follower)
follower.SetCamera(self.ren.GetActiveCamera())
|
Add text at a coordinate.
Args:
coords: Coordinates to add text at.
text: Text to place.
color: Color for text as RGB. Defaults to black.
|
juraj-google-style
|
def SetActiveBreakpoints(self, breakpoints_data):
with self._lock:
ids = set([x['id'] for x in breakpoints_data])
for breakpoint_id in (six.viewkeys(self._active) - ids):
self._active.pop(breakpoint_id).Clear()
self._active.update([(x['id'], python_breakpoint.PythonBreakpoint(x, self._hub_client, self, self.data_visibility_policy)) for x in breakpoints_data if (x['id'] in ((ids - six.viewkeys(self._active)) - self._completed))])
self._completed &= ids
if self._active:
self._next_expiration = datetime.min
else:
self._next_expiration = datetime.max
|
Adds new breakpoints and removes missing ones.
Args:
breakpoints_data: updated list of active breakpoints.
|
codesearchnet
|
def get_module_class_from_name(module, name):
modules_children = list(module.children())
if module.__class__.__name__ == name:
return module.__class__
elif len(modules_children) == 0:
return
else:
for child_module in modules_children:
module_class = get_module_class_from_name(child_module, name)
if module_class is not None:
return module_class
|
Gets a class from a module by its name.
Args:
module (`torch.nn.Module`): The module to get the class from.
name (`str`): The name of the class.
|
github-repos
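A toy lookup that recovers a class from a small container module:
import torch.nn as nn
model = nn.Sequential(nn.Linear(4, 4), nn.ReLU())
assert get_module_class_from_name(model, 'Linear') is nn.Linear
assert get_module_class_from_name(model, 'DoesNotExist') is None  # implicit None when absent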
|
def _AddPropertiesForField(field, cls):
assert _FieldDescriptor.MAX_CPPTYPE == 10
constant_name = field.name.upper() + "_FIELD_NUMBER"
setattr(cls, constant_name, field.number)
if field.label == _FieldDescriptor.LABEL_REPEATED:
_AddPropertiesForRepeatedField(field, cls)
elif field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
_AddPropertiesForNonRepeatedCompositeField(field, cls)
else:
_AddPropertiesForNonRepeatedScalarField(field, cls)
|
Adds a public property for a protocol message field.
Clients can use this property to get and (in the case
of non-repeated scalar fields) directly set the value
of a protocol message field.
Args:
field: A FieldDescriptor for this field.
cls: The class we're constructing.
|
juraj-google-style
|
def doc2id(self, doc):
doc = map(self.process_token, doc)
return [self.token_to_id(token) for token in doc]
|
Get the list of token ids for the given doc.
Args:
doc (list): document.
Returns:
list: int id of doc.
|
juraj-google-style
|
def from_path(cls, path):
stat_res = os.stat(path)
return cls.from_int(stat.S_IMODE(stat_res.st_mode))
|
Make a new :class:`FilePerms` object based on the permissions
assigned to the file or directory at *path*.
Args:
path (str): Filesystem path of the target file.
>>> from os.path import expanduser
>>> 'r' in FilePerms.from_path(expanduser('~')).user # probably
True
|
juraj-google-style
|
def saveplot(fig, *name_args, close=True, **name_kwargs):
oname = out_name(*name_args, **name_kwargs)
fig.savefig('{}.{}'.format(oname, conf.plot.format),
format=conf.plot.format, bbox_inches='tight')
if close:
plt.close(fig)
|
Save matplotlib figure.
You need to provide :data:`stem` as a positional or keyword argument (see
:func:`out_name`).
Args:
fig (:class:`matplotlib.figure.Figure`): matplotlib figure.
close (bool): whether to close the figure.
name_args: positional arguments passed on to :func:`out_name`.
name_kwargs: keyword arguments passed on to :func:`out_name`.
|
juraj-google-style
|
def _add_tags(self, tags):
alltagsadded = True
for tag in tags:
if not self._add_tag(tag):
alltagsadded = False
return alltagsadded
|
Add a list of tags
Args:
tags (List[str]): list of tags to add
Returns:
bool: True if all tags were added, False if any was already present.
|
juraj-google-style
|
def numeric_task_id(task_id):
if (task_id is not None):
if task_id.startswith('task-'):
return int(task_id[len('task-'):])
else:
return int(task_id)
|
Converts a task-id to the numeric task-id.
Args:
task_id: task-id in either task-n or n format
Returns:
n
|
codesearchnet
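Both accepted spellings map to the same numeric id, and None passes through:
assert numeric_task_id('task-7') == 7
assert numeric_task_id('7') == 7
assert numeric_task_id(None) is None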
|