Unnamed: 0
int64 0
389k
| code
stringlengths 26
79.6k
| docstring
stringlengths 1
46.9k
|
|---|---|---|
373,400
|
def strftime(self, fmt):
    """Format the time via strftime().

    A dummy date (1900-01-01) is supplied, so the date portion of the
    timetuple passed to the underlying strftime must not be relied upon.
    """
    # Dummy date fields plus this object's time fields; the trailing values
    # are yearday=1 and isdst=-1 as expected by strftime-style tuples.
    dummy_timetuple = (
        1900, 1, 1,
        self._hour, self._minute, self._second,
        0, 1, -1,
    )
    return _wrap_strftime(self, fmt, dummy_timetuple)
|
Format using strftime(). The date part of the timestamp passed
to underlying strftime should not be used.
|
373,401
|
def addMonitor(self, monitorFriendlyName, monitorURL, monitorAlertContacts=""):
    """Create a new monitor via the "newMonitor" API endpoint.

    :param monitorFriendlyName: display name for the new monitor.
    :param monitorURL: URL to be monitored (monitorType is fixed to 1).
    :param monitorAlertContacts: alert-contact ids to attach. Bug fix: this
        previously referenced an undefined free name, raising NameError at
        call time; it is now a parameter with a harmless default.
    :returns: True if the monitor was added, otherwise False.
    """
    url = self.baseUrl
    url += "newMonitor?apiKey=%s" % self.apiKey
    url += "&monitorFriendlyName=%s" % monitorFriendlyName
    url += "&monitorURL=%s&monitorType=1" % monitorURL
    url += "&monitorAlertContacts=%s" % monitorAlertContacts
    url += "&noJsonCallback=1&format=json"
    # requestApi returns a (success, response) pair; only success matters.
    success, response = self.requestApi(url)
    return bool(success)
|
Returns True if Monitor was added, otherwise False.
|
373,402
|
def _wait_for_handles(handles, timeout=-1):
    """Block on multiple Win32 handles, similar to 'select'.

    Returns the handle that became signalled, or `None` on timeout.
    http://msdn.microsoft.com/en-us/library/windows/desktop/ms687025(v=vs.85).aspx
    """
    handle_array = (HANDLE * len(handles))(*handles)
    # bWaitAll=False: return as soon as any single handle is signalled.
    result = windll.kernel32.WaitForMultipleObjects(
        len(handle_array), handle_array, BOOL(False), DWORD(timeout))
    if result == WAIT_TIMEOUT:
        return None
    # On success the return value indexes the signalled handle
    # (assumes WAIT_OBJECT_0 == 0, as in the original).
    return handle_array[result]
|
Waits for multiple handles. (Similar to 'select') Returns the handle which is ready.
Returns `None` on timeout.
http://msdn.microsoft.com/en-us/library/windows/desktop/ms687025(v=vs.85).aspx
|
373,403
|
def add_row(self, row):
    """Add a row to the table.

    Arguments:
    row - row of data, should be a list with as many elements as the table
    has fields

    Raises ValueError if the row length does not match the field count.
    """
    if self._field_names and len(row) != len(self._field_names):
        # ValueError is more precise than a bare Exception, and remains
        # backward-compatible with callers catching Exception.
        raise ValueError(
            "Row has incorrect number of values, (actual) %d!=%d (expected)"
            % (len(row), len(self._field_names)))
    # Copy so later mutation of the caller's list cannot alter the table.
    self._rows.append(list(row))
|
Add a row to the table
Arguments:
row - row of data, should be a list with as many elements as the table
has fields
|
373,404
|
def callback(self, callback, *args, **kwds):
    """Register an arbitrary callback and its arguments.

    Unlike a context manager, a plain callback cannot suppress exceptions.
    """
    entry = _CloseDummy(callback, args, kwds)
    return self << entry
|
Registers an arbitrary callback and arguments.
Cannot suppress exceptions.
|
373,405
|
def _fix_syscall_ip(state):
    """Point ``state.ip`` at the SimProcedure implementing the state's syscall.

    Resolution failures (AngrUnsupportedSyscallError) are silently ignored,
    leaving the state untouched.

    :param SimState state: the program state.
    :return: None
    """
    try:
        allow_unsupported = o.BYPASS_UNSUPPORTED_SYSCALL in state.options
        stub = state.project.simos.syscall(
            state, allow_unsupported=allow_unsupported)
        if stub:
            # Syscall resolved: retarget execution at the stub's address.
            state.ip = stub.addr
    except AngrUnsupportedSyscallError:
        pass
|
Resolve syscall information from the state, get the IP address of the syscall SimProcedure, and set the IP of
the state accordingly. Don't do anything if the resolution fails.
:param SimState state: the program state.
:return: None
|
373,406
|
def enforce_versioning(force=False):
    """Install schema versioning on the database.

    Asks the user for confirmation first (unless ``force``), then places the
    schema under version control. Returns the repository version, or None
    when the user declines.
    """
    connect_str, repo_url = get_version_data()
    LOG.warning("Your database uses an unversioned benchbuild schema.")
    # Short-circuits exactly like the original: ui.ask is only consulted
    # when force is falsy.
    user_agreed = force or ui.ask(
        "Should I enforce version control on your schema?")
    if not user_agreed:
        LOG.error("User declined schema versioning.")
        return None
    repo_version = migrate.version(repo_url, url=connect_str)
    migrate.version_control(connect_str, repo_url, version=repo_version)
    return repo_version
|
Install versioning on the db.
|
373,407
|
def configureLastWill(self, topic, payload, QoS):
    """Configure the MQTT last-will topic, payload and QoS of the client.

    Delegates to the wrapped AWSIoTMQTTClient; per the upstream contract
    this should be called before connect (not enforced here).

    :param topic: topic name that the last will publishes to.
    :param payload: payload to publish for the last will.
    :param QoS: quality of service (0 or 1).
    :return: None
    """
    self._AWSIoTMQTTClient.configureLastWill(topic, payload, QoS)
|
**Description**
Used to configure the last will topic, payload and QoS of the client. Should be called before connect. This is a public
facing API inherited by application level public clients.
**Syntax**
.. code:: python
myShadowClient.configureLastWill("last/Will/Topic", "lastWillPayload", 0)
myJobsClient.configureLastWill("last/Will/Topic", "lastWillPayload", 0)
**Parameters**
*topic* - Topic name that last will publishes to.
*payload* - Payload to publish for last will.
*QoS* - Quality of Service. Could be 0 or 1.
**Returns**
None
|
373,408
|
def validate_member_id_params_for_group_type(group_type, params, member_group_ids, member_entity_ids):
    """Conditionally add member entity/group IDs to a group create/update
    params dict; these parameters are only valid for internal groups.

    NOTE(review): string literals were stripped from this snippet -- the
    group-type value compared below and the params key are missing, so the
    code is not valid Python as-is. TODO restore them before use.
    """
    if group_type == :
        if member_entity_ids is not None:
            # The warning text appears garbled by the same literal-stripping.
            logger.warning("InvalidRequest: member entities canmember_entity_idsexternalt be set for external groups; ignoring member_group_ids argument.")
        else:
            params[] = member_group_ids
    return params
|
Determine whether member ID parameters can be sent with a group create / update request.
These parameters are only allowed for the internal group type. If they're set for an external group type, Vault
returns an "error" response.
:param group_type: Type of the group, internal or external
:type group_type: str | unicode
:param params: Params dict to conditionally add the member entity/group ID's to.
:type params: dict
:param member_group_ids: Group IDs to be assigned as group members.
:type member_group_ids: str | unicode
:param member_entity_ids: Entity IDs to be assigned as group members.
:type member_entity_ids: str | unicode
:return: Params dict with conditionally added member entity/group ID's.
:rtype: dict
|
373,409
|
def copy_logstore(from_client, from_project, from_logstore, to_logstore, to_project=None, to_client=None):
    """Copy logstore, index and logtail configs to a target logstore
    (machine groups are not included). The target project/logstore is
    created if it does not exist yet.

    NOTE(review): string literals (error codes, the config-name separator)
    were stripped from this snippet -- the empty ``== :`` comparisons and
    the ``+ + config_name`` expression below are not valid Python and must
    be restored before use. ``expected_rwshard_count`` and
    ``MAX_INIT_SHARD_COUNT`` are also referenced but not defined here.

    :param from_client: source LogClient instance.
    :param from_project: source project name.
    :param from_logstore: source logstore name.
    :param to_logstore: target logstore name.
    :param to_project: target project (defaults to the source project).
    :param to_client: target LogClient (defaults to ``from_client``).
    """
    if to_project is not None:
        to_client = to_client or from_client
        ret = from_client.get_project(from_project)
        try:
            # Try to create the target project with the source description.
            ret = to_client.create_project(to_project, ret.get_description())
        except LogException as ex:
            if ex.get_error_code() == :
                # Project already exists: update the logstore settings
                # to match the source instead.
                ret = to_client.update_logstore(to_project, to_logstore,
                                                ttl=ret.get_ttl(),
                                                enable_tracking=ret.get_enable_tracking(),
                                                append_meta=ret.append_meta,
                                                auto_split=ret.auto_split,
                                                max_split_shard=ret.max_split_shard,
                                                preserve_storage=ret.preserve_storage
                                                )
                res = arrange_shard(to_client, to_project, to_logstore, min(expected_rwshard_count, MAX_INIT_SHARD_COUNT))
            else:
                raise
    try:
        # Copy the index configuration, updating it when it already exists.
        ret = from_client.get_index_config(from_project, from_logstore)
        ret = to_client.create_index(to_project, to_logstore, ret.get_index_config())
    except LogException as ex:
        if ex.get_error_code() == :
            pass
        elif ex.get_error_code() == :
            ret = to_client.update_index(to_project, to_logstore, ret.get_index_config())
            pass
        else:
            raise
    # Page through the source project's logtail configs and copy those
    # attached to the source logstore.
    default_fetch_size = 100
    offset, size = 0, default_fetch_size
    while True:
        ret = from_client.list_logtail_config(from_project, offset=offset, size=size)
        count = ret.get_configs_count()
        total = ret.get_configs_total()
        for config_name in ret.get_configs():
            ret = from_client.get_logtail_config(from_project, config_name)
            config = ret.logtail_config
            if config.logstore_name != from_logstore:
                continue
            config.config_name = to_logstore + + config_name
            config.logstore_name = to_logstore
            ret = to_client.create_logtail_config(to_project, config)
        offset += count
        if count < size or offset >= total:
            break
|
copy logstore, index, logtail config to target logstore, machine group are not included yet.
the target logstore will be created if not existing
:type from_client: LogClient
:param from_client: logclient instance
:type from_project: string
:param from_project: project name
:type from_logstore: string
:param from_logstore: logstore name
:type to_logstore: string
:param to_logstore: target logstore name
:type to_project: string
:param to_project: project name, copy to same project if not being specified, will try to create it if not being specified
:type to_client: LogClient
:param to_client: logclient instance, use it to operate on the "to_project" if being specified
:return:
|
373,410
|
def point_translate(point_in, vector_in):
    """Translate the input point by the input vector.

    :param point_in: input point
    :type point_in: list, tuple
    :param vector_in: input vector
    :type vector_in: list, tuple
    :return: translated point
    :rtype: list
    :raises ValueError: if either argument is None or empty
    :raises TypeError: if either argument has no length (not a sequence)
    """
    try:
        if point_in is None or len(point_in) == 0 or vector_in is None or len(vector_in) == 0:
            raise ValueError("Input arguments cannot be empty")
    except TypeError as e:
        print("An error occurred: {}".format(e.args[-1]))
        # Chain the original error so the real cause is not lost.
        raise TypeError("Input must be a list or tuple") from e
    # Component-wise addition; extra components in the longer sequence are
    # ignored by zip (matches the original behavior).
    return [coord + comp for coord, comp in zip(point_in, vector_in)]
|
Translates the input points using the input vector.
:param point_in: input point
:type point_in: list, tuple
:param vector_in: input vector
:type vector_in: list, tuple
:return: translated point
:rtype: list
|
373,411
|
def create_index(self, index):
    """Create and/or open the index folder for the given index.

    If the index already exists it is simply opened; otherwise the folder
    (if needed) and a fresh index are created. The opened index object is
    stored on ``index._whoosh``.

    Bug fix: previously, when the directory existed but contained no index,
    neither branch ran and the final assignment raised NameError; the index
    is now created in that case too.
    """
    index._path = os.path.join(self.indexes_path, index._name)
    if whoosh.index.exists_in(index._path):
        _whoosh = whoosh.index.open_dir(index._path)
    else:
        if not os.path.exists(index._path):
            os.makedirs(index._path)
        _whoosh = whoosh.index.create_in(index._path, index._schema)
    index._whoosh = _whoosh
|
Creates and opens index folder for given index.
If the index already exists, it just opens it, otherwise it creates it first.
|
373,412
|
def relabel(self, qubits: Qubits) -> "State":
    """Return a copy of this state with the given new qubit labels.

    The underlying tensor and classical memory are passed through to the
    new State rather than copied.
    """
    return State(self.vec.tensor, qubits, self._memory)
|
Return a copy of this state with new qubits
|
373,413
|
def get_variant_genotypes(self, variant):
    """Get the genotypes from a well formed variant instance.

    Args:
        variant (Variant): A Variant instance.

    Returns:
        A list of Genotypes instances containing a pointer to the variant
        as well as a vector of encoded genotypes.

    Raises:
        NotImplementedError: if the IMPUTE2 file has no index.
        ValueError: if the variant's chromosome is unknown to IMPUTE2.
    """
    if not self.has_index:
        raise NotImplementedError("Not implemented when IMPUTE2 file is "
                                  "not indexed (see genipe)")
    try:
        impute2_chrom = CHROM_STR_TO_INT[variant.chrom.name]
    except KeyError:
        # Bug fix: the format string contained an empty "()" placeholder,
        # so the offending chromosome never appeared in the message.
        raise ValueError(
            "Invalid chromosome ({}) for IMPUTE2.".format(variant.chrom)
        )
    # Index rows matching this variant's chromosome and position.
    variant_info = self._impute2_index[
        (self._impute2_index.chrom == impute2_chrom) &
        (self._impute2_index.pos == variant.pos)
    ]
    if variant_info.shape[0] == 0:
        logging.variant_not_found(variant)
        return []
    elif variant_info.shape[0] == 1:
        return self._get_biallelic_variant(variant, variant_info)
    else:
        return self._get_multialleic_variant(variant, variant_info)
|
Get the genotypes from a well formed variant instance.
Args:
marker (Variant): A Variant instance.
Returns:
A list of Genotypes instance containing a pointer to the variant as
well as a vector of encoded genotypes.
|
373,414
|
def _residual(self, x, in_filter, out_filter, stride,
              activate_before_residual=False):
    """Residual unit with 2 sub layers.

    NOTE(review): the variable-scope names and layer-norm scope arguments
    were stripped from this snippet (empty call arguments below), so the
    code is not valid Python as-is.
    """
    if activate_before_residual:
        # Pre-activation applied before the residual branch splits off.
        with tf.variable_scope():
            x = self._layer_norm(, x)
            x = self._relu(x, self.hps.relu_leakiness)
            orig_x = x
    else:
        with tf.variable_scope():
            orig_x = x
            x = self._layer_norm(, x)
            x = self._relu(x, self.hps.relu_leakiness)
    with tf.variable_scope():
        x = self._conv(, x, 3, in_filter, out_filter, stride)
    with tf.variable_scope():
        x = self._layer_norm(, x)
        x = self._relu(x, self.hps.relu_leakiness)
        x = self._conv(, x, 3, out_filter, out_filter, [1, 1, 1, 1])
    with tf.variable_scope():
        if in_filter != out_filter:
            # Dimension mismatch: average-pool the shortcut, then zero-pad
            # its channel dimension to match out_filter.
            orig_x = tf.nn.avg_pool(orig_x, stride, stride, )
            orig_x = tf.pad(
                orig_x, [[0, 0], [0, 0], [0, 0],
                         [(out_filter - in_filter) // 2,
                          (out_filter - in_filter) // 2]])
        x += orig_x
    return x
|
Residual unit with 2 sub layers.
|
373,415
|
def get_file(fn):
    """Return file contents as a list of stripped unicode lines.

    NOTE(review): the path component passed to os.path.join and the open()
    mode were stripped from this snippet (empty arguments below); the mode
    was presumably a binary one given the .decode() call -- TODO confirm
    and restore before use.
    """
    fn = os.path.join(os.path.dirname(__file__), , fn)
    f = open(fn, )
    lines = [line.decode().strip() for line in f.readlines()]
    return lines
|
Returns file contents in unicode as list.
|
373,416
|
def catalogAdd(type, orig, replace):
    """Add an entry to the XML catalog; may overwrite an existing but
    different entry.

    If called before any other catalog routine, this overrides the default
    shared catalog put in place by xmlInitializeCatalog().
    """
    return libxml2mod.xmlCatalogAdd(type, orig, replace)
|
Add an entry in the catalog, it may overwrite existing but
different entries. If called before any other catalog
routine, allows to override the default shared catalog put
in place by xmlInitializeCatalog();
|
373,417
|
def radial_density(im, bins=10, voxel_size=1):
    r"""Compute the radial density function from the histogram of
    distance-transform values (Torquato's pore-size density function; see
    the accompanying documentation for the full derivation and caveats).

    NOTE(review): the namedtuple type name and field names were stripped
    from this snippet (empty arguments below), so the code is not valid
    Python as-is.
    """
    if im.dtype == bool:
        # A boolean image is a pore mask: take its distance transform first.
        im = spim.distance_transform_edt(im)
    mask = find_dt_artifacts(im) == 0
    im[mask] = 0
    x = im[im > 0].flatten()
    h = sp.histogram(x, bins=bins, density=True)
    h = _parse_histogram(h=h, voxel_size=voxel_size)
    rdf = namedtuple(,
                     (, , , , ,
                      ))
    return rdf(h.bin_centers, h.pdf, h.cdf, h.bin_centers, h.bin_edges,
               h.bin_widths)
|
r"""
Computes radial density function by analyzing the histogram of voxel
values in the distance transform. This function is defined by
Torquato [1] as:
.. math::
\int_0^\infty P(r)dr = 1.0
where *P(r)dr* is the probability of finding a voxel at a lying at a radial
distance between *r* and *dr* from the solid interface. This is equivalent
to a probability density function (*pdf*)
The cumulative distribution is defined as:
.. math::
F(r) = \int_r^\infty P(r)dr
which gives the fraction of pore-space with a radius larger than *r*. This
is equivalent to the cumulative distribution function (*cdf*).
Parameters
----------
im : ND-array
Either a binary image of the pore space with ``True`` indicating the
pore phase (or phase of interest), or a pre-calculated distance
transform which can save time.
bins : int or array_like
This number of bins (if int) or the location of the bins (if array).
This argument is passed directly to Scipy's ``histogram`` function so
see that docstring for more information. The default is 10 bins, which
produces a relatively smooth distribution.
voxel_size : scalar
The size of a voxel side in preferred units. The default is 1, so the
user can apply the scaling to the returned results after the fact.
Returns
-------
result : named_tuple
A named-tuple containing several 1D arrays:
*R* - radius, equivalent to ``bin_centers``
*pdf* - probability density function
*cdf* - cumulative density function
*bin_centers* - the center point of each bin
*bin_edges* - locations of bin divisions, including 1 more value than
the number of bins
*bin_widths* - useful for passing to the ``width`` argument of
``matplotlib.pyplot.bar``
Notes
-----
This function should not be taken as a pore size distribution in the
explict sense, but rather an indicator of the sizes in the image. The
distance transform contains a very skewed number of voxels with small
values near the solid walls. Nonetheless, it does provide a useful
indicator and its mathematical formalism is handy.
Torquato refers to this as the *pore-size density function*, and mentions
that it is also known as the *pore-size distribution function*. These
terms are avoided here since they have specific connotations in porous
media analysis.
References
----------
[1] Torquato, S. Random Heterogeneous Materials: Microstructure and
Macroscopic Properties. Springer, New York (2002) - See page 48 & 292
|
373,418
|
def start(self):
    """Start the dispatcher when all expected messengers/listeners are set;
    otherwise raise InvalidApplication listing what is still missing."""
    if self.valid:
        self.dispatcher.start()
        return
    template = ("\nMessengers and listeners that still need set:\n\n"
                "messengers : %s\n\n"
                "listeners : %s\n")
    raise InvalidApplication(template % (self.needed_messengers,
                                         self.needed_listeners))
|
If we have a set of plugins that provide our expected listeners and
messengers, tell our dispatcher to start up. Otherwise, raise
InvalidApplication
|
373,419
|
def to_output(self, value):
    """Convert ``value`` to the process output format.

    Saves the file (and its references) via resolwe_runtime_utils and
    returns the parsed JSON descriptor it produces.
    """
    return json.loads(resolwe_runtime_utils.save_file(self.name, value.path, *value.refs))
|
Convert value to process output format.
|
373,420
|
def save(self, path, name, save_meta=True):
    """Save the model as a sequence of files in the format:

    {path}/{name}_{'dec', 'disc', 'dec_opt', 'disc_opt', 'meta'}.h5

    Parameters
    ----------
    path : str
        Directory to save the model to.
    name : str
        Name prefix for the model and optimizer files.
    save_meta : bool, optional
        Whether to also save class metadata alongside the weights and
        optimizer states.
    """
    # Bug fix: a garbled leftover expression line (an undefined bare name,
    # residue of a mangled docstring) was removed here; it raised NameError.
    _save_model(self.dec, str(path), "%s_dec" % str(name))
    _save_model(self.disc, str(path), "%s_disc" % str(name))
    _save_model(self.dec_opt, str(path), "%s_dec_opt" % str(name))
    _save_model(self.disc_opt, str(path), "%s_disc_opt" % str(name))
    if save_meta:
        self._save_meta(os.path.join(path, "%s_meta" % str(name)))
|
Saves model as a sequence of files in the format:
{path}/{name}_{'dec', 'disc', 'dec_opt',
'disc_opt', 'meta'}.h5
Parameters
----------
path : str
The directory of the file you wish to save the model to.
name : str
The name prefix of the model and optimizer files you wish
to save.
save_meta [optional] : bool
Flag that controls whether to save the class metadata along with
the generator, discriminator, and respective optimizer states.
|
373,421
|
def by_coordinates(self,
                   lat,
                   lng,
                   radius=25.0,
                   zipcode_type=ZipcodeType.Standard,
                   sort_by=SORT_BY_DIST,
                   ascending=True,
                   returns=DEFAULT_LIMIT):
    """Search zipcode information near a coordinate on a map; returns
    multiple results.

    :param lat: center latitude.
    :param lng: center longitude.
    :param radius: only return zipcodes within this many miles of
        ``lat``/``lng``.
    """
    query_kwargs = dict(
        lat=lat,
        lng=lng,
        radius=radius,
        sort_by=sort_by,
        zipcode_type=zipcode_type,
        ascending=ascending,
        returns=returns,
    )
    return self.query(**query_kwargs)
|
Search zipcode information near a coordinates on a map.
Returns multiple results.
:param lat: center latitude.
:param lng: center longitude.
:param radius: only returns zipcode within X miles from ``lat``, ``lng``.
**中文文档**
1. 计算出在中心坐标处, 每一经度和纬度分别代表多少miles.
2. 以给定坐标为中心, 画出一个矩形, 长宽分别为半径的2倍多一点, 找到该
矩形内所有的Zipcode.
3. 对这些Zipcode计算出他们的距离, 然后按照距离远近排序。距离超过我们
限定的半径的直接丢弃.
|
373,422
|
def render_next_step(self, form, **kwargs):
    """Redirect so the browser URL matches the step being shown (required
    when using the NamedUrlFormWizard)."""
    upcoming = self.get_next_step()
    self.storage.current_step = upcoming
    return redirect(self.url_name, step=upcoming)
|
When using the NamedUrlFormWizard, we have to redirect to update the
browser's URL to match the shown step.
|
373,423
|
def cli_execute(self, cmd):
    """Send the command string to the CLI to be executed.

    Rehydrates the az account/config/session state, builds an invocation
    and either runs it on a background thread (with a progress view) or
    synchronously, then writes the formatted result.

    NOTE(review): several string literals (a flag name, the state file
    names, the output-format key) were stripped from this snippet -- the
    bare ``== :`` and ``if in args:`` constructs below are not valid
    Python as-is.
    """
    try:
        args = parse_quotes(cmd)
        if args and args[0] == :
            # Feedback command: disable further feedback prompting.
            self.config.set_feedback()
            self.user_feedback = False
        azure_folder = get_config_dir()
        if not os.path.exists(azure_folder):
            os.makedirs(azure_folder)
        # Load persisted account / config / session state.
        ACCOUNT.load(os.path.join(azure_folder, ))
        CONFIG.load(os.path.join(azure_folder, ))
        SESSION.load(os.path.join(azure_folder, ), max_age=3600)
        invocation = self.cli_ctx.invocation_cls(cli_ctx=self.cli_ctx,
                                                 parser_cls=self.cli_ctx.parser_cls,
                                                 commands_loader_cls=self.cli_ctx.commands_loader_cls,
                                                 help_cls=self.cli_ctx.help_cls)
        if in args:
            # Background flag present: execute on a daemon thread and show
            # a progress view on a second daemon thread.
            args.remove()
            execute_args = [args]
            thread = Thread(target=invocation.execute, args=execute_args)
            thread.daemon = True
            thread.start()
            self.threads.append(thread)
            self.curr_thread = thread
            progress_args = [self]
            thread = Thread(target=progress_view, args=progress_args)
            thread.daemon = True
            thread.start()
            self.threads.append(thread)
            result = None
        else:
            result = invocation.execute(args)
        self.last_exit = 0
        if result and result.result is not None:
            if self.output:
                self.output.write(result)
                self.output.flush()
            else:
                formatter = self.cli_ctx.output.get_formatter(self.cli_ctx.invocation.data[])
                self.cli_ctx.output.out(result, formatter=formatter, out_file=sys.stdout)
            self.last = result
    except Exception as ex:
        self.last_exit = handle_exception(ex)
    except SystemExit as ex:
        self.last_exit = int(ex.code)
|
sends the command to the CLI to be executed
|
373,424
|
def append_docstring_attributes(docstring, locals):
    """Manually append attribute docstrings to a class docstring.

    NOTE(review): several string literals (the empty-docstring default, the
    per-line indent prefix, and the section headers passed to
    append_docstring) were stripped from this snippet, so the code is not
    valid Python as-is.

    :param docstring: class docstring to be appended
    :param locals: attributes dict
    :returns: appended docstring
    """
    docstring = docstring or
    for attr, val in locals.items():
        doc = val.__doc__
        if not doc:
            # Attributes without docstrings are skipped.
            continue
        doc = get_minimum_indent(doc) + doc
        lines = [ + l for l in textwrap.dedent(doc).splitlines()]
        docstring = append_docstring(
            docstring,
            ,
            + attr,
            ,
            *lines
        )
    return docstring
|
Manually appends class' ``docstring`` with its attribute docstrings.
For example::
class Entity(object):
# ...
__doc__ = append_docstring_attributes(
__doc__,
dict((k, v) for k, v in locals()
if isinstance(v, MyDescriptor))
)
:param docstring: class docstring to be appended
:type docstring: :class:`str`
:param locals: attributes dict
:type locals: :class:`~typing.Mapping`\ [:class:`str`, :class:`object`]
:returns: appended docstring
:rtype: :class:`str`
|
373,425
|
def delete(self):
    """Destroy a previously constructed :class:`ITotalizer` object.

    Internal variables ``self.cnf`` and ``self.rhs`` get cleaned.
    """
    if self.tobj:
        # Only free the native object when ownership was not merged away.
        if not self._merged:
            pycard.itot_del(self.tobj)
        self.tobj = None
    # Reset all bookkeeping state.
    self.lits = []
    self.ubound = 0
    self.top_id = 0
    self.cnf = CNF()
    self.rhs = []
    self.nof_new = 0
|
Destroys a previously constructed :class:`ITotalizer` object.
Internal variables ``self.cnf`` and ``self.rhs`` get cleaned.
|
373,426
|
def get_index_text(self, modname, name_cls):
    """Return text for the index entry based on the object type.

    NOTE(review): the objtype suffix, the index-text format strings, the
    type-name tuple and the fallback return value were stripped from this
    snippet (empty arguments / tuples below), so the code is not valid
    Python as-is.
    """
    if self.objtype.endswith():
        if not modname:
            return _() % \
                (name_cls[0], self.chpl_type_name)
        return _() % (name_cls[0], modname)
    elif self.objtype in (, , ):
        if not modname:
            type_name = self.objtype
            if type_name == :
                type_name =
            return _() % (name_cls[0], type_name)
        return _() % (name_cls[0], modname)
    else:
        return
|
Return text for index entry based on object type.
|
373,427
|
def tranz(parser, token, is_transchoice=False):
    """Template-tag wrapper for Translator.trans().

    Parses ``{% tranz id key=value ... number N from DOMAIN into LOCALE %}``
    style tags into a TranzNode.

    :param parser: template parser (required by the tag protocol).
    :param token: the template token to split.
    :param is_transchoice: if True, the ``number`` parameter is mandatory.
    :return: a TranzNode ready for rendering.
    :raises TemplateSyntaxError: on unexpected tokens or a missing number.
    """
    tokens = token.split_contents()
    id = tokens[1]
    number = domain = locale = None
    parameters = {}
    if len(tokens) > 2:
        skip_idx = None
        for idx, token in enumerate(tokens[2:], start=2):
            if idx == skip_idx:
                # This token was already consumed as a keyword's value.
                skip_idx = None
                continue
            if "=" in token:
                # Bug fix: str.index() was called without an argument;
                # split on the first "=" into key and value.
                k, v = token[0:token.index("=")], token[token.index("=") + 1:]
                parameters[k] = v
            elif token == "number":
                number = tokens[idx + 1]
                skip_idx = idx + 1
            elif token == "from":
                domain = tokens[idx + 1]
                skip_idx = idx + 1
            elif token == "into":
                locale = tokens[idx + 1]
                skip_idx = idx + 1
            else:
                raise TemplateSyntaxError(
                    "Unexpected token {0} in tag tranz".format(token))
    if is_transchoice and number is None:
        raise TemplateSyntaxError(
            "number parameter expected in tag {tag_name}")
    return TranzNode(
        id,
        parameters,
        domain,
        locale,
        number,
        is_transchoice=is_transchoice)
|
Templatetagish wrapper for Translator.trans()
:param parser:
:param token:
:param is_transchoice:
:return:
|
373,428
|
def build_output_map(protomap, get_tensor_by_name):
    """Build a map of tensors from `protomap` using `get_tensor_by_name`.

    Args:
        protomap: A proto map<string,TensorInfo>.
        get_tensor_by_name: A lambda that receives a tensor name and returns
            a Tensor instance.

    Returns:
        A map from string to Tensor or SparseTensor instances built from
        `protomap` and resolved via `get_tensor_by_name()`.

    Raises:
        ValueError: if a TensorInfo proto is malformed.
    """
    def resolve(tensor_info):
        kind = tensor_info.WhichOneof("encoding")
        if kind == "name":
            return get_tensor_by_name(tensor_info.name)
        if kind == "coo_sparse":
            coo = tensor_info.coo_sparse
            return tf.SparseTensor(
                get_tensor_by_name(coo.indices_tensor_name),
                get_tensor_by_name(coo.values_tensor_name),
                get_tensor_by_name(coo.dense_shape_tensor_name))
        raise ValueError("Invalid TensorInfo.encoding: %s" % kind)

    result = {}
    for key, tensor_info in protomap.items():
        result[key] = resolve(tensor_info)
    return result
|
Builds a map of tensors from `protomap` using `get_tensor_by_name`.
Args:
protomap: A proto map<string,TensorInfo>.
get_tensor_by_name: A lambda that receives a tensor name and returns a
Tensor instance.
Returns:
A map from string to Tensor or SparseTensor instances built from `protomap`
and resolving tensors using `get_tensor_by_name()`.
Raises:
ValueError: if a TensorInfo proto is malformed.
|
373,429
|
def combine_intersections(
    intersections, nodes1, degree1, nodes2, degree2, all_types
):
    r"""Combine curve-curve intersections into curved polygon(s).

    Helper used only by :meth:`.Surface.intersect`; assumes each
    intersection lies on an edge of one of the two surfaces and has
    already been classified via :func:`classify_intersection`.

    Args:
        intersections (List[.Intersection]): Intersections from each of the
            edge-edge pairs of a surface-surface pairing.
        nodes1 (numpy.ndarray): Nodes defining the first surface.
        degree1 (int): Degree of the surface given by ``nodes1``.
        nodes2 (numpy.ndarray): Nodes defining the second surface.
        degree2 (int): Degree of the surface given by ``nodes2``.
        all_types (Set[.IntersectionClassification]): All classifications
            encountered for the given surface-surface pair.

    Returns:
        Tuple[Optional[list], Optional[bool]]: Edge-info lists and the
        "contained" flag (see :func:`ends_to_curve`).
    """
    # Bug fix: a stray bare ``r`` (residue of a stripped raw docstring)
    # was removed here; it referenced an undefined name and raised
    # NameError on every call.
    if intersections:
        return basic_interior_combine(intersections)
    elif all_types:
        return tangent_only_intersections(all_types)
    else:
        return no_intersections(nodes1, degree1, nodes2, degree2)
|
r"""Combine curve-curve intersections into curved polygon(s).
.. note::
This is a helper used only by :meth:`.Surface.intersect`.
Does so assuming each intersection lies on an edge of one of
two :class:`.Surface`-s.
.. note ::
This assumes that each ``intersection`` has been classified via
:func:`classify_intersection` and only the intersections classified
as ``FIRST`` and ``SECOND`` were kept.
Args:
intersections (List[.Intersection]): Intersections from each of the
9 edge-edge pairs from a surface-surface pairing.
nodes1 (numpy.ndarray): The nodes defining the first surface in
the intersection (assumed in :math:\mathbf{R}^2`).
degree1 (int): The degree of the surface given by ``nodes1``.
nodes2 (numpy.ndarray): The nodes defining the second surface in
the intersection (assumed in :math:\mathbf{R}^2`).
degree2 (int): The degree of the surface given by ``nodes2``.
all_types (Set[.IntersectionClassification]): The set of all
intersection classifications encountered among the intersections
for the given surface-surface pair.
Returns:
Tuple[Optional[list], Optional[bool]]: Pair (2-tuple) of
* List of "edge info" lists. Each list represents a curved polygon
and contains 3-tuples of edge index, start and end (see the
output of :func:`ends_to_curve`).
* "Contained" boolean. If not :data:`None`, indicates
that one of the surfaces is contained in the other.
|
373,430
|
def generate_key(self, force=False):
    """Create and save a key file for this TaxPayer if it has none
    (or ``force`` is set). Returns True iff a new key was created.

    NOTE(review): the warning message, the temp-file suffix and the
    key-file name format string were stripped from this snippet (empty
    arguments below), so the code is not valid Python as-is.
    """
    if self.key and not force:
        logger.warning(
        )
        return False
    with NamedTemporaryFile(suffix=) as file_:
        crypto.create_key(file_)
        # Name the stored key with a fresh random hex token.
        self.key = File(file_, name=.format(uuid.uuid4().hex))
        self.save()
    return True
|
Creates a key file for this TaxPayer
Creates a key file for this TaxPayer if it does not have one, and
immediately saves it.
Returns True if and only if a key was created.
|
373,431
|
def AREA(a, b):
    """Comparator sorting packs by area (descending), then height, then
    width.

    Bug fix: the builtin ``cmp`` used here only exists on Python 2; a local
    three-way comparison makes the function work on Python 3 as well while
    returning identical values.
    """
    def _cmp(x, y):
        # Classic three-way comparison: -1, 0 or 1.
        return (x > y) - (x < y)

    return (_cmp(b[0] * b[1], a[0] * a[1])
            or _cmp(b[1], a[1])
            or _cmp(b[0], a[0]))
|
area: Sort pack by area
|
373,432
|
def _destroy(self):
    """Destruction code to decrement counters; drops this region's entry
    from the manager's fd table once no readers remain."""
    self.unuse_region()
    if self._rlist is None:
        return
    try:
        if not len(self._rlist):
            self._manager._fdict.pop(self._rlist.path_or_fd())
    except (TypeError, KeyError):
        # Tolerate a missing table entry or an unsized rlist.
        pass
|
Destruction code to decrement counters
|
373,433
|
def recreate_grams(self):
    """Re-create grams for the database.

    In normal situations this is never needed, but it is useful after a DB
    migration. Grams left with no documents are deleted before committing.
    """
    session = self.Session()
    for document in session.query(Document).all():
        logger.info(document.text)
        document.grams = list(
            self._get_grams(session, document.text, make=True))
    orphans = session.query(Gram) \
        .filter(~Gram.documents.any()).all()
    for orphan in orphans:
        session.delete(orphan)
    session.commit()
|
Re-create grams for database.
In normal situations, you never need to call this method.
But after migrate DB, this method is useful.
:param session: DB session
:type session: :class:`sqlalchemy.orm.Session`
|
373,434
|
def create_multiple_replace_func(*args, **kwds):
    """Build a function performing many literal replacements in one pass.

    Accepts anything the ``dict`` builtin accepts; keys are the substrings
    to replace and values their replacements. Returns an ``xlat(text)``
    closure that applies every substitution in a single regex pass.

    Bug fix: the alternation separator had been lost from the source; the
    escaped keys are joined with ``"|"`` as in the original recipe
    (Python Cookbook 2nd ed, 1.18 "Replacing Multiple Patterns in a Single
    Pass").
    """
    adict = dict(*args, **kwds)
    rx = re.compile("|".join(map(re.escape, adict)))

    def one_xlat(match):
        # Look up the replacement for whichever key matched.
        return adict[match.group(0)]

    def xlat(text):
        """Return a copy of *text* with all substitutions performed."""
        return rx.sub(one_xlat, text)

    return xlat
|
You can call this function and pass it a dictionary, or any other
combination of arguments you could pass to built-in dict in order to
construct a dictionary. The function will return a xlat closure that
takes as its only argument text the string on which the substitutions
are desired and returns a copy of text with all the substitutions
performed.
Source: Python Cookbook 2nd ed, Chapter 1.18. Replacing Multiple Patterns
in a Single Pass.
https://www.safaribooksonline.com/library/view/python-cookbook-2nd/0596007973/ch01s19.html
|
373,435
|
def init_from_wave_file(wavpath):
    """Init a sonic-visualiser environment structure based on analysis of
    the main audio file, which has to be wave-encoded.

    Falls back from scipy's reader to the stdlib ``wave`` module.

    NOTE(review): the exception-message prefix was stripped from this
    snippet (``Exception( + wavpath)``), so the code is not valid Python
    as-is. The bare ``except:`` clauses also swallow all errors, including
    KeyboardInterrupt.

    Args:
        wavpath (str): the full path to the wav file.
    """
    try:
        samplerate, data = SW.read(wavpath)
        nframes = data.shape[0]
    except:
        try:
            w = wave.open(wavpath)
            samplerate = w.getframerate()
            nframes = w.getnframes()
        except:
            raise Exception( + wavpath)
    return SVEnv(samplerate, nframes, wavpath)
|
Init a sonic visualiser environment structure based the analysis
of the main audio file. The audio file have to be encoded in wave
Args:
wavpath(str): the full path to the wavfile
|
373,436
|
def clear_description(self):
    """Clear the description, restoring its default value.

    raise: NoAccess -- ``Metadata.isRequired()`` or
        ``Metadata.isReadOnly()`` is ``true``.
    *compliance: mandatory -- This method must be implemented.*

    NOTE(review): the map key was stripped from this snippet
    (``self._my_map[] = ...``), so the code is not valid Python as-is.
    """
    if (self.get_description_metadata().is_read_only() or
            self.get_description_metadata().is_required()):
        raise errors.NoAccess()
    self._my_map[] = dict(self._description_default)
|
Clears the description.
raise: NoAccess - ``Metadata.isRequired()`` or
``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
|
373,437
|
def __cancel_timer(self):
    """Cancel the pending timer (if any) and invoke its target method
    immediately."""
    if self.__timer is None:
        return
    self.__timer.cancel()
    # Fire the bound call right away instead of waiting for the timeout.
    self.__unbind_call(True)
    self.__timer_args = None
    self.__timer = None
|
Cancels the timer, and calls its target method immediately
|
373,438
|
def generic_div(a, b):
    """Divide ``a`` by ``b`` and return the quotient.

    Logs the operands at debug level before dividing. Bug fix: the debug
    call contained a bare ``.format`` whose format string had been lost
    (a syntax error); a meaningful message was restored.
    """
    logger.debug("dividing {} by {}".format(a, b))
    return a / b
|
Simple function to divide two numbers
|
373,439
|
def connect(self, *args, **kwargs):
    """Proxy to DynamoDBConnection.connect.

    Also captures the botocore session; a default session is created when
    none is supplied via the ``session`` keyword.
    """
    self.connection = DynamoDBConnection.connect(*args, **kwargs)
    session = kwargs.get("session")
    if session is None:
        session = botocore.session.get_session()
    self._session = session
|
Proxy to DynamoDBConnection.connect.
|
373,440
|
def xeval(source, optimize=True):
    """Compile ``source`` to native Python bytecode, run it, and return the
    topmost value on the stack.

    Args:
        optimize: Whether to optimize the code after parsing it.

    Returns:
        None for an empty stack, the single object if one value remains,
        or a list of objects if several values remain.
    """
    program = xcompile(source, optimize=optimize)
    return program()
|
Compiles to native Python bytecode and runs program, returning the
topmost value on the stack.
Args:
optimize: Whether to optimize the code after parsing it.
Returns:
None: If the stack is empty
obj: If the stack contains a single value
[obj, obj, ...]: If the stack contains many values
|
373,441
|
def render_import_image(self, use_auth=None):
    """Configure the import_image plugin.

    Removes the plugin when no imagestream name is configured; otherwise
    sets its imagestream argument.

    NOTE(review): the plugin phase/name/argument literals were stripped
    from this snippet (empty call arguments below), so the code is not
    valid Python as-is.
    """
    if self.user_params.imagestream_name.value is None:
        self.pt.remove_plugin(, ,
                              )
    elif self.pt.has_plugin_conf(, ):
        self.pt.set_plugin_arg(, , ,
                               self.user_params.imagestream_name.value)
|
Configure the import_image plugin
|
373,442
|
def unpack_pargs(positional_args, param_kwargs, gnu=False):
    """Unpack a multidict and positional args into a list appropriate for
    subprocess.

    NOTE(review): the flag-format strings were stripped from this snippet
    (bare ``.format`` calls below), so the code is not valid Python as-is;
    presumably single-dash for one-char names and double-dash with
    underscore conversion otherwise -- TODO confirm.

    :param param_kwargs: ``ParamDict`` storing '--param' style data.
    :param positional_args: flags.
    :param gnu: if True, long-name args are unpacked as
        ``--parameter=argument``; otherwise as two separate list items.
    :returns: list appropriate for sending to subprocess.
    """
    def _transform(argname):
        if len(argname) == 1:
            return .format(argname)
        return .format(argname.replace(, ))
    args = []
    for item in param_kwargs.keys():
        # A multidict key may carry several values; emit each one.
        for value in param_kwargs.getlist(item):
            if gnu:
                args.append(.format(
                    _transform(item),
                    value
                ))
            else:
                args.extend([
                    _transform(item),
                    value
                ])
    if positional_args:
        for item in positional_args:
            args.append(_transform(item))
    return args
|
Unpack multidict and positional args into a
list appropriate for subprocess.
:param param_kwargs:
``ParamDict`` storing '--param' style data.
:param positional_args: flags
:param gnu:
if True, long-name args are unpacked as:
--parameter=argument
otherwise, they are unpacked as:
--parameter argument
:returns: list appropriate for sending to subprocess
|
373,443
|
def name(self, name=None):
    """Getter/setter for the api name (default is module.__name__).

    With a truthy argument, sets the name and returns ``self`` for
    chaining; without one, returns the current name.
    """
    if not name:
        return self._name
    self._name = name
    return self
|
api name, default is module.__name__
|
373,444
|
def main():
    """Entry point: parse arguments, configure logging and tornado options,
    start the HTTP server and run the IO loop until SIGINT/SIGTERM.

    NOTE(review): many string literals (dictionary keys, a namespace flag,
    print/debug messages) were stripped from this snippet --
    ``command_line_args[]`` and ``if in namespace:`` below are not valid
    Python as-is.
    """
    log.configure(logging.DEBUG)
    tornado.log.enable_pretty_logging()
    (parser, child_parser) = args.create_parsers()
    (parsed_args, remaining) = parser.parse_known_args()
    if remaining:
        # Unknown args: delegate to the child parser (e.g. version info),
        # then exit without starting the server.
        r = child_parser.parse_args(args=remaining, namespace=parsed_args)
        namespace = vars(r)
        if in namespace:
            common_config.print_build_info(zipped_pex=True)
        else:
            parser.print_help()
        parser.exit()
    command_line_args = vars(parsed_args)
    Log.info("Listening at http://%s:%d%s", command_line_args[],
             command_line_args[], command_line_args[])
    Log.info("Using tracker url: %s", command_line_args[])
    define_options(command_line_args[],
                   command_line_args[],
                   command_line_args[],
                   command_line_args[])
    http_server = tornado.httpserver.HTTPServer(Application(command_line_args[]))
    http_server.listen(command_line_args[], address=command_line_args[])
    def signal_handler(signum, frame):
        # Stop the IO loop cleanly on SIGINT/SIGTERM.
        print(, end=)
        Log.debug()
        tornado.ioloop.IOLoop.instance().stop()
    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)
    tornado.ioloop.IOLoop.instance().start()
|
:param argv:
:return:
|
373,445
|
def as_dict(self):
    """Return a dictionary representation of the ChemicalEnvironments
    object (MSONable-style ``@module``/``@class`` keys)."""
    cls = self.__class__
    return {
        "@module": cls.__module__,
        "@class": cls.__name__,
        "coord_geoms": jsanitize(self.coord_geoms),
    }
|
Returns a dictionary representation of the ChemicalEnvironments object
:return:
|
373,446
|
def _get_param_iterator(self):
    """Return a ParameterSampler over ``self.param_distributions`` drawing
    ``self.n_iter`` candidates with ``self.random_state``."""
    return model_selection.ParameterSampler(
        self.param_distributions, self.n_iter, random_state=self.random_state
    )
|
Return ParameterSampler instance for the given distributions
|
373,447
|
def main():
    """Create an identical user account on a pair of satellites.

    NOTE(review): every dictionary key and most string values were stripped
    from this snippet (``{: url, : (, ), : False}`` etc.), so the code is
    not valid Python as-is.
    """
    # One config per satellite: URL, credentials pair, and TLS-verify flag.
    server_configs = (
        {: url, : (, ), : False}
        for url
        in (, )
    )
    for server_config in server_configs:
        response = requests.post(
            server_config[] + ,
            json.dumps({
                : {
                    : 1,
                    : ,
                    : ,
                    : [get_organization_id(
                        server_config,
                    )],
                    : ,
                }
            }),
            auth=server_config[],
            headers={: },
            verify=server_config[],
        )
        # Fail fast on HTTP errors, then show the created user.
        response.raise_for_status()
        pprint(response.json())
|
Create an identical user account on a pair of satellites.
|
373,448
|
def push_to_remote(self, base_branch, head_branch, commit_message=""):
    """Run ``git push <remote> <branch>`` and, on success, create or open
    the corresponding pull request."""
    set_state(WORKFLOW_STATES.PUSHING_TO_REMOTE)
    push_cmd = ["git", "push", self.pr_remote, f"{head_branch}:{head_branch}"]
    try:
        self.run_cmd(push_cmd)
    except subprocess.CalledProcessError:
        click.echo(f"Failed to push to {self.pr_remote} \u2639")
        set_state(WORKFLOW_STATES.PUSHING_TO_REMOTE_FAILED)
        return
    set_state(WORKFLOW_STATES.PUSHED_TO_REMOTE)
    gh_auth = os.getenv("GH_AUTH")
    if gh_auth:
        # Token available: create the PR through the GitHub API.
        set_state(WORKFLOW_STATES.PR_CREATING)
        self.create_gh_pr(
            base_branch,
            head_branch,
            commit_message=commit_message,
            gh_auth=gh_auth,
        )
    else:
        # No token: just open the PR page for the user.
        set_state(WORKFLOW_STATES.PR_OPENING)
        self.open_pr(self.get_pr_url(base_branch, head_branch))
|
git push <origin> <branchname>
|
373,449
|
def generate_resource(config, raml_resource, parent_resource):
    """Perform complete one-resource configuration: ACL, view, route,
    resource and database model for ``raml_resource``, attached to
    ``parent_resource`` (a ``nefertari.resource.Resource``).

    NOTE(review): this snippet is heavily truncated -- an orphan string
    fragment remains on the line below, an unconditional ``return``
    precedes the remaining logic, and the code references names
    (``resource_uri``, ``clean_uri``, ``is_singular``, ``resource_kwargs``)
    whose defining lines are missing. It cannot run as-is.
    """
    from .models import get_existing_model
    "represent collections instead")
    return
    route_name = get_route_name(resource_uri)
    log.info(.format(
        route_name, parent_resource.uid or ))
    resource_args = (singularize(clean_uri),)
    if not is_singular:
        resource_args += (clean_uri,)
    return parent_resource.add(*resource_args, **resource_kwargs)
|
Perform complete one resource configuration process
This function generates: ACL, view, route, resource, database
model for a given `raml_resource`. New nefertari resource is
attached to `parent_resource` class which is an instance of
`nefertari.resource.Resource`.
Things to consider:
* Top-level resources must be collection names.
* No resources are explicitly created for dynamic (ending with '}')
RAML resources as they are implicitly processed by parent collection
resources.
* Resource nesting must look like collection/id/collection/id/...
* Only part of resource path after last '/' is taken into account,
thus each level of resource nesting should add one more path
element. E.g. /stories -> /stories/{id} and not
/stories -> /stories/mystories/{id}. Latter route will be generated
at /stories/{id}.
:param raml_resource: Instance of ramlfications.raml.ResourceNode.
:param parent_resource: Parent nefertari resource object.
|
373,450
|
def get_versioned_files(cls):
    """List all files versioned by git in the current directory, including
    files inside submodules, with directory entries added."""
    files = cls._git_ls_files()
    for subdir in cls._list_submodules():
        # Normalize the submodule path and use git's forward-slash
        # separator. Bug fix: the second argument of str.replace had been
        # lost, making the call a TypeError at runtime.
        subdir = os.path.relpath(subdir).replace(os.path.sep, "/")
        files += add_prefix_to_each(subdir, cls._git_ls_files(subdir))
    return add_directories(files)
|
List all files versioned by git in the current directory.
|
373,451
|
def _SMOTE(T, N, k, h=1.0):
    """Return (N/100) * n_minority_samples synthetic minority samples
    (SMOTE: Synthetic Minority Over-sampling Technique).

    Parameters
    ----------
    T : array-like, shape = [n_minority_samples, n_features]
        Holds the minority samples.
    N : int
        Percentage of new synthetic samples:
        n_synthetic_samples = N/100 * n_minority_samples. Values below
        100 are bumped up to 100; otherwise N must be a multiple of 100.
    k : int
        Number of nearest neighbours.
    h : float
        Upper bound of the uniform interpolation gap.

    Returns
    -------
    S : np.ndarray, shape = [(N/100) * n_minority_samples, n_features]
        The synthetic samples.
    """
    n_minority_samples, n_features = T.shape
    if N < 100:
        N = 100
    if (N % 100) != 0:
        raise ValueError("N must be < 100 or multiple of 100")
    # BUG FIX: integer division — under Python 3, `N = N / 100` yields a
    # float, which breaks np.zeros(shape=...) and the index arithmetic
    # `S[n + i * N, :]` below.
    N = N // 100
    n_synthetic_samples = N * n_minority_samples
    S = np.zeros(shape=(n_synthetic_samples, n_features))
    neigh = NearestNeighbors(n_neighbors=k)
    neigh.fit(T)
    for i in range(n_minority_samples):
        # kneighbors expects a 2-D query array of shape (1, n_features).
        nn = neigh.kneighbors(T[i].reshape(1, -1), return_distance=False)
        for n in range(N):
            nn_index = choice(nn[0])
            # Never interpolate a sample with itself.
            while nn_index == i:
                nn_index = choice(nn[0])
            dif = T[nn_index] - T[i]
            gap = np.random.uniform(low=0.0, high=h)
            S[n + i * N, :] = T[i, :] + gap * dif[:]
    return S
|
Returns (N/100) * n_minority_samples synthetic minority samples.
Parameters
----------
T : array-like, shape = [n_minority_samples, n_features]
Holds the minority samples
N : percentage of new synthetic samples:
n_synthetic_samples = N/100 * n_minority_samples. Can be < 100.
k : int. Number of nearest neighbours.
Returns
-------
S : Synthetic samples. array,
shape = [(N/100) * n_minority_samples, n_features].
|
373,452
|
def middleware(self, *args, **kwargs):
    """Create a blueprint middleware from a decorated function.

    :param args: positional arguments stored with the middleware.
    :param kwargs: optional keyword arguments stored with the
        middleware.
    """
    def register_middleware(_middleware):
        self.middlewares.append(FutureMiddleware(_middleware, args, kwargs))
        return _middleware

    # Bare decorator usage: the decorated function is the only argument.
    if len(args) == 1 and not kwargs and callable(args[0]):
        func, args = args[0], []
        return register_middleware(func)
    # Called with a bp_group and the function as first positional arg.
    if kwargs.get("bp_group") and callable(args[0]):
        func, args = args[0], args[1:]
        kwargs.pop("bp_group")
        return register_middleware(func)
    # Called with configuration arguments only: act as a decorator
    # factory and return the registration function itself.
    return register_middleware
|
Create a blueprint middleware from a decorated function.
:param args: Positional arguments to be used while invoking the
middleware
:param kwargs: optional keyword args that can be used with the
middleware.
|
373,453
|
def is_consistent(self) -> bool:
    """Return True if the number of nodes is consistent with the
    number of leaves."""
    from ledger.compact_merkle_tree import CompactMerkleTree
    expected = CompactMerkleTree.get_expected_node_count(self.leafCount)
    return self.nodeCount == expected
|
Returns True if number of nodes are consistent with number of leaves
|
373,454
|
def save(self):
    """Save any outstanding setting changes to the Plex server, then
    perform a full reload() of the settings.

    :raises BadRequest: when there are no pending changes to save.

    NOTE(review): the log-format and query-string template literals
    were stripped from this dump; the block is not runnable as-is.
    """
    params = {}
    for setting in self.all():
        if setting._setValue:
            log.info( % (setting.id, setting._setValue))
            params[setting.id] = quote(setting._setValue)
    if not params:
        raise BadRequest()
    querystr = .join([ % (k, v) for k, v in params.items()])
    url = % (self.key, querystr)
    self._server.query(url, self._server._session.put)
    self.reload()
|
Save any outstanding setting changes to the :class:`~plexapi.server.PlexServer`. This
performs a full reload() of Settings after complete.
|
373,455
|
def list_incidents(self, update_keys, session=None, lightweight=None):
    """Return a list of incidents for the given events.

    :param dict update_keys: filter selecting the desired markets, e.g.
        [{'eventId': '28205674', 'lastUpdateSequenceProcessed': 2}]
    :param requests.Session session: requests session object
    :param bool lightweight: if True, return dicts instead of resources
    :rtype: list[resources.Incidents]

    NOTE(review): the method-path string literal was stripped from
    this dump (`method = % (self.URI, )`).
    """
    params = clean_locals(locals())
    method = % (self.URI, )
    (response, elapsed_time) = self.request(method, params, session)
    return self.process_response(response, resources.Incidents, elapsed_time, lightweight)
|
Returns a list of incidents for the given events.
:param dict update_keys: The filter to select desired markets. All markets that match
the criteria in the filter are selected e.g. [{'eventId': '28205674', 'lastUpdateSequenceProcessed': 2}]
:param requests.session session: Requests session object
:param bool lightweight: If True will return dict not a resource
:rtype: list[resources.Incidents]
|
373,456
|
def format_prettytable(table):
    """Convert a SoftLayer.CLI.formatting.Table instance to a
    prettytable, formatting each cell value on the way.

    NOTE(review): the horizontal/vertical/junction character literals
    were stripped from this dump.
    """
    # Format every cell in place before handing off to prettytable.
    for i, row in enumerate(table.rows):
        for j, item in enumerate(row):
            table.rows[i][j] = format_output(item)
    ptable = table.prettytable()
    ptable.hrules = prettytable.FRAME
    ptable.horizontal_char =
    ptable.vertical_char =
    ptable.junction_char =
    return ptable
|
Converts SoftLayer.CLI.formatting.Table instance to a prettytable.
|
373,457
|
def start(self):
    """Start this bot in a separate thread (non-blocking).

    Spawns a thread that listens to all new comments created in the
    configured subreddits and records it in ``self._threads``.

    NOTE(review): the thread-name format string and the log message
    were stripped from this dump.
    """
    super().start()
    comments_thread = BotThread(name=.format(self._name),
                                target=self._listen_comments)
    comments_thread.start()
    self._threads.append(comments_thread)
    self.log.info()
|
Starts this bot in a separate thread. Therefore, this call is non-blocking.
It will listen to all new comments created in the :attr:`~subreddits` list.
|
373,458
|
def load_rule_definitions(self, ruleset_generator = False, rule_dirs = []):
    """Load the definition of the rules declared in the ruleset into
    ``self.rule_definitions``, keyed by rule file basename.

    When *ruleset_generator* is set, disabled rules are included and an
    additional bundled rules directory is scanned.

    NOTE(review): a path component literal was stripped at the
    os.path.join(...) call below. Also note the mutable default
    argument ``rule_dirs=[]`` is appended to — state persists across
    calls; confirm whether that is intentional upstream.
    """
    self.rule_definitions = {}
    for rule_filename in self.rules:
        for rule in self.rules[rule_filename]:
            # Skip disabled rules unless generating a ruleset.
            if not rule.enabled and not ruleset_generator:
                continue
            self.rule_definitions[os.path.basename(rule_filename)] = RuleDefinition(rule_filename, rule_dirs = rule_dirs)
    if ruleset_generator:
        rule_dirs.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), ))
        rule_filenames = []
        for rule_dir in rule_dirs:
            rule_filenames += [f for f in os.listdir(rule_dir) if os.path.isfile(os.path.join(rule_dir, f))]
        for rule_filename in rule_filenames:
            if rule_filename not in self.rule_definitions:
                self.rule_definitions[os.path.basename(rule_filename)] = RuleDefinition(rule_filename)
|
Load definition of rules declared in the ruleset
:param services:
:param ip_ranges:
:param aws_account_id:
:param generator:
:return:
|
373,459
|
def get_old(dataset_label, data_id, destination_dir=None):
    """Get the 3D data from the specified dataset with the specified id.

    :param dataset_label: label of the dataset to read from
    :param data_id: integer or wildcard file pattern selecting the file
    :param destination_dir: directory holding the data; defaults to
        <dataset_root>/medical/orig
    :return: data loaded via ``io3d.read(..., dataplus_format=True)``
    """
    # Hoisted out of the middle of the function; kept function-local to
    # match the original's import placement style.
    import fnmatch

    if destination_dir is None:
        destination_dir = op.join(dataset_path(get_root=True), "medical", "orig")
    destination_dir = op.expanduser(destination_dir)
    data_url, url, expected_hash, hash_path, relative_output_path = get_dataset_meta(
        dataset_label
    )
    # Sorted so the selection below is deterministic.
    paths = sorted(glob.glob(os.path.join(destination_dir, hash_path)))
    # Select the files matching data_id (a wildcard pattern); leftover
    # debug prints removed.
    pathsf = fnmatch.filter(paths, data_id)
    # Raises IndexError (as before) when nothing matches.
    datap = io3d.read(pathsf[0], dataplus_format=True)
    return datap
|
Get the 3D data from specified dataset with specified id.
Download data if necessary.
:param dataset_label:
:param data_id: integer or wildcards file pattern
:param destination_dir:
:return:
|
373,460
|
def write_to_screen(self, screen, mouse_handlers, write_position,
                    parent_style, erase_bg, z_index):
    """Fill the whole area of write_position with dots.

    Every third cell (by x+y parity modulo 3) gets the dot character;
    the rest get the default character.

    NOTE(review): the two Char(...) argument literals were stripped
    from this dump.
    """
    default_char = Char(, )
    dot = Char(, )
    ypos = write_position.ypos
    xpos = write_position.xpos
    for y in range(ypos, ypos + write_position.height):
        row = screen.data_buffer[y]
        for x in range(xpos, xpos + write_position.width):
            row[x] = dot if (x + y) % 3 == 0 else default_char
|
Fill the whole area of write_position with dots.
|
373,461
|
def run_checker(cls, ds_loc, checker_names, verbose, criteria,
                skip_checks=None, output_filename=,
                output_format=[]):
    """Static check runner.

    :param ds_loc: dataset location (url or file), or a list of them
    :param checker_names: names of checkers to run (empty = run all)
    :param verbose: verbosity of the output (0, 1, 2)
    :param criteria: failure criterion (lenient, normal, strict)
    :param skip_checks: names of checks to skip
    :param output_filename: path to the output file
    :param output_format: format(s) of the output
    :returns: tuple (all checks passed, errors occurred)

    NOTE(review): the criteria strings, output-format strings, default
    values and several format-string literals were stripped from this
    dump; the block is not runnable as-is.
    """
    all_groups = []
    cs = CheckSuite()
    score_dict = OrderedDict()
    # Accept a single location or a list of locations.
    if not isinstance(ds_loc, six.string_types):
        locs = ds_loc
    else:
        locs = [ds_loc]
    # Accept a single output format or a list of formats.
    if isinstance(output_format, six.string_types):
        output_format = [output_format]
    for loc in locs:
        ds = cs.load_dataset(loc)
        score_groups = cs.run(ds, skip_checks, *checker_names)
        for group in score_groups.values():
            all_groups.append(group[0])
        if hasattr(ds, ):
            ds.close()
        if not score_groups:
            raise ValueError("No checks found, please check the name of the checker(s) and that they are installed")
        else:
            score_dict[loc] = score_groups
    # Map the criteria string to a pass/fail limit level.
    if criteria == :
        limit = 2
    elif criteria == :
        limit = 1
    elif criteria == :
        limit = 3
    for out_fmt in output_format:
        if out_fmt == :
            if output_filename == :
                cls.stdout_output(cs, score_dict, verbose, limit)
            else:
                if len(output_format) > 1:
                    output_filename = .format(os.path.splitext(output_filename)[0])
                with io.open(output_filename, , encoding=) as f:
                    with stdout_redirector(f):
                        cls.stdout_output(cs, score_dict, verbose, limit)
        elif out_fmt == :
            if len(output_format) > 1 and output_filename != :
                output_filename = .format(os.path.splitext(output_filename)[0])
            cls.html_output(cs, score_dict, output_filename, ds_loc, limit)
        elif out_fmt in {, }:
            if len(output_format) > 1 and output_filename != :
                output_filename = .format(os.path.splitext(output_filename)[0])
            cls.json_output(cs, score_dict, output_filename, ds_loc, limit,
                            out_fmt)
        else:
            raise TypeError( % out_fmt)
    errors_occurred = cls.check_errors(score_groups, verbose)
    return (all(cs.passtree(groups, limit) for groups in all_groups),
            errors_occurred)
|
Static check runner.
@param ds_loc Dataset location (url or file)
@param checker_names List of string names to run, should match keys of checkers dict (empty list means run all)
@param verbose Verbosity of the output (0, 1, 2)
@param criteria Determines failure (lenient, normal, strict)
@param output_filename Path to the file for output
@param skip_checks Names of checks to skip
@param output_format Format of the output(s)
@returns If the tests failed (based on the criteria)
|
373,462
|
def complex_fault_node(edges):
    """Build a Node of kind complexFaultGeometry.

    :param edges: a list of lists of points; the first entry is the top
        edge, the last is the bottom edge, and any in between are
        intermediate edges.

    NOTE(review): the Node kind and the edge-node kind string literals
    were stripped from this dump.
    """
    node = Node()
    node.append(edge_node(, edges[0]))
    for edge in edges[1:-1]:
        node.append(edge_node(, edge))
    node.append(edge_node(, edges[-1]))
    return node
|
:param edges: a list of lists of points
:returns: a Node of kind complexFaultGeometry
|
373,463
|
def remove_option(self, section, option):
    """Remove *option* from *section*.

    An empty/None section name or the default section targets the
    defaults mapping. Returns True if the option existed, False
    otherwise.

    :raises NoSectionError: if *section* is unknown.
    """
    if section and section != DEFAULTSECT:
        try:
            sectdict = self._sections[section]
        except KeyError:
            raise NoSectionError(section)
    else:
        sectdict = self._defaults
    option = self.optionxform(option)
    if option not in sectdict:
        return False
    del sectdict[option]
    return True
|
Remove an option.
|
373,464
|
def to_vars_dict(self):
    """Return the local state relevant for the cluster setup process
    as a dict (client id, location, secret, subscription id,
    tenant id).

    NOTE(review): the dictionary key literals were stripped from this
    dump; the block is not runnable as-is.
    """
    return {
        : self.client_id,
        : self.location,
        : self.secret,
        : self.subscription_id,
        : self.tenant_id,
    }
|
Return local state which is relevant for the cluster setup process.
|
373,465
|
def collect_pac_urls(from_os_settings=True, from_dns=True, **kwargs):
    """Get all the URLs that potentially yield a PAC file.

    :param bool from_os_settings: look for a PAC URL in the OS settings
        (Windows registry / macOS preferences). If found and it is a
        URL, it comes first in the returned list.
    :param bool from_dns: append PAC URL candidates from the WPAD
        protocol.
    :return: list of URLs to try in order.
    :rtype: list[str]

    NOTE(review): the kwargs key, the deprecation-warning message and
    the URL scheme prefixes ('http://', 'https://' presumably) were
    stripped from this dump.
    """
    # Deprecated keyword; kept for backwards compatibility.
    from_registry = kwargs.get()
    if from_registry is not None:
        import warnings
        warnings.warn()
        from_os_settings = from_registry
    pac_urls = []
    if from_os_settings:
        if ON_WINDOWS:
            url_or_path = autoconfig_url_from_registry()
        elif ON_DARWIN:
            url_or_path = autoconfig_url_from_preferences()
        else:
            url_or_path = None
        # Only keep it if it is actually a URL (not a local path).
        if url_or_path and (url_or_path.lower().startswith() or url_or_path.lower().startswith()):
            pac_urls.append(url_or_path)
    if from_dns:
        pac_urls.extend(proxy_urls_from_dns())
    return pac_urls
|
Get all the URLs that potentially yield a PAC file.
:param bool from_os_settings: Look for a PAC URL from the OS settings.
If a value is found and is a URL, it comes first in the returned list.
Doesn't do anything on non-Windows or non-macOS/OSX platforms.
:param bool from_dns: Assemble a list of PAC URL candidates using the WPAD protocol.
:return: A list of URLs that should be tried in order.
:rtype: list[str]
|
373,466
|
def pc_anova(self, covariates, num_pc=5):
    """Calculate one-way ANOVA between the first *num_pc* principal
    components and known covariates.

    The size and index of *covariates* determines whether ``self.u`` or
    ``self.v`` is used.

    :param covariates: pandas.DataFrame whose index corresponds to the
        index of either u or v.
    :param num_pc: number of principal components to correlate with.
    :return: pandas.Panel with F-values and p-values.

    NOTE(review): the Panel item-name string literals were stripped
    from this dump; also pd.Panel is removed in modern pandas.
    """
    from scipy.stats import f_oneway
    # Decide whether covariates align with u (rows) or v (columns).
    if (covariates.shape[0] == self.u.shape[0] and
            len(set(covariates.index) & set(self.u.index)) == self.u.shape[0]):
        mat = self.u
    elif (covariates.shape[0] == self.v.shape[0] and
            len(set(covariates.index) & set(self.v.index)) == self.v.shape[0]):
        mat = self.v
    anova = pd.Panel(items=[, ],
                     major_axis=covariates.columns,
                     minor_axis=mat.columns[0:num_pc])
    for i in anova.major_axis:
        for j in anova.minor_axis:
            # Group PC values by each level of the covariate.
            t = [mat[j][covariates[i] == x] for x in set(covariates[i])]
            f, p = f_oneway(*t)
            anova.ix[, i, j] = f
            anova.ix[, i, j] = p
    return anova
|
Calculate one-way ANOVA between the first num_pc principal components
and known covariates. The size and index of covariates determines
whether u or v is used.
Parameters
----------
covariates : pandas.DataFrame
Dataframe of covariates whose index corresponds to the index of
either u or v.
num_pc : int
Number of principal components to correlate with.
Returns
-------
anova : pandas.Panel
Panel with F-values and p-values.
|
373,467
|
def count_mnemonic(self, mnemonic, uwis=uwis, alias=None):
    """Count the wells that have a given curve, given the mnemonic and
    an optional alias dict."""
    found = utils.flatten_list(
        self.get_mnemonics([mnemonic], uwis=uwis, alias=alias))
    # Only truthy entries count as hits.
    return len([m for m in found if m])
|
Counts the wells that have a given curve, given the mnemonic and an
alias dict.
|
373,468
|
def load_exons(self, exons, genes=None, build=):
    """Create exon objects and insert them into the database.

    :param exons: iterable of dicts describing exons.
    :param genes: optional gene mapping; fetched from ensembl if None.
    :param build: genome build identifier.

    NOTE(review): the default value of ``build`` was stripped from
    this dump; the block is not runnable as-is.
    """
    genes = genes or self.ensembl_genes(build)
    for exon in exons:
        exon_obj = build_exon(exon, genes)
        # Skip exons that could not be built.
        if not exon_obj:
            continue
        res = self.exon_collection.insert_one(exon_obj)
|
Create exon objects and insert them into the database
Args:
exons(iterable(dict))
|
373,469
|
def standard_input():
    """Generator that yields stripped, UTF-8 encoded lines from
    standard input."""
    with click.get_text_stream("stdin") as stdin:
        while stdin.readable():
            current = stdin.readline()
            if not current:
                continue
            yield current.strip().encode("utf-8")
|
Generator that yields lines from standard input.
|
373,470
|
def modify(db=None, sql=None):
    """Issue an SQL statement to sqlite3 (with no return data), usually
    to modify the database (insert, delete, create, ...).

    Returns False if the connection could not be made, True otherwise.

    NOTE(review): the stray ``*CREATE TABLE ...`` line below is
    docstring residue from the dump; the block is not runnable as-is.
    """
    *CREATE TABLE test(id INT, testdata TEXT);
    cur = _connect(db)
    if not cur:
        return False
    cur.execute(sql)
    return True
|
Issue an SQL query to sqlite3 (with no return data), usually used
to modify the database in some way (insert, delete, create, etc)
CLI Example:
.. code-block:: bash
salt '*' sqlite3.modify /root/test.db 'CREATE TABLE test(id INT, testdata TEXT);'
|
373,471
|
def _update(self):
self.delta = numpy.array(list(
map(lambda e: (e[-1] - e[0]) / (len(e) - 1), self.edges)))
self.midpoints = self._midpoints(self.edges)
self.origin = numpy.array(list(map(lambda m: m[0], self.midpoints)))
if self.__interpolated is not None:
self.__interpolated = self._interpolationFunctionFactory()
|
compute/update all derived data
Can be called without harm and is idem-potent.
Updates these attributes and methods:
:attr:`origin`
the center of the cell with index 0,0,0
:attr:`midpoints`
centre coordinate of each grid cell
:meth:`interpolated`
spline interpolation function that can generated a value for
coordinate
|
373,472
|
def save_direction(self, rootpath, raw=False, as_int=False):
    """Save the direction of the slope to a file under *rootpath*.

    NOTE(review): the name/label argument of save_array was stripped
    from this dump.
    """
    self.save_array(self.direction, None, , rootpath, raw, as_int=as_int)
|
Saves the direction of the slope to a file
|
373,473
|
def AAS(cpu):
    """ASCII Adjust AL After Subtraction.

    Adjusts the unpacked-BCD result of a prior SUB held in AL. On a
    decimal borrow (low nibble of AL > 9, or AF already set), AX is
    decremented by 6, AH by 1, and AF and CF are set; otherwise AF and
    CF are cleared. In both cases the top nibble of AL is cleared.
    Not valid in 64-bit mode.

    :param cpu: current CPU.
    """
    borrow = (cpu.AL & 0x0F) > 9 or cpu.AF == 1
    if borrow:
        cpu.AX = cpu.AX - 6
        cpu.AH = cpu.AH - 1
        cpu.AF = True
        cpu.CF = True
    else:
        cpu.AF = False
        cpu.CF = False
    cpu.AL = cpu.AL & 0x0F
|
ASCII Adjust AL after subtraction.
Adjusts the result of the subtraction of two unpacked BCD values to create a unpacked
BCD result. The AL register is the implied source and destination operand for this instruction.
The AAS instruction is only useful when it follows a SUB instruction that subtracts
(binary subtraction) one unpacked BCD value from another and stores a byte result in the AL
register. The AAA instruction then adjusts the contents of the AL register to contain the
correct 1-digit unpacked BCD result. If the subtraction produced a decimal carry, the AH register
is decremented by 1, and the CF and AF flags are set. If no decimal carry occurred, the CF and AF
flags are cleared, and the AH register is unchanged. In either case, the AL register is left with
its top nibble set to 0.
The AF and CF flags are set to 1 if there is a decimal borrow; otherwise, they are cleared to 0.
This instruction executes as described in compatibility mode and legacy mode.
It is not valid in 64-bit mode.::
IF ((AL AND 0FH) > 9) Operators.OR(AF = 1)
THEN
AX = AX - 6;
AH = AH - 1;
AF = 1;
CF = 1;
ELSE
CF = 0;
AF = 0;
FI;
AL = AL AND 0FH;
:param cpu: current CPU.
|
373,474
|
def nestedExpr(opener="(", closer=")", content=None, ignoreExpr=quotedString):
    """Helper method for defining nested lists enclosed in opening and
    closing delimiters ("(" and ")" by default).

    :param opener: opening character (or pyparsing expression) for a
        nested list.
    :param closer: closing character (or pyparsing expression) for a
        nested list.
    :param content: expression for items within the nested lists; if
        None, whitespace-delimited content between delimiters is
        captured as separate values.
    :param ignoreExpr: expression for spans that may contain opener or
        closer characters which should not count as nesting (default
        quotedString); pass None to ignore nothing.
    :returns: a Forward-declared recursive grammar for the nested list.
    """
    if opener == closer:
        raise ValueError("opening and closing strings cannot be the same")
    if content is None:
        # Build a default content expression; only possible when the
        # delimiters are plain strings.
        if isinstance(opener,basestring) and isinstance(closer,basestring):
            if ignoreExpr is not None:
                content = (Combine(OneOrMore(~ignoreExpr +
                                CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS,exact=1))
                            ).setParseAction(lambda t:t[0].strip()))
            else:
                content = (empty+CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS).setParseAction(lambda t:t[0].strip()))
        else:
            raise ValueError("opening and closing arguments must be strings if no content expression is given")
    # Recursive definition: a group of content and/or nested groups
    # between (suppressed) delimiters.
    ret = Forward()
    if ignoreExpr is not None:
        ret << Group( Suppress(opener) + ZeroOrMore( ignoreExpr | ret | content ) + Suppress(closer) )
    else:
        ret << Group( Suppress(opener) + ZeroOrMore( ret | content ) + Suppress(closer) )
    return ret
|
Helper method for defining nested lists enclosed in opening and closing
delimiters ("(" and ")" are the default).
Parameters:
- opener - opening character for a nested list (default="("); can also be a pyparsing expression
- closer - closing character for a nested list (default=")"); can also be a pyparsing expression
- content - expression for items within the nested lists (default=None)
- ignoreExpr - expression for ignoring opening and closing delimiters (default=quotedString)
If an expression is not provided for the content argument, the nested
expression will capture all whitespace-delimited content between delimiters
as a list of separate values.
Use the ignoreExpr argument to define expressions that may contain
opening or closing characters that should not be treated as opening
or closing characters for nesting, such as quotedString or a comment
expression. Specify multiple expressions using an Or or MatchFirst.
The default is quotedString, but if no expressions are to be ignored,
then pass None for this argument.
|
373,475
|
def page(self, date_created_before=values.unset, date_created=values.unset,
         date_created_after=values.unset, page_token=values.unset,
         page_number=values.unset, page_size=values.unset):
    """Retrieve a single page of RecordingInstance records from the API.
    The request is executed immediately.

    :param date date_created_before: `YYYY-MM-DD` upper bound filter
    :param date date_created: `YYYY-MM-DD` exact filter
    :param date date_created_after: `YYYY-MM-DD` lower bound filter
    :param str page_token: PageToken provided by the API
    :param int page_number: page number (client state only)
    :param int page_size: records per page, defaults to 50
    :returns: page of RecordingInstance
    :rtype: twilio.rest.api.v2010.account.call.recording.RecordingPage

    NOTE(review): the parameter-name keys and the HTTP method literal
    were stripped from this dump.
    """
    params = values.of({
        : serialize.iso8601_date(date_created_before),
        : serialize.iso8601_date(date_created),
        : serialize.iso8601_date(date_created_after),
        : page_token,
        : page_number,
        : page_size,
    })
    response = self._version.page(
        ,
        self._uri,
        params=params,
    )
    return RecordingPage(self._version, response, self._solution)
|
Retrieve a single page of RecordingInstance records from the API.
Request is executed immediately
:param date date_created_before: The `YYYY-MM-DD` value of the resources to read
:param date date_created: The `YYYY-MM-DD` value of the resources to read
:param date date_created_after: The `YYYY-MM-DD` value of the resources to read
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of RecordingInstance
:rtype: twilio.rest.api.v2010.account.call.recording.RecordingPage
|
373,476
|
def _updateViewer(self, force=False):
    """Update the viewer dialog.

    If the dialog is not visible and force=False, does nothing.
    Otherwise compares the mtime of the purrer index file against
    ``self._viewer_timestamp`` and reloads the document when it is
    newer.

    NOTE(review): the setLabel format-string literal was stripped from
    this dump.
    """
    if not force and not self.viewer_dialog.isVisible():
        return
    path = self.purrer.indexfile
    mtime = self.fileModTime(path)
    # Nothing to do if the file has not changed since the last reload.
    if mtime and mtime <= (self._viewer_timestamp or 0):
        return
    busy = BusyIndicator()
    self.viewer_dialog.setDocument(path, empty=
                                   "<P>Nothing in the log yet. Try adding some log entries.</P>")
    self.viewer_dialog.reload()
    self.viewer_dialog.setLabel( % self.purrer.indexfile)
    self._viewer_timestamp = mtime
|
Updates the viewer dialog.
If dialog is not visible and force=False, does nothing.
Otherwise, checks the mtime of the current purrer index.html file against self._viewer_timestamp.
If it is newer, reloads it.
|
373,477
|
def render(self, data, accepted_media_type=None, renderer_context=None):
    """Render *data* into serialized XML under ``self.root_tag_name``.

    NOTE(review): the bare ``return`` for None input may originally
    have returned an empty string that was stripped from this dump —
    confirm upstream.
    """
    if data is None:
        return
    stream = StringIO()
    xml = SimplerXMLGenerator(stream, self.charset)
    xml.startDocument()
    xml.startElement(self.root_tag_name, {})
    self._to_xml(xml, data)
    xml.endElement(self.root_tag_name)
    xml.endDocument()
    return stream.getvalue()
|
Renders `data` into serialized XML.
|
373,478
|
def get_inasafe_default_value_qsetting(
        qsetting, category, inasafe_field_key):
    """Get the InaSAFE default value for a field from QSettings.

    :param qsetting: QSetting instance to read from.
    :param category: category of the default value ('global' for the
        global setting, 'recent' for the last user-set custom value).
    :param inasafe_field_key: key for the field.
    :returns: the default value as a float.

    NOTE(review): the key format string and the definition-lookup key
    literals were stripped from this dump; indentation of the fallback
    returns is also ambiguous in this extraction.
    """
    key = % (category, inasafe_field_key)
    default_value = qsetting.value(key)
    if default_value is None:
        # Fall back to the field definition's default for the global
        # category; otherwise use the zero default.
        if category == GLOBAL:
            inasafe_field = definition(inasafe_field_key)
            default_value = inasafe_field.get(, {})
            return default_value.get(, zero_default_value)
        return zero_default_value
    try:
        return float(default_value)
    except ValueError:
        return zero_default_value
|
Helper method to get the inasafe default value from qsetting.
:param qsetting: QSetting.
:type qsetting: QSetting
:param category: Category of the default value. It can be global or
recent. Global means the global setting for default value. Recent
means the last set custom for default value from the user.
:type category: str
:param inasafe_field_key: Key for the field.
:type inasafe_field_key: str
:returns: Value of the inasafe_default_value.
:rtype: float
|
373,479
|
def subvolume_get_default(path):
    """Get the default subvolume of the btrfs filesystem at *path*
    (the mount point for the subvolume).

    Returns a dict with the subvolume id and name.

    NOTE(review): the command words and the result-dict keys were
    stripped from this dump; the block is not runnable as-is.
    """
    *
    cmd = [, , , path]
    res = __salt__[](cmd)
    salt.utils.fsutils._verify_run(res)
    line = res[].strip()
    # Output looks like: 'ID <id> ... path <name>'.
    id_ = line.split()[1]
    name = line.split()[-1]
    return {
        : id_,
        : name,
    }
|
Get the default subvolume of the filesystem path
path
Mount point for the subvolume
CLI Example:
.. code-block:: bash
salt '*' btrfs.subvolume_get_default /var/volumes/tmp
|
373,480
|
def clean_proced(self, proced):
    """Delete debugging-only features from the final dictionary.

    These keys are interesting for debugging but not relevant for most
    users; missing keys are silently ignored.

    NOTE(review): every key literal in the ``del loc[...]`` statements
    was stripped from this dump; the block is not runnable as-is.
    """
    for loc in proced:
        try:
            del loc[]
        except KeyError:
            pass
        try:
            del loc[]
        except KeyError:
            pass
        try:
            del loc[]
        except KeyError:
            pass
        try:
            del loc[]
        except KeyError:
            pass
        try:
            del loc[]
        except KeyError:
            pass
        try:
            del loc[]
        except KeyError:
            pass
        try:
            del loc[]
        except KeyError:
            pass
    return proced
|
Small helper function to delete the features from the final dictionary.
These features are mostly interesting for debugging but won't be relevant for most users.
|
373,481
|
def assemble(self, ops, target=None):
    """Assemble operations and labels into bytecode, analyse stack
    usage, and replace this code object's bytecode and stack size.
    Can also (optionally) change the target python version.

    Arguments:
        ops(list): the opcodes (and labels) to assemble into bytecode.
        target: the opcode specification of the targeted python
            version; ``None`` means the currently running version.

    Returns:
        CodeObject: this object, for chaining.
    """
    target = get_py_internals(target, self.internals)
    self.internals = target
    self.co_code = assemble(ops, target)
    self.co_stacksize = calculate_max_stack_depth(ops, target)
    return self
|
Assemble a series of operations and labels into bytecode, analyse its
stack usage and replace the bytecode and stack size of this code
object. Can also (optionally) change the target python version.
Arguments:
ops(list): The opcodes (and labels) to assemble into bytecode.
target: The opcode specification of the targeted python
version. If this is ``None`` the specification of the currently
running python version will be used.
Returns:
CodeObject: A reference to this :class:`CodeObject`.
|
373,482
|
def write_dfile(self):
    """Write the generated d_file to a temporary file and return its
    path."""
    path = self.tempfiles.get_tempfile(prefix="bmds-", suffix=".(d)")
    with open(path, "w") as handle:
        handle.write(self.as_dfile())
    return path
|
Write the generated d_file to a temporary file.
|
373,483
|
def _get_unknown_value(self):
label_set = set(self.labels)
value = 0
while value in label_set:
value += 1
return value
|
Finds the smallest integer value >=0 that is not in `labels`
:return: Value that is not in the labels
:rtype: int
|
373,484
|
def normalize_paths(value, parent=os.curdir):
    """Parse a comma-separated list of paths and return a list of
    absolute paths. A list input is returned unchanged.

    NOTE(review): the split separator, the membership-test literal
    (``if in path``) and the rstrip argument were stripped from this
    dump; the block is not runnable as-is.
    """
    if not value:
        return []
    if isinstance(value, list):
        return value
    paths = []
    for path in value.split():
        path = path.strip()
        if in path:
            path = os.path.abspath(os.path.join(parent, path))
        paths.append(path.rstrip())
    return paths
|
Parse a comma-separated list of paths.
Return a list of absolute paths.
|
373,485
|
def inv(z: int) -> int:
    """Return $z^{-1} \\bmod q$ for z != 0.

    Computed via a fixed addition chain of squarings (``pow2``) and
    multiplications; intermediate names follow the 2^k-1 exponent
    pattern of the well-known curve25519 inversion chain.
    """
    t2 = z * z % q
    t9 = pow2(t2, 2) * z % q
    t11 = t9 * t2 % q
    t2_5_0 = (t11 * t11) % q * t9 % q
    t2_10_0 = pow2(t2_5_0, 5) * t2_5_0 % q
    t2_20_0 = pow2(t2_10_0, 10) * t2_10_0 % q
    t2_40_0 = pow2(t2_20_0, 20) * t2_20_0 % q
    t2_50_0 = pow2(t2_40_0, 10) * t2_10_0 % q
    t2_100_0 = pow2(t2_50_0, 50) * t2_50_0 % q
    t2_200_0 = pow2(t2_100_0, 100) * t2_100_0 % q
    t2_250_0 = pow2(t2_200_0, 50) * t2_50_0 % q
    return pow2(t2_250_0, 5) * t11 % q
|
$= z^{-1} mod q$, for z != 0
|
373,486
|
def describe(cwd,
             rev=,
             user=None,
             password=None,
             ignore_retcode=False,
             output_encoding=None):
    """Return the ``git-describe(1)`` string (or the SHA1 hash if there
    are no tags) for the given revision.

    :param cwd: path to the git checkout.
    :param rev: the revision to describe (HEAD by default upstream).
    :param user: user under which to run the git command.
    :param password: Windows only; required when specifying ``user``.
    :param ignore_retcode: if True, do not log an error on nonzero
        exit status.
    :param output_encoding: encoding used to decode git output.

    NOTE(review): the default for ``rev``, the git command words, the
    ``--always`` style flag and the result key were stripped from this
    dump; the block is not runnable as-is.
    """
    cwd = _expand_path(cwd, user)
    command = [, ]
    # Newer git supports an extra flag (stripped in this dump).
    if _LooseVersion(version(versioninfo=False)) >= _LooseVersion():
        command.append()
    command.append(rev)
    return _git_run(command,
                    cwd=cwd,
                    user=user,
                    password=password,
                    ignore_retcode=ignore_retcode,
                    output_encoding=output_encoding)[]
|
Returns the `git-describe(1)`_ string (or the SHA1 hash if there are no
tags) for the given revision.
cwd
The path to the git checkout
rev : HEAD
The revision to describe
user
User under which to run the git command. By default, the command is run
by the user under which the minion is running.
password
Windows only. Required when specifying ``user``. This parameter will be
ignored on non-Windows platforms.
.. versionadded:: 2016.3.4
ignore_retcode : False
If ``True``, do not log an error to the minion log if the git command
returns a nonzero exit status.
.. versionadded:: 2015.8.0
output_encoding
Use this option to specify which encoding to use to decode the output
from any git commands which are run. This should not be needed in most
cases.
.. note::
This should only be needed if the files in the repository were
created with filenames using an encoding other than UTF-8 to handle
Unicode characters.
.. versionadded:: 2018.3.1
.. _`git-describe(1)`: http://git-scm.com/docs/git-describe
CLI Examples:
.. code-block:: bash
salt myminion git.describe /path/to/repo
salt myminion git.describe /path/to/repo develop
|
373,487
|
def get_list_store(data_frame):
    """Build a ``gtk.ListStore`` mirroring *data_frame*.

    Args:
        data_frame (pandas.DataFrame): data frame containing data
            columns.

    Returns:
        tuple: ``(df_py_dtypes, list_store)`` — the dtype frame from
        ``get_py_dtypes`` and a ``gtk.ListStore`` holding the frame's
        rows.
    """
    df_py_dtypes = get_py_dtypes(data_frame)
    list_store = gtk.ListStore(*df_py_dtypes.dtype)
    for _, row in data_frame.iterrows():
        list_store.append(row.tolist())
    return df_py_dtypes, list_store
|
Return a `pandas.DataFrame` containing Python type information for the
columns in `data_frame` and a `gtk.ListStore` matching the contents of the
data frame.
Args:
data_frame (pandas.DataFrame) : Data frame containing data columns.
Returns:
(tuple) : The first element is a data frame as returned by
`get_py_dtypes` and the second element is a `gtk.ListStore`
matching the contents of the data frame.
|
373,488
|
def GenerateDateTripsDeparturesList(self, date_start, date_end):
    """Return a list of (date object, number of trips, number of
    departures) tuples for dates in [date_start, date_end).

    Args:
        date_start: the first date in the list, a date object
        date_end: the first date after the list, a date object

    Returns:
        a list of (date object, number of trips, number of departures)
        tuples
    """
    trips_by_service = defaultdict(int)
    departures_by_service = defaultdict(int)
    for trip in self.GetTripList():
        start_times = trip.GetFrequencyStartTimes()
        # Frequency-based trips run once per headway start time.
        runs = len(start_times) if start_times else 1
        trips_by_service[trip.service_id] += runs
        # Each run produces (stop count - 1) departures.
        departures_by_service[trip.service_id] += (
            (trip.GetCountStopTimes() - 1) * runs)
    results = []
    for date, services in self.GetServicePeriodsActiveEachDate(date_start, date_end):
        results.append((
            date,
            sum(trips_by_service[s.service_id] for s in services),
            sum(departures_by_service[s.service_id] for s in services),
        ))
    return results
|
Return a list of (date object, number of trips, number of departures).
The list is generated for dates in the range [date_start, date_end).
Args:
date_start: The first date in the list, a date object
date_end: The first date after the list, a date object
Returns:
a list of (date object, number of trips, number of departures) tuples
|
373,489
|
def reset(self):
    """Reset the stream to its initial state and return self."""
    self._attachments = []
    self._channel = Incoming.DEFAULT_CHANNEL
    self._markdown = False
    self._text = None
    return self
|
Reset stream.
|
373,490
|
def _to_dict(self):
    """Return a json dictionary representing this model (name and
    limit, when set).

    NOTE(review): the attribute-name and dictionary-key literals were
    stripped from this dump; the block is not runnable as-is.
    """
    _dict = {}
    if hasattr(self, ) and self.name is not None:
        _dict[] = self.name
    if hasattr(self, ) and self.limit is not None:
        _dict[] = self.limit
    return _dict
|
Return a json dictionary representing this model.
|
373,491
|
def _is_json_serialized_jws(self, json_jws):
json_ser_keys = {"payload", "signatures"}
flattened_json_ser_keys = {"payload", "signature"}
if not json_ser_keys.issubset(
json_jws.keys()) and not flattened_json_ser_keys.issubset(
json_jws.keys()):
return False
return True
|
Check if we've got a JSON serialized signed JWT.
:param json_jws: The message
:return: True/False
|
373,492
|
def _trace_summary(self):
    """Summarize the trace of values used to update the DynamicArgs
    and the arguments subsequently returned. May be used to implement
    the summary method.

    NOTE(review): the join/format string literals on the pprint line
    were stripped from this dump; the block is not runnable as-is.
    """
    for (i, (val, args)) in enumerate(self.trace):
        if args is StopIteration:
            info = "Terminated"
        else:
            pprint = .join( + .join( % (k,v)
                     for (k,v) in arg.items()) + for arg in args)
            info = ("exploring arguments [%s]" % pprint )
        if i == 0: print("Step %d: Initially %s." % (i, info))
        else:      print("Step %d: %s after receiving input(s) %s." % (i, info.capitalize(), val))
|
Summarizes the trace of values used to update the DynamicArgs
and the arguments subsequently returned. May be used to
implement the summary method.
|
373,493
|
def add_network_profile(self, obj, params):
    """Add an AP profile (via wpa_supplicant commands) for connecting
    to afterwards, and return the processed params.

    NOTE(review): the wpa_supplicant command strings, obj keys,
    key_mgmt and proto value literals were all stripped from this
    dump; the block is not runnable as-is.
    """
    network_id = self._send_cmd_to_wpas(obj[], , True)
    network_id = network_id.strip()
    params.process_akm()
    # Set the SSID for the new network entry.
    self._send_cmd_to_wpas(
        obj[],
        .format(network_id, params.ssid))
    key_mgmt =
    # Choose key management based on the last AKM type.
    if params.akm[-1] in [AKM_TYPE_WPAPSK, AKM_TYPE_WPA2PSK]:
        key_mgmt =
    elif params.akm[-1] in [AKM_TYPE_WPA, AKM_TYPE_WPA2]:
        key_mgmt =
    else:
        key_mgmt =
    if key_mgmt:
        self._send_cmd_to_wpas(
            obj[],
            .format(
                network_id,
                key_mgmt))
    proto =
    # Choose the WPA protocol version.
    if params.akm[-1] in [AKM_TYPE_WPAPSK, AKM_TYPE_WPA]:
        proto =
    elif params.akm[-1] in [AKM_TYPE_WPA2PSK, AKM_TYPE_WPA2]:
        proto =
    if proto:
        self._send_cmd_to_wpas(
            obj[],
            .format(
                network_id,
                proto))
    # PSK networks also need the passphrase set.
    if params.akm[-1] in [AKM_TYPE_WPAPSK, AKM_TYPE_WPA2PSK]:
        self._send_cmd_to_wpas(
            obj[],
            .format(network_id, params.key))
    return params
|
Add an AP profile for connecting to afterward.
|
373,494
|
def reject_sender(self, link_handle, pn_condition=None):
    """Reject the SenderLink identified by *link_handle*, then destroy
    the handle.

    Raises Exception when the handle is unknown.
    """
    sender = self._sender_links.get(link_handle)
    if not sender:
        raise Exception("Invalid link_handle: %s" % link_handle)
    sender.reject(pn_condition)
    sender.destroy()
|
Rejects the SenderLink, and destroys the handle.
|
373,495
|
def boundaries(self, boundaryEdges=True, featureAngle=65, nonManifoldEdges=True):
    """Return an ``Actor`` that shows the boundary lines of the input
    mesh.

    :param bool boundaryEdges: turn on/off extraction of boundary
        edges.
    :param float featureAngle: feature angle for extracting feature
        edges; a falsy value disables feature-edge extraction.
    :param bool nonManifoldEdges: turn on/off extraction of
        non-manifold edges.
    """
    extractor = vtk.vtkFeatureEdges()
    extractor.SetInputData(self.polydata())
    extractor.SetBoundaryEdges(boundaryEdges)
    extractor.SetNonManifoldEdges(nonManifoldEdges)
    if featureAngle:
        extractor.FeatureEdgesOn()
        extractor.SetFeatureAngle(featureAngle)
    else:
        extractor.FeatureEdgesOff()
    extractor.ColoringOff()
    extractor.Update()
    return Actor(extractor.GetOutput(), c="p").lw(5)
|
Return an ``Actor`` that shows the boundary lines of an input mesh.
:param bool boundaryEdges: Turn on/off the extraction of boundary edges.
:param float featureAngle: Specify the feature angle for extracting feature edges.
:param bool nonManifoldEdges: Turn on/off the extraction of non-manifold edges.
|
373,496
|
def get(self, flex_sched_rule_id):
    """Retrieve the information for a flexscheduleRule entity.

    NOTE(review): the path prefix literal was stripped from this dump.
    """
    path = .join([, flex_sched_rule_id])
    return self.rachio.get(path)
|
Retrieve the information for a flexscheduleRule entity.
|
373,497
|
def _get_network_vswitch_map_by_port_id(self, port_id):
    """Get the (network_id, vswitch) pair owning the given port id, or
    (None, None) if no vswitch contains the port.

    NOTE(review): the vswitch dict key literal was stripped from this
    dump.
    """
    for network_id, vswitch in six.iteritems(self._network_vswitch_map):
        if port_id in vswitch[]:
            return (network_id, vswitch)
    return (None, None)
|
Get the vswitch name for the received port id.
|
373,498
|
def guess_export_format(filename, data, **kwargs):
    """Guess the export file format for the given filename and data.

    Tries the registered exporter extensions (longest first), then the
    registered sniff-tests. Returns the format key, or None if nothing
    matches. Keyword arguments are forwarded to the sniffers.

    NOTE(review): the extension-prefix literals in the endswith test
    were stripped from this dump; the block is not runnable as-is.
    """
    (_,filename) = os.path.split(filename)
    fnm = filename.lower()
    fmt = None
    # All (key, extension) pairs, longest extensions first so more
    # specific extensions win.
    es = sorted(((k,e) for (k,es) in six.iteritems(exporters) for e in es[1]),
                key=lambda x:-len(x[1]))
    for (k,e) in es:
        if fnm.endswith(( + e) if e[0] != else e):
            return k
    # Fall back to sniff-tests; sniffer errors are treated as "no".
    for (k,(_,_,sniff)) in six.iteritems(exporters):
        try:
            if sniff(filename, data, **kwargs): return k
        except Exception: pass
    return None
|
guess_export_format(filename, data) attempts to guess the export file format for the given
filename and data (to be exported); it does this guessing by looking at the file extension and
using registered sniff-tests from exporters. It will not attempt to save the file, so if the
extension of the filename is missing, it is less likely that this function will deduce the
file-type (though save will often succeeed at extracting the data by trying all types
exhaustively). If guess_export_format cannot deduce the format, it yields None.
Note that if the filename has an extension that is recognized by neuropythy but the data itself
is inappropriate for that format, this function will never look beyond the extention in the
filename; neither this function nor save perform that level of deduction.
Keyword arguments that are passed to save should also be passed to guess_export_format.
|
373,499
|
def _run_lint_on_file(file_path,
                      linter_functions,
                      tool_options,
                      fix_what_you_can):
    """Run each function in *linter_functions* on *file_path*.

    If *fix_what_you_can* is set, the first error that carries a
    replacement is automatically applied to the file.

    :return: list of FileLinterFailure for all errors found.

    NOTE(review): the failure-message format string was stripped from
    this dump.
    """
    with io.open(file_path, "r+", encoding="utf-8") as found_file:
        file_contents = found_file.read()
        file_lines = file_contents.splitlines(True)
        try:
            # Lint using the path relative to the current directory.
            errors = lint(file_path[len(os.getcwd()) + 1:],
                          file_contents,
                          linter_functions,
                          **tool_options)
        except RuntimeError as err:
            # A linter crash is reported as a single synthetic failure.
            msg = (
                   .format(file_path, str(err)))
            errors = [("polysquarelinter/failure",
                       LinterFailure(msg, 0, None))]
        if fix_what_you_can:
            for error_index, error in enumerate(errors):
                if error[1].replacement is not None:
                    _apply_replacement(error, found_file, file_lines)
                    errors[error_index] = (error[0],
                                           LinterFailure(error[1].description +
                                                         " ... FIXED",
                                                         error[1].line,
                                                         error[1].replacement))
                    # Only the first fixable error is applied.
                    break
    return [FileLinterFailure(file_path, e) for e in errors]
|
Run each function in linter_functions on filename.
If fix_what_you_can is specified, then the first error that has a
possible replacement will be automatically fixed on this file.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.