code
stringlengths 75
104k
| docstring
stringlengths 1
46.9k
|
|---|---|
def tarball_context(url, target_dir=None, runner=None, pushd=pushd):
    """
    Fetch the tarball at *url*, extract it into a directory, change into
    that directory for the duration of the context, then delete it.

    `runner` is the function used to invoke shell commands (defaults to
    ``subprocess.check_call`` with ``shell=True``).
    `pushd` is a context manager for changing the working directory.

    NOTE(review): url/target_dir are interpolated into shell commands
    unquoted -- do not pass untrusted values.
    """
    if target_dir is None:
        base = os.path.basename(url)
        target_dir = base.replace('.tar.gz', '').replace('.tgz', '')
    if runner is None:
        runner = functools.partial(subprocess.check_call, shell=True)
    # --strip-components=1 drops the archive's leading path component and
    # -C extracts into {target_dir}, so the extraction location is always
    # known.
    runner('mkdir {target_dir}'.format(**vars()))
    try:
        download = 'wget {url} -O -'
        unpack = 'tar x{compression} --strip-components=1 -C {target_dir}'
        pipeline = ' | '.join((download, unpack))
        runner(pipeline.format(compression=infer_compression(url), **vars()))
        with pushd(target_dir):
            yield target_dir
    finally:
        runner('rm -Rf {target_dir}'.format(**vars()))
|
Get a tarball, extract it, change to that directory, yield, then
clean up.
`runner` is the function to invoke commands.
`pushd` is a context manager for changing the directory.
|
def delete_external_feed_groups(self, group_id, external_feed_id):
    """
    Delete an external feed.
    Deletes the external feed.

    :param group_id: ID of the group (required path parameter)
    :param external_feed_id: ID of the external feed (required path parameter)
    :return: result of the DELETE request (single item)
    """
    params = {}
    data = {}
    path = {
        # REQUIRED - PATH - group_id (ID)
        "group_id": group_id,
        # REQUIRED - PATH - external_feed_id (ID)
        "external_feed_id": external_feed_id,
    }
    self.logger.debug("DELETE /api/v1/groups/{group_id}/external_feeds/{external_feed_id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("DELETE", "/api/v1/groups/{group_id}/external_feeds/{external_feed_id}".format(**path), data=data, params=params, single_item=True)
|
Delete an external feed.
Deletes the external feed.
|
def get_device_hybrid_interfaces(auth, url, devid=None, devip=None):
    """
    Function takes devId as input to RESTFUL call to HP IMC platform

    :param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
    :param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
    :param devid: str requires devid of the target device
    :param devip: str of ipv4 address of the target device
    :return: list of dictionaries where each element of the list represents an interface
             which has been configured as a VLAN hybrid port
    :rtype: list

    >>> from pyhpeimc.auth import *
    >>> from pyhpeimc.plat.vlanm import *
    >>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
    >>> hybrid_interfaces = get_device_hybrid_interfaces(auth.creds, auth.url, devid='10')
    >>> assert type(hybrid_interfaces) is list
    >>> assert 'ifIndex' in hybrid_interfaces[0]
    >>> assert 'pvid' in hybrid_interfaces[0]
    """
    if devip is not None:
        devid = get_dev_details(devip, auth, url)['id']
    get_hybrid_interface_vlan_url = "/imcrs/vlan/hybrid?devId=" + str(devid) + \
                                    "&start=1&size=500&total=false"
    f_url = url + get_hybrid_interface_vlan_url
    try:
        # BUG FIX: requests.get() must run inside this try block; previously
        # it ran outside, so the RequestException handler below could never
        # fire on a connection failure.
        response = requests.get(f_url, auth=auth, headers=HEADERS)
        if response.status_code == 200:
            dev_hybrid_interfaces = json.loads(response.text)
            if len(dev_hybrid_interfaces) == 2:
                dev_hybrid = dev_hybrid_interfaces['hybridIf']
                if isinstance(dev_hybrid, dict):
                    # A single interface comes back as a dict; normalize to a
                    # one-element list so callers always get a list.
                    dev_hybrid = [dev_hybrid]
                return dev_hybrid
            else:
                dev_hybrid_interfaces['hybridIf'] = ["No hybrid inteface"]
                return dev_hybrid_interfaces['hybridIf']
        # NOTE(review): a non-200 response falls through and returns None,
        # matching the original behavior.
    except requests.exceptions.RequestException as error:
        return "Error:\n" + str(error) + " get_device_hybrid_interfaces: An Error has occured"
|
Function takes devId as input to RESTFUL call to HP IMC platform
:param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
:param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
:param devid: str requires devid of the target device
:param devip: str of ipv4 address of the target device
:return: list of dictionaries where each element of the list represents an interface which
has been configured as a
VLAN access port
:rtype: list
>>> from pyhpeimc.auth import *
>>> from pyhpeimc.plat.vlanm import *
>>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
>>> hybrid_interfaces = get_device_hybrid_interfaces(auth.creds, auth.url, devid='10')
>>> assert type(hybrid_interfaces) is list
>>> assert (len(hybrid_interfaces[0])) == 2
>>> assert 'ifIndex' in hybrid_interfaces[0]
>>> assert 'pvid' in hybrid_interfaces[0]
|
def body_block_supplementary_material_render(supp_tags, base_url=None):
    """fig and media tag caption may have supplementary material"""
    rendered = []
    for tag in supp_tags:
        for block in body_block_content_render(tag, base_url=base_url):
            if block == {}:
                continue
            # The nested "content" key is not wanted on supplementary
            # material entries.
            block.pop("content", None)
            rendered.append(block)
    return rendered
|
fig and media tag caption may have supplementary material
|
def _encode(data, convert_to_float):
    """Convert the Python values to values suitable to send to Octave.

    Parameters
    ----------
    data : object
        The value to encode; containers are encoded recursively.
    convert_to_float : bool
        When True, integer ndarrays are converted to float64.

    Returns
    -------
    The encoded value, ready for transport to Octave.
    """
    ctf = convert_to_float
    # Handle variable pointer.
    if isinstance(data, OctaveVariablePtr):
        return _encode(data.value, ctf)
    # Handle a user defined object.
    if isinstance(data, OctaveUserClass):
        return _encode(OctaveUserClass.to_value(data), ctf)
    # Handle a function pointer.
    if isinstance(data, (OctaveFunctionPtr, MatlabFunction)):
        raise Oct2PyError('Cannot write Octave functions')
    # Handle matlab objects.
    if isinstance(data, MatlabObject):
        view = data.view(np.ndarray)
        out = MatlabObject(data, data.classname)
        for name in out.dtype.names:
            out[name] = _encode(view[name], ctf)
        return out
    # Handle pandas series and dataframes
    if isinstance(data, (DataFrame, Series)):
        return _encode(data.values, ctf)
    # Extract and encode values from dict-like objects.
    if isinstance(data, dict):
        out = dict()
        for (key, value) in data.items():
            out[key] = _encode(value, ctf)
        return out
    # Send None as nan.
    if data is None:
        # BUG FIX: np.NaN was removed in NumPy 2.0; np.nan is the canonical
        # spelling and is identical on all NumPy versions.
        return np.nan
    # Sets are treated like lists.
    if isinstance(data, set):
        return _encode(list(data), ctf)
    # Lists can be interpreted as numeric arrays or cell arrays.
    if isinstance(data, list):
        if _is_simple_numeric(data):
            return _encode(np.array(data), ctf)
        return _encode(tuple(data), ctf)
    # Tuples are handled as cells.
    if isinstance(data, tuple):
        obj = np.empty(len(data), dtype=object)
        for (i, item) in enumerate(data):
            obj[i] = _encode(item, ctf)
        return obj
    # Sparse data must be floating type.
    if isinstance(data, spmatrix):
        return data.astype(np.float64)
    # Return other data types unchanged.
    if not isinstance(data, np.ndarray):
        return data
    # Extract and encode data from object-like arrays.
    if data.dtype.kind in 'OV':
        out = np.empty(data.size, dtype=data.dtype)
        for (i, item) in enumerate(data.ravel()):
            if data.dtype.names:
                for name in data.dtype.names:
                    out[i][name] = _encode(item[name], ctf)
            else:
                out[i] = _encode(item, ctf)
        return out.reshape(data.shape)
    # Complex 128 is the highest supported by savemat.
    if data.dtype.name == 'complex256':
        return data.astype(np.complex128)
    # Convert to float if applicable.
    if ctf and data.dtype.kind in 'ui':
        return data.astype(np.float64)
    # Return standard array.
    return data
|
Convert the Python values to values suitable to send to Octave.
|
def select_mask(cls, dataset, selection):
    """
    Given a Dataset object and a dictionary with dimension keys and
    selection keys (i.e tuple ranges, slices, sets, lists or literals)
    return a boolean mask over the rows in the Dataset object that
    have been selected.
    """
    overall = None
    for dim, sel in selection.items():
        # Tuple selections are treated as (start, stop) ranges.
        if isinstance(sel, tuple):
            sel = slice(*sel)
        column_name = dataset.get_dimension(dim).name
        column = dataset.data[column_name]
        dim_masks = []
        if isinstance(sel, slice):
            if sel.start is not None:
                # Workaround for dask issue #3392
                start = util.numpy_scalar_to_python(sel.start)
                dim_masks.append(start <= column)
            if sel.stop is not None:
                stop = util.numpy_scalar_to_python(sel.stop)
                dim_masks.append(column < stop)
        elif isinstance(sel, (set, list)):
            # Membership: OR together an equality mask per candidate value.
            member = None
            for candidate in sel:
                eq_mask = column == candidate
                member = eq_mask if member is None else (member | eq_mask)
            dim_masks.append(member)
        elif callable(sel):
            dim_masks.append(sel(column))
        else:
            dim_masks.append(column == sel)
        # AND each dimension's mask into the overall row mask.
        for dim_mask in dim_masks:
            overall = dim_mask if overall is None else (overall & dim_mask)
    return overall
|
Given a Dataset object and a dictionary with dimension keys and
selection keys (i.e tuple ranges, slices, sets, lists or literals)
return a boolean mask over the rows in the Dataset object that
have been selected.
|
def handle_validation_error(self, error, bundle_errors):
    """Called when an error is raised while parsing. Aborts the request
    with a 400 status and an error message

    :param error: the error that was raised
    :param bundle_errors: do not abort when first error occurs, return a
        dict with the name of the argument and the error message to be
        bundled
    """
    text = six.text_type(error)
    if self.help:
        text = self.help.format(error_msg=text)
    msg = {self.name: text}
    # App-wide BUNDLE_ERRORS config enables bundling even when the caller
    # did not request it (config checked first, as before).
    if current_app.config.get("BUNDLE_ERRORS", False) or bundle_errors:
        return error, msg
    flask_restful.abort(400, message=msg)
|
Called when an error is raised while parsing. Aborts the request
with a 400 status and an error message
:param error: the error that was raised
:param bundle_errors: do not abort when first error occurs, return a
dict with the name of the argument and the error message to be
bundled
|
def predict(self, data, output_margin=False, ntree_limit=0, pred_leaf=False,
            pred_contribs=False, approx_contribs=False, pred_interactions=False,
            validate_features=True):
    """
    Predict with data.

    .. note:: This function is not thread safe.
      For each booster object, predict can only be called from one thread.
      If you want to run prediction using multiple thread, call ``bst.copy()`` to make copies
      of model object and then call ``predict()``.

    .. note:: Using ``predict()`` with DART booster
      If the booster object is DART type, ``predict()`` will perform dropouts, i.e. only
      some of the trees will be evaluated. This will produce incorrect results if ``data`` is
      not the training data. To obtain correct results on test sets, set ``ntree_limit`` to
      a nonzero value, e.g.

      .. code-block:: python

        preds = bst.predict(dtest, ntree_limit=num_round)

    Parameters
    ----------
    data : DMatrix
        The dmatrix storing the input.
    output_margin : bool
        Whether to output the raw untransformed margin value.
    ntree_limit : int
        Limit number of trees in the prediction; defaults to 0 (use all trees).
    pred_leaf : bool
        When this option is on, the output will be a matrix of (nsample, ntrees)
        with each record indicating the predicted leaf index of each sample in each tree.
        Note that the leaf index of a tree is unique per tree, so you may find leaf 1
        in both tree 1 and tree 0.
    pred_contribs : bool
        When this is True the output will be a matrix of size (nsample, nfeats + 1)
        with each record indicating the feature contributions (SHAP values) for that
        prediction. The sum of all feature contributions is equal to the raw untransformed
        margin value of the prediction. Note the final column is the bias term.
    approx_contribs : bool
        Approximate the contributions of each feature
    pred_interactions : bool
        When this is True the output will be a matrix of size (nsample, nfeats + 1, nfeats + 1)
        indicating the SHAP interaction values for each pair of features. The sum of each
        row (or column) of the interaction values equals the corresponding SHAP value (from
        pred_contribs), and the sum of the entire matrix equals the raw untransformed margin
        value of the prediction. Note the last row and column correspond to the bias term.
    validate_features : bool
        When this is True, validate that the Booster's and data's feature_names are identical.
        Otherwise, it is assumed that the feature_names are the same.

    Returns
    -------
    prediction : numpy array
    """
    # Fold the boolean options into the bitmask understood by the C API.
    option_mask = 0x00
    if output_margin:
        option_mask |= 0x01
    if pred_leaf:
        option_mask |= 0x02
    if pred_contribs:
        option_mask |= 0x04
    if approx_contribs:
        option_mask |= 0x08
    if pred_interactions:
        option_mask |= 0x10
    if validate_features:
        self._validate_features(data)
    # Output parameters filled in by the C call: number of floats and a
    # pointer to the native prediction buffer.
    length = c_bst_ulong()
    preds = ctypes.POINTER(ctypes.c_float)()
    _check_call(_LIB.XGBoosterPredict(self.handle, data.handle,
                                      ctypes.c_int(option_mask),
                                      ctypes.c_uint(ntree_limit),
                                      ctypes.byref(length),
                                      ctypes.byref(preds)))
    # Copy the native buffer into a 1-D float32 numpy array.
    preds = ctypes2numpy(preds, length.value, np.float32)
    if pred_leaf:
        # Leaf indices are integral values returned in a float buffer.
        preds = preds.astype(np.int32)
    nrow = data.num_row()
    if preds.size != nrow and preds.size % nrow == 0:
        # More than one value per row: reshape the flat buffer accordingly.
        chunk_size = int(preds.size / nrow)
        if pred_interactions:
            # (nfeats + 1)^2 interaction values per row per output group;
            # drop the group axis when there is only one group.
            ngroup = int(chunk_size / ((data.num_col() + 1) * (data.num_col() + 1)))
            if ngroup == 1:
                preds = preds.reshape(nrow, data.num_col() + 1, data.num_col() + 1)
            else:
                preds = preds.reshape(nrow, ngroup, data.num_col() + 1, data.num_col() + 1)
        elif pred_contribs:
            # (nfeats + 1) contribution values per row per output group; the
            # last column is the bias term.
            ngroup = int(chunk_size / (data.num_col() + 1))
            if ngroup == 1:
                preds = preds.reshape(nrow, data.num_col() + 1)
            else:
                preds = preds.reshape(nrow, ngroup, data.num_col() + 1)
        else:
            # e.g. multi-class probabilities or per-tree leaf indices.
            preds = preds.reshape(nrow, chunk_size)
    return preds
|
Predict with data.
.. note:: This function is not thread safe.
For each booster object, predict can only be called from one thread.
If you want to run prediction using multiple thread, call ``bst.copy()`` to make copies
of model object and then call ``predict()``.
.. note:: Using ``predict()`` with DART booster
If the booster object is DART type, ``predict()`` will perform dropouts, i.e. only
some of the trees will be evaluated. This will produce incorrect results if ``data`` is
not the training data. To obtain correct results on test sets, set ``ntree_limit`` to
a nonzero value, e.g.
.. code-block:: python
preds = bst.predict(dtest, ntree_limit=num_round)
Parameters
----------
data : DMatrix
The dmatrix storing the input.
output_margin : bool
Whether to output the raw untransformed margin value.
ntree_limit : int
Limit number of trees in the prediction; defaults to 0 (use all trees).
pred_leaf : bool
When this option is on, the output will be a matrix of (nsample, ntrees)
with each record indicating the predicted leaf index of each sample in each tree.
Note that the leaf index of a tree is unique per tree, so you may find leaf 1
in both tree 1 and tree 0.
pred_contribs : bool
When this is True the output will be a matrix of size (nsample, nfeats + 1)
with each record indicating the feature contributions (SHAP values) for that
prediction. The sum of all feature contributions is equal to the raw untransformed
margin value of the prediction. Note the final column is the bias term.
approx_contribs : bool
Approximate the contributions of each feature
pred_interactions : bool
When this is True the output will be a matrix of size (nsample, nfeats + 1, nfeats + 1)
indicating the SHAP interaction values for each pair of features. The sum of each
row (or column) of the interaction values equals the corresponding SHAP value (from
pred_contribs), and the sum of the entire matrix equals the raw untransformed margin
value of the prediction. Note the last row and column correspond to the bias term.
validate_features : bool
When this is True, validate that the Booster's and data's feature_names are identical.
Otherwise, it is assumed that the feature_names are the same.
Returns
-------
prediction : numpy array
|
def sanitize(self, val):
    """Given a Variable and a value, cleans it out.

    Coerces *val* to the Variable's declared type (NUMBER, TEXT or
    BOOLEAN), falling back to a safe default (0.0, "" or False) when the
    value cannot be converted.  Returns None for an unknown type
    (preserved behavior).
    """
    if self.type == NUMBER:
        try:
            return clamp(self.min, self.max, float(val))
        except (ValueError, TypeError):
            # BUG FIX: float(None) raises TypeError, which previously
            # escaped; treat it the same as an unparseable string.
            return 0.0
    elif self.type == TEXT:
        try:
            # NOTE: `unicode` implies this module targets Python 2.
            return unicode(str(val), "utf_8", "replace")
        except (TypeError, ValueError, UnicodeError):
            # BUG FIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt and SystemExit.
            return ""
    elif self.type == BOOLEAN:
        return unicode(val).lower() in ("true", "1", "yes")
|
Given a Variable and a value, cleans it out
|
def run_cli():
    "Command line interface to hiwenet."
    (features_path, groups_path, weight_method, num_bins, edge_range,
     trim_outliers, trim_percentile, return_networkx_graph,
     out_weights_path) = parse_args()
    # TODO add the possibility to process multiple combinations of parameters:
    #   diff subjects, diff metrics
    #   for features_path to be a file containing multiple subjects (one/line)
    #   -w could take multiple values kldiv,histint,
    #   each line: input_features_path,out_weights_path
    features, groups = read_features_and_groups(features_path, groups_path)
    extract(features, groups,
            weight_method=weight_method,
            num_bins=num_bins,
            edge_range=edge_range,
            trim_outliers=trim_outliers,
            trim_percentile=trim_percentile,
            return_networkx_graph=return_networkx_graph,
            out_weights_path=out_weights_path)
|
Command line interface to hiwenet.
|
def escape_velocity(M, R):
    """
    Escape velocity.

    Parameters
    ----------
    M : float
        Mass in solar masses.
    R : float
        Radius in solar radii.

    Returns
    -------
    v_escape
        in km/s.
    """
    # v_esc = sqrt(2 G M / R); module constants give cm/s, so scale by
    # 1e-5 to convert to km/s.
    speed_cm_s = np.sqrt(2. * grav_const * M * msun_g / (R * rsun_cm))
    return speed_cm_s * 1.e-5
|
escape velocity.
Parameters
----------
M : float
Mass in solar masses.
R : float
Radius in solar radii.
Returns
-------
v_escape
in km/s.
|
def _RemoveForwardedIps(self, forwarded_ips, interface):
"""Remove the forwarded IP addresses from the network interface.
Args:
forwarded_ips: list, the forwarded IP address strings to delete.
interface: string, the output device to use.
"""
for address in forwarded_ips:
self.ip_forwarding_utils.RemoveForwardedIp(address, interface)
|
Remove the forwarded IP addresses from the network interface.
Args:
forwarded_ips: list, the forwarded IP address strings to delete.
interface: string, the output device to use.
|
def restart_container(self, ip):
    """Restart the container with the given IP address.

    Args:
        - ip: the container's IP address
    Returns:
        A tuple (<result>, <ResponseInfo>):
        - result: empty dict {} on success, {"error": "<errMsg string>"} on failure
        - ResponseInfo: response information for the request
    """
    endpoint = '/v3/containers/{0}/restart'.format(ip)
    return self.__post('{0}{1}'.format(self.host, endpoint))
|
重启容器
重启指定IP的容器。
Args:
- ip: 容器ip
Returns:
返回一个tuple对象,其格式为(<result>, <ResponseInfo>)
- result 成功返回空dict{},失败返回{"error": "<errMsg string>"}
- ResponseInfo 请求的Response信息
|
def single(C, namespace=None):
    """An element maker with a single namespace that uses that namespace as the default"""
    if namespace is None:
        maker = C()
    else:
        # Register the namespace both as the default and under the `_` key.
        maker = C(default=namespace, _=namespace)
    return maker._
|
An element maker with a single namespace that uses that namespace as the default
|
def get_thermostat_state_by_name(self, name):
    """Retrieves a thermostat state object by its assigned name

    :param name: The name of the thermostat state
    :return: The thermostat state object, or None if no state matches
    """
    self._validate_thermostat_state_name(name)
    wanted = name.lower()
    for state in self.thermostat_states:
        if state.name.lower() == wanted:
            return state
    return None
|
Retrieves a thermostat state object by its assigned name
:param name: The name of the thermostat state
:return: The thermostat state object
|
def clean_username(self):
    """
    Ensure the username doesn't exist or contain invalid chars.
    We limit it to slugifiable chars since it's used as the slug
    for the user's profile view.
    """
    username = self.cleaned_data.get("username")
    # Reject anything that would change under slugification.
    if username.lower() != slugify(username).lower():
        raise forms.ValidationError(
            ugettext("Username can only contain letters, numbers, dashes "
                     "or underscores."))
    # Case-insensitive uniqueness check, excluding the current instance so
    # a user can re-save their own profile.
    try:
        User.objects.exclude(id=self.instance.id).get(username__iexact=username)
    except User.DoesNotExist:
        return username
    raise forms.ValidationError(
        ugettext("This username is already registered"))
|
Ensure the username doesn't exist or contain invalid chars.
We limit it to slugifiable chars since it's used as the slug
for the user's profile view.
|
def oem_init(self):
    """Initialize the command object for OEM capabilities

    A number of capabilities are either totally OEM defined or
    else augmented somehow by knowledge of the OEM. This
    method does an interrogation to identify the OEM.
    """
    # Already interrogated; nothing to do.
    if self._oemknown:
        return
    device_id = self._get_device_id()
    self._oem, self._oemknown = get_oem_handler(device_id, self)
|
Initialize the command object for OEM capabilities
A number of capabilities are either totally OEM defined or
else augmented somehow by knowledge of the OEM. This
method does an interrogation to identify the OEM.
|
def device_measurement(device,
                       ts=None,
                       part=None,
                       result=None,
                       code=None,
                       **kwargs):
    """Returns a JSON MeasurementPayload ready to be send through a
    transport.

    If `ts` is not given, the current time is used. `part` is an
    optional `Part` object, and `result` and `code` are the respective
    fields of the `Measurement` object. All other arguments are
    interpreted as dimensions.

    Minimal example, using a `Device` object to send two
    measurements:

    >>> d = Device("12345")
    >>> def publish(msg):
    ...     pass
    >>> publish(d.measurement(temperature=22.8))
    >>> publish(d.measurement(pressure=4.1))
    """
    ts = ts if ts is not None else local_now()
    # The dimension names are the keyword-argument keys.
    measurement = Measurement(ts, result, code, list(kwargs))
    payload = MeasurementPayload(device=device, part=part)
    payload.measurements.append(measurement)
    measurement.add_sample(ts, **kwargs)
    return dumps(payload)
|
Returns a JSON MeasurementPayload ready to be send through a
transport.
If `ts` is not given, the current time is used. `part` is an
optional `Part` object, and `result` and `code` are the respective
fields of the `Measurement` object. All other arguments are
interpreted as dimensions.
Minimal example, using a `Device` object to send two
measurements:
>>> d = Device("12345")
>>> def publish(msg):
... pass
>>> publish(d.measurement(temperature=22.8))
>>> publish(d.measurement(pressure=4.1))
|
def IndexedDB_requestData(self, securityOrigin, databaseName,
        objectStoreName, indexName, skipCount, pageSize, **kwargs):
    """
    Function path: IndexedDB.requestData
            Domain: IndexedDB
            Method name: requestData

            Parameters:
                    Required arguments:
                            'securityOrigin' (type: string) -> Security origin.
                            'databaseName' (type: string) -> Database name.
                            'objectStoreName' (type: string) -> Object store name.
                            'indexName' (type: string) -> Index name, empty string for object store data requests.
                            'skipCount' (type: integer) -> Number of records to skip.
                            'pageSize' (type: integer) -> Number of records to fetch.
                    Optional arguments:
                            'keyRange' (type: KeyRange) -> Key range.
            Returns:
                    'objectStoreDataEntries' (type: array) -> Array of object store data entries.
                    'hasMore' (type: boolean) -> If true, there are more entries to fetch in the given range.

            Description: Requests data from object store or index.
    """
    # Validate required argument types; the messages match the generated
    # per-argument assertions this replaces.
    typed_args = (
        ('securityOrigin', securityOrigin, str),
        ('databaseName', databaseName, str),
        ('objectStoreName', objectStoreName, str),
        ('indexName', indexName, str),
        ('skipCount', skipCount, int),
        ('pageSize', pageSize, int),
    )
    for arg_name, arg_value, arg_type in typed_args:
        assert isinstance(arg_value, (arg_type,)), (
            "Argument '%s' must be of type '['%s']'. Received type: '%s'" % (
                arg_name, arg_type.__name__, type(arg_value)))
    expected = ['keyRange']
    passed_keys = list(kwargs.keys())
    assert all([(key in expected) for key in passed_keys]
        ), "Allowed kwargs are ['keyRange']. Passed kwargs: %s" % passed_keys
    subdom_funcs = self.synchronous_command('IndexedDB.requestData',
        securityOrigin=securityOrigin, databaseName=databaseName,
        objectStoreName=objectStoreName, indexName=indexName,
        skipCount=skipCount, pageSize=pageSize, **kwargs)
    return subdom_funcs
|
Function path: IndexedDB.requestData
Domain: IndexedDB
Method name: requestData
Parameters:
Required arguments:
'securityOrigin' (type: string) -> Security origin.
'databaseName' (type: string) -> Database name.
'objectStoreName' (type: string) -> Object store name.
'indexName' (type: string) -> Index name, empty string for object store data requests.
'skipCount' (type: integer) -> Number of records to skip.
'pageSize' (type: integer) -> Number of records to fetch.
Optional arguments:
'keyRange' (type: KeyRange) -> Key range.
Returns:
'objectStoreDataEntries' (type: array) -> Array of object store data entries.
'hasMore' (type: boolean) -> If true, there are more entries to fetch in the given range.
Description: Requests data from object store or index.
|
def getInput():
    """Read the input buffer without blocking the system.

    Returns whatever input is currently pending as a string; returns an
    empty string when nothing is available.
    """
    # IDIOM FIX: renamed local `input` -> `buf`; it shadowed the builtin.
    buf = ''
    if sys.platform == 'win32':
        import msvcrt
        if msvcrt.kbhit():  # Check for a keyboard hit.
            buf += msvcrt.getch()
            print_(buf)
        else:
            time.sleep(.1)
    else:  # Other platforms
        # Posix will work with sys.stdin or sys.stdin.fileno()
        # Mac needs the file descriptor.
        # This solution does not work for windows since select
        # expects a socket, and I have no idea how to create a
        # socket from standard input.
        sock = sys.stdin.fileno()
        # select(rlist, wlist, xlist, timeout)
        while len(select.select([sock], [], [], 0.1)[0]) > 0:
            buf += decode(os.read(sock, 4096))
    return buf
|
Read the input buffer without blocking the system.
|
def register(cls, barset, name=None):
    """ Register a new BarSet as a member/attribute of this class.
        Returns the new BarSet.
        Arguments:
            barset  : An existing BarSet, or an iterable of strings.
            name    : New name for the BarSet, also used as the
                      classes attribute name.
                      If the `barset` object has not `name` attribute,
                      this argument is required. It must not be empty
                      when given.
    """
    # Delegates to the shared cls_register helper; ('wrapper', ) is
    # presumably the set of extra attributes to carry over -- TODO confirm
    # against cls_register's definition.
    return cls_register(cls, barset, BarSet, ('wrapper', ), name=name)
|
Register a new BarSet as a member/attribute of this class.
Returns the new BarSet.
Arguments:
barset : An existing BarSet, or an iterable of strings.
name : New name for the BarSet, also used as the
classes attribute name.
If the `barset` object has not `name` attribute,
this argument is required. It must not be empty
when given.
|
def get_and_update(cls, id, **kwargs):
    """Returns an updated instance of the service's model class.

    Args:
        id: primary key of the model instance to fetch and update
        **kwargs: update parameters (attribute name -> new value)

    Returns:
        The updated, committed model instance.
    """
    model = cls.get(id)
    # _preprocess_params may normalize or filter the incoming values
    # before they are applied to the model.
    for k, v in cls._preprocess_params(kwargs).items():
        setattr(model, k, v)
    cls.session.commit()
    return model
|
Returns an updated instance of the service's model class.
Args:
model: the model to update
**kwargs: update parameters
|
def reload(self):
    '''
    Clear plugin manager state and reload plugins.

    This method will make use of :meth:`clear` and :meth:`load_plugin`,
    so all internal state will be cleared, and all plugins defined in
    :data:`self.app.config['plugin_modules']` will be loaded.
    '''
    self.clear()
    plugin_modules = self.app.config.get('plugin_modules', ())
    for plugin_module in plugin_modules:
        self.load_plugin(plugin_module)
|
Clear plugin manager state and reload plugins.
This method will make use of :meth:`clear` and :meth:`load_plugin`,
so all internal state will be cleared, and all plugins defined in
:data:`self.app.config['plugin_modules']` will be loaded.
|
def init_runner(self, parser, tracers, projinfo):
    '''Prepare the instances this runner needs before executing a test case.

    @note: should not override
    @param parser: instance of TestCaseParser
    @param tracers: dict mapping device address to Tracer instance, e.g.
        {"": tracer_obj} or
        {"192.168.0.1:5555": tracer_obj1, "192.168.0.2:5555": tracer_obj2}
    @param projinfo: dict of project info for the test case; used like
        self.proj_info["module"], self.proj_info["name"].
        yaml case like:
            - project:
                name: xxx
                module: xxxx
        dict case like:
            {"project": {"name": xxx, "module": xxxx}}
    '''
    self.proj_info = projinfo
    self.tracers = tracers
    self.parser = parser
|
initial some instances for preparing to run test case
@note: should not override
@param parser: instance of TestCaseParser
@param tracers: dict type for the instance of Tracer. Such as {"":tracer_obj} or {"192.168.0.1:5555":tracer_obj1, "192.168.0.2:5555":tracer_obj2}
@param proj_info: dict type of test case. use like: self.proj_info["module"], self.proj_info["name"]
yaml case like:
- project:
name: xxx
module: xxxx
dict case like:
{"project": {"name": xxx, "module": xxxx}}
|
def _fully_random_weights(n_features, lam_scale, prng):
"""Generate a symmetric random matrix with zeros along the diagonal."""
weights = np.zeros((n_features, n_features))
n_off_diag = int((n_features ** 2 - n_features) / 2)
weights[np.triu_indices(n_features, k=1)] = 0.1 * lam_scale * prng.randn(
n_off_diag
) + (0.25 * lam_scale)
weights[weights < 0] = 0
weights = weights + weights.T
return weights
|
Generate a symmetric random matrix with zeros along the diagonal.
|
def ConnectionUpdate(self, settings):
    '''Update settings on a connection.

    settings is a String String Variant Map Map. See
    https://developer.gnome.org/NetworkManager/0.9/spec.html
    #type-String_String_Variant_Map_Map

    Returns the connection's object path.  Raises a DBus DoesNotExist
    error when this connection is not registered with the settings
    service.
    '''
    connection_path = self.connection_path
    NM = dbusmock.get_object(MANAGER_OBJ)
    settings_obj = dbusmock.get_object(SETTINGS_OBJ)
    main_connections = settings_obj.ListConnections()
    if connection_path not in main_connections:
        raise dbus.exceptions.DBusException(
            'Connection %s does not exist' % connection_path,
            name=MANAGER_IFACE + '.DoesNotExist',)
    # Take care not to overwrite the secrets: merge the incoming settings
    # key-by-key into the existing maps instead of replacing them wholesale.
    for setting_name in settings:
        setting = settings[setting_name]
        for k in setting:
            if setting_name not in self.settings:
                self.settings[setting_name] = {}
            self.settings[setting_name][k] = setting[k]
    self.EmitSignal(CSETTINGS_IFACE, 'Updated', '', [])
    # NOTE(review): this indexes settings['connection'] unconditionally; a
    # settings map without a 'connection' section would raise KeyError --
    # presumably callers always include it.  TODO confirm.
    auto_connect = False
    if 'autoconnect' in settings['connection']:
        auto_connect = settings['connection']['autoconnect']
    if auto_connect:
        dev = None
        devices = NM.GetDevices()
        # Grab the first device.
        if len(devices) > 0:
            dev = devices[0]
        if dev:
            activate_connection(NM, connection_path, dev, connection_path)
    return connection_path
|
Update settings on a connection.
settings is a String String Variant Map Map. See
https://developer.gnome.org/NetworkManager/0.9/spec.html
#type-String_String_Variant_Map_Map
|
def _check_lods(parts, tumor_thresh, normal_thresh, indexes):
"""Ensure likelihoods for tumor and normal pass thresholds.
Skipped if no FreeBayes GL annotations available.
"""
try:
gl_index = parts[8].split(":").index("GL")
except ValueError:
return True
try:
tumor_gls = [float(x) for x in parts[indexes["tumor"]].strip().split(":")[gl_index].split(",") if x != "."]
if tumor_gls:
tumor_lod = max(tumor_gls[i] - tumor_gls[0] for i in range(1, len(tumor_gls)))
else:
tumor_lod = -1.0
# No GL information, no tumor call (so fail it)
except IndexError:
tumor_lod = -1.0
try:
normal_gls = [float(x) for x in parts[indexes["normal"]].strip().split(":")[gl_index].split(",") if x != "."]
if normal_gls:
normal_lod = min(normal_gls[0] - normal_gls[i] for i in range(1, len(normal_gls)))
else:
normal_lod = normal_thresh
# No GL inofmration, no normal call (so pass it)
except IndexError:
normal_lod = normal_thresh
return normal_lod >= normal_thresh and tumor_lod >= tumor_thresh
|
Ensure likelihoods for tumor and normal pass thresholds.
Skipped if no FreeBayes GL annotations available.
|
def p_case_clause(self, p):
    # NOTE: the docstring below is the PLY grammar production for this
    # rule; ply.yacc parses it, so it must not be reworded.
    """case_clause : CASE expr COLON source_elements"""
    # p[2] is the matched `expr`, p[4] the `source_elements` list.
    p[0] = self.asttypes.Case(expr=p[2], elements=p[4])
    p[0].setpos(p)
|
case_clause : CASE expr COLON source_elements
|
def from_key(cls, *args):
    """
    Return flyweight object with specified key, if it has already been created.

    Returns:
        cls or None: Previously constructed flyweight object with given
            key or None if key not found
    """
    # A single argument is used directly as the key; multiple arguments
    # form a tuple key.
    if len(args) > 1:
        key = args
    else:
        key = args[0]
    return cls._instances.get(key)
|
Return flyweight object with specified key, if it has already been created.
Returns:
cls or None: Previously constructed flyweight object with given
key or None if key not found
|
def add_component(self, kind, **kwargs):
    """
    Add a new component (star or orbit) to the system. If not provided,
    'component' (the name of the new star or orbit) will be created for
    you and can be accessed by the 'component' attribute of the returned
    ParameterSet.

    >>> b.add_component(component.star)

    or

    >>> b.add_component('orbit', period=2.5)

    Available kinds include:
        * :func:`phoebe.parameters.component.star`
        * :func:`phoebe.parameters.component.orbit`

    :parameter kind: function to call that returns a
        ParameterSet or list of parameters. This must either be
        a callable function that accepts nothing but default
        values, or the name of a function (as a string) that can
        be found in the :mod:`phoebe.parameters.component` module
        (ie. 'star', 'orbit')
    :type kind: str or callable
    :parameter str component: (optional) name of the newly-created
        component
    :parameter **kwargs: default values for any of the newly-created
        parameters
    :return: :class:`phoebe.parameters.parameters.ParameterSet` of
        all parameters that have been added
    :raises NotImplementedError: if required constraint is not implemented
    """
    # Resolve `kind` (string or callable) to the component factory.
    func = _get_add_func(component, kind)
    if kwargs.get('component', False) is None:
        # then we want to apply the default below, so let's pop for now
        _ = kwargs.pop('component')
    # Generate a default component label when none was supplied.
    # NOTE: func.func_name is Python-2-only syntax.
    kwargs.setdefault('component',
                      self._default_label(func.func_name,
                                          **{'context': 'component',
                                             'kind': func.func_name}))
    if kwargs.pop('check_label', True):
        self._check_label(kwargs['component'])
    # The factory returns the new parameters plus any constraints they need.
    params, constraints = func(**kwargs)
    metawargs = {'context': 'component',
                 'component': kwargs['component'],
                 'kind': func.func_name}
    self._attach_params(params, **metawargs)
    # Record the action so it can be undone (remove_component) or redone.
    redo_kwargs = deepcopy(kwargs)
    redo_kwargs['func'] = func.func_name
    self._add_history(redo_func='add_component',
                      redo_kwargs=redo_kwargs,
                      undo_func='remove_component',
                      undo_kwargs={'component': kwargs['component']})
    for constraint in constraints:
        self.add_constraint(*constraint)
    # since we've already processed (so that we can get the new qualifiers),
    # we'll only raise a warning
    self._kwargs_checks(kwargs, warning_only=True)
    # return params
    return self.get_component(**metawargs)
|
Add a new component (star or orbit) to the system. If not provided,
'component' (the name of the new star or orbit) will be created for
you and can be accessed by the 'component' attribute of the returned
ParameterSet.
>>> b.add_component(component.star)
or
>>> b.add_component('orbit', period=2.5)
Available kinds include:
* :func:`phoebe.parameters.component.star`
* :func:`phoebe.parameters.component.orbit`
:parameter kind: function to call that returns a
ParameterSet or list of parameters. This must either be
a callable function that accepts nothing but default
values, or the name of a function (as a string) that can
be found in the :mod:`phoebe.parameters.component` module
(ie. 'star', 'orbit')
:type kind: str or callable
:parameter str component: (optional) name of the newly-created
component
:parameter **kwargs: default values for any of the newly-created
parameters
:return: :class:`phoebe.parameters.parameters.ParameterSet` of
all parameters that have been added
:raises NotImplementedError: if required constraint is not implemented
|
def set_timezone(self, timezone: str):
    """ sets the timezone for the AP. e.g. "Europe/Berlin"
    Args:
        timezone(str): the new timezone identifier
    """
    payload = json.dumps({"timezoneId": timezone})
    return self._restCall("home/setTimezone", body=payload)
|
sets the timezone for the AP. e.g. "Europe/Berlin"
Args:
timezone(str): the new timezone
|
def get_mysql_cfg():
    """
    Return the MySQL configuration for the current environment.

    Production uses the 'mysql_online' section of the database
    configuration; any other environment falls back to 'mysql_dev'.
    """
    environment = get_project_configuration()['environment']
    cfg = get_database_configuration()
    section = 'mysql_online' if environment == 'production' else 'mysql_dev'
    return cfg[section]
|
Get the appropriate MySQL configuration
|
def save(self, obj):
    """Persist ``obj`` via the current SQLAlchemy session.

    The object is added to the session and the transaction is committed
    immediately.

    :param obj: SQLAlchemy object being saved
    :returns: The saved object
    """
    db = self.get_db_session()
    db.add(obj)
    db.commit()
    return obj
|
Add ``obj`` to the SQLAlchemy session and commit the changes back to
the database.
:param obj: SQLAlchemy object being saved
:returns: The saved object
|
def _convert_pooling_param(param):
"""Convert the pooling layer parameter
"""
param_string = "pooling_convention='full', "
if param.global_pooling:
param_string += "global_pool=True, kernel=(1,1)"
else:
param_string += "pad=(%d,%d), kernel=(%d,%d), stride=(%d,%d)" % (
param.pad, param.pad, param.kernel_size, param.kernel_size,
param.stride, param.stride)
if param.pool == 0:
param_string += ", pool_type='max'"
elif param.pool == 1:
param_string += ", pool_type='avg'"
else:
raise ValueError("Unknown Pooling Method!")
return param_string
|
Convert the pooling layer parameter
|
def parseConfig(opt):
    """Build a restarter callable and the config file path.

    :params opt: dict-like object with config and messages keys
    :returns: tuple of (restarter, path) -- restarter restarts the
        configured places, path is the config file as a FilePath
    """
    places = ctllib.Places(config=opt['config'], messages=opt['messages'])
    return (functools.partial(ctllib.restart, places),
            filepath.FilePath(opt['config']))
|
Parse configuration
:params opt: dict-like object with config and messages keys
:returns: restarter, path
|
def init_class(self, class_, step_func=None):
    """
    This method simulates the loading of a class by the JVM, during which
    parts of the class (e.g. static fields) are initialized. For this, we
    run the class initializer method <clinit> (if available) and update
    the state accordingly.

    Note: Initialization is skipped, if the class has already been
    initialized (or if it's not loaded in CLE).

    :param class_: the class descriptor to initialize
    :param step_func: optional callback forwarded to simgr.run(), invoked
        after each step of the initializer's execution
    """
    if self.is_class_initialized(class_):
        l.debug("Class %r already initialized.", class_)
        return

    l.debug("Initialize class %r.", class_)
    # Mark as initialized *before* running <clinit> so a recursive
    # reference to this class during initialization does not loop.
    self.initialized_classes.add(class_)

    if not class_.is_loaded:
        l.warning("Class %r is not loaded in CLE. Skip initializiation.", class_)
        return

    clinit_method = resolve_method(self.state, '<clinit>', class_.name,
                                   include_superclasses=False, init_class=False)
    if clinit_method.is_loaded:
        # Run <clinit> in a separate simulation manager, starting from a
        # copy of the current state and terminating when it returns.
        javavm_simos = self.state.project.simos
        clinit_state = javavm_simos.state_call(addr=SootAddressDescriptor(clinit_method, 0, 0),
                                               base_state=self.state,
                                               ret_addr=SootAddressTerminator())
        simgr = self.state.project.factory.simgr(clinit_state)
        l.info(">"*15 + " Run class initializer %r ... " + ">"*15, clinit_method)
        simgr.run(step_func=step_func)
        l.debug("<"*15 + " Run class initializer %r ... done " + "<"*15, clinit_method)
        # The only thing that can be updated during initialization are
        # static or rather global information, which are either stored on
        # the heap or in the vm_static_table
        self.state.memory.vm_static_table = simgr.deadended[-1].memory.vm_static_table.copy()
        self.state.memory.heap = simgr.deadended[-1].memory.heap.copy()
    else:
        l.debug("Class initializer <clinit> is not loaded in CLE. Skip initializiation.")
|
This method simulates the loading of a class by the JVM, during which
parts of the class (e.g. static fields) are initialized. For this, we
run the class initializer method <clinit> (if available) and update
the state accordingly.
Note: Initialization is skipped, if the class has already been
initialized (or if it's not loaded in CLE).
|
def printStatistics(completion, concordance, tpedSamples, oldSamples, prefix):
    """Print the statistics in a file.

    NOTE: this is Python 2 code (print >> syntax, dict.iteritems).

    :param completion: the completion of each duplicated samples.
    :param concordance: the concordance of each duplicated samples.
    :param tpedSamples: the updated position of the samples in the tped
                        containing only duplicated samples.
    :param oldSamples: the original duplicated sample positions.
    :param prefix: the prefix of all the files.

    :type completion: :py:class:`numpy.array`
    :type concordance: dict
    :type tpedSamples: dict
    :type oldSamples: dict
    :type prefix: str

    :returns: the completion for each duplicated samples, as a
              :py:class:`numpy.array`.

    Prints the statistics (completion of each samples and pairwise concordance
    between duplicated samples) in a file (``prefix.summary``).
    """
    # Compute the completion percentage on none zero values
    # (entries with a zero denominator stay at 0.0 instead of dividing by 0)
    none_zero_indexes = np.where(completion[1] != 0)
    completionPercentage = np.zeros(len(completion[0]), dtype=float)
    completionPercentage[none_zero_indexes] = np.true_divide(
        completion[0, none_zero_indexes],
        completion[1, none_zero_indexes],
    )

    # The output file containing the summary statistics (for each of the
    # duplicated samples, print the mean concordance and the completion).
    outputFile = None
    try:
        outputFile = open(prefix + ".summary", "w")
    except IOError:
        msg = "%(prefix)s.summary: can't write file" % locals()
        raise ProgramError(msg)
    print >>outputFile, "\t".join(["origIndex", "dupIndex", "famID", "indID",
                                   "% completion", "completion",
                                   "mean concordance"])

    for sampleID, indexes in tpedSamples.iteritems():
        for i, index in enumerate(indexes):
            # The indexes
            toPrint = [str(oldSamples[sampleID][i]+1), str(index+1)]
            # The samples
            toPrint.extend(list(sampleID))
            # The completion
            toPrint.append("%.8f" % completionPercentage[index])
            toPrint.append("%d/%d" % (completion[0][index],
                                      completion[1][index]))
            # The concordance (not on total values = 0)
            # Compare this copy against every *other* copy of the same sample.
            indexToKeep = list(set(range(len(indexes))) - set([i]))
            values = np.ravel(
                np.asarray(concordance[sampleID][0][i, indexToKeep])
            )
            total_values = np.ravel(
                np.asarray(concordance[sampleID][1][i, indexToKeep])
            )
            currConcordance = np.zeros(len(indexToKeep), dtype=float)
            none_zero_indexes = np.where(total_values != 0)
            currConcordance[none_zero_indexes] = np.true_divide(
                values[none_zero_indexes],
                total_values[none_zero_indexes],
            )
            # Mean pairwise concordance of this copy vs. the others.
            currConcordance = np.mean(currConcordance)
            toPrint.append("%.8f" % currConcordance)
            print >>outputFile, "\t".join(toPrint)

    # Closing the output file
    outputFile.close()

    return completionPercentage
|
Print the statistics in a file.
:param completion: the completion of each duplicated samples.
:param concordance: the concordance of each duplicated samples.
:param tpedSamples: the updated position of the samples in the tped
containing only duplicated samples.
:param oldSamples: the original duplicated sample positions.
:param prefix: the prefix of all the files.
:type completion: :py:class:`numpy.array`
:type concordance: dict
:type tpedSamples: dict
:type oldSamples: dict
:type prefix: str
:returns: the completion for each duplicated samples, as a
:py:class:`numpy.array`.
Prints the statistics (completion of each samples and pairwise concordance
between duplicated samples) in a file (``prefix.summary``).
|
def set_color(index, color):
    """Convert a hex color to a terminal color-setting escape sequence."""
    # Older Darwin terminals use the ]P palette syntax for the first
    # 20 slots; everything else gets the standard OSC 4 sequence.
    darwin_palette = OS == "Darwin" and index < 20
    if darwin_palette:
        sequence = "\033]P%1x%s\033\\" % (index, color.strip("#"))
    else:
        sequence = "\033]4;%s;%s\033\\" % (index, color)
    return sequence
|
Convert a hex color to a text color sequence.
|
def fetch_token(self, **kwargs):
    """Fetch a new token using the supplied code.

    Supplies this client's secret automatically when the caller did not
    pass one.

    :param str code: A previously obtained auth code.
    """
    kwargs.setdefault('client_secret', self.client_secret)
    return self.session.fetch_token(token_url, **kwargs)
|
Fetch a new token using the supplied code.
:param str code: A previously obtained auth code.
|
def get_trend(self):
    """
    Get the trend for the last two metric values using the interval
    defined in the metric.

    :return: a tuple ``(last, trend_percentage)`` with the metric value
        for the last interval and the trend percentage between the last
        two intervals (-100 when the metric dropped to zero, 0 when it
        stayed at zero)
    """
    # TODO: We just need the last two periods, not the full ts
    ts = self.get_ts()
    values = ts['value']
    last = values[len(values) - 1]
    prev = values[len(values) - 2]
    trend = last - prev

    if last == 0:
        # Avoid a division by zero: dropping to zero is a -100% trend,
        # staying at zero is flat.
        trend_percentage = -100 if prev > 0 else 0
    else:
        trend_percentage = int((trend / last) * 100)
    return (last, trend_percentage)
|
Get the trend for the last two metric values using the interval defined in the metric
:return: a tuple with the metric value for the last interval and the
trend percentage between the last two intervals
|
def commit(self, message, parent_commits=None, head=True, author=None,
           committer=None, author_date=None, commit_date=None,
           skip_hooks=False):
    """Commit the current default index file, creating a commit object.
    For more information on the arguments, see tree.commit.

    :note: If you have manually altered the .entries member of this instance,
           don't forget to write() your changes to disk beforehand.
           Passing skip_hooks=True is the equivalent of using `-n`
           or `--no-verify` on the command line.
    :return: Commit object representing the new commit"""
    if not skip_hooks:
        run_commit_hook('pre-commit', self)

        # Let the commit-msg hook see (and possibly rewrite) the message
        # through the COMMIT_EDITMSG file, then read the final text back.
        self._write_commit_editmsg(message)
        run_commit_hook('commit-msg', self, self._commit_editmsg_filepath())
        message = self._read_commit_editmsg()
        self._remove_commit_editmsg()
    # Serialize the index into a tree and create the commit from it.
    tree = self.write_tree()
    rval = Commit.create_from_tree(self.repo, tree, message, parent_commits,
                                   head, author=author, committer=committer,
                                   author_date=author_date, commit_date=commit_date)
    if not skip_hooks:
        run_commit_hook('post-commit', self)
    return rval
|
Commit the current default index file, creating a commit object.
For more information on the arguments, see tree.commit.
:note: If you have manually altered the .entries member of this instance,
don't forget to write() your changes to disk beforehand.
Passing skip_hooks=True is the equivalent of using `-n`
or `--no-verify` on the command line.
:return: Commit object representing the new commit
|
def _is_valid_channel(self, channel,
                      conda_url='https://conda.anaconda.org'):
    """Callback for is_valid_channel.

    A channel is considered valid when its repodata.json for the current
    platform answers an HTTP HEAD request with status 200.
    """
    if channel.startswith(('https://', 'http://')):
        url = channel
    else:
        url = "{0}/{1}".format(conda_url, channel)

    # Drop a single trailing slash so the path joins cleanly.
    if url.endswith('/'):
        url = url[:-1]

    plat = self._conda_api.get_platform()
    repodata_url = "{0}/{1}/{2}".format(url, plat, 'repodata.json')

    try:
        response = requests.head(repodata_url, proxies=self.proxy_servers)
        return response.status_code in [200]
    except Exception as error:
        logger.error(str(error))
        return False
|
Callback for is_valid_channel.
|
def inject(self):
    """
    Recursively inject aXe into all iframes and the top level document.

    Reads the axe-core script from ``self.script_url`` and executes it in
    the current Selenium session.
    """
    with open(self.script_url, "r", encoding="utf8") as script_file:
        script = script_file.read()
    self.selenium.execute_script(script)
|
Recursively inject aXe into all iframes and the top level document.
:param script_url: location of the axe-core script.
:type script_url: string
|
def _mute(self):
    """ toggle vlc mute state via the rc interface """
    if self.muted:
        # Unmute: restore the volume remembered before muting.
        self._sendCommand("volume {}\n".format(self.actual_volume))
        if logger.isEnabledFor(logging.DEBUG):
            percent = int(100 * self.actual_volume / self.max_volume)
            logger.debug('VLC unmuted: {0} ({1}%)'.format(self.actual_volume, percent))
        return
    # Mute: capture the current volume first so it can be restored later.
    if self.actual_volume == -1:
        self._get_volume()
    self._sendCommand("volume 0\n")
    if logger.isEnabledFor(logging.DEBUG):
        logger.debug('VLC muted: 0 (0%)')
|
mute vlc
|
def _clear(self):
    """
    Helper that erases the composition by painting the whole background
    image black.
    """
    canvas = ImageDraw.Draw(self._background_image)
    canvas.rectangle(self._device.bounding_box, fill="black")
    del canvas
|
Helper that clears the composition.
|
def remove_image_info_cb(self, gshell, channel, iminfo):
    """Delete history entries related to a deleted image."""
    chname = channel.name
    if chname not in self.name_dict:
        return
    entries = self.name_dict[chname]

    name = iminfo.name
    if name not in entries:
        return
    del entries[name]
    self.logger.debug('{0} removed from ChangeHistory'.format(name))

    # Without a GUI there is nothing to refresh.
    if not self.gui_up:
        return False
    self.clear_selected_history()
    self.recreate_toc()
|
Delete entries related to deleted image.
|
def replace_namespaced_stateful_set_scale(self, name, namespace, body, **kwargs):  # noqa: E501
    """replace scale of the specified StatefulSet  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.replace_namespaced_stateful_set_scale(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str name: name of the Scale (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param V1Scale body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :return: V1Scale
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Both the sync and async paths delegate to the *_with_http_info
    # variant; with async_req=True it returns the request thread,
    # otherwise the deserialized V1Scale.
    return self.replace_namespaced_stateful_set_scale_with_http_info(name, namespace, body, **kwargs)  # noqa: E501
|
replace_namespaced_stateful_set_scale # noqa: E501
replace scale of the specified StatefulSet # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_stateful_set_scale(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Scale (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Scale body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1Scale
If the method is called asynchronously,
returns the request thread.
|
def Trans(self, stateFrom, *condAndNextState):
    """
    Register FSM transitions out of a state as a priority-ordered
    If/Elif chain inside this state Switch.

    :param stateFrom: apply when FSM is in this state
    :param condAndNextState: tupes (condition, newState),
        last does not to have condition

    :attention: transitions has priority, first has the biggest
    :attention: if stateFrom is None it is evaluated as default
    """
    top = []
    last = True

    # Build the decision tree from the lowest-priority transition up:
    # each earlier (higher-priority) condition wraps the accumulated
    # tree in its Else branch.
    for cAndS in reversed(condAndNextState):
        if last is True:
            last = False
            # if this is last trans. it does not have to condition
            try:
                condition, newvalue = cAndS
            except TypeError:
                # Unconditional fallback transition: assign it directly.
                top = self.stateReg(cAndS)
                continue
            top = []

        else:
            condition, newvalue = cAndS

        # building decision tree
        top = \
            If(condition,
               self.stateReg(newvalue)
               ).Else(
                top
            )

    if stateFrom is None:
        return Switch.Default(self, top)
    else:
        return Switch.Case(self, stateFrom, top)
|
:param stateFrom: apply when FSM is in this state
:param condAndNextState: tupes (condition, newState),
last does not to have condition
:attention: transitions has priority, first has the biggest
:attention: if stateFrom is None it is evaluated as default
|
def retrieve(cls, *args, **kwargs):
    """Delegate retrieval to the parent class implementation."""
    parent = super(Subscription, cls)
    return parent.retrieve(*args, **kwargs)
|
Return parent method.
|
def get_path(self):
    """
    Calculate item's path in configuration tree by walking up through
    parent sections.

    Use this sparingly -- path is calculated by going up the configuration
    tree.  For a large number of items, it is more efficient to use
    iterators that return paths as keys.

    Path value is stable only once the configuration tree is completely
    initialised.
    """
    parent = self.section
    if not parent:
        # Root item: the path is just its own name.
        return (self.name,)
    return parent.get_path() + (self.name,)
|
Calculate item's path in configuration tree.
Use this sparingly -- path is calculated by going up the configuration tree.
For a large number of items, it is more efficient to use iterators that return paths
as keys.
Path value is stable only once the configuration tree is completely initialised.
|
def _rotate(img, angle):
    '''
    Rotate *img* about its centre by *angle* degrees; returns the input
    unchanged when angle == 0.
    '''
    if angle == 0:
        return img
    height, width = img.shape[0], img.shape[1]
    rot = cv2.getRotationMatrix2D((width // 2, height // 2), angle, 1)
    return cv2.warpAffine(img, rot, (width, height))
|
angle [DEG]
|
def back_slash_to_front_converter(string):
    r"""
    Replace every backslash (\) in *string* with a forward slash (/).

    Non-string inputs (including None) and empty strings are returned
    unchanged, preserving the original tolerant behaviour.

    :param string: single string to modify
    :type string: str
    :return: the converted string, or the input untouched when it is not
        a non-empty str
    """
    # str.replace / isinstance cannot raise here, so the former broad
    # try/except Exception wrapper was dead code and has been removed.
    if not string or not isinstance(string, str):
        return string
    return string.replace('\\', '/')
|
Replacing all \ in the str to /
:param string: single string to modify
:type string: str
|
def imap(self, coords):
    """Inverse map coordinates through each transform in order.

    Parameters
    ----------
    coords : array-like
        Coordinates to inverse map.

    Returns
    -------
    coords : ndarray
        Inverse-mapped coordinates.
    """
    result = coords
    for transform in self.transforms:
        result = transform.imap(result)
    return result
|
Inverse map coordinates
Parameters
----------
coords : array-like
Coordinates to inverse map.
Returns
-------
coords : ndarray
Coordinates.
|
def exceptions(self):
    """A dict of dates -> [Period time tuples] representing exceptions
    to the base recurrence pattern."""
    result = {}
    for node in self.root.xpath('exceptions/exception'):
        # Each node's text is "<date> <time> <time> ...".
        tokens = str(node.text).split(' ')
        date = text_to_date(tokens[0])
        periods = [_time_text_to_period(t) for t in tokens[1:]]
        result.setdefault(date, []).extend(periods)
    return result
|
A dict of dates -> [Period time tuples] representing exceptions
to the base recurrence pattern.
|
def is_stable(self, species):
    '''
    Accept input formatted like 'He-3' and check against the stable_el
    list; return True when the isotope occurs there, otherwise False.

    Notes
    -----
    this method is designed to work with an se instance from
    nugridse.py. In order to make it work with ppn.py some
    additional work is required.

    FH, April 20, 2013.
    '''
    parts = species.split('-')
    element = parts[0]
    try:
        mass_number = int(parts[1])
    except ValueError:
        # Species names carrying extra letters (e.g. isomers) are treated
        # as unstable; proper identification by A, Z and isomeric state is
        # not done here.
        mass_number = 999
    element_idx = self.stable_names.index(element)
    if mass_number in self.stable_el[element_idx][1:]:
        return True
    else:
        return False
|
This routine accepts input formatted like 'He-3' and checks with
stable_el list if occurs in there. If it does, the routine
returns True, otherwise False.
Notes
-----
this method is designed to work with an se instance from
nugridse.py. In order to make it work with ppn.py some
additional work is required.
FH, April 20, 2013.
|
def put_task(self, dp, callback=None):
    """
    Same as in :meth:`AsyncPredictorBase.put_task`.

    Queues the datapoint together with a Future that will hold the
    result; *callback*, when given, fires once the Future is done.
    """
    future = Future()
    if callback is not None:
        future.add_done_callback(callback)
    self.input_queue.put((dp, future))
    return future
|
Same as in :meth:`AsyncPredictorBase.put_task`.
|
def SegmentProd(a, ids):
    """
    Segmented prod op: multiply together the elements of ``a`` belonging
    to each segment given by ``ids``.
    """
    def prod_of(idxs):
        return reduce(np.multiply, a[idxs])
    return seg_map(prod_of, a, ids),
|
Segmented prod op.
|
def avg(self, property):
    """Getting average according to given property

    :@param property
    :@type property: string
    :@return average: int/float
    """
    self.__prepare()
    total = self.sum(property)
    return total / self.count()
|
Getting average according to given property
:@param property
:@type property: string
:@return average: int/float
|
def add_sources_argument(cls, group, allow_filters=True, prefix=None, add_root_paths=False):
    """
    Subclasses may call this to add sources and source_filters arguments.

    Args:
        group: argparse.ArgumentGroup, the extension argument group
        allow_filters: bool, Whether the extension wishes to expose a
            source_filters argument.
        prefix: str, arguments have to be namespaced.
        add_root_paths: bool, whether to also expose a source_roots
            argument.
    """
    prefix = prefix or cls.argument_prefix
    dest_prefix = prefix.replace('-', '_')

    group.add_argument("--%s-sources" % prefix,
                       action="store", nargs="+",
                       dest="%s_sources" % dest_prefix,
                       help="%s source files to parse" % prefix)

    if allow_filters:
        group.add_argument("--%s-source-filters" % prefix,
                           action="store", nargs="+",
                           dest="%s_source_filters" % dest_prefix,
                           help="%s source files to ignore" % prefix)

    if add_root_paths:
        group.add_argument("--%s-source-roots" % prefix,
                           action="store", nargs="+",
                           dest="%s_source_roots" % dest_prefix,
                           help="%s source root directories allowing files "
                                "to be referenced relatively to those" % prefix)
|
Subclasses may call this to add sources and source_filters arguments.
Args:
group: arparse.ArgumentGroup, the extension argument group
allow_filters: bool, Whether the extension wishes to expose a
source_filters argument.
prefix: str, arguments have to be namespaced.
|
def register_name(self, register_index):
    """Retrieves and returns the name of an ARM CPU register.

    Args:
        self (JLink): the ``JLink`` instance
        register_index (int): index of the register whose name to retrieve

    Returns:
        Name of the register, decoded to ``str``.
    """
    raw = self._dll.JLINKARM_GetRegisterName(register_index)
    # The DLL returns a C string pointer; cast and decode it.
    return ctypes.cast(raw, ctypes.c_char_p).value.decode()
|
Retrives and returns the name of an ARM CPU register.
Args:
self (JLink): the ``JLink`` instance
register_index (int): index of the register whose name to retrieve
Returns:
Name of the register.
|
def output(self, _in, out, **kwargs):
    """Wrap the translation catalog in an Angular module definition."""
    header = ('angular.module("{0}", ["gettext"]).run('
              '["gettextCatalog", function (gettextCatalog) {{').format(
        self.catalog_name)
    out.write(header)
    out.write(_in.read())
    out.write('}]);')
|
Wrap translation in Angular module.
|
def generateDrawSpecs(self, p):
    """
    Calls tickValues() and tickStrings() to determine where and how ticks should
    be drawn, then generates from this a set of drawing commands to be
    interpreted by drawPicture().

    :param p: QPainter used only for measuring text bounding rects
    :returns: tuple (axisSpec, tickSpecs, textSpecs) or None when the
        axis has no measurable on-screen extent
    """
    profiler = debug.Profiler()

    # bounds = self.boundingRect()
    bounds = self.mapRectFromParent(self.geometry())

    linkedView = self.linkedView()
    if linkedView is None or self.grid is False:
        tickBounds = bounds
    else:
        tickBounds = linkedView.mapRectToItem(self, linkedView.boundingRect())

    # Per-orientation geometry: `span` is the axis line, tickStart/Stop
    # the extent of tick marks, tickDir the direction they grow, and
    # `axis` selects x (1) or y (0) as the tick coordinate.
    if self.orientation == 'left':
        span = (bounds.topRight(), bounds.bottomRight())
        tickStart = tickBounds.right()
        tickStop = bounds.right()
        tickDir = -1
        axis = 0
    elif self.orientation == 'right':
        span = (bounds.topLeft(), bounds.bottomLeft())
        tickStart = tickBounds.left()
        tickStop = bounds.left()
        tickDir = 1
        axis = 0
    elif self.orientation == 'top':
        span = (bounds.bottomLeft(), bounds.bottomRight())
        tickStart = tickBounds.bottom()
        tickStop = bounds.bottom()
        tickDir = -1
        axis = 1
    elif self.orientation == 'bottom':
        span = (bounds.topLeft(), bounds.topRight())
        tickStart = tickBounds.top()
        tickStop = bounds.top()
        tickDir = 1
        axis = 1
    # print tickStart, tickStop, span

    ## determine size of this item in pixels
    points = list(map(self.mapToDevice, span))
    if None in points:
        return
    lengthInPixels = Point(points[1] - points[0]).length()
    if lengthInPixels == 0:
        return

    # Determine major / minor / subminor axis ticks
    if self._tickLevels is None:
        tickLevels = self.tickValues(self.range[0], self.range[1], lengthInPixels)
        tickStrings = None
    else:
        ## parse self.tickLevels into the formats returned by tickLevels() and tickStrings()
        tickLevels = []
        tickStrings = []
        for level in self._tickLevels:
            values = []
            strings = []
            tickLevels.append((None, values))
            tickStrings.append(strings)
            for val, strn in level:
                values.append(val)
                strings.append(strn)

    ## determine mapping between tick values and local coordinates
    dif = self.range[1] - self.range[0]
    if dif == 0:
        xScale = 1
        offset = 0
    else:
        if axis == 0:
            xScale = -bounds.height() / dif
            offset = self.range[0] * xScale - bounds.height()
        else:
            xScale = bounds.width() / dif
            offset = self.range[0] * xScale

    xRange = [x * xScale - offset for x in self.range]
    xMin = min(xRange)
    xMax = max(xRange)

    profiler('init')

    tickPositions = [] # remembers positions of previously drawn ticks

    ## compute coordinates to draw ticks
    ## draw three different intervals, long ticks first
    tickSpecs = []
    for i in range(len(tickLevels)):
        tickPositions.append([])
        ticks = tickLevels[i][1]

        ## length of tick
        tickLength = self.style['tickLength'] / ((i * 0.5) + 1.0)

        # Deeper tick levels are drawn progressively fainter.
        lineAlpha = 255 / (i + 1)
        if self.grid is not False:
            lineAlpha *= self.grid / 255. * np.clip((0.05 * lengthInPixels / (len(ticks) + 1)), 0., 1.)

        for v in ticks:
            ## determine actual position to draw this tick
            x = (v * xScale) - offset
            if x < xMin or x > xMax:  ## last check to make sure no out-of-bounds ticks are drawn
                tickPositions[i].append(None)
                continue
            tickPositions[i].append(x)

            p1 = [x, x]
            p2 = [x, x]
            p1[axis] = tickStart
            p2[axis] = tickStop
            if self.grid is False:
                p2[axis] += tickLength * tickDir
            tickPen = self.pen()
            color = tickPen.color()
            color.setAlpha(lineAlpha)
            tickPen.setColor(color)
            tickSpecs.append((tickPen, Point(p1), Point(p2)))
    profiler('compute ticks')


    if self.style['stopAxisAtTick'][0] is True:
        stop = max(span[0].y(), min(map(min, tickPositions)))
        if axis == 0:
            span[0].setY(stop)
        else:
            span[0].setX(stop)
    if self.style['stopAxisAtTick'][1] is True:
        stop = min(span[1].y(), max(map(max, tickPositions)))
        if axis == 0:
            span[1].setY(stop)
        else:
            span[1].setX(stop)
    axisSpec = (self.pen(), span[0], span[1])


    textOffset = self.style['tickTextOffset'][axis]  ## spacing between axis and text
    # if self.style['autoExpandTextSpace'] is True:
        # textWidth = self.textWidth
        # textHeight = self.textHeight
    # else:
        # textWidth = self.style['tickTextWidth'] ## space allocated for horizontal text
        # textHeight = self.style['tickTextHeight'] ## space allocated for horizontal text

    textSize2 = 0
    textRects = []
    textSpecs = []  ## list of draw

    # If values are hidden, return early
    if not self.style['showValues']:
        return (axisSpec, tickSpecs, textSpecs)

    for i in range(min(len(tickLevels), self.style['maxTextLevel'] + 1)):
        ## Get the list of strings to display for this level
        if tickStrings is None:
            spacing, values = tickLevels[i]
            strings = self.tickStrings(values, self.autoSIPrefixScale * self.scale, spacing)
        else:
            strings = tickStrings[i]

        if len(strings) == 0:
            continue

        ## ignore strings belonging to ticks that were previously ignored
        for j in range(len(strings)):
            if tickPositions[i][j] is None:
                strings[j] = None

        ## Measure density of text; decide whether to draw this level
        rects = []
        for s in strings:
            if s is None:
                rects.append(None)
            else:
                br = p.boundingRect(QtCore.QRectF(0, 0, 100, 100), QtCore.Qt.AlignCenter, asUnicode(s))
                ## boundingRect is usually just a bit too large
                ## (but this probably depends on per-font metrics?)
                br.setHeight(br.height() * 1.4)
                rects.append(br)
                textRects.append(rects[-1])

        if len(textRects) > 0:
            ## measure all text, make sure there's enough room
            if axis == 0:
                textSize = np.sum([r.height() for r in textRects])
                textSize2 = np.max([r.width() for r in textRects])
            else:
                textSize = np.sum([r.width() for r in textRects])
                textSize2 = np.max([r.height() for r in textRects])
        else:
            textSize = 0
            textSize2 = 0

        if i > 0:  ## always draw top level
            ## If the strings are too crowded, stop drawing text now.
            ## We use three different crowding limits based on the number
            ## of texts drawn so far.
            textFillRatio = float(textSize) / lengthInPixels
            finished = False
            for nTexts, limit in self.style['textFillLimits']:
                if len(textSpecs) >= nTexts and textFillRatio >= limit:
                    finished = True
                    break
            if finished:
                break

        # spacing, values = tickLevels[best]
        # strings = self.tickStrings(values, self.scale, spacing)
        # Determine exactly where tick text should be drawn
        for j in range(len(strings)):
            vstr = strings[j]
            if vstr is None:  ## this tick was ignored because it is out of bounds
                continue
            vstr = asUnicode(vstr)
            x = tickPositions[i][j]
            # textRect = p.boundingRect(QtCore.QRectF(0, 0, 100, 100), QtCore.Qt.AlignCenter, vstr)
            textRect = rects[j]
            height = textRect.height()
            width = textRect.width()
            # self.textHeight = height
            offset = max(0, self.style['tickLength']) + textOffset

            # Anchor the label rect on the side of the axis matching our
            # orientation.
            if self.orientation == 'left':
                textFlags = QtCore.Qt.TextDontClip | QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter
                rect = QtCore.QRectF(tickStop - offset - width, x - (height / 2), width, height)
            elif self.orientation == 'right':
                textFlags = QtCore.Qt.TextDontClip | QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter
                rect = QtCore.QRectF(tickStop + offset, x - (height / 2), width, height)
            elif self.orientation == 'top':
                textFlags = QtCore.Qt.TextDontClip | QtCore.Qt.AlignCenter | QtCore.Qt.AlignBottom
                rect = QtCore.QRectF(x - width / 2., tickStop - offset - height, width, height)
            elif self.orientation == 'bottom':
                textFlags = QtCore.Qt.TextDontClip | QtCore.Qt.AlignCenter | QtCore.Qt.AlignTop
                rect = QtCore.QRectF(x - width / 2., tickStop + offset, width, height)

            # p.setPen(self.pen())
            # p.drawText(rect, textFlags, vstr)
            textSpecs.append((rect, textFlags, vstr))
    profiler('compute text')

    ## update max text size if needed.
    self._updateMaxTextSize(textSize2)

    return (axisSpec, tickSpecs, textSpecs)
|
Calls tickValues() and tickStrings() to determine where and how ticks should
be drawn, then generates from this a set of drawing commands to be
interpreted by drawPicture().
|
def attach_tcp_service(cls, tcp_service: TCPService):
    """ Attaches a service for hosting

    Registers the given TCP service as the hosted one and wires up the
    bus; warns instead if a TCP service is already attached.

    :param tcp_service: A TCPService instance
    """
    if cls._services['_tcp_service'] is not None:
        warnings.warn('TCP service is already attached')
        return
    cls._services['_tcp_service'] = tcp_service
    cls._set_bus(tcp_service)
|
Attaches a service for hosting
:param tcp_service: A TCPService instance
|
def destroy(self):
    """
    Delete the page. May delete the whole document if it's actually the
    last page.
    """
    logger.info("Destroying page: %s" % self)
    # A document cannot exist without pages: removing the only page
    # destroys the whole document instead.
    if self.doc.nb_pages <= 1:
        self.doc.destroy()
        return
    # Snapshot the page list and count before unlinking files below.
    remaining_pages = self.doc.pages[:]
    total_pages = self.doc.nb_pages
    for file_path in (self.__get_box_path(),
                      self.__get_img_path(),
                      self._get_thumb_path()):
        if self.fs.exists(file_path):
            self.fs.unlink(file_path)
    # Shift every page after this one down by a single index.
    for idx in range(self.page_nb + 1, total_pages):
        remaining_pages[idx].change_index(offset=-1)
|
Delete the page. May delete the whole document if it's actually the
last page.
|
def paste(region, img, left, above, right, down):
    """Paste a previously cropped region onto the given image.

    The region size must match the target box exactly, and the box must
    lie inside the image. The region's color mode is converted
    automatically; only rectangular regions are supported.

    :param region: the cropped image region
    :param img: the target image
    :param left: left coordinate of the paste box
    :param above: top coordinate of the paste box
    :param right: right coordinate of the paste box
    :param down: bottom coordinate of the paste box
    :return: the modified image object (still in memory, not saved)
    """
    # The region is flipped 180 degrees before pasting, preserving the
    # original behavior of this helper.
    flipped = region.transpose(Image.ROTATE_180)
    img.paste(flipped, (left, above, right, down))
    return img
|
将扣的图粘贴到制定图片上
当你粘贴矩形选区的时候必须保证尺寸一致。此外,矩形选区不能在图像外。然而你不必保证矩形选区和原图的颜色模式一致,
因为矩形选区会被自动转换颜色,遗憾的是,只能扣矩形图。
:param region: 扣出的图
:param img: 指定图片
:param left: 左
:param above: 上
:param right: 右
:param down: 下
:return: 被修改过的图片对象,还在内存中,未保存。
|
def matches(self, properties):
    """
    Tests if the given criterion matches this LDAP criterion

    :param properties: A dictionary of properties
    :return: True if the properties matches this criterion, else False
    """
    try:
        # Compare the expected value against the property value; a
        # missing key aborts the lookup with KeyError.
        return self.comparator(self.value, properties[self.name])
    except KeyError:
        # The criterion key is absent from the given properties: no match
        return False
|
Tests if the given criterion matches this LDAP criterion
:param properties: A dictionary of properties
:return: True if the properties matches this criterion, else False
|
def price_dataframe(symbols='sp5002012',
                    start=datetime.datetime(2008, 1, 1),
                    end=datetime.datetime(2009, 12, 31),
                    price_type='actual_close',
                    cleaner=clean_dataframe,
                    ):
    """Retrieve the prices of a list of equities as a DataFrame (columns = symbols)

    Arguments:
      symbols (list of str): Ticker symbols like "GOOG", "AAPL", etc
        e.g. ["AAPL", " slv ", "GLD", "GOOG", "$SPX", "XOM", "msft"]
      start (datetime): The date at the start of the period being analyzed.
      end (datetime): The date at the end of the period being analyzed.
        Yahoo data stops at 2013/1/1
      price_type (str or list of str): price column(s) to retrieve,
        e.g. 'actual_close'.
      cleaner (callable): function applied to the retrieved data before
        returning it.
    """
    # Accept a single price type given as a bare string
    # (Python 2 only: `basestring` covers both str and unicode).
    if isinstance(price_type, basestring):
        price_type = [price_type]
    start = util.normalize_date(start or datetime.date(2008, 1, 1))
    end = util.normalize_date(end or datetime.date(2009, 12, 31))
    symbols = normalize_symbols(symbols)
    # NYSE trading days in the window; 16:00 marks the daily close.
    t = du.getNYSEdays(start, end, datetime.timedelta(hours=16))
    df = clean_dataframes(dataobj.get_data(t, symbols, price_type))
    # NOTE(review): `not df` suggests clean_dataframes returns a list of
    # DataFrames here (truthiness of a DataFrame itself would raise in
    # modern pandas) -- confirm against clean_dataframes. A singleton
    # list is unwrapped before cleaning.
    if not df or len(df) > 1:
        return cleaner(df)
    else:
        return cleaner(df[0])
|
Retrieve the prices of a list of equities as a DataFrame (columns = symbols)
Arguments:
symbols (list of str): Ticker symbols like "GOOG", "AAPL", etc
e.g. ["AAPL", " slv ", "GLD", "GOOG", "$SPX", "XOM", "msft"]
start (datetime): The date at the start of the period being analyzed.
end (datetime): The date at the end of the period being analyzed.
Yahoo data stops at 2013/1/1
|
def validate(self):
    """Returns whether this plugin does what it claims to have done"""
    try:
        last_used = self.client.get_access_key_last_used(
            AccessKeyId=self.access_key_id
        )
        owner = last_used['UserName']
        listing = self.client.list_access_keys(UserName=owner)
        # The key counts as disabled only if it appears in the owner's
        # key list with an 'Inactive' status.
        for meta in listing['AccessKeyMetadata']:
            if meta['AccessKeyId'] == self.access_key_id \
                    and meta['Status'] == 'Inactive':
                return True
        return False
    except Exception as e:
        # Any API failure means we could not confirm the key is disabled;
        # log it and report failure instead of crashing.
        logger.info(
            "Failed to validate key disable for "
            "key {id} due to: {e}.".format(
                e=e, id=self.access_key_id
            )
        )
        return False
|
Returns whether this plugin does what it claims to have done
|
def from_file(cls, filename, sr=22050):
    """Build an instance from an audio file (sample rate 22050 by default)."""
    samples, rate = librosa.load(filename, sr=sr)
    return cls(samples, rate)
|
Loads an audiofile, uses sr=22050 by default.
|
def urlencode(self):
    """
    Convert dictionary into a query string; keys are
    assumed to always be str
    """
    # Percent-quote only the values; keys are taken to be URL-safe already.
    pairs = ['%s=%s' % (key, quote(val)) for key, val in self.items()]
    return '&'.join(pairs)
|
Convert dictionary into a query string; keys are
assumed to always be str
|
def _validate_frequency(cls, index, freq, **kwargs):
    """
    Validate that a frequency is compatible with the values of a given
    Datetime Array/Index or Timedelta Array/Index

    Parameters
    ----------
    index : DatetimeIndex or TimedeltaIndex
        The index on which to determine if the given frequency is valid
    freq : DateOffset
        The frequency to validate

    Raises
    ------
    ValueError
        If the values of ``index`` do not conform to ``freq``.
    """
    if is_period_dtype(cls):
        # Frequency validation is not meaningful for Period Array/Index
        return None
    inferred = index.inferred_freq
    # An empty index, or an inferred frequency matching the candidate,
    # is trivially valid.
    if index.size == 0 or inferred == freq.freqstr:
        return None
    try:
        # Regenerate the range from the first value using the candidate
        # frequency and compare element-wise (as int64) with the actual
        # values; any mismatch means the frequency does not conform.
        on_freq = cls._generate_range(start=index[0], end=None,
                                      periods=len(index), freq=freq,
                                      **kwargs)
        if not np.array_equal(index.asi8, on_freq.asi8):
            raise ValueError
    except ValueError as e:
        if "non-fixed" in str(e):
            # non-fixed frequencies are not meaningful for timedelta64;
            # we retain that error message
            raise e
        # GH#11587 the main way this is reached is if the `np.array_equal`
        # check above is False. This can also be reached if index[0]
        # is `NaT`, in which case the call to `cls._generate_range` will
        # raise a ValueError, which we re-raise with a more targeted
        # message.
        raise ValueError('Inferred frequency {infer} from passed values '
                         'does not conform to passed frequency {passed}'
                         .format(infer=inferred, passed=freq.freqstr))
|
Validate that a frequency is compatible with the values of a given
Datetime Array/Index or Timedelta Array/Index
Parameters
----------
index : DatetimeIndex or TimedeltaIndex
The index on which to determine if the given frequency is valid
freq : DateOffset
The frequency to validate
|
def add(self, scheduler, max_iteration, bigdl_type="float"):
    """
    Add a learning rate scheduler to the contained `schedules`

    :param scheduler: learning rate scheduler to be added
    :param max_iteration: number of iterations this scheduler will run
    """
    # Delegate to the JVM side through the BigDL binding.
    return callBigDlFunc(bigdl_type, "addScheduler", self.value,
                         scheduler, max_iteration)
|
Add a learning rate scheduler to the contained `schedules`
:param scheduler: learning rate scheduler to be add
:param max_iteration: iteration numbers this scheduler will run
|
def create_route(self, uri, sub_service):
    """Create the route for the URI.

    No-op when the URI is already routed.

    :param uri: string - URI to be routed
    :param sub_service: boolean - is the URI for a sub-service
    :returns: n/a
    """
    # Idiomatic membership test (no need for .keys()); guard clause
    # avoids nesting the whole body.
    if uri in self.routes:
        return
    logger.debug('Service ({0}): Creating routes'
                 .format(self.name))
    self.routes[uri] = {
        # Pre-compiled matcher for incoming request paths.
        'regex': StackInABoxService.get_service_regex(self.base_url,
                                                      uri,
                                                      sub_service),
        'uri': uri,
        # Router dispatches per-method handlers for this URI.
        'handlers': StackInABoxServiceRouter(self.name,
                                             uri,
                                             None,
                                             self)
    }
|
Create the route for the URI.
:param uri: string - URI to be routed
:param sub_service: boolean - is the URI for a sub-service
:returns: n/a
|
def add_nodes(self, nodes, nesting=1):
    """
    Adds edges indicating the call-tree for the procedures listed in
    the nodes.
    """
    hopNodes = set()  # nodes reached in this hop
    hopEdges = []     # edges discovered in this hop
    # Collect the nodes and edges reachable in one hop, assigning each
    # source node its own colour from the rainbow palette.
    # (enumerate replaces the old zip(range(len(nodes)), nodes) idiom.)
    for i, n in enumerate(nodes):
        r, g, b = rainbowcolour(i, len(nodes))
        colour = '#%02X%02X%02X' % (r, g, b)
        # Direct calls are drawn solid; interface links are dashed.
        # NOTE: a target already collected in this hop is skipped, so at
        # most one edge per target node is recorded per hop (pre-existing
        # behavior, preserved here).
        for p in n.calls:
            if p not in hopNodes:
                hopNodes.add(p)
                hopEdges.append((n, p, 'solid', colour))
        for p in getattr(n, 'interfaces', []):
            if p not in hopNodes:
                hopNodes.add(p)
                hopEdges.append((n, p, 'dashed', colour))
    # Add nodes, edges and attributes to the graph if the maximum number
    # of nodes is not exceeded.
    if self.add_to_graph(hopNodes, hopEdges, nesting):
        self.dot.attr('graph', size='11.875,1000.0')
        self.dot.attr('graph', concentrate='false')
|
Adds edges indicating the call-tree for the procedures listed in
the nodes.
|
def get_deposit_address(self, currency):
    """Get deposit address for a currency

    https://docs.kucoin.com/#get-deposit-address

    :param currency: Name of currency
    :type currency: string

    .. code:: python

        address = client.get_deposit_address('NEO')

    :returns: ApiResponse

    .. code:: python

        {
            "address": "0x78d3ad1c0aa1bf068e19c94a2d7b16c9c0fcd8b1",
            "memo": "5c247c8a03aa677cea2a251d"
        }

    :raises: KucoinResponseException, KucoinAPIException
    """
    # Signed (authenticated) GET against the deposit-addresses endpoint.
    return self._get('deposit-addresses', True, data={'currency': currency})
|
Get deposit address for a currency
https://docs.kucoin.com/#get-deposit-address
:param currency: Name of currency
:type currency: string
.. code:: python
address = client.get_deposit_address('NEO')
:returns: ApiResponse
.. code:: python
{
"address": "0x78d3ad1c0aa1bf068e19c94a2d7b16c9c0fcd8b1",
"memo": "5c247c8a03aa677cea2a251d"
}
:raises: KucoinResponseException, KucoinAPIException
|
def _recalculate_extents_and_offsets(self, index, logical_block_size):
    # type: (int, int) -> Tuple[int, int]
    '''
    Internal method to recalculate the extents and offsets associated with
    children of this directory record.

    Parameters:
     index - The index at which to start the recalculation.
     logical_block_size - The block size to use for comparisons.
    Returns:
     A tuple where the first element is the total number of extents required
     by the children and where the second element is the offset into the
     last extent currently being used.
    '''
    # Resume from the running totals recorded on the previous child, or
    # start fresh at extent 1 / offset 0 when recalculating from the top.
    if index == 0:
        offset = 0
        extents = 1
    else:
        prev = self.children[index - 1]
        offset = prev.offset_to_here
        extents = prev.extents_to_here

    for pos in range(index, len(self.children)):
        child = self.children[pos]
        rec_len = child.dr_len
        # A directory record may not straddle a block boundary: when it
        # would, spill over to the start of a new extent.
        if (offset + rec_len) > logical_block_size:
            extents += 1
            offset = 0
        offset += rec_len
        # Record the running totals on each child as we go.
        child.extents_to_here = extents
        child.offset_to_here = offset
        child.index_in_parent = pos
    return extents, offset
|
Internal method to recalculate the extents and offsets associated with
children of this directory record.
Parameters:
index - The index at which to start the recalculation.
logical_block_size - The block size to use for comparisons.
Returns:
A tuple where the first element is the total number of extents required
by the children and where the second element is the offset into the
last extent currently being used.
|
def _build_basemap(self):
    '''
    Creates the map according to the input configuration

    Raises ValueError when the longitude or latitude bounds are
    inverted (lower limit >= upper limit).
    '''
    if self.config['min_lon'] >= self.config['max_lon']:
        raise ValueError('Upper limit of long is smaller than lower limit')
    # BUG FIX: this check previously re-tested the longitudes (copy-paste
    # duplicate); it must validate the latitude bounds instead.
    if self.config['min_lat'] >= self.config['max_lat']:
        raise ValueError('Upper limit of lat is smaller than lower limit')
    # Corners of the map
    lowcrnrlat = self.config['min_lat']
    lowcrnrlon = self.config['min_lon']
    uppcrnrlat = self.config['max_lat']
    uppcrnrlon = self.config['max_lon']
    if 'resolution' not in self.config.keys():
        self.config['resolution'] = 'l'
    # Center the projection on the midpoint of the bounding box.
    lat0 = lowcrnrlat + ((uppcrnrlat - lowcrnrlat) / 2)
    lon0 = lowcrnrlon + ((uppcrnrlon - lowcrnrlon) / 2)
    # Taller-than-wide extents get a portrait figure, otherwise landscape.
    if (uppcrnrlat - lowcrnrlat) >= (uppcrnrlon - lowcrnrlon):
        fig_aspect = PORTRAIT_ASPECT
    else:
        fig_aspect = LANDSCAPE_ASPECT
    if self.ax is None:
        self.fig, self.ax = plt.subplots(figsize=fig_aspect,
                                         facecolor='w',
                                         edgecolor='k')
    else:
        self.fig = self.ax.get_figure()
    if self.title:
        self.ax.set_title(self.title, fontsize=16)
    parallels = np.arange(-90., 90., self.lat_lon_spacing)
    meridians = np.arange(0., 360., self.lat_lon_spacing)
    # Build Map
    # Do not import Basemap at top level since it's an optional feature
    # and it would break doctests
    from mpl_toolkits.basemap import Basemap
    self.m = Basemap(
        llcrnrlon=lowcrnrlon, llcrnrlat=lowcrnrlat,
        urcrnrlon=uppcrnrlon, urcrnrlat=uppcrnrlat,
        projection='stere', resolution=self.config['resolution'],
        area_thresh=1000.0, lat_0=lat0, lon_0=lon0, ax=self.ax)
    self.m.drawcountries()
    self.m.drawmapboundary()
    self.m.drawcoastlines()
    self.m.drawstates()
    self.m.drawparallels(parallels, labels=[1, 0, 0, 0], fontsize=12)
    self.m.drawmeridians(meridians, labels=[0, 0, 0, 1], fontsize=12)
    self.m.fillcontinents(color='wheat')
|
Creates the map according to the input configuration
|
def augpath(path, augsuf='', augext='', augpref='', augdir=None, newext=None,
            newfname=None, ensure=False, prefix=None, suffix=None):
    """
    Augments the end of a path before the extension.

    Args:
        path (str): path to augment
        augsuf (str): text appended to the filename before the extension
        augext (str): text appended after the extension
        augpref (str): text prepended to the filename
        augdir (str): extra directory inserted before the filename
        newext (str): replacement extension (keep original when None)
        newfname (str): replacement base filename (keep original when None)
        ensure (bool): create ``augdir`` if it does not exist
        prefix (str): alias overriding ``augpref``
        suffix (str): alias overriding ``augsuf``

    Returns:
        str: newpath

    Example:
        >>> # DISABLE_DOCTEST
        >>> from utool.util_path import *  # NOQA
        >>> augpath('somefile.txt', '_aug')
        'somefile_aug.txt'

    Example:
        >>> # DISABLE_DOCTEST
        >>> from utool.util_path import *  # NOQA
        >>> augpath('somefile.txt', '_aug2', newext='.bak', augdir='backup')
        'backup/somefile_aug2.bak'
    """
    # `prefix` / `suffix` are aliases that win over augpref / augsuf.
    if prefix is not None:
        augpref = prefix
    if suffix is not None:
        augsuf = suffix
    # Split the path into directory, base name and extension.
    dpath, fname = split(path)
    base, ext = splitext(fname)
    if newfname is not None:
        base = newfname
    ext_out = ext if newext is None else newext
    # Reassemble: prefix + base + suffix + extension + extra extension.
    new_fname = augpref + base + augsuf + ext_out + augext
    # Optionally nest the result inside an extra directory.
    if augdir is None:
        out_dpath = dpath
    else:
        out_dpath = join(dpath, augdir)
        if ensure:
            # create the new dir if needed
            ensuredir(out_dpath)
    return join(out_dpath, new_fname)
|
augments end of path before the extension.
augpath
Args:
path (str):
augsuf (str): augment filename before extension
Returns:
str: newpath
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> path = 'somefile.txt'
>>> augsuf = '_aug'
>>> newpath = augpath(path, augsuf)
>>> result = str(newpath)
>>> print(result)
somefile_aug.txt
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_path import * # NOQA
>>> path = 'somefile.txt'
>>> augsuf = '_aug2'
>>> newext = '.bak'
>>> augdir = 'backup'
>>> newpath = augpath(path, augsuf, newext=newext, augdir=augdir)
>>> result = str(newpath)
>>> print(result)
backup/somefile_aug2.bak
|
def get_publisher():
    """Build and return the forwarding Publisher.

    :return: configured ``Publisher`` connected to the forward broker
    """
    log.info("initializing publisher")
    # Both endpoint types currently use the same broker URL: the previous
    # if/else on FORWARD_ENDPOINT_TYPE assigned FORWARD_BROKER_URL in both
    # branches, so the branch has been removed.
    auth_url = FORWARD_BROKER_URL
    pub = Publisher(name="{}_{}".format(SOURCE, "-redis"),
                    auth_url=auth_url,
                    ssl_options=FORWARD_SSL_OPTIONS)
    log.info("publisher={}".format(pub))
    return pub
|
get_publisher
|
def versions(self):
    """ Read versions from the table

    The versions are kept in cache for the next reads.
    """
    # Serve the cached list when the table was already read once.
    if self._versions is not None:
        return self._versions
    query = """
                SELECT number,
                       date_start,
                       date_done,
                       log,
                       addons
                FROM {}
                """.format(self.table_name)
    records = []
    with self.database.cursor_autocommit() as cursor:
        cursor.execute(query)
        for row in cursor.fetchall():
            row = list(row)
            # 'addons' is stored as JSON text; NULL/empty decodes to []
            row[4] = json.loads(row[4]) if row[4] else []
            records.append(self.VersionRecord(*row))
    self._versions = records
    return self._versions
|
Read versions from the table
The versions are kept in cache for the next reads.
|
def adjacency2graph(adjacency, edge_type=None, adjust=1, **kwargs):
    """Takes an adjacency list, dict, or matrix and returns a graph.
    The purpose of this function is take an adjacency list (or matrix)
    and return a :class:`.QueueNetworkDiGraph` that can be used with a
    :class:`.QueueNetwork` instance. The Graph returned has the
    ``edge_type`` edge property set for each edge. Note that the graph may
    be altered.
    Parameters
    ----------
    adjacency : dict or :class:`~numpy.ndarray`
        An adjacency list as either a dict, or an adjacency matrix.
    adjust : int ``{1, 2}`` (optional, default: 1)
        Specifies what to do when the graph has terminal vertices
        (nodes with no out-edges). Note that if ``adjust`` is not 2
        then it is assumed to be 1. There are two choices:
        * ``adjust = 1``: A loop is added to each terminal node in the
          graph, and their ``edge_type`` of that loop is set to 0.
        * ``adjust = 2``: All edges leading to terminal nodes have
          their ``edge_type`` set to 0.
    **kwargs :
        Unused.
    Returns
    -------
    out : :any:`networkx.DiGraph`
        A directed graph with the ``edge_type`` edge property.
    Raises
    ------
    TypeError
        Is raised if ``adjacency`` is not a dict or
        :class:`~numpy.ndarray`.
    Examples
    --------
    If terminal nodes are such that all in-edges have edge type ``0``
    then nothing is changed. However, if a node is a terminal node then
    a loop is added with edge type 0.
    >>> import queueing_tool as qt
    >>> adj = {
    ...     0: {1: {}},
    ...     1: {2: {},
    ...         3: {}},
    ...     3: {0: {}}}
    >>> eTy = {0: {1: 1}, 1: {2: 2, 3: 4}, 3: {0: 1}}
    >>> # A loop will be added to vertex 2
    >>> g = qt.adjacency2graph(adj, edge_type=eTy)
    >>> ans = qt.graph2dict(g)
    >>> sorted(ans.items())     # doctest: +NORMALIZE_WHITESPACE
    [(0, {1: {'edge_type': 1}}),
    (1, {2: {'edge_type': 2}, 3: {'edge_type': 4}}),
    (2, {2: {'edge_type': 0}}),
    (3, {0: {'edge_type': 1}})]
    You can use a dict of lists to represent the adjacency list.
    >>> adj = {0 : [1], 1: [2, 3], 3: [0]}
    >>> g = qt.adjacency2graph(adj, edge_type=eTy)
    >>> ans = qt.graph2dict(g)
    >>> sorted(ans.items())     # doctest: +NORMALIZE_WHITESPACE
    [(0, {1: {'edge_type': 1}}),
    (1, {2: {'edge_type': 2}, 3: {'edge_type': 4}}),
    (2, {2: {'edge_type': 0}}),
    (3, {0: {'edge_type': 1}})]
    Alternatively, you could have this function adjust the edges that
    lead to terminal vertices by changing their edge type to 0:
    >>> # The graph is unaltered
    >>> g = qt.adjacency2graph(adj, edge_type=eTy, adjust=2)
    >>> ans = qt.graph2dict(g)
    >>> sorted(ans.items())     # doctest: +NORMALIZE_WHITESPACE
    [(0, {1: {'edge_type': 1}}),
    (1, {2: {'edge_type': 0}, 3: {'edge_type': 4}}),
    (2, {}),
    (3, {0: {'edge_type': 1}})]
    """
    # Normalize the adjacency input to a dict-of-dicts representation.
    if isinstance(adjacency, np.ndarray):
        adjacency = _matrix2dict(adjacency)
    elif isinstance(adjacency, dict):
        adjacency = _dict2dict(adjacency)
    else:
        msg = ("If the adjacency parameter is supplied it must be a "
               "dict, or a numpy.ndarray.")
        raise TypeError(msg)

    # Normalize edge_type the same way, then attach each type value as an
    # 'edge_type' attribute on the corresponding adjacency entry.
    if edge_type is None:
        edge_type = {}
    else:
        if isinstance(edge_type, np.ndarray):
            edge_type = _matrix2dict(edge_type, etype=True)
        elif isinstance(edge_type, dict):
            edge_type = _dict2dict(edge_type)

    for u, ty in edge_type.items():
        for v, et in ty.items():
            adjacency[u][v]['edge_type'] = et

    # Round-trip through networkx so every vertex mentioned only as a
    # target also gets an (empty) adjacency entry, then apply the
    # terminal-vertex adjustment described in the docstring.
    g = nx.from_dict_of_dicts(adjacency, create_using=nx.DiGraph())
    adjacency = nx.to_dict_of_dicts(g)
    adjacency = _adjacency_adjust(adjacency, adjust, True)

    return nx.from_dict_of_dicts(adjacency, create_using=nx.DiGraph())
|
Takes an adjacency list, dict, or matrix and returns a graph.
The purpose of this function is take an adjacency list (or matrix)
and return a :class:`.QueueNetworkDiGraph` that can be used with a
:class:`.QueueNetwork` instance. The Graph returned has the
``edge_type`` edge property set for each edge. Note that the graph may
be altered.
Parameters
----------
adjacency : dict or :class:`~numpy.ndarray`
An adjacency list as either a dict, or an adjacency matrix.
adjust : int ``{1, 2}`` (optional, default: 1)
Specifies what to do when the graph has terminal vertices
(nodes with no out-edges). Note that if ``adjust`` is not 2
then it is assumed to be 1. There are two choices:
* ``adjust = 1``: A loop is added to each terminal node in the
graph, and their ``edge_type`` of that loop is set to 0.
* ``adjust = 2``: All edges leading to terminal nodes have
their ``edge_type`` set to 0.
**kwargs :
Unused.
Returns
-------
out : :any:`networkx.DiGraph`
A directed graph with the ``edge_type`` edge property.
Raises
------
TypeError
Is raised if ``adjacency`` is not a dict or
:class:`~numpy.ndarray`.
Examples
--------
If terminal nodes are such that all in-edges have edge type ``0``
then nothing is changed. However, if a node is a terminal node then
a loop is added with edge type 0.
>>> import queueing_tool as qt
>>> adj = {
... 0: {1: {}},
... 1: {2: {},
... 3: {}},
... 3: {0: {}}}
>>> eTy = {0: {1: 1}, 1: {2: 2, 3: 4}, 3: {0: 1}}
>>> # A loop will be added to vertex 2
>>> g = qt.adjacency2graph(adj, edge_type=eTy)
>>> ans = qt.graph2dict(g)
>>> sorted(ans.items()) # doctest: +NORMALIZE_WHITESPACE
[(0, {1: {'edge_type': 1}}),
(1, {2: {'edge_type': 2}, 3: {'edge_type': 4}}),
(2, {2: {'edge_type': 0}}),
(3, {0: {'edge_type': 1}})]
You can use a dict of lists to represent the adjacency list.
>>> adj = {0 : [1], 1: [2, 3], 3: [0]}
>>> g = qt.adjacency2graph(adj, edge_type=eTy)
>>> ans = qt.graph2dict(g)
>>> sorted(ans.items()) # doctest: +NORMALIZE_WHITESPACE
[(0, {1: {'edge_type': 1}}),
(1, {2: {'edge_type': 2}, 3: {'edge_type': 4}}),
(2, {2: {'edge_type': 0}}),
(3, {0: {'edge_type': 1}})]
Alternatively, you could have this function adjust the edges that
lead to terminal vertices by changing their edge type to 0:
>>> # The graph is unaltered
>>> g = qt.adjacency2graph(adj, edge_type=eTy, adjust=2)
>>> ans = qt.graph2dict(g)
>>> sorted(ans.items()) # doctest: +NORMALIZE_WHITESPACE
[(0, {1: {'edge_type': 1}}),
(1, {2: {'edge_type': 0}, 3: {'edge_type': 4}}),
(2, {}),
(3, {0: {'edge_type': 1}})]
|
def index_template(self, tpl):
    """
    Indexes a template by `name` into the `name_to_template` dictionary.

    :param tpl: The template to index
    :type tpl: alignak.objects.item.Item
    :return: the indexed template (possibly the winner of a name conflict)
    """
    objcls = self.inner_class.my_type
    name = getattr(tpl, 'name', '')
    if not name:
        # A template must have a name; record the error on the item.
        # Note: the template is still indexed below, under the '' key.
        mesg = "a %s template has been defined without name, from: %s" % \
               (objcls, tpl.imported_from)
        tpl.add_error(mesg)
    elif name in self.name_to_template:
        # Duplicate template name: manage_conflict decides which one wins.
        tpl = self.manage_conflict(tpl, name)
    self.name_to_template[name] = tpl
    logger.debug("Indexed a %s template: %s, uses: %s",
                 tpl.my_type, name, getattr(tpl, 'use', 'Nothing'))
    return tpl
|
Indexes a template by `name` into the `name_to_template` dictionary.
:param tpl: The template to index
:type tpl: alignak.objects.item.Item
:return: the indexed template
|
def add_suffix(file_path, suffix='modified', sep='_', ext=None):
    """Adds suffix to a file name separated by an underscore and returns file path.

    :param file_path: path whose file name will be augmented
    :param suffix: text appended to the file name (default 'modified')
    :param sep: separator inserted before the suffix (default '_')
    :param ext: replacement extension, or None to keep the original
    :return: the augmented file path
    """
    return _add_suffix(file_path, suffix, sep, ext)
|
Adds suffix to a file name separated by an underscore and returns file path.
|
def tab(self, n=1, interval=0, pre_dl=None, post_dl=None):
    """Tap ``tab`` key for ``n`` times, with ``interval`` seconds of interval.

    Sleeps ``pre_dl`` seconds before the key presses and ``post_dl``
    seconds afterwards (both skipped when None, per ``self.delay``).
    """
    self.delay(pre_dl)
    self.k.tap_key(self.k.tab_key, n, interval)
    self.delay(post_dl)
|
Tap ``tab`` key for ``n`` times, with ``interval`` seconds of interval.
**中文文档**
以 ``interval`` 中定义的频率按下某个tab键 ``n`` 次。
|
def paint_cube(self, x, y):
    """
    Paints a cube at a certain position a color.

    Parameters
    ----------
    x: int
        Horizontal position of the upper left corner of the cube.
    y: int
        Vertical position of the upper left corner of the cube.
    """
    # Pick the next color in the palette cycle.
    fill_color = self.next_color()
    # Bounding box: upper-left corner extended by the configured size.
    bbox = [x, y, x + self.cube_size, y + self.cube_size]
    # Draw the filled square directly on the target image.
    ImageDraw.Draw(im=self.image).rectangle(xy=bbox, fill=fill_color)
|
Paints a cube at a certain position a color.
Parameters
----------
x: int
Horizontal position of the upper left corner of the cube.
y: int
Vertical position of the upper left corner of the cube.
|
def get_schema_validator(self, schema_name):
    """
    Had to remove the id property from map.json or it uses URLs for validation
    See various issues at https://github.com/Julian/jsonschema/pull/306
    """
    if schema_name in self.schemas:
        # Already loaded: reuse the cached schema and resolver.
        jsn_schema, resolver = self.schemas[schema_name]
    else:
        schema_file = self.get_schema_file(schema_name)
        with open(schema_file) as f:
            try:
                jsn_schema = json.load(f)
            except ValueError as ex:
                log.error("Could not load %s", schema_file)
                raise ex
        schemas_folder = self.get_schemas_folder()
        root_schema_path = self.get_schema_path(schemas_folder)
        resolver = jsonschema.RefResolver(root_schema_path, None)
        # cache the schema and resolver for future use
        self.schemas[schema_name] = (jsn_schema, resolver)
    # A fresh validator is built per call; only schema/resolver are cached.
    return jsonschema.Draft4Validator(schema=jsn_schema, resolver=resolver)
|
Had to remove the id property from map.json or it uses URLs for validation
See various issues at https://github.com/Julian/jsonschema/pull/306
|
def turn_left():
    """turns RedBot to the Left"""
    # Drive both wheels CCW so the robot spins in place to the left.
    motors.left_motor(150)   # spin CCW
    motors.right_motor(150)  # spin CCW
    board.sleep(0.5)
    # Stop after the half-second turn (stray C-style semicolon removed).
    motors.brake()
    board.sleep(0.1)
|
turns RedBot to the Left
|
def _get_leader_for_partition(self, topic, partition):
    """
    Returns the BrokerMetadata of the leader for the given partition.

    Note: despite older documentation, this method never returns None --
    a partition without a leader raises LeaderNotAvailableError instead.

    Raises:
        UnknownTopicOrPartitionError: If the topic or partition is not part
            of the metadata.
        LeaderNotAvailableError: If the server has metadata, but there is no
            current leader.
    """
    key = TopicPartition(topic, partition)

    # Use cached metadata if it is there
    if self.topics_to_brokers.get(key) is not None:
        return self.topics_to_brokers[key]

    # Otherwise refresh metadata

    # If topic does not already exist, this will raise
    # UnknownTopicOrPartitionError if not auto-creating
    # LeaderNotAvailableError otherwise until partitions are created
    self.load_metadata_for_topics(topic)

    # If the partition doesn't actually exist, raise
    if partition not in self.topic_partitions.get(topic, []):
        raise UnknownTopicOrPartitionError(key)

    # If there's no leader for the partition, raise
    # (-1 is the broker-protocol sentinel for "no current leader")
    leader = self.topic_partitions[topic][partition]
    if leader == -1:
        raise LeaderNotAvailableError((topic, partition))

    # Otherwise return the BrokerMetadata
    return self.brokers[leader]
|
Returns the leader for a partition or None if the partition exists
but has no leader.
Raises:
UnknownTopicOrPartitionError: If the topic or partition is not part
of the metadata.
LeaderNotAvailableError: If the server has metadata, but there is no
current leader.
|
def ones_matrix_band_part(rows, cols, num_lower, num_upper, out_shape=None):
    """Matrix band part of ones.

    Args:
      rows: int determining number of rows in output
      cols: int
      num_lower: int, maximum distance backward. Negative values indicate
        unlimited.
      num_upper: int, maximum distance forward. Negative values indicate
        unlimited.
      out_shape: shape to reshape output by.

    Returns:
      Tensor of size rows * cols reshaped into shape out_shape.
    """
    # Generator expression instead of a throwaway list inside all().
    if all(isinstance(el, int) for el in (rows, cols, num_lower, num_upper)):
        # Needed info is constant, so we construct in numpy
        if num_lower < 0:
            num_lower = rows - 1
        if num_upper < 0:
            num_upper = cols - 1
        # Intersection of lower and upper triangular masks gives the band.
        lower_mask = np.tri(cols, rows, num_lower).T
        upper_mask = np.tri(rows, cols, num_upper)
        band = np.ones((rows, cols)) * lower_mask * upper_mask
        if out_shape:
            band = band.reshape(out_shape)
        band = tf.constant(band, tf.float32)
    else:
        # Dynamic sizes: defer to TensorFlow's band-part op.
        band = tf.matrix_band_part(
            tf.ones([rows, cols]), tf.cast(num_lower, tf.int64),
            tf.cast(num_upper, tf.int64))
        if out_shape:
            band = tf.reshape(band, out_shape)

    return band
|
Matrix band part of ones.
Args:
rows: int determining number of rows in output
cols: int
num_lower: int, maximum distance backward. Negative values indicate
unlimited.
num_upper: int, maximum distance forward. Negative values indicate
unlimited.
out_shape: shape to reshape output by.
Returns:
Tensor of size rows * cols reshaped into shape out_shape.
|
def handle_lock_expired(
        payment_state: InitiatorPaymentState,
        state_change: ReceiveLockExpired,
        channelidentifiers_to_channels: ChannelMap,
        block_number: BlockNumber,
) -> TransitionResult[InitiatorPaymentState]:
    """Initiator also needs to handle LockExpired messages when refund transfers are involved.

    A -> B -> C

    - A sends locked transfer to B
    - B attempted to forward to C but has not enough capacity
    - B sends a refund transfer with the same secrethash back to A
    - When the lock expires B will also send a LockExpired message to A
    - A needs to be able to properly process it

    Related issue: https://github.com/raiden-network/raiden/issues/3183
    """
    # Ignore LockExpired for secrethashes we have no transfer for.
    initiator_state = payment_state.initiator_transfers.get(state_change.secrethash)

    if not initiator_state:
        return TransitionResult(payment_state, list())

    channel_identifier = initiator_state.channel_identifier
    channel_state = channelidentifiers_to_channels.get(channel_identifier)

    # Likewise ignore it when the channel is unknown.
    if not channel_state:
        return TransitionResult(payment_state, list())

    secrethash = initiator_state.transfer.lock.secrethash
    # Delegate the balance-proof/lock bookkeeping to the channel layer.
    result = channel.handle_receive_lock_expired(
        channel_state=channel_state,
        state_change=state_change,
        block_number=block_number,
    )
    assert result.new_state, 'handle_receive_lock_expired should not delete the task'

    # If the lock is gone from the partner's state after processing, the
    # transfer can no longer be claimed: emit an unlock-failed event.
    if not channel.get_lock(result.new_state.partner_state, secrethash):
        transfer = initiator_state.transfer
        unlock_failed = EventUnlockClaimFailed(
            identifier=transfer.payment_identifier,
            secrethash=transfer.lock.secrethash,
            reason='Lock expired',
        )
        result.events.append(unlock_failed)

    return TransitionResult(payment_state, result.events)
|
Initiator also needs to handle LockExpired messages when refund transfers are involved.
A -> B -> C
- A sends locked transfer to B
- B attempted to forward to C but has not enough capacity
- B sends a refund transfer with the same secrethash back to A
- When the lock expires B will also send a LockExpired message to A
- A needs to be able to properly process it
Related issue: https://github.com/raiden-network/raiden/issues/3183
|
def dp004(self, value=None):
    """ Corresponds to IDD Field `dp004`

    Dew-point temperature corresponding to 0.4% annual cumulative
    frequency of occurrence.

    Args:
        value (float): value for IDD Field `dp004`
            Unit: C
            if `value` is None it will not be checked against the
            specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value
    """
    if value is None:
        # Missing value: stored as-is, without validation.
        self._dp004 = None
        return
    try:
        coerced = float(value)
    except ValueError:
        raise ValueError('value {} need to be of type float '
                         'for field `dp004`'.format(value))
    self._dp004 = coerced
|
Corresponds to IDD Field `dp004`
Dew-point temperature corresponding to 0.4% annual cumulative frequency of occurrence
Args:
value (float): value for IDD Field `dp004`
Unit: C
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
|
def update_cloud_integration(self, id, **kwargs):  # noqa: E501
    """Update a specific cloud integration.  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to receive the
    request thread instead of the response data.

    >>> thread = api.update_cloud_integration(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str id: (required)
    :param CloudIntegration body: updated cloud integration definition
    :return: ResponseContainerCloudIntegration
             If the method is called asynchronously,
             returns the request thread.
    """
    # Always unwrap the response to the data payload only.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        # Async: hand back the thread; caller retrieves the result itself.
        return self.update_cloud_integration_with_http_info(id, **kwargs)  # noqa: E501
    (data) = self.update_cloud_integration_with_http_info(id, **kwargs)  # noqa: E501
    return data
|
Update a specific cloud integration # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_cloud_integration(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:param CloudIntegration body: Example Body: <pre>{ \"name\":\"CloudWatch integration\", \"service\":\"CLOUDWATCH\", \"cloudWatch\":{ \"baseCredentials\":{ \"roleArn\":\"arn:aws:iam::<accountid>:role/<rolename>\", \"externalId\":\"wave123\" }, \"metricFilterRegex\":\"^aws.(sqs|ec2|ebs|elb).*$\", \"pointTagFilterRegex\":\"(region|name)\" }, \"serviceRefreshRateInMins\":5 }</pre>
:return: ResponseContainerCloudIntegration
If the method is called asynchronously,
returns the request thread.
|
def set_sticker_position_in_set(self, sticker, position):
    """
    Use this method to move a sticker in a set created by the bot to a specific position. Returns True on success.

    https://core.telegram.org/bots/api#setstickerpositioninset

    Parameters:

    :param sticker: File identifier of the sticker
    :type  sticker: str|unicode

    :param position: New sticker position in the set, zero-based
    :type  position: int

    Returns:

    :return: Returns True on success
    :rtype:  bool
    """
    # Validate argument types up front; these raise on mismatch.
    assert_type_or_raise(sticker, unicode_type, parameter_name="sticker")
    assert_type_or_raise(position, int, parameter_name="position")
    result = self.do("setStickerPositionInSet", sticker=sticker, position=position)
    if not self.return_python_objects:
        # Caller wants the raw API response, not a parsed Python value.
        return result
    # end if
    logger.debug("Trying to parse {data}".format(data=repr(result)))
    try:
        return from_array_list(bool, result, list_level=0, is_builtin=True)
    except TgApiParseException:
        logger.debug("Failed parsing as primitive bool", exc_info=True)
    # end try
    # no valid parsing so far
    raise TgApiParseException("Could not parse result.")  # See debug log for details!
|
Use this method to move a sticker in a set created by the bot to a specific position. Returns True on success.
https://core.telegram.org/bots/api#setstickerpositioninset
Parameters:
:param sticker: File identifier of the sticker
:type sticker: str|unicode
:param position: New sticker position in the set, zero-based
:type position: int
Returns:
:return: Returns True on success
:rtype: bool
|
def tidy(args):
    """
    %prog tidy agpfile componentfasta

    Given an agp file, run through the following steps:
    1. Trim components with dangling N's
    2. Merge adjacent gaps
    3. Trim gaps at the end of an object
    4. Reindex the agp

    Final output is in `.tidy.agp`.
    """
    # NOTE: the docstring above doubles as the CLI usage text via
    # OptionParser(tidy.__doc__), so its wording is user-facing.
    p = OptionParser(tidy.__doc__)
    p.add_option("--nogaps", default=False, action="store_true",
                 help="Remove all gap lines [default: %default]")

    opts, args = p.parse_args(args)
    if len(args) != 2:
        sys.exit(p.print_help())

    agpfile, componentfasta = args
    # Keep the original path: the final output name is derived from it.
    originalagpfile = agpfile

    # Step 1: Trim terminal Ns.  build() emits a trimmed AGP plus a FASTA
    # we do not need; the FASTA is written only to drive the trimming and
    # is deleted immediately.
    tmpfasta = "tmp.fasta"
    trimmed_agpfile = build([agpfile, componentfasta, tmpfasta,
                             "--newagp", "--novalidate"])
    os.remove(tmpfasta)
    agpfile = trimmed_agpfile
    agpfile = reindex([agpfile, "--inplace"])

    # Step 2: Merge adjacent gaps; the pre-merge intermediate is discarded.
    merged_agpfile = gaps([agpfile, "--merge"])
    os.remove(agpfile)

    # Step 3: Trim gap lines at either end of each object.  Rows are
    # grouped per object; a leading or trailing gap row is dropped and
    # logged before the surviving rows are written out.
    agpfile = merged_agpfile
    agp = AGP(agpfile)
    newagpfile = agpfile.replace(".agp", ".fixed.agp")
    fw = open(newagpfile, "w")
    # NOTE(review): `object` shadows the builtin of the same name here.
    for object, a in groupby(agp, key=lambda x: x.object):
        a = list(a)
        if a[0].is_gap:
            # Drop a gap row at the start of the object.
            g, a = a[0], a[1:]
            logging.debug("Trim beginning Ns({0}) of {1}".\
                    format(g.gap_length, object))
        if a and a[-1].is_gap:
            # Drop a gap row at the end of the object.
            a, g = a[:-1], a[-1]
            logging.debug("Trim trailing Ns({0}) of {1}".\
                    format(g.gap_length, object))
        print("\n".join(str(x) for x in a), file=fw)
    fw.close()
    os.remove(agpfile)

    # Step 4: Final reindex, optionally stripping all gap lines.
    agpfile = newagpfile
    reindex_opts = [agpfile, "--inplace"]
    if opts.nogaps:
        reindex_opts += ["--nogaps"]
    agpfile = reindex(reindex_opts)

    # Rename the last intermediate to `<original>.tidy.agp`.
    tidyagpfile = originalagpfile.replace(".agp", ".tidy.agp")
    shutil.move(agpfile, tidyagpfile)

    logging.debug("File written to `{0}`.".format(tidyagpfile))
    return tidyagpfile
|
%prog tidy agpfile componentfasta
Given an agp file, run through the following steps:
1. Trim components with dangling N's
2. Merge adjacent gaps
3. Trim gaps at the end of an object
4. Reindex the agp
Final output is in `.tidy.agp`.
|
def parse_python_version(output):
    """Parse a Python version output returned by `python --version`.

    Return a dict with three keys: major, minor, and micro.  Each value
    is a string containing a version part.

    Note: The micro part would be `'0'` if it's missing from the input
    string.  Return None when the first line is not a version banner.
    """
    first_line = output.split("\n", 1)[0]
    # "Python <major>.<minor>[.<micro>]" with optional trailing garbage
    # (e.g. "+", "b1", build info) — micro is optional, see pypa/pipenv#1893.
    banner = re.compile(
        r"^Python\s(?P<major>\d+)\.(?P<minor>\d+)(?:\.(?P<micro>\d+))?.*$"
    )
    matched = banner.match(first_line)
    # groupdict(default="0") only affects the optional micro group; major
    # and minor are always captured when the pattern matches at all.
    return matched.groupdict(default="0") if matched else None
|
Parse a Python version output returned by `python --version`.
Return a dict with three keys: major, minor, and micro. Each value is a
string containing a version part.
Note: The micro part would be `'0'` if it's missing from the input string.
|
def get_lines(data_nts, prtfmt=None, nt_fields=None, **kws):
    """Render a list of namedtuples into table lines using prtfmt.

    Optional keyword args: ``joinchr``/``eol`` (used only when *prtfmt*
    is auto-built from the first namedtuple), ``sort_by`` (key function
    for ordering rows), ``prt_if`` (predicate that filters rows).
    """
    # Derive a default format string from the first row's fields.
    if prtfmt is None:
        prtfmt = mk_fmtfld(data_nts[0], kws.get('joinchr', ' '), kws.get('eol', '\n'))
    # Cross-check caller-declared fields against the format string.
    if nt_fields is not None:
        _chk_flds_fmt(nt_fields, prtfmt)
    if 'sort_by' in kws:
        data_nts = sorted(data_nts, key=kws['sort_by'])
    keep = kws.get('prt_if')
    return [prtfmt.format(**nt._asdict())
            for nt in data_nts
            if keep is None or keep(nt)]
|
Print list of namedtuples into a table using prtfmt.
|
def encode_fetch_request(cls, client_id, correlation_id, payloads=None,
                         max_wait_time=100, min_bytes=4096):
    """
    Encodes some FetchRequest structs

    :param bytes client_id:
    :param int correlation_id:
    :param list payloads: list of :class:`FetchRequest`
    :param int max_wait_time: how long to block waiting on min_bytes of data
    :param int min_bytes:
        the minimum number of bytes to accumulate before returning the
        response
    """
    if payloads is None:
        payloads = []
    grouped = group_by_topic_and_partition(payloads)

    parts = [cls._encode_message_header(client_id, correlation_id,
                                        KafkaCodec.FETCH_KEY)]
    assert isinstance(max_wait_time, int)
    # -1 is the replica id
    parts.append(struct.pack('>iiii', -1, max_wait_time, min_bytes,
                             len(grouped)))
    for topic, partitions in grouped.items():
        parts.append(write_short_ascii(topic))
        parts.append(struct.pack('>i', len(partitions)))
        for partition, payload in partitions.items():
            parts.append(struct.pack('>iqi', partition, payload.offset,
                                     payload.max_bytes))
    # Assemble once rather than repeatedly concatenating bytes.
    return b''.join(parts)
|
Encodes some FetchRequest structs
:param bytes client_id:
:param int correlation_id:
:param list payloads: list of :class:`FetchRequest`
:param int max_wait_time: how long to block waiting on min_bytes of data
:param int min_bytes:
the minimum number of bytes to accumulate before returning the
response
|
def _convert_to_style(cls, style_dict):
    """
    converts a style_dict to an openpyxl style object

    Parameters
    ----------
    style_dict : style dictionary to convert
    """
    from openpyxl.style import Style

    xls_style = Style()
    for section, settings in style_dict.items():
        section_obj = getattr(xls_style, section)
        for attr, val in settings.items():
            if section == "borders":
                # Border entries nest one level deeper: each named border
                # side (e.g. 'top') carries its own 'border_style' attribute.
                setattr(getattr(section_obj, attr), 'border_style', val)
            else:
                setattr(section_obj, attr, val)
    return xls_style
|
converts a style_dict to an openpyxl style object
Parameters
----------
style_dict : style dictionary to convert
|
def setupMovie(self):
    """Setup button handler: send a SETUP request, but only from INIT state."""
    if self.state != self.INIT:
        # Ignore the button outside the initial state.
        return
    self.sendRtspRequest(self.SETUP)
|
Setup button handler.
|
def get_issuer(self):
    """
    Gets the Issuer of the Logout Response Message

    :return: The Issuer
    :rtype: string
    """
    nodes = self.__query('/samlp:LogoutResponse/saml:Issuer')
    # Only a single, unambiguous Issuer element is accepted; anything
    # else (zero or multiple nodes) yields None.
    if len(nodes) != 1:
        return None
    return OneLogin_Saml2_Utils.element_text(nodes[0])
|
Gets the Issuer of the Logout Response Message
:return: The Issuer
:rtype: string
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.