code
stringlengths 75
104k
| docstring
stringlengths 1
46.9k
|
|---|---|
def create_api_integration(restApiId, resourcePath, httpMethod, integrationType, integrationHttpMethod,
                           uri, credentials, requestParameters=None, requestTemplates=None,
                           region=None, key=None, keyid=None, profile=None):
    '''
    Create an integration for a given method in a given API.

    If integrationType is MOCK, the uri and credentials parameters are ignored.
    uri takes the form (substitute APIGATEWAY_REGION and LAMBDA_FUNC_ARN):
    "arn:aws:apigateway:APIGATEWAY_REGION:lambda:path/2015-03-31/functions/LAMBDA_FUNC_ARN/invocations"
    credentials may be an IAM role name or a full role ARN.

    CLI Example:

    .. code-block:: bash

        salt myminion boto_apigateway.create_api_integration restApiId resourcePath httpMethod \\
        integrationType integrationHttpMethod uri credentials ['{}' ['{}']]
    '''
    try:
        # Normalize the credentials to a full role ARN up front.
        credentials = _get_role_arn(credentials, region=region, key=key, keyid=keyid, profile=profile)
        resource = describe_api_resource(restApiId, resourcePath, region=region,
                                         key=key, keyid=keyid, profile=profile).get('resource')
        if not resource:
            return {'created': False, 'error': 'no such resource'}
        if requestParameters is None:
            requestParameters = dict()
        if requestTemplates is None:
            requestTemplates = dict()
        conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
        # OPTIONS (CORS preflight) integrations take no backend uri/credentials.
        if httpMethod.lower() == 'options':
            uri = ""
            credentials = ""
        integration = conn.put_integration(restApiId=restApiId, resourceId=resource['id'], httpMethod=httpMethod,
                                           type=integrationType, integrationHttpMethod=integrationHttpMethod,
                                           uri=uri, credentials=credentials, requestParameters=requestParameters,
                                           requestTemplates=requestTemplates)
        return {'created': True, 'integration': integration}
    except ClientError as e:
        return {'created': False, 'error': __utils__['boto3.get_error'](e)}
|
Creates an integration for a given method in a given API.
If integrationType is MOCK, uri and credential parameters will be ignored.
uri is in the form of (substitute APIGATEWAY_REGION and LAMBDA_FUNC_ARN)
"arn:aws:apigateway:APIGATEWAY_REGION:lambda:path/2015-03-31/functions/LAMBDA_FUNC_ARN/invocations"
credentials is in the form of an iam role name or role arn.
CLI Example:
.. code-block:: bash
salt myminion boto_apigateway.create_api_integration restApiId resourcePath httpMethod \\
integrationType integrationHttpMethod uri credentials ['{}' ['{}']]
|
def find_all_checks(self, **kwargs):
    """
    Find all checks for this entity whose attributes match ``**kwargs``.

    Note: this is not very efficient -- the full check list is fetched
    and then filtered on the Python side.
    """
    matches = self._check_manager.find_all_checks(**kwargs)
    # Bind every returned check back to this entity before handing it out.
    for match in matches:
        match.set_entity(self)
    return matches
|
Finds all checks for this entity with attributes matching ``**kwargs``.
This isn't very efficient: it loads the entire list then filters on
the Python side.
|
def setFlag(self, flag, state=True):
    """
    Enable or disable the given flag on this exporter.

    :param flag  | <XExporter.Flags>
    :param state | <bool> -- True to set the flag, False to clear it.
    """
    currently_set = self.testFlag(flag)
    if state and not currently_set:
        # Turn the bit on.
        self.setFlags(self.flags() | flag)
    elif currently_set and not state:
        # Turn the bit off (XOR clears a bit known to be set).
        self.setFlags(self.flags() ^ flag)
|
Sets whether or not the given flag is enabled or disabled.
:param flag | <XExporter.Flags>
|
def _get_face2(shape=None, face_r=1.0, smile_r1=0.5, smile_r2=0.7, eye_r=0.2):
"""
Create 2D binar face
:param shape:
:param face_r:
:param smile_r1:
:param smile_r2:
:param eye_r:
:return:
"""
# data3d = np.zeros([1,7,7], dtype=np.int16)
if shape is None:
shape = [32, 32]
center = (np.asarray(shape) - 1) / 2.0
r = np.min(center) * face_r
# np.min(np.asarray(shape) / 2.0)
# shape = data3d.shape[1:]
# data3d[center[0], center[1], center[2]] = 1
x, y = np.meshgrid(range(shape[1]), range(shape[0]))
head = (x - center[0]) ** 2 + (y - center[1]) ** 2 < r ** 2
smile = (
((x - center[0]) ** 2 + (y - center[1]) ** 2 < (r * smile_r2) ** 2)
& (y > (center[1] + 0.3 * r))
& ((x - center[0]) ** 2 + (y - center[1]) ** 2 >= (r * smile_r1) ** 2)
)
smile
e1c = center + r * np.array([-0.35, -0.2])
e2c = center + r * np.array([0.35, -0.2])
eyes = (x - e1c[0]) ** 2 + (y - e1c[1]) ** 2 <= (r * eye_r) ** 2
eyes += (x - e2c[0]) ** 2 + (y - e1c[1]) ** 2 <= (r * eye_r) ** 2
face = head & ~smile & ~eyes
return face
|
Create a 2D binary face
:param shape:
:param face_r:
:param smile_r1:
:param smile_r2:
:param eye_r:
:return:
|
def init(self, projectname=None, description=None, **kwargs):
    """
    Initialize a new experiment

    Parameters
    ----------
    projectname: str
        The name of the project that shall be used. If None, the last one
        created will be used
    description: str
        A short summary of the experiment
    ``**kwargs``
        Keyword arguments passed to the :meth:`app_main` method

    Returns
    -------
    OrderedDict
        The configuration mapping for the new experiment (also stored in
        ``self.config.experiments``)

    Notes
    -----
    If the experiment is None, a new experiment will be created
    """
    self.app_main(**kwargs)
    experiments = self.config.experiments
    experiment = self._experiment
    if experiment is None and not experiments:
        # First experiment ever: derive an id from the app name.
        experiment = self.name + '_exp0'
    elif experiment is None:
        # Derive the next id from the previously used experiment name.
        try:
            experiment = utils.get_next_name(self.experiment)
        except ValueError:
            raise ValueError(
                "Could not estimate an experiment id! Please use the "
                "experiment argument to provide an id.")
    self.experiment = experiment
    if self.is_archived(experiment):
        raise ValueError(
            "The specified experiment has already been archived! Run "
            "``%s -id %s unarchive`` first" % (self.name, experiment))
    if projectname is None:
        projectname = self.projectname
    else:
        self.projectname = projectname
    self.logger.info("Initializing experiment %s of project %s",
                     experiment, projectname)
    # Reuse an existing entry for this experiment id if present.
    exp_dict = experiments.setdefault(experiment, OrderedDict())
    if description is not None:
        exp_dict['description'] = description
    exp_dict['project'] = projectname
    # Stored path is relative; the on-disk path is rooted at the project.
    exp_dict['expdir'] = exp_dir = osp.join('experiments', experiment)
    exp_dir = osp.join(self.config.projects[projectname]['root'], exp_dir)
    exp_dict['timestamps'] = OrderedDict()
    if not os.path.exists(exp_dir):
        self.logger.debug("    Creating experiment directory %s", exp_dir)
        os.makedirs(exp_dir)
    self.fix_paths(exp_dict)
    return exp_dict
|
Initialize a new experiment
Parameters
----------
projectname: str
The name of the project that shall be used. If None, the last one
created will be used
description: str
A short summary of the experiment
``**kwargs``
Keyword arguments passed to the :meth:`app_main` method
Notes
-----
If the experiment is None, a new experiment will be created
|
def problem_id(self, value):
    """The problem_id property.

    Args:
        value (string). the property value.
    """
    is_default = value == self._defaults['problemId']
    if is_default and 'problemId' in self._values:
        # Storing the default is redundant -- drop the explicit entry.
        del self._values['problemId']
    else:
        self._values['problemId'] = value
|
The problem_id property.
Args:
value (string). the property value.
|
def separation(sources, fs=22050, labels=None, alpha=0.75, ax=None, **kwargs):
    '''Source-separation visualization

    Plots one spectrogram per source on a shared axis, each with its own
    color, so the separated sources can be compared visually.

    Parameters
    ----------
    sources : np.ndarray, shape=(nsrc, nsampl)
        A list of waveform buffers corresponding to each source
    fs : number > 0
        The sampling rate
    labels : list of strings
        An optional list of descriptors corresponding to each source
    alpha : float in [0, 1]
        Maximum alpha (opacity) of spectrogram values.
    ax : matplotlib.pyplot.axes
        An axis handle on which to draw the spectrograms.
        If none is provided, a new set of axes is created.
    kwargs
        Additional keyword arguments to ``scipy.signal.spectrogram``

    Returns
    -------
    ax
        The axis handle for this plot
    '''
    # Get the axes handle
    ax, new_axes = __get_axes(ax=ax)
    # Make sure we have at least two dimensions
    sources = np.atleast_2d(sources)
    if labels is None:
        # Default labels: "Source 0", "Source 1", ...
        labels = ['Source {:d}'.format(_) for _ in range(len(sources))]
    kwargs.setdefault('scaling', 'spectrum')
    # The cumulative spectrogram across sources
    # is used to establish the reference power
    # for each individual source
    cumspec = None
    specs = []
    for i, src in enumerate(sources):
        freqs, times, spec = spectrogram(src, fs=fs, **kwargs)
        specs.append(spec)
        if cumspec is None:
            cumspec = spec.copy()
        else:
            cumspec += spec
    ref_max = cumspec.max()
    # 60 dB of dynamic range below the cumulative peak
    ref_min = ref_max * 1e-6
    color_conv = ColorConverter()
    for i, spec in enumerate(specs):
        # For each source, grab a new color from the cycler
        # Then construct a colormap that interpolates from
        # [transparent white -> new color]
        color = next(ax._get_lines.prop_cycler)['color']
        color = color_conv.to_rgba(color, alpha=alpha)
        cmap = LinearSegmentedColormap.from_list(labels[i],
                                                 [(1.0, 1.0, 1.0, 0.0),
                                                  color])
        ax.pcolormesh(times, freqs, spec,
                      cmap=cmap,
                      norm=LogNorm(vmin=ref_min, vmax=ref_max),
                      shading='gouraud',
                      label=labels[i])
        # Attach a 0x0 rect to the axis with the corresponding label
        # This way, it will show up in the legend
        ax.add_patch(Rectangle((0, 0), 0, 0, color=color, label=labels[i]))
    if new_axes:
        ax.axis('tight')
    return ax
|
Source-separation visualization
Parameters
----------
sources : np.ndarray, shape=(nsrc, nsampl)
A list of waveform buffers corresponding to each source
fs : number > 0
The sampling rate
labels : list of strings
An optional list of descriptors corresponding to each source
alpha : float in [0, 1]
Maximum alpha (opacity) of spectrogram values.
ax : matplotlib.pyplot.axes
An axis handle on which to draw the spectrograms.
If none is provided, a new set of axes is created.
kwargs
Additional keyword arguments to ``scipy.signal.spectrogram``
Returns
-------
ax
The axis handle for this plot
|
def install_package_to_venv(self):
    '''
    Install the package given as the first argument into the virtualenv,
    skipping its dependencies.

    Raises VirtualenvFailException when the install fails.
    '''
    try:
        # force=True reinstalls even if already present; --no-deps keeps
        # dependency resolution out of the picture.
        self.env.install(self.name, force=True, options=["--no-deps"])
    except (ve.PackageInstallationException,
            ve.VirtualenvReadonlyException):
        raise VirtualenvFailException(
            'Failed to install package to virtualenv')
    # Record the directory state after the install for later comparison.
    venv_root = self.temp_dir + '/venv/'
    self.dirs_after_install.fill(venv_root)
|
Installs package given as first argument to virtualenv without
dependencies
|
def product(pc, service, attrib, sku):
    """
    Get a list of a service's products.

    The list will be in the given region, matching the specific terms and
    any given attribute filters or a SKU.
    """
    # Configure the pricing context from the CLI arguments.
    pc.service = service.lower()
    pc.sku = sku
    pc.add_attributes(attribs=attrib)

    # Echo the effective query context before fetching.
    click.echo("Service Alias: {0}".format(pc.service_alias))
    click.echo("URL: {0}".format(pc.service_url))
    click.echo("Region: {0}".format(pc.region))
    click.echo("Product Terms: {0}".format(pc.terms))
    click.echo("Filtering Attributes: {0}".format(pc.attributes))

    found = pyutu.find_products(pc)
    for sku_id in found:
        details = json.dumps(found[sku_id], indent=2, sort_keys=True)
        click.echo("Product SKU: {0} product: {1}".format(sku_id, details))

    click.echo("Total Products Found: {0}".format(len(found)))
    click.echo("Time: {0} secs".format(time.process_time()))
|
Get a list of a service's products.
The list will be in the given region, matching the specific terms and
any given attribute filters or a SKU.
|
def generate_blob(self, container_name, blob_name, permission=None,
                  expiry=None, start=None, id=None, ip=None, protocol=None,
                  cache_control=None, content_disposition=None,
                  content_encoding=None, content_language=None,
                  content_type=None):
    '''
    Generate a shared access signature (SAS) token for a single blob.

    Use the returned signature with the sas_token parameter of any BlobService.

    :param str container_name: Name of container.
    :param str blob_name: Name of blob.
    :param BlobPermissions permission:
        Permissions the SAS grants, ordered read, write, delete, list.
        Required unless ``id`` references a stored access policy that
        supplies it; must be omitted if the policy already specifies it.
    :param expiry:
        When the signature becomes invalid. Required unless supplied by the
        stored access policy referenced by ``id``. Azure always converts
        values to UTC; naive datetimes are assumed to be UTC.
    :type expiry: date or str
    :param start:
        When the signature becomes valid. If omitted, the time the storage
        service receives the request is used. Azure always converts values
        to UTC; naive datetimes are assumed to be UTC.
    :type start: date or str
    :param str id:
        A unique value up to 64 characters that correlates to a stored
        access policy (see set_blob_service_properties).
    :param str ip:
        An IP address or range (e.g. 168.1.5.65 or 168.1.5.60-168.1.5.70)
        from which requests using this SAS are accepted.
    :param str protocol:
        Protocol permitted for requests; defaults to https,http. See
        :class:`~azure.storage.models.Protocol` for possible values.
    :param str cache_control:
        Response header value for Cache-Control when the resource is
        accessed using this SAS.
    :param str content_disposition:
        Response header value for Content-Disposition when the resource is
        accessed using this SAS.
    :param str content_encoding:
        Response header value for Content-Encoding when the resource is
        accessed using this SAS.
    :param str content_language:
        Response header value for Content-Language when the resource is
        accessed using this SAS.
    :param str content_type:
        Response header value for Content-Type when the resource is
        accessed using this SAS.
    '''
    helper = _SharedAccessHelper()
    helper.add_base(permission, expiry, start, ip, protocol)
    helper.add_id(id)
    # 'b' marks the signed resource as a single blob.
    helper.add_resource('b')
    helper.add_override_response_headers(cache_control, content_disposition,
                                         content_encoding, content_language,
                                         content_type)
    blob_path = '/'.join((container_name, blob_name))
    helper.add_resource_signature(self.account_name, self.account_key,
                                  'blob', blob_path)
    return helper.get_token()
|
Generates a shared access signature for the blob.
Use the returned signature with the sas_token parameter of any BlobService.
:param str container_name:
Name of container.
:param str blob_name:
Name of blob.
:param BlobPermissions permission:
The permissions associated with the shared access signature. The
user is restricted to operations allowed by the permissions.
Permissions must be ordered read, write, delete, list.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has been
specified in an associated stored access policy.
:param expiry:
The time at which the shared access signature becomes invalid.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has
been specified in an associated stored access policy. Azure will always
convert values to UTC. If a date is passed in without timezone info, it
is assumed to be UTC.
:type expiry: date or str
:param start:
The time at which the shared access signature becomes valid. If
omitted, start time for this call is assumed to be the time when the
storage service receives the request. Azure will always convert values
to UTC. If a date is passed in without timezone info, it is assumed to
be UTC.
:type start: date or str
:param str id:
A unique value up to 64 characters in length that correlates to a
stored access policy. To create a stored access policy, use
set_blob_service_properties.
:param str ip:
Specifies an IP address or a range of IP addresses from which to accept requests.
If the IP address from which the request originates does not match the IP address
or address range specified on the SAS token, the request is not authenticated.
For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
restricts the request to those IP addresses.
:param str protocol:
Specifies the protocol permitted for a request made. The default value
is https,http. See :class:`~azure.storage.models.Protocol` for possible values.
:param str cache_control:
Response header value for Cache-Control when resource is accessed
using this shared access signature.
:param str content_disposition:
Response header value for Content-Disposition when resource is accessed
using this shared access signature.
:param str content_encoding:
Response header value for Content-Encoding when resource is accessed
using this shared access signature.
:param str content_language:
Response header value for Content-Language when resource is accessed
using this shared access signature.
:param str content_type:
Response header value for Content-Type when resource is accessed
using this shared access signature.
|
def _get_svc_list(service_status):
    '''
    Return a sorted list of all services under /etc/rc.d/ that match the
    given status (as decided by _get_svc).
    '''
    prefix = '/etc/rc.d/'
    found = set()
    for script in glob.glob('{0}*'.format(prefix)):
        name = _get_svc(script, service_status)
        if name is not None:
            found.add(name)
    return sorted(found)
|
Returns all service statuses
|
def modify_fk_constraint(apps, schema_editor):
    """
    Deletes the current foreign key constraint on the outbound field, and adds
    it again, but this time with an ON DELETE CASCADE clause
    """
    model = apps.get_model("message_sender", "OutboundSendFailure")
    table = model._meta.db_table
    with schema_editor.connection.cursor() as cursor:
        # Introspect the table to discover the existing constraints.
        constraints = schema_editor.connection.introspection.get_constraints(
            cursor, table
        )
    # Exactly one foreign-key constraint is expected on this table;
    # the unpacking raises if there are zero or several.
    [constraint] = filter(lambda c: c[1]["foreign_key"], constraints.items())
    [name, _] = constraint
    # Drop the existing FK (forcing any deferred check to run first).
    sql_delete_fk = (
        "SET CONSTRAINTS {name} IMMEDIATE; "
        "ALTER TABLE {table} DROP CONSTRAINT {name}"
    ).format(table=schema_editor.quote_name(table), name=schema_editor.quote_name(name))
    schema_editor.execute(sql_delete_fk)
    # Rebuild the same FK (same name/column/target) with ON DELETE CASCADE.
    field = model.outbound.field
    to_table = field.remote_field.model._meta.db_table
    to_column = field.remote_field.model._meta.get_field(
        field.remote_field.field_name
    ).column
    sql_create_fk = (
        "ALTER TABLE {table} ADD CONSTRAINT {name} FOREIGN KEY "
        "({column}) REFERENCES {to_table} ({to_column}) "
        "ON DELETE CASCADE {deferrable};"
    ).format(
        table=schema_editor.quote_name(table),
        name=schema_editor.quote_name(name),
        column=schema_editor.quote_name(field.column),
        to_table=schema_editor.quote_name(to_table),
        to_column=schema_editor.quote_name(to_column),
        deferrable=schema_editor.connection.ops.deferrable_sql(),
    )
    schema_editor.execute(sql_create_fk)
|
Deletes the current foreign key constraint on the outbound field, and adds
it again, but this time with an ON DELETE clause
|
def inner(a, b):
    '''
    inner(a, b) computes the dot product of a and b in a fashion that
    respects sparse matrices when encountered. No dimensionality
    error-checking is performed.

    Semantics:
      * if a or b is a constant (0-d), the result is simply a * b;
      * if a and b are both vectors or both matrices, the result is dot(a, b);
      * a vector a with a matrix b behaves as if a were a 1-row matrix;
      * a matrix a with a vector b behaves as if b were a 1-column matrix.
    '''
    # Sparse left operand: delegate directly to its dot method.
    if sps.issparse(a):
        return a.dot(b)
    a = np.asarray(a)
    if a.ndim == 0:
        return a * b
    if sps.issparse(b):
        # Transpose b so the sparse dot runs from the left.
        return b.T.dot(a) if a.ndim == 1 else b.T.dot(a.T).T
    b = np.asarray(b)
    if b.ndim == 0:
        return a * b
    if a.ndim == 1 and b.ndim == 2:
        # Treat the vector as a single-row matrix.
        return np.dot(b.T, a)
    return np.dot(a, b)
|
inner(a,b) yields the dot product of a and b, doing so in a fashion that respects sparse
matrices when encountered. This does not error check for bad dimensionality.
If a or b are constants, then the result is just the a*b; if a and b are both vectors or both
matrices, then the inner product is dot(a,b); if a is a vector and b is a matrix, this is
equivalent to as if a were a matrix with 1 row; and if a is a matrix and b a vector, this is
equivalent to as if b were a matrix with 1 column.
|
def train_model(params: Params,
                serialization_dir: str,
                file_friendly_logging: bool = False,
                recover: bool = False,
                force: bool = False,
                cache_directory: str = None,
                cache_prefix: str = None) -> Model:
    """
    Trains the model specified in the given :class:`Params` object, using the data and training
    parameters also specified in that object, and saves the results in ``serialization_dir``.

    Parameters
    ----------
    params : ``Params``
        A parameter object specifying an AllenNLP Experiment.
    serialization_dir : ``str``
        The directory in which to save results and logs.
    file_friendly_logging : ``bool``, optional (default=False)
        If ``True``, we add newlines to tqdm output, even on an interactive terminal, and we slow
        down tqdm's output to only once every 10 seconds.
    recover : ``bool``, optional (default=False)
        If ``True``, we will try to recover a training run from an existing serialization
        directory. This is only intended for use when something actually crashed during the middle
        of a run. For continuing training a model on new data, see the ``fine-tune`` command.
    force : ``bool``, optional (default=False)
        If ``True``, we will overwrite the serialization directory if it already exists.
    cache_directory : ``str``, optional
        For caching data pre-processing. See :func:`allennlp.training.util.datasets_from_params`.
    cache_prefix : ``str``, optional
        For caching data pre-processing. See :func:`allennlp.training.util.datasets_from_params`.

    Returns
    -------
    best_model: ``Model``
        The model with the best epoch weights.
    """
    # Global setup: seeds, serialization directory, and logging.
    prepare_environment(params)
    create_serialization_dir(params, serialization_dir, recover, force)
    stdout_handler = prepare_global_logging(serialization_dir, file_friendly_logging)
    cuda_device = params.params.get('trainer').get('cuda_device', -1)
    check_for_gpu(cuda_device)
    # Persist the (pre-consumption) config alongside the results.
    params.to_file(os.path.join(serialization_dir, CONFIG_NAME))
    evaluate_on_test = params.pop_bool("evaluate_on_test", False)
    trainer_type = params.get("trainer", {}).get("type", "default")
    if trainer_type == "default":
        # Special logic to instantiate backward-compatible trainer.
        pieces = TrainerPieces.from_params(params,  # pylint: disable=no-member
                                           serialization_dir,
                                           recover,
                                           cache_directory,
                                           cache_prefix)
        trainer = Trainer.from_params(
                model=pieces.model,
                serialization_dir=serialization_dir,
                iterator=pieces.iterator,
                train_data=pieces.train_dataset,
                validation_data=pieces.validation_dataset,
                params=pieces.params,
                validation_iterator=pieces.validation_iterator)
        evaluation_iterator = pieces.validation_iterator or pieces.iterator
        evaluation_dataset = pieces.test_dataset
    else:
        trainer = TrainerBase.from_params(params, serialization_dir, recover)
        # TODO(joelgrus): handle evaluation in the general case
        evaluation_iterator = evaluation_dataset = None
    # All params must be consumed by now; leftovers indicate a config typo.
    params.assert_empty('base train command')
    try:
        metrics = trainer.train()
    except KeyboardInterrupt:
        # if we have completed an epoch, try to create a model archive.
        if os.path.exists(os.path.join(serialization_dir, _DEFAULT_WEIGHTS)):
            logging.info("Training interrupted by the user. Attempting to create "
                         "a model archive using the current best epoch weights.")
            archive_model(serialization_dir, files_to_archive=params.files_to_archive)
        raise
    # Evaluate
    if evaluation_dataset and evaluate_on_test:
        logger.info("The model will be evaluated using the best epoch weights.")
        test_metrics = evaluate(trainer.model, evaluation_dataset, evaluation_iterator,
                                cuda_device=trainer._cuda_devices[0],  # pylint: disable=protected-access,
                                # TODO(brendanr): Pass in an arg following Joel's trainer refactor.
                                batch_weight_key="")
        for key, value in test_metrics.items():
            metrics["test_" + key] = value
    elif evaluation_dataset:
        logger.info("To evaluate on the test set after training, pass the "
                    "'evaluate_on_test' flag, or use the 'allennlp evaluate' command.")
    cleanup_global_logging(stdout_handler)
    # Now tar up results
    archive_model(serialization_dir, files_to_archive=params.files_to_archive)
    dump_metrics(os.path.join(serialization_dir, "metrics.json"), metrics, log=True)
    # We count on the trainer to have the model with best weights
    return trainer.model
|
Trains the model specified in the given :class:`Params` object, using the data and training
parameters also specified in that object, and saves the results in ``serialization_dir``.
Parameters
----------
params : ``Params``
A parameter object specifying an AllenNLP Experiment.
serialization_dir : ``str``
The directory in which to save results and logs.
file_friendly_logging : ``bool``, optional (default=False)
If ``True``, we add newlines to tqdm output, even on an interactive terminal, and we slow
down tqdm's output to only once every 10 seconds.
recover : ``bool``, optional (default=False)
If ``True``, we will try to recover a training run from an existing serialization
directory. This is only intended for use when something actually crashed during the middle
of a run. For continuing training a model on new data, see the ``fine-tune`` command.
force : ``bool``, optional (default=False)
If ``True``, we will overwrite the serialization directory if it already exists.
cache_directory : ``str``, optional
For caching data pre-processing. See :func:`allennlp.training.util.datasets_from_params`.
cache_prefix : ``str``, optional
For caching data pre-processing. See :func:`allennlp.training.util.datasets_from_params`.
Returns
-------
best_model: ``Model``
The model with the best epoch weights.
|
def set_euk_hmm(self, args):
    '''
    Set the HMM used by graftM to cross-check for eukaryotic sequences.

    If ``args`` already carries a ``euk_hmm_file`` attribute it is left
    untouched; otherwise the bundled 18S HMM is resolved relative to the
    entry script (bin/graftM), which has a stable relative path to the
    HMM when installed through pip.

    The original implementation ended in an unreachable ``else: raise``
    branch (hasattr/not-hasattr covers every case); it has been removed.
    '''
    if not hasattr(args, 'euk_hmm_file'):
        setattr(args, 'euk_hmm_file',
                os.path.join(os.path.dirname(inspect.stack()[-1][1]),
                             '..', 'share', '18S.hmm'))
|
Set the hmm used by graftM to cross check for euks.
|
def send(self, request, headers=None, content=None, **kwargs):
    """Prepare and send request object according to configuration.

    Returns the underlying ``requests.Response``-like object, with the
    universal HTTP response and pipeline context attached to it.

    :param ClientRequest request: The request object to be sent.
    :param dict headers: Any headers to add to the request.
    :param content: Any body data to add to the request.
    :param config: Any specific config overrides
    """
    # "content" and "headers" are deprecated, only old SDK
    if headers:
        request.headers.update(headers)
    if not request.files and request.data is None and content is not None:
        request.add_content(content)
    # End of deprecation
    response = None
    # Stream by default so large payloads are not buffered in memory.
    kwargs.setdefault('stream', True)
    try:
        pipeline_response = self.config.pipeline.run(request, **kwargs)
        # There is too much thing that expects this method to return a "requests.Response"
        # to break it in a compatible release.
        # Also, to be pragmatic in the "sync" world "requests" rules anyway.
        # However, attach the Universal HTTP response
        # to get the streaming generator.
        response = pipeline_response.http_response.internal_response
        response._universal_http_response = pipeline_response.http_response
        response.context = pipeline_response.context
        return response
    finally:
        # Close the local session unless the caller is still streaming
        # (or the pipeline failed before a response was produced).
        self._close_local_session_if_necessary(response, kwargs['stream'])
|
Prepare and send request object according to configuration.
:param ClientRequest request: The request object to be sent.
:param dict headers: Any headers to add to the request.
:param content: Any body data to add to the request.
:param config: Any specific config overrides
|
def cursor_position_changed(self):
    """Brace matching

    Highlight the brace immediately left of the cursor together with its
    match (matched color), or alone (unmatched color) when no match exists.
    """
    # Clear any previous brace highlight before computing a new one.
    if self.bracepos is not None:
        self.__highlight(self.bracepos, cancel=True)
        self.bracepos = None
    cursor = self.textCursor()
    if cursor.position() == 0:
        # Start of document: there is no character to the left to inspect.
        return
    # Select the single character immediately left of the cursor.
    cursor.movePosition(QTextCursor.PreviousCharacter,
                        QTextCursor.KeepAnchor)
    text = to_text_string(cursor.selectedText())
    pos1 = cursor.position()
    if text in (')', ']', '}'):
        # Closing brace: search backward for its opener.
        pos2 = self.find_brace_match(pos1, text, forward=False)
    elif text in ('(', '[', '{'):
        # Opening brace: search forward for its closer.
        pos2 = self.find_brace_match(pos1, text, forward=True)
    else:
        # Not a brace character: nothing to highlight.
        return
    if pos2 is not None:
        self.bracepos = (pos1, pos2)
        self.__highlight(self.bracepos, color=self.matched_p_color)
    else:
        self.bracepos = (pos1,)
        self.__highlight(self.bracepos, color=self.unmatched_p_color)
|
Brace matching
|
def convertMzml(mzmlPath, outputDirectory=None):
    """Import an mzml file and convert it to a MsrunContainer file.

    :param mzmlPath: path of the mzml file
    :param outputDirectory: directory where the MsrunContainer file should
        be written; defaults to the directory containing the mzml file.
    """
    if outputDirectory is None:
        outputDirectory = os.path.dirname(mzmlPath)
    container = importMzml(mzmlPath)
    container.setPath(outputDirectory)
    container.save()
|
Imports an mzml file and converts it to a MsrunContainer file
:param mzmlPath: path of the mzml file
:param outputDirectory: directory where the MsrunContainer file should be written
if it is not specified, the output directory is set to the mzml files directory.
|
def page(self, recurring=values.unset, trigger_by=values.unset,
         usage_category=values.unset, page_token=values.unset,
         page_number=values.unset, page_size=values.unset):
    """
    Retrieve a single page of TriggerInstance records from the API.
    Request is executed immediately

    :param TriggerInstance.Recurring recurring: The frequency of recurring UsageTriggers to read
    :param TriggerInstance.TriggerField trigger_by: The trigger field of the UsageTriggers to read
    :param TriggerInstance.UsageCategory usage_category: The usage category of the UsageTriggers to read
    :param str page_token: PageToken provided by the API
    :param int page_number: Page Number, this value is simply for client state
    :param int page_size: Number of records to return, defaults to 50

    :returns: Page of TriggerInstance
    :rtype: twilio.rest.api.v2010.account.usage.trigger.TriggerPage
    """
    # values.of drops any entries left at values.unset.
    query = values.of({
        'Recurring': recurring,
        'TriggerBy': trigger_by,
        'UsageCategory': usage_category,
        'PageToken': page_token,
        'Page': page_number,
        'PageSize': page_size,
    })
    payload = self._version.page(
        'GET',
        self._uri,
        params=query,
    )
    return TriggerPage(self._version, payload, self._solution)
|
Retrieve a single page of TriggerInstance records from the API.
Request is executed immediately
:param TriggerInstance.Recurring recurring: The frequency of recurring UsageTriggers to read
:param TriggerInstance.TriggerField trigger_by: The trigger field of the UsageTriggers to read
:param TriggerInstance.UsageCategory usage_category: The usage category of the UsageTriggers to read
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of TriggerInstance
:rtype: twilio.rest.api.v2010.account.usage.trigger.TriggerPage
|
def sort(self, *sorting, **kwargs):
    """Sort resources by the given (name, descending) pairs.

    Unknown field names are silently skipped; with no valid fields the
    collection is returned unsorted.
    """
    model_fields = self.meta.model._meta.fields
    order_terms = []
    for field_name, descending in sorting:
        field = model_fields.get(field_name)
        if field is None:
            # Not a model field -- ignore it.
            continue
        order_terms.append(field.desc() if descending else field)
    if not order_terms:
        return self.collection
    return self.collection.order_by(*order_terms)
|
Sort resources.
|
def get_destination(self, filepath, targetdir=None):
    """
    Return destination path from given source file path.

    Destination is always a file with extension ``.css``.

    Args:
        filepath (str): A file path. The path is always relative to the
            sources directory. If not relative, ``targetdir`` won't be
            joined.

    Keyword Arguments:
        targetdir (str): Directory path to prepend to the destination
            path. If not given, the destination stays relative.

    Returns:
        str: Destination filepath.
    """
    # Swap whatever extension the source file has for '.css'.
    dst = self.change_extension(filepath, 'css')
    if targetdir:
        dst = os.path.join(targetdir, dst)
    return dst
|
Return destination path from given source file path.
Destination is always a file with extension ``.css``.
Args:
filepath (str): A file path. The path is always relative to the
sources directory. If not relative, ``targetdir`` won't be
joined.
targetdir (str): Directory path to prepend to the destination
path.
Returns:
str: Destination filepath.
|
def copy_function(func, name=None):
    """Copy a function object, optionally giving the copy a different name.

    Args:
        func (function): Function to be copied.
        name (str, optional): Name of the new function.
            If not specified, the name of `func` will be used.

    Returns:
        function: New function equivalent to `func` but carrying the new name.
    """
    code = func.__code__
    newname = name or func.__name__
    if hasattr(code, 'replace'):
        # Python 3.8+: CodeType.replace() is version-proof; calling the
        # CodeType constructor positionally broke when co_posonlyargcount
        # was inserted into the signature in 3.8.
        newcode = code.replace(co_name=newname)
    else:
        newcode = CodeType(
            code.co_argcount,
            code.co_kwonlyargcount,
            code.co_nlocals,
            code.co_stacksize,
            code.co_flags,
            code.co_code,
            code.co_consts,
            code.co_names,
            code.co_varnames,
            code.co_filename,
            newname,
            code.co_firstlineno,
            code.co_lnotab,
            code.co_freevars,
            code.co_cellvars,
        )
    newfunc = FunctionType(
        newcode,
        func.__globals__,
        newname,
        func.__defaults__,
        func.__closure__,
    )
    # Preserve keyword-only defaults: FunctionType() does not accept them
    # positionally, so the previous version silently dropped them.
    if hasattr(func, '__kwdefaults__'):
        newfunc.__kwdefaults__ = func.__kwdefaults__
    newfunc.__dict__.update(func.__dict__)
    return newfunc
|
Copy a function object with different name.
Args:
func (function): Function to be copied.
name (string, optional): Name of the new function.
If not spacified, the same name of `func` will be used.
Returns:
newfunc (function): New function with different name.
|
def _set_system_mode(self, v, load=False):
  """
  Setter method for system_mode, mapped from YANG variable /hardware/system_mode (system-mode-type)
  If this variable is read-only (config: false) in the
  source YANG file, then _set_system_mode is considered as a private
  method. Backends looking to populate this variable should
  do so via calling thisObj._set_system_mode() directly.
  """
  # Unwrap values that carry their own YANG type converter.
  if hasattr(v, "_utype"):
    v = v._utype(v)
  try:
    # Coerce the value into the generated restricted type; only the keys
    # 'default' and 'npb' are accepted (pyangbind-generated -- do not edit
    # the constructor call by hand).
    t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'default': {'value': 0}, u'npb': {'value': 1}},), is_leaf=True, yang_name="system-mode", rest_name="system-mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set system mode', u'callpoint': u'ha_system_mode_callpoint', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-hardware', defining_module='brocade-hardware', yang_type='system-mode-type', is_config=True)
  except (TypeError, ValueError):
    # Re-raise with a structured error payload describing the expected type.
    raise ValueError({
      'error-string': """system_mode must be of a type compatible with system-mode-type""",
      'defined-type': "brocade-hardware:system-mode-type",
      'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'default': {'value': 0}, u'npb': {'value': 1}},), is_leaf=True, yang_name="system-mode", rest_name="system-mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set system mode', u'callpoint': u'ha_system_mode_callpoint', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-hardware', defining_module='brocade-hardware', yang_type='system-mode-type', is_config=True)""",
    })
  # Store the validated value and notify the parent object, if it supports it.
  self.__system_mode = t
  if hasattr(self, '_set'):
    self._set()
|
Setter method for system_mode, mapped from YANG variable /hardware/system_mode (system-mode-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_system_mode is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_system_mode() directly.
|
def _wpad(l, windowsize, stepsize):
"""
Parameters
l - The length of the input array
windowsize - the size of each window of samples
stepsize - the number of samples to move the window each step
Returns
The length the input array should be so that no samples are leftover
"""
if l <= windowsize:
return windowsize
nsteps = ((l // stepsize) * stepsize)
overlap = (windowsize - stepsize)
if overlap:
return nsteps + overlap
diff = (l - nsteps)
left = max(0, windowsize - diff)
return l + left if diff else l
|
Parameters
l - The length of the input array
windowsize - the size of each window of samples
stepsize - the number of samples to move the window each step
Returns
The length the input array should be so that no samples are leftover
|
def read(self, gpio):
    """
    Returns the GPIO level.

    gpio:= 0-53.

    ...
    yield from pi.set_mode(23, pigpio.INPUT)
    yield from pi.set_pull_up_down(23, pigpio.PUD_DOWN)
    print(yield from pi.read(23))
    0

    yield from pi.set_pull_up_down(23, pigpio.PUD_UP)
    print(yield from pi.read(23))
    1
    ...
    """
    raw = yield from self._pigpio_aio_command(_PI_CMD_READ, gpio, 0)
    return _u2i(raw)
|
Returns the GPIO level.
gpio:= 0-53.
...
yield from pi.set_mode(23, pigpio.INPUT)
yield from pi.set_pull_up_down(23, pigpio.PUD_DOWN)
print(yield from pi.read(23))
0
yield from pi.set_pull_up_down(23, pigpio.PUD_UP)
print(yield from pi.read(23))
1
...
|
def update_asset_browser(self, project, releasetype):
    """Point the asset browser at the given project.

    :param releasetype: the releasetype for the model
    :type releasetype: :data:`djadapter.RELEASETYPES`
    :param project: the project of the assets, or None to clear the browser
    :type project: :class:`djadapter.models.Project`
    :returns: None
    :rtype: None
    :raises: None
    """
    model = None
    if project is not None:
        model = self.create_asset_model(project, releasetype)
    self.assetbrws.set_model(model)
|
update the assetbrowser to the given project
:param releasetype: the releasetype for the model
:type releasetype: :data:`djadapter.RELEASETYPES`
:param project: the project of the assets
:type project: :class:`djadapter.models.Project`
:returns: None
:rtype: None
:raises: None
|
def construct_graph(sakefile, settings):
    """
    Takes the sakefile dictionary and builds a NetworkX graph.

    Args:
        sakefile: A dictionary that is the parsed Sakefile (from sake.py)
        settings: The settings dictionary

    Returns:
        A NetworkX graph
    """
    verbose = settings["verbose"]
    sprint = settings["sprint"]
    G = nx.DiGraph()
    sprint("Going to construct Graph", level="verbose")
    for target in sakefile:
        if target == "all":
            # we don't want this node
            continue
        if "formula" not in sakefile[target]:
            # no formula means this is a meta target: add its atomic children
            for atomtarget in sakefile[target]:
                if atomtarget == "help":
                    continue
                sprint("Adding '{}'".format(atomtarget), level="verbose")
                data_dict = sakefile[target][atomtarget]
                data_dict["parent"] = target
                G.add_node(atomtarget, **data_dict)
        else:
            sprint("Adding '{}'".format(target), level="verbose")
            G.add_node(target, **sakefile[target])
    sprint("Nodes are built\nBuilding connections", level="verbose")
    for node in G.nodes(data=True):
        sprint("checking node {} for dependencies".format(node[0]),
               level="verbose")
        # replace None attribute values so the path cleaning below is safe
        for k, v in node[1].items():
            if v is None:
                node[1][k] = []
        # normalize all paths in output
        if "output" in node[1]:
            for index in range(len(node[1]['output'])):
                node[1]['output'][index] = clean_path(node[1]['output'][index])
        if "dependencies" not in node[1]:
            continue
        sprint("it has dependencies", level="verbose")
        # normalize all paths in dependencies; clean_path does the whole job
        # (a dead ``os.path.normpath`` call and an unused ``connects`` list
        # were removed here)
        for index in range(len(node[1]['dependencies'])):
            node[1]['dependencies'][index] = clean_path(
                node[1]['dependencies'][index])
    # connect each node to the nodes whose outputs it depends on
    for node in G.nodes(data=True):
        connects = []
        if "dependencies" not in node[1]:
            continue
        for dep in node[1]['dependencies']:
            matches = check_for_dep_in_outputs(dep, verbose, G)
            if not matches:
                continue
            for match in matches:
                sprint("Appending {} to matches".format(match), level="verbose")
                connects.append(match)
        for connect in connects:
            G.add_edge(connect, node[0])
    return G
|
Takes the sakefile dictionary and builds a NetworkX graph
Args:
A dictionary that is the parsed Sakefile (from sake.py)
The settings dictionary
Returns:
A NetworkX graph
|
def report(ctx, board, done, output):
ctx.obj['board_id'] = board
ts = TrelloStats(ctx.obj)
"""
Reporting mode - Daily snapshots of a board for ongoing reporting:
-> trellis report --board=87hiudhw
--spend
--revenue
--done=Done
"""
ct = cycle_time(ts, board, done)
env = get_env()
# Get all render functions from the module and filter out the ones we don't want.
render_functions = [target for target in
dir(sys.modules['trellostats.reports'])
if target.startswith("render_") and
target.endswith(output)]
for render_func in render_functions:
print globals()[render_func](env, **dict(cycle_time=ct))
|
Reporting mode - Daily snapshots of a board for ongoing reporting:
-> trellis report --board=87hiudhw
--spend
--revenue
--done=Done
|
def VerifyRow(self, parser_mediator, row):
    """Verifies if a line of the file is in the expected format.

    Args:
      parser_mediator (ParserMediator): mediates interactions between
          parsers and other components, such as storage and dfvfs.
      row (dict[str, str]): fields of a single row, as specified in COLUMNS.

    Returns:
      bool: True if this is the correct parser, False otherwise.
    """
    # Sleuthkit version 3 format:
    # MD5|name|inode|mode_as_string|UID|GID|size|atime|mtime|ctime|crtime
    # 0|/lost+found|11|d/drwx------|0|0|12288|1337961350|1337961350|1337961350|0
    md5_value = row['md5']
    if md5_value != '0' and not self._MD5_RE.match(md5_value):
        return False

    # These columns, when present and non-empty, must parse as base-10
    # integers.
    for name in ('uid', 'gid', 'size', 'atime', 'mtime', 'ctime', 'crtime'):
        value = row.get(name, None)
        if not value:
            continue
        try:
            int(value, 10)
        except (TypeError, ValueError):
            return False

    return True
|
Verifies if a line of the file is in the expected format.
Args:
parser_mediator (ParserMediator): mediates interactions between
parsers and other components, such as storage and dfvfs.
row (dict[str, str]): fields of a single row, as specified in COLUMNS.
Returns:
bool: True if this is the correct parser, False otherwise.
|
def calculate_r_matrices(fine_states, reduced_matrix_elements, q=None,
                         numeric=True, convention=1):
    ur"""Calculate the matrix elements of the electric dipole (in the helicity
    basis).

    We calculate all matrix elements for the D2 line in Rb 87.

    >>> from sympy import symbols, pprint
    >>> red = symbols("r", positive=True)
    >>> reduced_matrix_elements = [[0, -red], [red, 0]]

    >>> g = State("Rb", 87, 5, 0, 1/Integer(2))
    >>> e = State("Rb", 87, 5, 1, 3/Integer(2))
    >>> fine_levels = [g, e]

    >>> r = calculate_r_matrices(fine_levels, reduced_matrix_elements,
    ...                          numeric=False)
    >>> pprint(r[0][8:,:8])
    ⎡          √3⋅r                              ⎤
    ⎢ 0     0  ────    0     0     0     0    0  ⎥
    ⎢           6                                ⎥
    ⎢                                            ⎥
    ⎢    -√15⋅r                √15⋅r             ⎥
    ⎢ 0  ───────  0    0     0 ─────    0     0  ⎥
    ⎢       12                   60              ⎥
    ⎢                                            ⎥
    ⎢          -√15⋅r                √5⋅r        ⎥
    ⎢ 0     0  ───────  0    0     0 ────     0  ⎥
    ⎢             12                  20         ⎥
    ⎢                                            ⎥
    ⎢                                       √10⋅r⎥
    ⎢ 0     0     0     0    0     0     0  ─────⎥
    ⎢                                         20 ⎥
    ⎢                                            ⎥
    ⎢√2⋅r                 -√6⋅r                  ⎥
    ⎢────   0     0     0 ──────   0     0    0  ⎥
    ⎢ 4                     12                   ⎥
    ⎢                                            ⎥
    ⎢       r                      -r            ⎥
    ⎢ 0     ─     0     0    0     ───   0    0  ⎥
    ⎢       4                       4            ⎥
    ⎢                                            ⎥
    ⎢          √3⋅r                      -r      ⎥
    ⎢ 0     0  ────     0    0     0     ───  0  ⎥
    ⎢           12                        4      ⎥
    ⎢                                            ⎥
    ⎢                                       -√6⋅r⎥
    ⎢ 0     0     0     0    0     0     0  ──────⎥
    ⎢                                         12 ⎥
    ⎢                                            ⎥
    ⎢ 0     0     0     0    0     0     0    0  ⎥
    ⎢                                            ⎥
    ⎢                   r                        ⎥
    ⎢ 0     0     0     ─    0     0     0    0  ⎥
    ⎢                   2                        ⎥
    ⎢                                            ⎥
    ⎢                     √6⋅r                   ⎥
    ⎢ 0     0     0     0 ────     0     0    0  ⎥
    ⎢                       6                    ⎥
    ⎢                                            ⎥
    ⎢                          √10⋅r             ⎥
    ⎢ 0     0     0     0    0 ─────    0     0  ⎥
    ⎢                            10              ⎥
    ⎢                                            ⎥
    ⎢                                √5⋅r        ⎥
    ⎢ 0     0     0     0    0     0 ────     0  ⎥
    ⎢                                 10         ⎥
    ⎢                                            ⎥
    ⎢                                       √15⋅r⎥
    ⎢ 0     0     0     0    0     0     0  ─────⎥
    ⎢                                         30 ⎥
    ⎢                                            ⎥
    ⎢ 0     0     0     0    0     0     0    0  ⎥
    ⎢                                            ⎥
    ⎣ 0     0     0     0    0     0     0    0  ⎦
    >>> pprint(r[1][8:,:8])
    ⎡   -√3⋅r                                      ⎤
    ⎢ 0 ──────   0     0     0      0      0    0  ⎥
    ⎢     6                                        ⎥
    ⎢                                              ⎥
    ⎢√15⋅r             -√5⋅r                       ⎥
    ⎢───── 0     0     0 ──────    0      0     0  ⎥
    ⎢  12                 20                       ⎥
    ⎢                                              ⎥
    ⎢                          -√15⋅r              ⎥
    ⎢ 0     0     0     0    0 ───────    0     0  ⎥
    ⎢                             30               ⎥
    ⎢                                              ⎥
    ⎢      -√15⋅r                       -√5⋅r      ⎥
    ⎢ 0  0 ───────  0     0     0     ──────    0  ⎥
    ⎢         12                         20        ⎥
    ⎢                                              ⎥
    ⎢            √3⋅r                              ⎥
    ⎢ 0     0  0 ────     0     0      0        0  ⎥
    ⎢             6                                ⎥
    ⎢                                              ⎥
    ⎢ r                 √3⋅r                       ⎥
    ⎢ ─     0     0   0 ────    0      0        0  ⎥
    ⎢ 4                  12                        ⎥
    ⎢                                              ⎥
    ⎢    √3⋅r                                      ⎥
    ⎢ 0  ────     0     0     0     0      0    0  ⎥
    ⎢     6                                        ⎥
    ⎢                                              ⎥
    ⎢          r                      -√3⋅r        ⎥
    ⎢ 0     0  ─     0     0     0    ──────    0  ⎥
    ⎢          4                        12         ⎥
    ⎢                                              ⎥
    ⎢                                         -√3⋅r⎥
    ⎢ 0     0     0     0     0     0      0  ──────⎥
    ⎢                                           6  ⎥
    ⎢                                              ⎥
    ⎢ 0     0     0     0     0     0      0    0  ⎥
    ⎢                                              ⎥
    ⎢            √3⋅r                              ⎥
    ⎢ 0     0  0 ────     0     0      0        0  ⎥
    ⎢             6                                ⎥
    ⎢                                              ⎥
    ⎢                   √30⋅r                      ⎥
    ⎢ 0     0     0   0 ─────    0      0       0  ⎥
    ⎢                     15                       ⎥
    ⎢                                              ⎥
    ⎢                          √15⋅r               ⎥
    ⎢ 0     0     0     0    0 ─────     0      0  ⎥
    ⎢                            10                ⎥
    ⎢                                              ⎥
    ⎢                                 √30⋅r        ⎥
    ⎢ 0     0     0     0     0     0 ─────     0  ⎥
    ⎢                                   15         ⎥
    ⎢                                              ⎥
    ⎢                                         √3⋅r ⎥
    ⎢ 0     0     0     0     0     0      0  ──── ⎥
    ⎢                                          6   ⎥
    ⎢                                              ⎥
    ⎣ 0     0     0     0     0     0      0    0  ⎦
    >>> pprint(r[2][8:,:8])
    ⎡√3⋅r                                   ⎤
    ⎢────   0     0     0    0    0    0   0⎥
    ⎢ 6                                     ⎥
    ⎢                                       ⎥
    ⎢                 √10⋅r                 ⎥
    ⎢ 0     0     0   ─────  0    0    0   0⎥
    ⎢                   20                  ⎥
    ⎢                                       ⎥
    ⎢√15⋅r                 √5⋅r             ⎥
    ⎢─────  0     0     0  ────   0    0   0⎥
    ⎢  12                   20              ⎥
    ⎢                                       ⎥
    ⎢      √15⋅r                √15⋅r       ⎥
    ⎢ 0    ─────  0     0    0  ─────  0   0⎥
    ⎢        12                   60        ⎥
    ⎢                                       ⎥
    ⎢ 0     0     0     0    0    0    0   0⎥
    ⎢                                       ⎥
    ⎢                 √6⋅r                  ⎥
    ⎢ 0     0     0   ────   0    0    0   0⎥
    ⎢                  12                   ⎥
    ⎢                                       ⎥
    ⎢√3⋅r                    r              ⎥
    ⎢────   0     0     0    ─    0    0   0⎥
    ⎢ 12                     4              ⎥
    ⎢                                       ⎥
    ⎢       r                     r         ⎥
    ⎢ 0     ─     0     0    0    ─    0   0⎥
    ⎢       4                     4         ⎥
    ⎢                                       ⎥
    ⎢          √2⋅r                 √6⋅r    ⎥
    ⎢ 0     0  ────     0    0    0 ────   0⎥
    ⎢           4                    12     ⎥
    ⎢                                       ⎥
    ⎢ 0     0     0     0    0    0    0   0⎥
    ⎢                                       ⎥
    ⎢ 0     0     0     0    0    0    0   0⎥
    ⎢                                       ⎥
    ⎢                 √15⋅r                 ⎥
    ⎢ 0     0     0   ─────  0    0    0   0⎥
    ⎢                   30                  ⎥
    ⎢                                       ⎥
    ⎢                      √5⋅r             ⎥
    ⎢ 0     0     0     0  ────   0    0   0⎥
    ⎢                       10              ⎥
    ⎢                                       ⎥
    ⎢                           √10⋅r       ⎥
    ⎢ 0     0     0     0    0  ─────  0   0⎥
    ⎢                             10        ⎥
    ⎢                                       ⎥
    ⎢                                √6⋅r   ⎥
    ⎢ 0     0     0     0    0    0  ────  0⎥
    ⎢                                 6     ⎥
    ⎢                                       ⎥
    ⎢                                      r⎥
    ⎢ 0     0     0     0    0    0    0   ─⎥
    ⎣                                      2⎦
    """
    magnetic_states = make_list_of_states(fine_states, 'magnetic', verbose=0)
    aux = calculate_boundaries(fine_states, magnetic_states)
    index_list_fine, index_list_hyperfine = aux
    Ne = len(magnetic_states)
    # r[p][i][j] holds the matrix element for the p-th helicity component
    # (p = -1, 0, +1 stored at indices 0, 1, 2).
    r = [[[0 for j in range(Ne)] for i in range(Ne)] for p in range(3)]
    II = fine_states[0].i
    for p in [-1, 0, 1]:
        for i in range(Ne):
            ei = magnetic_states[i]
            ii = fine_index(i, index_list_fine)
            for j in range(Ne):
                ej = magnetic_states[j]
                jj = fine_index(j, index_list_fine)
                reduced_matrix_elementij = reduced_matrix_elements[ii][jj]
                if reduced_matrix_elementij != 0:
                    # NOTE(review): ``jj`` is reused below for the angular
                    # momentum quantum number, shadowing the fine-level index
                    # above; harmless since it is recomputed each j iteration,
                    # but easy to misread.
                    ji = ei.j; jj = ej.j
                    fi = ei.f; fj = ej.f
                    mi = ei.m; mj = ej.m
                    rpij = matrix_element(ji, fi, mi, jj, fj, mj,
                                          II, reduced_matrix_elementij, p,
                                          numeric=numeric,
                                          convention=convention)
                    # q restricts the coupling to the lower (q=1) or upper
                    # (q=-1) triangle; otherwise keep every element.
                    if q == 1:
                        r[p+1][i][j] = rpij*delta_lesser(i, j)
                    elif q == -1:
                        r[p+1][i][j] = rpij*delta_greater(i, j)
                    else:
                        r[p+1][i][j] = rpij
    if not numeric:
        r = [Matrix(ri) for ri in r]
    return r
|
ur"""Calculate the matrix elements of the electric dipole (in the helicity
basis).
We calculate all matrix elements for the D2 line in Rb 87.
>>> from sympy import symbols, pprint
>>> red = symbols("r", positive=True)
>>> reduced_matrix_elements = [[0, -red], [red, 0]]
>>> g = State("Rb", 87, 5, 0, 1/Integer(2))
>>> e = State("Rb", 87, 5, 1, 3/Integer(2))
>>> fine_levels = [g, e]
>>> r = calculate_r_matrices(fine_levels, reduced_matrix_elements,
... numeric=False)
>>> pprint(r[0][8:,:8])
⎡ √3⋅r ⎤
⎢ 0 0 ──── 0 0 0 0 0 ⎥
⎢ 6 ⎥
⎢ ⎥
⎢ -√15⋅r √15⋅r ⎥
⎢ 0 ─────── 0 0 0 ───── 0 0 ⎥
⎢ 12 60 ⎥
⎢ ⎥
⎢ -√15⋅r √5⋅r ⎥
⎢ 0 0 ─────── 0 0 0 ──── 0 ⎥
⎢ 12 20 ⎥
⎢ ⎥
⎢ √10⋅r ⎥
⎢ 0 0 0 0 0 0 0 ───── ⎥
⎢ 20 ⎥
⎢ ⎥
⎢√2⋅r -√6⋅r ⎥
⎢──── 0 0 0 ────── 0 0 0 ⎥
⎢ 4 12 ⎥
⎢ ⎥
⎢ r -r ⎥
⎢ 0 ─ 0 0 0 ─── 0 0 ⎥
⎢ 4 4 ⎥
⎢ ⎥
⎢ √3⋅r -r ⎥
⎢ 0 0 ──── 0 0 0 ─── 0 ⎥
⎢ 12 4 ⎥
⎢ ⎥
⎢ -√6⋅r ⎥
⎢ 0 0 0 0 0 0 0 ──────⎥
⎢ 12 ⎥
⎢ ⎥
⎢ 0 0 0 0 0 0 0 0 ⎥
⎢ ⎥
⎢ r ⎥
⎢ 0 0 0 ─ 0 0 0 0 ⎥
⎢ 2 ⎥
⎢ ⎥
⎢ √6⋅r ⎥
⎢ 0 0 0 0 ──── 0 0 0 ⎥
⎢ 6 ⎥
⎢ ⎥
⎢ √10⋅r ⎥
⎢ 0 0 0 0 0 ───── 0 0 ⎥
⎢ 10 ⎥
⎢ ⎥
⎢ √5⋅r ⎥
⎢ 0 0 0 0 0 0 ──── 0 ⎥
⎢ 10 ⎥
⎢ ⎥
⎢ √15⋅r ⎥
⎢ 0 0 0 0 0 0 0 ───── ⎥
⎢ 30 ⎥
⎢ ⎥
⎢ 0 0 0 0 0 0 0 0 ⎥
⎢ ⎥
⎣ 0 0 0 0 0 0 0 0 ⎦
>>> pprint(r[1][8:,:8])
⎡ -√3⋅r ⎤
⎢ 0 ────── 0 0 0 0 0 0 ⎥
⎢ 6 ⎥
⎢ ⎥
⎢√15⋅r -√5⋅r ⎥
⎢───── 0 0 0 ────── 0 0 0 ⎥
⎢ 12 20 ⎥
⎢ ⎥
⎢ -√15⋅r ⎥
⎢ 0 0 0 0 0 ─────── 0 0 ⎥
⎢ 30 ⎥
⎢ ⎥
⎢ -√15⋅r -√5⋅r ⎥
⎢ 0 0 ─────── 0 0 0 ────── 0 ⎥
⎢ 12 20 ⎥
⎢ ⎥
⎢ √3⋅r ⎥
⎢ 0 0 0 ──── 0 0 0 0 ⎥
⎢ 6 ⎥
⎢ ⎥
⎢ r √3⋅r ⎥
⎢ ─ 0 0 0 ──── 0 0 0 ⎥
⎢ 4 12 ⎥
⎢ ⎥
⎢ √3⋅r ⎥
⎢ 0 ──── 0 0 0 0 0 0 ⎥
⎢ 6 ⎥
⎢ ⎥
⎢ r -√3⋅r ⎥
⎢ 0 0 ─ 0 0 0 ────── 0 ⎥
⎢ 4 12 ⎥
⎢ ⎥
⎢ -√3⋅r ⎥
⎢ 0 0 0 0 0 0 0 ──────⎥
⎢ 6 ⎥
⎢ ⎥
⎢ 0 0 0 0 0 0 0 0 ⎥
⎢ ⎥
⎢ √3⋅r ⎥
⎢ 0 0 0 ──── 0 0 0 0 ⎥
⎢ 6 ⎥
⎢ ⎥
⎢ √30⋅r ⎥
⎢ 0 0 0 0 ───── 0 0 0 ⎥
⎢ 15 ⎥
⎢ ⎥
⎢ √15⋅r ⎥
⎢ 0 0 0 0 0 ───── 0 0 ⎥
⎢ 10 ⎥
⎢ ⎥
⎢ √30⋅r ⎥
⎢ 0 0 0 0 0 0 ───── 0 ⎥
⎢ 15 ⎥
⎢ ⎥
⎢ √3⋅r ⎥
⎢ 0 0 0 0 0 0 0 ──── ⎥
⎢ 6 ⎥
⎢ ⎥
⎣ 0 0 0 0 0 0 0 0 ⎦
>>> pprint(r[2][8:,:8])
⎡√3⋅r ⎤
⎢──── 0 0 0 0 0 0 0⎥
⎢ 6 ⎥
⎢ ⎥
⎢ √10⋅r ⎥
⎢ 0 0 0 ───── 0 0 0 0⎥
⎢ 20 ⎥
⎢ ⎥
⎢√15⋅r √5⋅r ⎥
⎢───── 0 0 0 ──── 0 0 0⎥
⎢ 12 20 ⎥
⎢ ⎥
⎢ √15⋅r √15⋅r ⎥
⎢ 0 ───── 0 0 0 ───── 0 0⎥
⎢ 12 60 ⎥
⎢ ⎥
⎢ 0 0 0 0 0 0 0 0⎥
⎢ ⎥
⎢ √6⋅r ⎥
⎢ 0 0 0 ──── 0 0 0 0⎥
⎢ 12 ⎥
⎢ ⎥
⎢√3⋅r r ⎥
⎢──── 0 0 0 ─ 0 0 0⎥
⎢ 12 4 ⎥
⎢ ⎥
⎢ r r ⎥
⎢ 0 ─ 0 0 0 ─ 0 0⎥
⎢ 4 4 ⎥
⎢ ⎥
⎢ √2⋅r √6⋅r ⎥
⎢ 0 0 ──── 0 0 0 ──── 0⎥
⎢ 4 12 ⎥
⎢ ⎥
⎢ 0 0 0 0 0 0 0 0⎥
⎢ ⎥
⎢ 0 0 0 0 0 0 0 0⎥
⎢ ⎥
⎢ √15⋅r ⎥
⎢ 0 0 0 ───── 0 0 0 0⎥
⎢ 30 ⎥
⎢ ⎥
⎢ √5⋅r ⎥
⎢ 0 0 0 0 ──── 0 0 0⎥
⎢ 10 ⎥
⎢ ⎥
⎢ √10⋅r ⎥
⎢ 0 0 0 0 0 ───── 0 0⎥
⎢ 10 ⎥
⎢ ⎥
⎢ √6⋅r ⎥
⎢ 0 0 0 0 0 0 ──── 0⎥
⎢ 6 ⎥
⎢ ⎥
⎢ r⎥
⎢ 0 0 0 0 0 0 0 ─⎥
⎣ 2⎦
|
def download_file(save_path, file_url):
    """Download a file from an HTTP(S) URL and save it to *save_path*.

    Args:
        save_path (str): Local path the downloaded content is written to.
        file_url (str): URL to fetch.

    Returns:
        str: ``save_path``, for convenience.

    Raises:
        requests.HTTPError: If the server responds with a 4xx/5xx status
            (previously the error page itself was silently saved to disk).
    """
    response = requests.get(file_url, stream=True)  # stream: don't buffer the whole body
    response.raise_for_status()
    with open(save_path, 'wb') as f:
        for chunk in response.iter_content(chunk_size=65536):
            f.write(chunk)
    return save_path
|
Download file from http url link
|
def _on(on_signals, callback, max_calls=None):
    """
    Proxy for `smokesignal.on`, which is compatible as both a function call and
    a decorator. This private variant cannot be used as a decorator.

    :param on_signals: A single signal or list/tuple of signals that callback should respond to
    :param callback: A callable that should respond to supplied signal(s)
    :param max_calls: Integer maximum calls for callback. None for no limit.
    """
    if not callable(callback):
        raise AssertionError('Signal callbacks must be callable')

    # Support for lists of signals
    signal_list = on_signals if isinstance(on_signals, (list, tuple)) else [on_signals]

    callback._max_calls = max_calls

    # Register the callback
    for sig in signal_list:
        receivers[sig].add(callback)

    # Attach helper partials for later use, without clobbering existing ones.
    for attr, helper in (('responds_to', responds_to),
                         ('signals', signals),
                         ('disconnect', disconnect),
                         ('disconnect_from', disconnect_from)):
        if not hasattr(callback, attr):
            setattr(callback, attr, partial(helper, callback))

    return callback
|
Proxy for `smokesignal.on`, which is compatible as both a function call and
a decorator. This method cannot be used as a decorator
:param signals: A single signal or list/tuple of signals that callback should respond to
    :param callback: A callable that should respond to supplied signal(s)
:param max_calls: Integer maximum calls for callback. None for no limit.
|
def _get_model_fitting(self, mf_id):
"""
Retreive model fitting with identifier 'mf_id' from the list of model
fitting objects stored in self.model_fitting
"""
for model_fitting in self.model_fittings:
if model_fitting.activity.id == mf_id:
return model_fitting
raise Exception("Model fitting activity with id: " + str(mf_id) +
" not found.")
|
    Retrieve model fitting with identifier 'mf_id' from the list of model
fitting objects stored in self.model_fitting
|
def upload_files(self, abspaths, relpaths, remote_objects):
    """
    Determines files to be uploaded and calls ``upload_file`` on each.

    Files whose remote timestamp is newer than the local modification time
    are skipped; ``skip_count``, ``update_count`` and ``create_count`` are
    updated accordingly.
    """
    # Map each relative path to its absolute path once, instead of scanning
    # ``abspaths`` per relpath (previously O(n^2)).  ``setdefault`` keeps the
    # first match, mirroring the old ``[...][0]`` behavior on duplicates.
    abspath_by_rel = {}
    for path in abspaths:
        abspath_by_rel.setdefault(path[len(self.file_root):], path)
    for relpath in relpaths:
        abspath = abspath_by_rel[relpath]
        cloud_datetime = remote_objects.get(relpath)
        local_datetime = datetime.datetime.utcfromtimestamp(
            os.stat(abspath).st_mtime)
        if cloud_datetime and local_datetime < cloud_datetime:
            # remote copy is at least as new: nothing to do
            self.skip_count += 1
            if not self.quiet:
                print("Skipped {0}: not modified.".format(relpath))
            continue
        if relpath in remote_objects:
            self.update_count += 1
        else:
            self.create_count += 1
        self.upload_file(abspath, relpath)
|
Determines files to be uploaded and call ``upload_file`` on each.
|
def is_BF_hypergraph(self):
    """Indicates whether the hypergraph is a BF-hypergraph.

    A BF-hypergraph consists of only B-hyperedges and F-hyperedges.
    See "is_B_hypergraph" or "is_F_hypergraph" for more details.

    :returns: bool -- True iff the hypergraph is a BF-hypergraph.
    """
    for hyperedge_id in self._hyperedge_attributes:
        tail = self.get_hyperedge_tail(hyperedge_id)
        head = self.get_hyperedge_head(hyperedge_id)
        # A hyperedge with multiple tail AND multiple head nodes is neither
        # a B-hyperedge nor an F-hyperedge.
        if len(tail) > 1 and len(head) > 1:
            return False
    return True
|
Indicates whether the hypergraph is a BF-hypergraph.
A BF-hypergraph consists of only B-hyperedges and F-hyperedges.
See "is_B_hypergraph" or "is_F_hypergraph" for more details.
:returns: bool -- True iff the hypergraph is an F-hypergraph.
|
def as_sql(self, *args, **kwargs):
    """
    Overrides the :class:`SQLUpdateCompiler` method in order to remove any
    CTE-related WHERE clauses, which are not necessary for UPDATE queries,
    yet may have been added if this query was cloned from a CTEQuery.

    :return:
    :rtype:
    """
    # Strip the CTE WHERE clauses in place before delegating to the parent
    # compiler.
    CTEQuery._remove_cte_where(self.query)
    # NOTE(review): ``super(self.__class__, self)`` resolves the MRO from the
    # *runtime* class, so subclassing this compiler would recurse infinitely;
    # the enclosing class should be named explicitly here -- confirm and fix.
    return super(self.__class__, self).as_sql(*args, **kwargs)
|
Overrides the :class:`SQLUpdateCompiler` method in order to remove any
CTE-related WHERE clauses, which are not necessary for UPDATE queries,
yet may have been added if this query was cloned from a CTEQuery.
:return:
:rtype:
|
def diffusion_correlated(diffusion_constant=0.2, exposure_time=0.05,
        samples=40, phi=0.25):
    """
    Calculate the (perhaps) correlated diffusion effect between particles
    during the exposure time of the confocal microscope. diffusion_constant is
    in terms of seconds and pixel sizes exposure_time is in seconds

    1 micron radius particle:
        D = kT / (6 a\pi\eta)
        for 80/20 g/w (60 mPas), 3600 nm^2/sec ~ 0.15 px^2/sec
        for 100 % w (0.9 mPas), ~ 10.1 px^2/sec
    a full 60 layer scan takes 0.1 sec, so a particle is 0.016 sec exposure
    """
    radius = 5
    # presumably the half-widths of the point spread function -- TODO confirm
    psfsize = np.array([2.0, 1.0, 3.0])/2

    # create a base image of particles at the requested packing fraction
    pos, rad, tile = nbody.initialize_particles(N=50, phi=phi, polydispersity=0.0)
    sim = nbody.BrownianHardSphereSimulation(
        pos, rad, tile, D=diffusion_constant, dt=exposure_time/samples
    )
    # relax the initial configuration with a larger timestep, then restore
    # the per-sample timestep used during the simulated exposure
    sim.dt = 1e-2
    sim.relax(2000)
    sim.dt = exposure_time/samples

    # move the center to index 0 for easier analysis later
    c = ((sim.pos - sim.tile.center())**2).sum(axis=-1).argmin()
    pc = sim.pos[c].copy()
    sim.pos[c] = sim.pos[0]
    sim.pos[0] = pc

    # which particles do we want to simulate motion for? particle
    # zero and its neighbors
    mask = np.zeros_like(sim.rad).astype('bool')
    neigh = sim.neighbors(3*radius, 0)
    for i in neigh+[0]:
        mask[i] = True

    img = np.zeros(sim.tile.shape)
    s0 = runner.create_state(img, sim.pos, sim.rad, ignoreimage=True)

    # add up a bunch of trajectories: average model image and positions over
    # ``samples`` Brownian steps (NOTE: ``xrange`` makes this Python 2 only)
    finalimage = 0*s0.get_model_image()[s0.inner]
    position = 0*s0.obj.pos

    for i in xrange(samples):
        sim.step(1, mask=mask)
        s0.obj.pos = sim.pos.copy() + s0.pad
        s0.reset()

        finalimage += s0.get_model_image()[s0.inner]
        position += s0.obj.pos

    finalimage /= float(samples)
    position /= float(samples)

    # place that into a new image at the expected parameters
    s = runner.create_state(img, sim.pos, sim.rad, ignoreimage=True)
    s.reset()

    # measure the true inferred parameters
    return s, finalimage, position
|
Calculate the (perhaps) correlated diffusion effect between particles
during the exposure time of the confocal microscope. diffusion_constant is
in terms of seconds and pixel sizes exposure_time is in seconds
1 micron radius particle:
D = kT / (6 a\pi\eta)
for 80/20 g/w (60 mPas), 3600 nm^2/sec ~ 0.15 px^2/sec
for 100 % w (0.9 mPas), ~ 10.1 px^2/sec
a full 60 layer scan takes 0.1 sec, so a particle is 0.016 sec exposure
|
def tmpdir():
    """
    Context manager: create a temporary directory for the cwd and remove it
    afterwards.
    """
    path = None
    try:
        with _tmpdir_extant() as path:
            yield path
    finally:
        # only clean up if the directory was actually created
        if path is not None:
            shutil.rmtree(path, ignore_errors=True)
|
Create a tempdir context for the cwd and remove it after.
|
def get_workflow_status_of(brain_or_object, state_var="review_state"):
    """Get the current workflow status of the given brain or context.

    :param brain_or_object: A single catalog brain or content object
    :type brain_or_object: ATContentType/DexterityContentType/CatalogBrain
    :param state_var: The name of the state variable
    :type state_var: string
    :returns: Status
    :rtype: str
    """
    wf_tool = get_tool("portal_workflow")
    return wf_tool.getInfoFor(ob=get_object(brain_or_object), name=state_var)
|
Get the current workflow status of the given brain or context.
:param brain_or_object: A single catalog brain or content object
:type brain_or_object: ATContentType/DexterityContentType/CatalogBrain
:param state_var: The name of the state variable
:type state_var: string
:returns: Status
:rtype: str
|
def gaussian(data, mean, covariance):
    """!
    @brief Calculates gaussian density for each point of a dataset using the
            specified mean (mathematical expectation) and variance or
            covariance matrix in case of multi-dimensional data.

    @param[in] data (list): Data that is used for gaussian calculation.
    @param[in] mean (float|numpy.array): Mathematical expectation used for calculation.
    @param[in] covariance (float|numpy.array): Variance or covariance matrix for calculation.

    @return (list) Value of gaussian function for each point in dataset.

    """
    dimension = float(len(data[0]))

    if dimension != 1.0:
        inv_variance = numpy.linalg.pinv(covariance)
        # the multivariate Gaussian is normalized by sqrt(det(cov)); the
        # previous version used the Frobenius norm here, which is wrong for
        # any non-trivial covariance matrix
        det = numpy.linalg.det(covariance)
    else:
        inv_variance = 1.0 / covariance
        det = covariance

    divider = (pi * 2.0) ** (dimension / 2.0) * numpy.sqrt(numpy.abs(det))
    if divider != 0.0:
        right_const = 1.0 / divider
    else:
        # singular covariance: density is unbounded
        right_const = float('inf')

    result = []
    for point in data:
        mean_delta = point - mean
        point_gaussian = right_const * numpy.exp( -0.5 * mean_delta.dot(inv_variance).dot(numpy.transpose(mean_delta)) )
        result.append(point_gaussian)

    return result
|
!
@brief Calculates gaussian for dataset using specified mean (mathematical expectation) and variance or covariance in case
multi-dimensional data.
@param[in] data (list): Data that is used for gaussian calculation.
@param[in] mean (float|numpy.array): Mathematical expectation used for calculation.
@param[in] covariance (float|numpy.array): Variance or covariance matrix for calculation.
@return (list) Value of gaussian function for each point in dataset.
|
def remove_field(self, name):
    """Remove the field named *name* from the schema descriptor.

    https://github.com/frictionlessdata/tableschema-py#schema

    Returns the removed field, or None when no field with that name exists.
    """
    field = self.get_field(name)
    if field:
        # Materialize a list: on Python 3 ``filter`` returns a lazy iterator,
        # which left a non-list in the descriptor and broke later reads of
        # ``fields``.
        self.__current_descriptor['fields'] = [
            f for f in self.__current_descriptor['fields']
            if f.get('name') != name
        ]
        self.__build()
    return field
|
https://github.com/frictionlessdata/tableschema-py#schema
|
def _parse_xmatch_catalog_header(xc, xk):
'''
This parses the header for a catalog file and returns it as a file object.
Parameters
----------
xc : str
The file name of an xmatch catalog prepared previously.
xk : list of str
This is a list of column names to extract from the xmatch catalog.
Returns
-------
tuple
The tuple returned is of the form::
(infd: the file object associated with the opened xmatch catalog,
catdefdict: a dict describing the catalog column definitions,
catcolinds: column number indices of the catalog,
catcoldtypes: the numpy dtypes of the catalog columns,
catcolnames: the names of each catalog column,
catcolunits: the units associated with each catalog column)
'''
catdef = []
# read in this catalog and transparently handle gzipped files
if xc.endswith('.gz'):
infd = gzip.open(xc,'rb')
else:
infd = open(xc,'rb')
# read in the defs
for line in infd:
if line.decode().startswith('#'):
catdef.append(
line.decode().replace('#','').strip().rstrip('\n')
)
if not line.decode().startswith('#'):
break
if not len(catdef) > 0:
LOGERROR("catalog definition not parseable "
"for catalog: %s, skipping..." % xc)
return None
catdef = ' '.join(catdef)
catdefdict = json.loads(catdef)
catdefkeys = [x['key'] for x in catdefdict['columns']]
catdefdtypes = [x['dtype'] for x in catdefdict['columns']]
catdefnames = [x['name'] for x in catdefdict['columns']]
catdefunits = [x['unit'] for x in catdefdict['columns']]
# get the correct column indices and dtypes for the requested columns
# from the catdefdict
catcolinds = []
catcoldtypes = []
catcolnames = []
catcolunits = []
for xkcol in xk:
if xkcol in catdefkeys:
xkcolind = catdefkeys.index(xkcol)
catcolinds.append(xkcolind)
catcoldtypes.append(catdefdtypes[xkcolind])
catcolnames.append(catdefnames[xkcolind])
catcolunits.append(catdefunits[xkcolind])
return (infd, catdefdict,
catcolinds, catcoldtypes, catcolnames, catcolunits)
|
This parses the header for a catalog file and returns it as a file object.
Parameters
----------
xc : str
The file name of an xmatch catalog prepared previously.
xk : list of str
This is a list of column names to extract from the xmatch catalog.
Returns
-------
tuple
The tuple returned is of the form::
(infd: the file object associated with the opened xmatch catalog,
catdefdict: a dict describing the catalog column definitions,
catcolinds: column number indices of the catalog,
catcoldtypes: the numpy dtypes of the catalog columns,
catcolnames: the names of each catalog column,
catcolunits: the units associated with each catalog column)
|
async def retract(self, mount: top_types.Mount, margin: float):
    """ Pull the specified mount up to its home position.

    Works regardless of critical point or home status.
    """
    axis_name = Axis.by_mount(mount).name.upper()
    async with self._motion_lock:
        home_pos = self._backend.fast_home(axis_name, margin)
        self._current_position = self._deck_from_smoothie(home_pos)
|
Pull the specified mount up to its home position.
Works regardless of critical point or home status.
|
def record_iterator(xml):
    """
    Iterate over all ``<record>`` tags in `xml`.

    Args:
        xml (str/file): Input string with XML. UTF-8 is the preferred
            encoding; unicode should be ok.

    Yields:
        MARCXMLRecord: For each corresponding ``<record>``.
    """
    # accept file-like objects as well as strings
    if hasattr(xml, "read"):
        xml = xml.read()

    try:
        dom = dhtmlparser.parseString(xml)
    except UnicodeError:
        # retry with an explicit UTF-8 encoding of the unicode input
        dom = dhtmlparser.parseString(xml.encode("utf-8"))

    for record_xml in dom.findB("record"):
        yield MARCXMLRecord(record_xml)
|
Iterate over all ``<record>`` tags in `xml`.
Args:
        xml (str/file): Input string with XML. UTF-8 is the preferred encoding,
unicode should be ok.
Yields:
MARCXMLRecord: For each corresponding ``<record>``.
|
def set(self, key, val, time=0, min_compress_len=0):
    '''Unconditionally sets a key to a given value in the memcache.

    The C{key} can optionally be a tuple whose first element is the server
    hash value and whose second is the key, if you want to avoid making this
    module calculate a hash value -- for example, to keep all of a given
    user's objects on the same memcache server by using the user's unique
    id as the hash value.

    @return: Nonzero on success.
    @rtype: int
    @param time: Tells memcached the time at which this value should expire,
    either as a delta number of seconds or an absolute unix
    time-since-the-epoch value. See the memcached protocol docs section
    "Storage Commands" for more info on <exptime>. Defaults to
    0 == cache forever.
    @param min_compress_len: The threshold length to kick in auto-compression
    of the value using zlib.compress(). If the value being cached is a
    string, the length of the string is measured; for objects, the length
    of the pickle result is measured. If the attempted compression yields a
    larger string than the input, it is discarded. For backwards
    compatibility this defaults to 0, meaning never try to compress.
    '''
    return self._set("set", key, val, time, min_compress_len)
|
Unconditionally sets a key to a given value in the memcache.
The C{key} can optionally be an tuple, with the first element
being the server hash value and the second being the key.
If you want to avoid making this module calculate a hash value.
You may prefer, for example, to keep all of a given user's objects
on the same memcache server, so you could use the user's unique
id as the hash value.
@return: Nonzero on success.
@rtype: int
@param time: Tells memcached the time which this value should expire, either
as a delta number of seconds, or an absolute unix time-since-the-epoch
value. See the memcached protocol docs section "Storage Commands"
for more info on <exptime>. We default to 0 == cache forever.
@param min_compress_len: The threshold length to kick in auto-compression
of the value using the zlib.compress() routine. If the value being cached is
a string, then the length of the string is measured, else if the value is an
object, then the length of the pickle result is measured. If the resulting
    attempt at compression yields a larger string than the input, then it is
    discarded. For backwards compatibility, this parameter defaults to 0,
indicating don't ever try to compress.
|
def pick_frequency_line(self, filename, frequency, cumulativefield='cumulative_frequency'):
    '''Given a numeric frequency, pick a line from a csv with a cumulative frequency field'''
    # prefer the file bundled with the censusname package; fall back to a
    # plain filesystem path
    if not resource_exists('censusname', filename):
        with open(filename, encoding='ascii') as handle:
            return self._pick_frequency_line(handle, frequency, cumulativefield)
    with closing(resource_stream('censusname', filename)) as stream:
        decoded = codecs.iterdecode(stream, 'ascii')
        return self._pick_frequency_line(decoded, frequency, cumulativefield)
|
Given a numeric frequency, pick a line from a csv with a cumulative frequency field
|
def add_deviation(self, dev, td=None):
    """
    Add a deviation survey to this instance, and try to compute a position
    log from it.

    :param dev: the deviation survey data to attach to this instance.
    :param td: optional total depth, forwarded to ``compute_position_log``.
    :returns: None. If the position log cannot be computed,
        ``self.position`` is set to ``None`` instead of propagating
        the error.
    """
    self.deviation = dev
    try:
        self.compute_position_log(td=td)
    except Exception:
        # Deliberate best-effort: a failed computation leaves the survey
        # attached but clears the position log. A bare ``except:`` (the
        # original form) would also swallow KeyboardInterrupt/SystemExit,
        # so catch Exception instead.
        self.position = None
|
Add a deviation survey to this instance, and try to compute a position
log from it.
|
def publish(self, value):
    """
    Accepts: float
    Returns: float
    """
    published = super(Float, self).publish(value)
    # Integers pass the parent's validation but must be coerced to float.
    return float(published) if isinstance(published, int) else published
|
Accepts: float
Returns: float
|
def _set_ipv6_track(self, v, load=False):
    """
    Setter method for ipv6_track, mapped from YANG variable /rbridge_id/interface/ve/ipv6/ipv6_local_anycast_gateway/ipv6_track (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_ipv6_track is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_ipv6_track() directly.

    :param v: the new value for the container; coerced through its
        ``_utype`` constructor when one is present.
    :param load: unused here; conventionally True when the value is being
        loaded from a datastore rather than set by a user.
    :raises ValueError: if ``v`` cannot be wrapped as the ipv6_track
        container type.
    """
    # Auto-generated pyangbind setter: coerce, wrap in YANGDynClass, store.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=ipv6_track.ipv6_track, is_container='container', presence=False, yang_name="ipv6-track", rest_name="track", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Track', u'alt-name': u'track'}}, namespace='urn:brocade.com:mgmt:brocade-anycast-gateway', defining_module='brocade-anycast-gateway', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """ipv6_track must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=ipv6_track.ipv6_track, is_container='container', presence=False, yang_name="ipv6-track", rest_name="track", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Track', u'alt-name': u'track'}}, namespace='urn:brocade.com:mgmt:brocade-anycast-gateway', defining_module='brocade-anycast-gateway', yang_type='container', is_config=True)""",
        })
    self.__ipv6_track = t
    # Notify the containing object of the change, if it supports it.
    if hasattr(self, '_set'):
        self._set()
|
Setter method for ipv6_track, mapped from YANG variable /rbridge_id/interface/ve/ipv6/ipv6_local_anycast_gateway/ipv6_track (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_ipv6_track is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_ipv6_track() directly.
|
def ctc_symbol_loss(top_out, targets, model_hparams, vocab_size, weight_fn):
    """Compute the CTC loss.

    Args:
      top_out: logits tensor; squeezed on axes [2, 3] below, so it is
        presumably [batch, length, 1, 1, vocab] — TODO confirm shape.
      targets: int tensor of shape [batch, length, 1, 1]; id 0 is treated
        as padding when computing sequence lengths.
      model_hparams: unused.
      vocab_size: unused.
      weight_fn: callable mapping the squeezed targets to per-position
        weights.

    Returns:
      A (loss numerator, loss denominator) pair: the summed CTC cross
      entropy and the summed target weights.
    """
    del model_hparams, vocab_size  # unused arg
    logits = top_out
    with tf.name_scope("ctc_loss", values=[logits, targets]):
        # For CTC we assume targets are 1d, [batch, length, 1, 1] here.
        targets_shape = targets.get_shape().as_list()
        assert len(targets_shape) == 4
        assert targets_shape[2] == 1
        assert targets_shape[3] == 1
        targets = tf.squeeze(targets, axis=[2, 3])
        logits = tf.squeeze(logits, axis=[2, 3])
        # Positions with id 0 are padding; lengths count non-zero entries.
        targets_mask = 1 - tf.to_int32(tf.equal(targets, 0))
        targets_lengths = tf.reduce_sum(targets_mask, axis=1)
        sparse_targets = tf.keras.backend.ctc_label_dense_to_sparse(
            targets, targets_lengths)
        xent = tf.nn.ctc_loss(
            sparse_targets,
            logits,
            targets_lengths,
            time_major=False,
            preprocess_collapse_repeated=False,
            ctc_merge_repeated=False)
        weights = weight_fn(targets)
        return tf.reduce_sum(xent), tf.reduce_sum(weights)
|
Compute the CTC loss.
|
def addVariantFeature(self,variantFeature):
    '''Appends one VariantFeature to this instance's feature list.

    :param variantFeature: feature to append; must be a ``Feature``.
    :raises TypeError: if ``variantFeature`` is not a ``Feature`` instance.
    '''
    if not isinstance(variantFeature, Feature):
        # The original code did ``raise(TypeError, 'msg')`` which raises a
        # tuple: under Python 3 that surfaces as "exceptions must derive
        # from BaseException" and the intended message is lost. Raise a
        # proper TypeError instead (callers catching TypeError still work).
        raise TypeError(
            'variantFeature Type should be Feature, not %s' % type(variantFeature))
    self.features.append(variantFeature)
|
Appends one VariantFeature to variantFeatures
|
def child_object(self):
    """ Get Task child object class """
    from . import types
    # The second dotted component of ``task_type`` names the concrete type.
    type_name = self.task_type.split('.')[1]
    child_klass = types.get(type_name)
    return child_klass.retrieve(self.task_id, client=self._client)
|
Get Task child object class
|
def Si_to_pandas_dict(S_dict):
    """Convert Si information into Pandas DataFrame compatible dict.

    Parameters
    ----------
    S_dict : ResultDict
        Sobol sensitivity indices (must expose a ``problem`` attribute).

    Returns
    -------
    tuple
        ``(total_order, first_order, (idx, second_order))`` where the first
        two are plain dicts, ``idx`` is a list of parameter-name pairs for
        use as a DataFrame index, and ``second_order`` holds the matching
        'S2'/'S2_conf' values. When no second-order indices are present,
        the final element is ``(None, None)``.

    Examples
    --------
    >>> X = saltelli.sample(problem, 1000)
    >>> Y = Ishigami.evaluate(X)
    >>> Si = sobol.analyze(problem, Y, print_to_console=True)
    >>> T_Si, first_Si, (idx, second_Si) = sobol.Si_to_pandas_dict(Si, problem)
    """
    problem = S_dict.problem
    total_order = {key: S_dict[key] for key in ('ST', 'ST_conf')}
    first_order = {key: S_dict[key] for key in ('S1', 'S1_conf')}

    idx, second_order = None, None
    if 'S2' in S_dict:
        names = problem['names']
        idx = list(combinations(names, 2))
        # Map each name pair to its (row, col) position in the S2 matrices.
        positions = [(names.index(a), names.index(b)) for a, b in idx]
        second_order = {
            'S2': [S_dict['S2'][pos] for pos in positions],
            'S2_conf': [S_dict['S2_conf'][pos] for pos in positions],
        }
    return total_order, first_order, (idx, second_order)
|
Convert Si information into Pandas DataFrame compatible dict.
Parameters
----------
S_dict : ResultDict
Sobol sensitivity indices
See Also
----------
Si_list_to_dict
Returns
----------
tuple : of total, first, and second order sensitivities.
Total and first order are dicts.
Second order sensitivities contain a tuple of parameter name
combinations for use as the DataFrame index and second order
sensitivities.
If no second order indices found, then returns tuple of (None, None)
Examples
--------
>>> X = saltelli.sample(problem, 1000)
>>> Y = Ishigami.evaluate(X)
>>> Si = sobol.analyze(problem, Y, print_to_console=True)
>>> T_Si, first_Si, (idx, second_Si) = sobol.Si_to_pandas_dict(Si, problem)
|
def configure_logger(glob, multi_level,
                     relative=False, logfile=None, syslog=False):
    """
    Logger configuration function for setting either a simple debug mode or a
    multi-level one.
    :param glob: globals dictionary
    :param multi_level: boolean telling if multi-level debug is to be considered
    :param relative: use relative time for the logging messages
    :param logfile: log file to be saved (None means do not log to file)
    :param syslog: enable logging to /var/log/syslog

    Side effects: mutates ``glob`` (sets 'logger' and
    ``glob['args']._debug_level``), replaces the handlers of the
    module-level ``logger``, and — when ``relative`` — monkeypatches
    ``coloredlogs.ColoredFormatter`` globally.
    """
    # Verbosity counts (0..3) index into the chosen level ladder.
    levels = [logging.ERROR, logging.WARNING, logging.INFO, logging.DEBUG] \
        if multi_level else [logging.INFO, logging.DEBUG]
    try:
        verbose = min(int(glob['args'].verbose), 3)
    except AttributeError:
        # No 'verbose' on args: default to the least verbose level.
        verbose = 0
    glob['args']._debug_level = dl = levels[verbose]
    # Drop any handlers installed earlier so reconfiguration doesn't
    # duplicate output.
    logger.handlers = []
    glob['logger'] = logger
    handler = logging.StreamHandler()
    formatter = logging.Formatter(glob['LOG_FORMAT'], glob['DATE_FORMAT'])
    handler.setFormatter(formatter)
    glob['logger'].addHandler(handler)
    glob['logger'].setLevel(dl)
    if relative:
        # NOTE(review): global monkeypatch — affects every later
        # coloredlogs.install() call in the process, not just this logger.
        coloredlogs.ColoredFormatter = RelativeTimeColoredFormatter
    coloredlogs.install(dl,
                        logger=glob['logger'],
                        fmt=glob['LOG_FORMAT'],
                        datefmt=glob['DATE_FORMAT'],
                        milliseconds=glob['TIME_MILLISECONDS'],
                        syslog=syslog,
                        stream=logfile)
|
Logger configuration function for setting either a simple debug mode or a
multi-level one.
:param glob: globals dictionary
:param multi_level: boolean telling if multi-level debug is to be considered
:param relative: use relative time for the logging messages
:param logfile: log file to be saved (None means do not log to file)
:param syslog: enable logging to /var/log/syslog
|
def reciprocal_rank(
    model,
    test_interactions,
    train_interactions=None,
    user_features=None,
    item_features=None,
    preserve_rows=False,
    num_threads=1,
    check_intersections=True,
):
    """
    Measure the reciprocal rank metric for a model: 1 / the rank of the
    highest ranked positive example. A perfect score is 1.0.

    Parameters
    ----------
    model: LightFM instance
        the fitted model to be evaluated
    test_interactions: np.float32 csr_matrix of shape [n_users, n_items]
        non-zero entries representing known positives in the evaluation set.
    train_interactions: np.float32 csr_matrix, optional
        known positives in the train set; omitted from scoring to avoid
        re-recommending known positives.
    user_features: np.float32 csr_matrix, optional
        per-user weights over features.
    item_features: np.float32 csr_matrix, optional
        per-item weights over features.
    preserve_rows: boolean, optional
        when False (default) only rows for users with interactions in the
        evaluation set are returned; when True, one row per user.
    num_threads: int, optional
        number of parallel computation threads; must be >= 1.
    check_intersections: bool, optional
        whether to check test/train matrices for intersections to prevent
        optimistic ranks / wrong evaluation / bad data split.

    Returns
    -------
    np.array of reciprocal rank scores per user; 0.0 for users without
    interactions.
    """
    if num_threads < 1:
        raise ValueError("Number of threads must be 1 or larger.")

    predicted_ranks = model.predict_rank(
        test_interactions,
        train_interactions=train_interactions,
        user_features=user_features,
        item_features=item_features,
        num_threads=num_threads,
        check_intersections=check_intersections,
    )

    # A (0-based) rank r becomes a reciprocal-rank score of 1 / (r + 1);
    # the best-ranked positive per user is the row maximum.
    predicted_ranks.data = 1.0 / (predicted_ranks.data + 1.0)
    scores = np.squeeze(np.array(predicted_ranks.max(axis=1).todense()))

    if preserve_rows:
        return scores
    return scores[test_interactions.getnnz(axis=1) > 0]
|
Measure the reciprocal rank metric for a model: 1 / the rank of the highest
ranked positive example. A perfect score is 1.0.
Parameters
----------
model: LightFM instance
the fitted model to be evaluated
test_interactions: np.float32 csr_matrix of shape [n_users, n_items]
Non-zero entries representing known positives in the evaluation set.
train_interactions: np.float32 csr_matrix of shape [n_users, n_items], optional
Non-zero entries representing known positives in the train set. These
will be omitted from the score calculations to avoid re-recommending
known positives.
user_features: np.float32 csr_matrix of shape [n_users, n_user_features], optional
Each row contains that user's weights over features.
item_features: np.float32 csr_matrix of shape [n_items, n_item_features], optional
Each row contains that item's weights over features.
preserve_rows: boolean, optional
When False (default), the number of rows in the output will be equal
to the number of users with interactions in the evaluation set.
When True, the number of rows in the output will be equal to the
number of users.
num_threads: int, optional
Number of parallel computation threads to use. Should
not be higher than the number of physical cores.
check_intersections: bool, optional, True by default,
Only relevant when train_interactions are supplied.
A flag that signals whether the test and train matrices should be checked
for intersections to prevent optimistic ranks / wrong evaluation / bad data split.
Returns
-------
np.array of shape [n_users with interactions or n_users,]
Numpy array containing reciprocal rank scores for each user.
If there are no interactions for a given user the returned value will
be 0.0.
|
def xack(self, stream, group_name, id, *ids):
    """Acknowledge a message for a given consumer group"""
    # ``id`` shadows the builtin but is kept: callers may pass it by name.
    command = (b'XACK', stream, group_name, id) + ids
    return self.execute(*command)
|
Acknowledge a message for a given consumer group
|
def filter(self, value, model=None, context=None):
    """
    Filter
    Sanitizes the stringified value with bleach and returns the result.
    :param value: input value
    :param model: parent model being validated (unused here)
    :param context: object, filtering context (unused here)
    :return: filtered value
    """
    # Coerce to str first so non-string inputs can also be cleaned.
    return bleach.clean(text=str(value), **self.bleach_params)
|
Filter
Performs value filtering and returns filtered result.
:param value: input value
:param model: parent model being validated
:param context: object, filtering context
:return: filtered value
|
def shorten_duplicate_content_url(url):
    """Remove anchor part and trailing index.html from URL."""
    # Everything from the first '#' is a fragment; discard it.
    url = url.partition('#')[0]
    # NOTE(review): this is a plain suffix test, so e.g. ".../myindex.html"
    # also loses its "index.html" tail — presumably acceptable for
    # duplicate-content grouping; confirm if exact paths matter.
    for suffix in ('index.html', 'index.htm'):
        if url.endswith(suffix):
            return url[:-len(suffix)]
    return url
|
Remove anchor part and trailing index.html from URL.
|
def select_data(db_file, slab=None, facet=None):
    """Gathers relevant data from SQL database generated by CATHUB.

    Parameters
    ----------
    db_file : Path to database
    slab : Which metal (slab) to select.
    facet : Which facets to select (must carry its own SQL quoting,
        e.g. ``"'100'"``).

    Returns
    -------
    data : SQL cursor output.
    """
    # NOTE(review): the query is built by string concatenation, so the
    # caller-supplied slab/facet values are interpolated verbatim. Fine for
    # a trusted local database, but not injection-safe — consider
    # parameterized queries if inputs can be untrusted.
    columns = 'chemical_composition, facet, reactants, products, reaction_energy'
    if slab and facet:
        query = ('select ' + columns + ' from reaction where facet=' + str(facet) +
                 ' and chemical_composition LIKE "%' + slab + '%";')
    elif slab:
        query = ('select ' + columns +
                 ' from reaction where chemical_composition LIKE "%' + slab + '%";')
    else:
        query = 'select ' + columns + ' from reaction;'
    con = sql.connect(db_file)
    cur = con.cursor()
    cur.execute(query)
    return cur.fetchall()
|
Gathers relevant data from SQL database generated by CATHUB.
Parameters
----------
db_file : Path to database
slab : Which metal (slab) to select.
facet : Which facets to select.
Returns
-------
data : SQL cursor output.
|
def visit_dictcomp(self, node, parent):
    """visit a DictComp node by returning a fresh instance of it"""
    newnode = nodes.DictComp(node.lineno, node.col_offset, parent)
    # Visit children in source order with the new node as their parent.
    key = self.visit(node.key, newnode)
    value = self.visit(node.value, newnode)
    generators = [self.visit(gen, newnode) for gen in node.generators]
    newnode.postinit(key, value, generators)
    return newnode
|
visit a DictComp node by returning a fresh instance of it
|
def pot_to_requiv_contact(pot, q, sma, compno=1):
    """
    Build a ConstraintParameter expressing the constraint
    ``pot_to_requiv_contact(pot, q, sma, compno)`` over the given
    parameters.

    :param pot: potential parameter (its bundle is used as the constraint's
        bundle)
    :param q: mass-ratio parameter
    :param sma: semi-major-axis parameter
    :param compno: component number (default 1)
    :return: ConstraintParameter wrapping the formatted constraint
        expression.

    NOTE(review): parameter semantics inferred from names and usage —
    confirm against the surrounding constraints module.
    """
    return ConstraintParameter(pot._bundle, "pot_to_requiv_contact({}, {}, {}, {})".format(_get_expr(pot), _get_expr(q), _get_expr(sma), compno))
|
TODO: add documentation
|
def _get_es_version(self, config):
"""
Get the running version of elasticsearch.
"""
try:
data = self._get_data(config.url, config, send_sc=False)
# pre-release versions of elasticearch are suffixed with -rcX etc..
# peel that off so that the map below doesn't error out
version = data['version']['number'].split('-')[0]
version = [int(p) for p in version.split('.')[0:3]]
except AuthenticationError:
raise
except Exception as e:
self.warning("Error while trying to get Elasticsearch version from %s %s" % (config.url, str(e)))
version = [1, 0, 0]
self.service_metadata('version', version)
self.log.debug("Elasticsearch version is %s" % version)
return version
|
Get the running version of elasticsearch.
|
def connect(self):
    """
    Connects to a Modbus-TCP Server or a Modbus-RTU Slave with the given Parameters

    Side effects: for RTU, re-creates ``self.__ser`` with the configured
    baudrate/parity/stopbits; for TCP, connects the socket, marks the
    client connected and starts the background listener thread.
    """
    if (self.__ser is not None):
        # RTU mode: translate the numeric config codes into pyserial
        # constants before rebuilding the serial port object.
        serial = importlib.import_module("serial")
        # NOTE(review): code 1 maps to TWO stop bits and code 2 to
        # ONE_POINT_FIVE — unconventional ordering; confirm against the
        # configuration docs before changing.
        if self.__stopbits == 0:
            self.__ser.stopbits = serial.STOPBITS_ONE
        elif self.__stopbits == 1:
            self.__ser.stopbits = serial.STOPBITS_TWO
        elif self.__stopbits == 2:
            self.__ser.stopbits = serial.STOPBITS_ONE_POINT_FIVE
        if self.__parity == 0:
            self.__ser.parity = serial.PARITY_EVEN
        elif self.__parity == 1:
            self.__ser.parity = serial.PARITY_ODD
        elif self.__parity == 2:
            self.__ser.parity = serial.PARITY_NONE
        # Rebuild the port with the resolved settings; flow control is off.
        self.__ser = serial.Serial(self.serialPort, self.__baudrate, timeout=self.__timeout, parity=self.__ser.parity, stopbits=self.__ser.stopbits, xonxoff=0, rtscts=0)
        self.__ser.writeTimeout = self.__timeout
    #print (self.ser)
    if (self.__tcpClientSocket is not None):
        # TCP mode: connect with a fixed 5 s timeout, then spawn the
        # receive loop in a daemonless worker thread.
        self.__tcpClientSocket.settimeout(5)
        self.__tcpClientSocket.connect((self.__ipAddress, self.__port))
        self.__connected = True
        self.__thread = threading.Thread(target=self.__listen, args=())
        self.__thread.start()
|
Connects to a Modbus-TCP Server or a Modbus-RTU Slave with the given Parameters
|
def search(self):
    """
    Click on the Search button and wait for the
    results page to be displayed
    """
    # Trigger the search, then block until the results page reports ready.
    search_button = self.q(css='button.btn')
    search_button.click()
    results_page = GitHubSearchResultsPage(self.browser)
    results_page.wait_for_page()
|
Click on the Search button and wait for the
results page to be displayed
|
def exact_anniversaries(frequency, anniversary, start, finish):
    """
    Returns the number of exact anniversaries if start and finish represent an anniversary.
    ie..
    exact_anniversaries(DATE_FREQUENCY_MONTHLY, 10, date(2012, 2, 10), date(2012, 3, 9)) returns 1
    exact_anniversaries(DATE_FREQUENCY_MONTHLY, 10, date(2012, 2, 10), date(2012, 4, 9)) returns 2

    Returns False when start/finish do not line up on whole periods.
    """
    if frequency != DATE_FREQUENCY_MONTHLY:
        raise DateFrequencyError("Only monthly date frequency is supported - not '%s'" % (frequency))
    if start.day != anniversary:
        return False
    count = 0
    cursor = start
    while cursor <= finish:
        # A whole period runs from ``cursor`` to the day before the same
        # day next month; a partial trailing period means no exact match.
        if cursor + relativedelta(months=+1, days=-1) > finish:
            return False
        count += 1
        cursor = cursor + relativedelta(months=+1)
    return count
|
Returns the number of exact anniversaries if start and finish represent an anniversary.
ie..
exact_anniversaries(DATE_FREQUENCY_MONTHLY, 10, date(2012, 2, 10), date(2012, 3, 9)) returns 1
exact_anniversaries(DATE_FREQUENCY_MONTHLY, 10, date(2012, 2, 10), date(2012, 4, 9)) returns 2
|
def list_scheduled_queries(self):
    """
    List all scheduled_queries
    :return: A list of all scheduled query dicts
    :rtype: list of dict
    :raises: This will raise a
        :class:`ServerException<logentries_api.exceptions.ServerException>`
        if there is an error from Logentries
    """
    endpoint = 'https://logentries.com/rest/{account_id}/api/scheduled_queries/'.format(
        account_id=self.account_id)
    response = self._api_get(url=endpoint)
    # The API nests the results under 'scheduled_searches'.
    return response.get('scheduled_searches')
|
List all scheduled_queries
:return: A list of all scheduled query dicts
:rtype: list of dict
:raises: This will raise a
:class:`ServerException<logentries_api.exceptions.ServerException>`
if there is an error from Logentries
|
def write_records(records, output_file, split=False):
    """Write FASTA records

    Write a FASTA file from an iterable of records.

    Parameters
    ----------
    records : iterable
        Input records to write.
    output_file : file, str or pathlib.Path
        Output FASTA file to be written into. When ``split`` is True it is
        used as a filename prefix instead.
    split : bool, optional
        If True, each record is written into its own separate file. Default
        is False.
    """
    if not split:
        SeqIO.write(records, output_file, "fasta")
        return
    # One file per record, named "<output_file><record id>.fa".
    for record in records:
        path = "{}{}.fa".format(output_file, record.id)
        with open(path, "w") as record_handle:
            SeqIO.write(record, record_handle, "fasta")
|
Write FASTA records
Write a FASTA file from an iterable of records.
Parameters
----------
records : iterable
Input records to write.
output_file : file, str or pathlib.Path
Output FASTA file to be written into.
split : bool, optional
If True, each record is written into its own separate file. Default is
False.
|
def dendrogram(adata: AnnData, groupby: str,
               n_pcs: Optional[int]=None,
               use_rep: Optional[str]=None,
               var_names: Optional[List[str]]=None,
               use_raw: Optional[bool]=None,
               cor_method: Optional[str]='pearson',
               linkage_method: Optional[str]='complete',
               key_added: Optional[str]=None) -> None:
    """\
    Computes a hierarchical clustering for the given `groupby` categories.
    By default, the PCA representation is used unless `.X` has less than 50 variables.
    Alternatively, a list of `var_names` (e.g. genes) can be given.
    Average values of either `var_names` or components are used to compute a correlation matrix.
    The hierarchical clustering can be visualized using `sc.pl.dendrogram` or multiple other
    visualizations that can include a dendrogram: `matrixplot`, `heatmap`, `dotplot` and `stacked_violin`
    .. note::
        The computation of the hierarchical clustering is based on predefined groups and not
        per cell. The correlation matrix is computed using by default pearson but other methods
        are available.
    Parameters
    ----------
    adata : :class:`~anndata.AnnData`
        Annotated data matrix
    {n_pcs}
    {use_rep}
    var_names : `list of str` (default: None)
        List of var_names to use for computing the hierarchical clustering. If `var_names` is given,
        then `use_rep` and `n_pcs` is ignored.
    use_raw : `bool`, optional (default: None)
        Only when `var_names` is not None. Use `raw` attribute of `adata` if present.
    cor_method : `str`, optional (default: `"pearson"`)
        correlation method to use. Options are 'pearson', 'kendall', and 'spearman'
    linkage_method : `str`, optional (default: `"complete"`)
        linkage method to use. See https://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.linkage.html
        for more information.
    key_added : : `str`, optional (default: `None`)
        By default, the dendrogram information is added to `.uns['dendrogram_' + groupby]`. Notice
        that the `groupby` information is added to the dendrogram.
    Returns
    -------
    adata.uns['dendrogram'] (or instead of 'dendrogram' the value selected for `key_added`) is updated
    with the dendrogram information
    Examples
    --------
    >>> adata = sc.datasets.pbmc68k_reduced()
    >>> sc.tl.dendrogram(adata, groupby='bulk_labels')
    >>> sc.pl.dendrogram(adata)
    >>> sc.pl.dotplot(adata, ['C1QA', 'PSAP', 'CD79A', 'CD79B', 'CST3', 'LYZ'],
    ...               groupby='bulk_labels', dendrogram=True)
    """
    # Validate that groupby exists and is categorical before any heavy work.
    if groupby not in adata.obs_keys():
        raise ValueError('groupby has to be a valid observation. Given value: {}, '
                         'valid observations: {}'.format(groupby, adata.obs_keys()))
    if not is_categorical_dtype(adata.obs[groupby]):
        # if the groupby column is not categorical, turn it into one
        # by subdividing into `num_categories` categories
        raise ValueError('groupby has to be a categorical observation. Given value: {}, '
                         'Column type: {}'.format(groupby, adata.obs[groupby].dtype))
    # Build a per-cell dataframe indexed by group: either a chosen
    # representation (e.g. PCA) or the raw/processed values of var_names.
    if var_names is None:
        rep_df = pd.DataFrame(choose_representation(adata, use_rep=use_rep, n_pcs=n_pcs))
        rep_df.set_index(adata.obs[groupby], inplace=True)
        categories = rep_df.index.categories
    else:
        if use_raw is None and adata.raw is not None: use_raw = True
        gene_names = adata.raw.var_names if use_raw else adata.var_names
        from ..plotting._anndata import _prepare_dataframe
        categories, rep_df = _prepare_dataframe(adata, gene_names, groupby, use_raw)
    if key_added is None:
        key_added = 'dendrogram_' + groupby
    logg.info('Storing dendrogram info using `.uns[{!r}]`'.format(key_added))
    # aggregate values within categories using 'mean'
    mean_df = rep_df.groupby(level=0).mean()
    import scipy.cluster.hierarchy as sch
    # Cluster the groups by the correlation of their mean profiles.
    corr_matrix = mean_df.T.corr(method=cor_method)
    z_var = sch.linkage(corr_matrix, method=linkage_method)
    dendro_info = sch.dendrogram(z_var, labels=categories, no_plot=True)
    # order of groupby categories
    categories_idx_ordered = dendro_info['leaves']
    # Persist everything downstream plotting functions need.
    adata.uns[key_added] = {'linkage': z_var,
                            'groupby': groupby,
                            'use_rep': use_rep,
                            'cor_method': cor_method,
                            'linkage_method': linkage_method,
                            'categories_idx_ordered': categories_idx_ordered,
                            'dendrogram_info': dendro_info,
                            'correlation_matrix': corr_matrix.values}
|
\
Computes a hierarchical clustering for the given `groupby` categories.
By default, the PCA representation is used unless `.X` has less than 50 variables.
Alternatively, a list of `var_names` (e.g. genes) can be given.
Average values of either `var_names` or components are used to compute a correlation matrix.
The hierarchical clustering can be visualized using `sc.pl.dendrogram` or multiple other
visualizations that can include a dendrogram: `matrixplot`, `heatmap`, `dotplot` and `stacked_violin`
.. note::
The computation of the hierarchical clustering is based on predefined groups and not
per cell. The correlation matrix is computed using by default pearson but other methods
are available.
Parameters
----------
adata : :class:`~anndata.AnnData`
Annotated data matrix
{n_pcs}
{use_rep}
var_names : `list of str` (default: None)
List of var_names to use for computing the hierarchical clustering. If `var_names` is given,
then `use_rep` and `n_pcs` is ignored.
use_raw : `bool`, optional (default: None)
Only when `var_names` is not None. Use `raw` attribute of `adata` if present.
cor_method : `str`, optional (default: `"pearson"`)
correlation method to use. Options are 'pearson', 'kendall', and 'spearman'
linkage_method : `str`, optional (default: `"complete"`)
linkage method to use. See https://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.linkage.html
for more information.
key_added : : `str`, optional (default: `None`)
By default, the dendrogram information is added to `.uns['dendrogram_' + groupby]`. Notice
that the `groupby` information is added to the dendrogram.
Returns
-------
adata.uns['dendrogram'] (or instead of 'dendrogram' the value selected for `key_added`) is updated
with the dendrogram information
Examples
--------
>>> adata = sc.datasets.pbmc68k_reduced()
>>> sc.tl.dendrogram(adata, groupby='bulk_labels')
>>> sc.pl.dendrogram(adata)
>>> sc.pl.dotplot(adata, ['C1QA', 'PSAP', 'CD79A', 'CD79B', 'CST3', 'LYZ'],
... groupby='bulk_labels', dendrogram=True)
|
def conditions_list(self, conkey):
    """
    Return a (possibly empty) list of conditions based on
    conkey. The conditions are returned raw, not parsed.
    conkey: str
        for cond<n>, startcond<n> or stopcond<n>, specify only the
        prefix. The list will be filled with all conditions.
    """
    # Prefix match over the condition keys (sloppy: 'cond' also matches
    # 'cond10', as in the original implementation).
    matching = [key for key in self.conditions if key.startswith(conkey)]
    if not matching:
        raise KeyError(conkey)
    # None entries mean "condition unset" and are skipped.
    return [self.conditions[key] for key in matching
            if self.conditions[key] is not None]
|
Return a (possibly empty) list of conditions based on
conkey. The conditions are returned raw, not parsed.
conkey: str
for cond<n>, startcond<n> or stopcond<n>, specify only the
prefix. The list will be filled with all conditions.
|
def less(x, y):
    """
    Return True if x < y and False otherwise.
    This function returns False whenever x and/or y is a NaN.
    """
    # Coerce both operands to BigFloat before delegating to MPFR.
    lhs = BigFloat._implicit_convert(x)
    rhs = BigFloat._implicit_convert(y)
    return mpfr.mpfr_less_p(lhs, rhs)
|
Return True if x < y and False otherwise.
This function returns False whenever x and/or y is a NaN.
|
def remove_regex(urls, regex):
    """
    Parse a list for non-matches to a regex.
    Args:
        urls: iterable of urls (a bare string is treated as a single url)
        regex: string regex to be parsed for
    Returns:
        list of strings not matching regex; ``urls`` is returned unchanged
        when ``regex`` is falsy, and ``[]`` when an entry is not a string.
    """
    if not regex:
        return urls
    # A bare string would otherwise be iterated character by character.
    if not isinstance(urls, (list, set, tuple)):
        urls = [urls]
    try:
        return [url for url in urls if not re.search(regex, url)]
    except TypeError:
        # Non-string entries make re.search raise; treat as "no urls".
        return []
|
Parse a list for non-matches to a regex.
Args:
urls: iterable of urls
regex: string regex to be parsed for
Returns:
list of strings not matching regex
|
def result(self) -> workflow.IntervalGeneratorType:
    """
    Generate intervals indicating the valid sentences.

    Yields ``(start, end)`` index pairs into ``self.input_sequence``,
    consuming ``self.index_labels_generator`` (pairs of character index and
    labeler results) exactly once.
    """
    config = cast(SentenceSegementationConfig, self.config)
    # index/labels hold the most recently consumed item; ``labels is None``
    # signals that the next loop iteration must pull a fresh item.
    index = -1
    labels = None
    while True:
        # 1. Find the start of the sentence.
        start = -1
        while True:
            # Check the ``labels`` generated from step (2).
            if labels is None:
                # https://www.python.org/dev/peps/pep-0479/
                try:
                    index, labels = next(self.index_labels_generator)
                except StopIteration:
                    # Input exhausted before another sentence begins.
                    return
            # Check if we found a valid sentence char.
            if labels[SentenceValidCharacterLabeler]:
                start = index
                break
            # Trigger next(...) action.
            labels = None
            index = -1
        # 2. Find the ending.
        end = -1
        try:
            while True:
                index, labels = next(self.index_labels_generator)
                # Detected invalid char.
                if config.enable_strict_sentence_charset and \
                        not labels[SentenceValidCharacterLabeler] and \
                        not labels[WhitespaceLabeler]:
                    end = index
                    break
                # Detected sentence ending.
                if self._labels_indicate_sentence_ending(labels):
                    # Consume the ending span.
                    while True:
                        index, labels = next(self.index_labels_generator)
                        is_ending = (self._labels_indicate_sentence_ending(labels) or
                                     (config.extend_ending_with_delimiters and
                                      labels[DelimitersLabeler]))
                        if not is_ending:
                            end = index
                            break
                    # yeah we found the ending.
                    break
        except StopIteration:
            # Ran off the end of the input: the sentence extends to the end.
            end = len(self.input_sequence)
        # Trigger next(...) action.
        labels = None
        index = -1
        yield start, end
|
Generate intervals indicating the valid sentences.
|
def _parse_file(self, file_obj):
"""Directly read from file handler.
Note that this will move the file pointer.
"""
byte_data = file_obj.read(self.size)
self._parse_byte_data(byte_data)
|
Directly read from file handler.
Note that this will move the file pointer.
|
def on_frame(self, frame_in):
    """On RPC Frame.
    :param specification.Frame frame_in: Amqp frame.
    :return: True when the frame was expected and stored, else False.
    """
    name = frame_in.name
    if name not in self._request:
        return False
    uuid = self._request[name]
    # Append to an existing response list, or start a new one.
    pending = self._response[uuid]
    if pending:
        pending.append(frame_in)
    else:
        self._response[uuid] = [frame_in]
    return True
|
On RPC Frame.
:param specification.Frame frame_in: Amqp frame.
:return:
|
def isPairTag(self):
    """
    Returns:
        bool: True if this is pair tag - ``<body> .. </body>`` for example.
    """
    # Comments and explicit non-pair tags can never be pair tags.
    if self.isComment() or self.isNonPairTag():
        return False
    # Either a closing tag, or an opening tag with a matched endtag.
    return bool(self.isEndTag() or (self.isOpeningTag() and self.endtag))
|
Returns:
bool: True if this is pair tag - ``<body> .. </body>`` for example.
|
def convert_to_equivalent(self, unit, equivalence, **kwargs):
    """
    Return a copy of the unyt_array in the units specified units, assuming
    the given equivalency. The dimensions of the specified units and the
    dimensions of the original array need not match so long as there is an
    appropriate conversion in the specified equivalency.
    Parameters
    ----------
    unit : string
        The unit that you wish to convert to.
    equivalence : string
        The equivalence you wish to use. To see which equivalencies are
        supported for this unitful quantity, try the
        :meth:`list_equivalencies` method.
    Examples
    --------
    >>> from unyt import K
    >>> a = [10, 20, 30]*(1e7*K)
    >>> a.convert_to_equivalent("keV", "thermal")
    >>> a
    unyt_array([ 8.6173324, 17.2346648, 25.8519972], 'keV')
    """
    conv_unit = Unit(unit, registry=self.units.registry)
    # Same dimensions: a plain in-place unit conversion suffices, no
    # equivalence machinery needed.
    if self.units.same_dimensions_as(conv_unit):
        self.convert_to_units(conv_unit)
        return
    # Different dimensions: route through the named equivalence, mutating
    # this array in place, then finish with a normal unit conversion.
    this_equiv = equivalence_registry[equivalence](in_place=True)
    if self.has_equivalent(equivalence):
        this_equiv.convert(self, conv_unit.dimensions, **kwargs)
        self.convert_to_units(conv_unit)
    else:
        raise InvalidUnitEquivalence(equivalence, self.units, conv_unit)
|
Return a copy of the unyt_array in the units specified units, assuming
the given equivalency. The dimensions of the specified units and the
dimensions of the original array need not match so long as there is an
appropriate conversion in the specified equivalency.
Parameters
----------
unit : string
The unit that you wish to convert to.
equivalence : string
The equivalence you wish to use. To see which equivalencies are
supported for this unitful quantity, try the
:meth:`list_equivalencies` method.
Examples
--------
>>> from unyt import K
>>> a = [10, 20, 30]*(1e7*K)
>>> a.convert_to_equivalent("keV", "thermal")
>>> a
unyt_array([ 8.6173324, 17.2346648, 25.8519972], 'keV')
|
def build_graph(self, regularizers=()):
    '''Connect the layers in this network to form a computation graph.
    Parameters
    ----------
    regularizers : list of :class:`theanets.regularizers.Regularizer`
        A list of the regularizers to apply while building the computation
        graph.
    Returns
    -------
    outputs : list of Theano variables
        A list of expressions giving the output of each layer in the graph.
    updates : list of update tuples
        A list of updates that should be performed by a Theano function that
        computes something using this graph.
    '''
    # Graphs are cached per regularizer configuration; the hash keys the
    # cache so repeated calls with the same regularizers are free.
    key = self._hash(regularizers)
    if key not in self._graphs:
        util.log('building computation graph')
        for loss in self.losses:
            loss.log()
        for reg in regularizers:
            reg.log()
        outputs = {}
        updates = []
        # Layers are connected in order: each layer consumes the outputs
        # accumulated so far, and regularizers may rewrite its expressions
        # in place before they are published to later layers.
        for layer in self.layers:
            out, upd = layer.connect(outputs)
            for reg in regularizers:
                reg.modify_graph(out)
            outputs.update(out)
            updates.extend(upd)
        self._graphs[key] = outputs, updates
    return self._graphs[key]
|
Connect the layers in this network to form a computation graph.
Parameters
----------
regularizers : list of :class:`theanets.regularizers.Regularizer`
A list of the regularizers to apply while building the computation
graph.
Returns
-------
outputs : list of Theano variables
A list of expressions giving the output of each layer in the graph.
updates : list of update tuples
A list of updates that should be performed by a Theano function that
computes something using this graph.
|
async def main(loop):
    """Log packets from Bus."""
    # Route pyvlx debug output to the console.
    PYVLXLOG.setLevel(logging.DEBUG)
    handler = logging.StreamHandler()
    handler.setLevel(logging.DEBUG)
    PYVLXLOG.addHandler(handler)
    # Connect to the KLF 200 gateway using the on-disk config.
    pyvlx = PyVLX('pyvlx.yaml', loop=loop)
    await pyvlx.load_scenes()
    await pyvlx.load_nodes()
    # Keep logging for a while; raise this timeout to capture a longer trace.
    await asyncio.sleep(90)
    # Disconnect explicitly -- the KLF 200 handles dropped connections badly.
    await pyvlx.disconnect()
|
Log packets from Bus.
|
def best_four_point_to_sell(self):
    """ 判斷是否為四大賣點
    :rtype: str or False
    """
    # NOTE(review): the docstring above is kept verbatim (incl. language)
    # because this codebase reads method ``__doc__`` at runtime to build the
    # returned description string.
    if not self.check_plus_bias_ratio():
        return False
    # Evaluate each sell signal exactly once -- the original called every
    # best_sell_N twice (once in the guard, once in the branch).
    triggered = [check.__doc__.strip()
                 for check in (self.best_sell_1, self.best_sell_2,
                               self.best_sell_3, self.best_sell_4)
                 if check()]
    # Join the docstrings of the triggered signals, or False when none fired.
    return ', '.join(triggered) if triggered else False
|
判斷是否為四大賣點
:rtype: str or False
|
def _set_scores(self):
    """
    Compute anomaly scores for the time series by sliding both lagging window and future window.
    """
    self._generate_SAX()
    self._construct_all_SAX_chunk_dict()
    total = self.time_series_length
    lag = self.lag_window_size
    future = self.future_window_size
    scores = {}
    for idx, stamp in enumerate(self.time_series.timestamps):
        # Points without a full lagging window before them or a full future
        # window after them cannot be scored; give them a zero score.
        scorable = lag <= idx <= total - future
        scores[stamp] = self._compute_anom_score_between_two_windows(idx) if scorable else 0
    self.anom_scores = TimeSeries(self._denoise_scores(scores))
|
Compute anomaly scores for the time series by sliding both lagging window and future window.
|
def _evictStaleDevices(self):
    """
    A housekeeping function which runs in a worker thread and which evicts devices that haven't sent an update for a
    while.
    """
    while self.running:
        # Collect ids first so we never mutate self.devices while iterating it.
        stale_ids = [device_id for device_id, device in self.devices.items()
                     if device.hasExpired()]
        for device_id in stale_ids:
            logger.warning("Device timeout, removing " + device_id)
            del self.devices[device_id]
        time.sleep(1)
    # TODO send reset after a device fails
    logger.warning("DeviceCaretaker is now shutdown")
|
A housekeeping function which runs in a worker thread and which evicts devices that haven't sent an update for a
while.
|
def _httplib2_init(username, password):
    """Used to instantiate a regular HTTP request object"""
    http = httplib2.Http()
    # Attach basic-auth credentials only when both parts were supplied.
    if username and password:
        http.add_credentials(username, password)
    return http
|
Used to instantiate a regular HTTP request object
|
def does_collection_exist(self, collection_name, database_name=None):
    """
    Checks if a collection exists in CosmosDB.

    :param collection_name: id of the collection to look up; must not be None.
    :param database_name: optional database override; falls back to the
        hook's default database when omitted.
    :return: True if a container with that id exists, False otherwise.
    :raises AirflowBadRequest: if collection_name is None.
    """
    if collection_name is None:
        raise AirflowBadRequest("Collection name cannot be None.")
    existing_container = list(self.get_conn().QueryContainers(
        get_database_link(self.__get_database_name(database_name)), {
            "query": "SELECT * FROM r WHERE r.id=@id",
            "parameters": [
                {"name": "@id", "value": collection_name}
            ]
        }))
    # A non-empty query result means the collection exists (the original
    # spelled this as `if len(...) == 0: return False / return True`).
    return len(existing_container) > 0
|
Checks if a collection exists in CosmosDB.
|
def p_edgesigs(self, p):
    # NOTE: the string below is PLY's grammar rule for this parser action,
    # not ordinary documentation -- PLY reads it via __doc__ to build the
    # parse tables, so it must stay exactly as written.
    'edgesigs : edgesigs SENS_OR edgesig'
    # Append the newly parsed edgesig to the tuple accumulated so far.
    p[0] = p[1] + (p[3],)
    # Propagate the first symbol's line number onto the result symbol.
    p.set_lineno(0, p.lineno(1))
|
edgesigs : edgesigs SENS_OR edgesig
|
def cmd_signing_remove(self, args):
    '''remove signing from server'''
    # Signing is a MAVLink2 feature; bail out early on MAVLink1 links.
    if not self.master.mavlink20():
        print("You must be using MAVLink2 for signing")
        return
    # An all-zero 32-byte key with flags 0 tells the autopilot to drop signing.
    empty_key = [0] * 32
    self.master.mav.setup_signing_send(
        self.target_system, self.target_component, empty_key, 0)
    self.master.disable_signing()
    print("Removed signing")
|
remove signing from server
|
def _pretty_access_flags_gen(self):
    """
    generator of the pretty access flags
    """
    # Simple flags map one predicate to one word.
    for word, predicate in (("public", self.is_public),
                            ("final", self.is_final),
                            ("abstract", self.is_abstract)):
        if predicate():
            yield word
    # Interfaces that carry the annotation flag render as "@interface".
    if self.is_interface():
        yield "@interface" if self.is_annotation() else "interface"
    if self.is_enum():
        yield "enum"
|
generator of the pretty access flags
|
def describe_topic_rule(ruleName,
                        region=None, key=None, keyid=None, profile=None):
    '''
    Given a topic rule name describe its properties.
    Returns a dictionary of interesting properties.

    CLI Example:

    .. code-block:: bash

        salt myminion boto_iot.describe_topic_rule myrule
    '''
    try:
        conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
        rule = conn.get_topic_rule(ruleName=ruleName)
        if rule and 'rule' in rule:
            rule = rule['rule']
            keys = ('ruleName', 'sql', 'description',
                    'actions', 'ruleDisabled')
            # Project only the interesting keys; absent ones map to None.
            # (dict comprehension instead of dict([...]) -- same result.)
            return {'rule': {k: rule.get(k) for k in keys}}
        return {'rule': None}
    except ClientError as e:
        return {'error': __utils__['boto3.get_error'](e)}
|
Given a topic rule name describe its properties.
Returns a dictionary of interesting properties.
CLI Example:
.. code-block:: bash
salt myminion boto_iot.describe_topic_rule myrule
|
def on_open(self):
    """
    Shows an open file dialog and open the file if the dialog was
    accepted.
    """
    # getOpenFileName returns (path, selected_filter); path is '' on cancel.
    path, _selected_filter = QtWidgets.QFileDialog.getOpenFileName(self, 'Open')
    if not path:
        return
    self.open_file(path)
    self.actionRun.setEnabled(True)
    self.actionConfigure_run.setEnabled(True)
|
Shows an open file dialog and open the file if the dialog was
accepted.
|
def pretty_date(time=False):
    """
    Get a datetime object or a int() Epoch timestamp and return a
    pretty string like 'an hour ago', 'Yesterday', '3 months ago',
    'just now', etc

    Raises TypeError for truthy arguments that are neither int nor datetime
    (the original silently fell through and crashed with UnboundLocalError).
    """
    from datetime import datetime
    from django.utils import timezone
    now = timezone.now()
    if isinstance(time, int) and not isinstance(time, bool):
        # bool is a subclass of int, so without the extra check the default
        # `time=False` would be treated as epoch timestamp 0 and report
        # decades instead of "just now".
        # Attach now's tzinfo so a naive fromtimestamp() is never subtracted
        # from an aware timezone.now() (TypeError under USE_TZ=True); with
        # tzinfo=None this behaves exactly as before.
        diff = now - datetime.fromtimestamp(time, tz=now.tzinfo)
    elif isinstance(time, datetime):
        diff = now - time
    elif not time:
        diff = now - now
    else:
        raise TypeError("time must be an int epoch timestamp, a datetime, or falsy")
    second_diff = diff.seconds
    day_diff = diff.days
    if day_diff < 0:
        return ''
    if day_diff == 0:
        if second_diff < 10:
            return "just now"
        if second_diff < 60:
            return str(second_diff) + " seconds ago"
        if second_diff < 120:
            return "a minute ago"
        if second_diff < 3600:
            return str(second_diff // 60) + " minutes ago"
        if second_diff < 7200:
            return "an hour ago"
        if second_diff < 86400:
            return str(second_diff // 3600) + " hours ago"
    if day_diff == 1:
        return "Yesterday"
    if day_diff < 7:
        return str(day_diff) + " days ago"
    if day_diff < 31:
        return str(day_diff // 7) + " weeks ago"
    if day_diff < 365:
        return str(day_diff // 30) + " months ago"
    return str(day_diff // 365) + " years ago"
|
Get a datetime object or a int() Epoch timestamp and return a
pretty string like 'an hour ago', 'Yesterday', '3 months ago',
'just now', etc
|
def package_username(repo):
    '''Split a fabsetup addon repo name into (package_name, username).

    The package name is the repo name with dashes replaced by underscores;
    the username is the second dash-separated component.

    >>> package_username('fabsetup-theno-termdown')
    ('fabsetup_theno_termdown', 'theno')
    '''
    # NOTE(review): a repo name without a '-' raises IndexError here --
    # presumably callers always pass 'fabsetup-<user>-<addon>' names.
    package = repo.replace('-', '_')
    username = repo.split('-')[1]
    return package, username
|
>>> package_username('fabsetup-theno-termdown')
('fabsetup_theno_termdown', 'theno')
|
def get_dihedral(self, i: int, j: int, k: int, l: int) -> float:
    """
    Returns dihedral angle specified by four sites.

    Args:
        i: Index of first site
        j: Index of second site
        k: Index of third site
        l: Index of fourth site

    Returns:
        Dihedral angle in degrees.
    """
    # Bond vectors walking l -> k -> j -> i.
    b1 = self[k].coords - self[l].coords
    b2 = self[j].coords - self[k].coords
    b3 = self[i].coords - self[j].coords
    # Normals of the two planes spanned by consecutive bond pairs.
    n1 = np.cross(b1, b2)
    n2 = np.cross(b2, b3)
    # Signed angle between the planes via atan2 for full -180..180 range.
    y = np.linalg.norm(b2) * np.dot(b1, n2)
    x = np.dot(n1, n2)
    return math.degrees(math.atan2(y, x))
|
Returns dihedral angle specified by four sites.
Args:
i: Index of first site
j: Index of second site
k: Index of third site
l: Index of fourth site
Returns:
Dihedral angle in degrees.
|
def _monitor(last_ping, stop_plugin, is_shutting_down, timeout=5):
    """Monitors health checks (pings) from the Snap framework.
    If the plugin doesn't receive 3 consecutive health checks from Snap the
    plugin will shutdown. The default timeout is set to 5 seconds.
    """
    missed = 0
    last_check = time.time()
    # Poll roughly once per second, but never slower than the timeout itself.
    interval = min(1, timeout)
    while True:
        time.sleep(interval)
        # stop_plugin was called elsewhere; nothing left to monitor.
        if is_shutting_down():
            return
        now = time.time()
        ping_overdue = (now - last_ping()) > timeout
        if (now - last_check) > timeout and ping_overdue:
            # One full timeout window passed without a ping.
            last_check = now
            missed += 1
            LOG.warning("Missed ping health check from the framework. " +
                        "({} of 3)".format(missed))
            if missed >= 3:
                stop_plugin()
                return
        elif not ping_overdue:
            # A recent ping arrived; the miss streak is broken.
            missed = 0
|
Monitors health checks (pings) from the Snap framework.
If the plugin doesn't receive 3 consecutive health checks from Snap the
plugin will shutdown. The default timeout is set to 5 seconds.
|
def write_ImageMapLine(tlx, tly, brx, bry, w, h, dpi, chr, segment_start, segment_end):
    """
    Write out an image map area line with the coordinates passed to this
    function

    <area shape="rect" coords="tlx,tly,brx,bry" href="#chr7" title="chr7:100001..500001">
    """
    # Convert canvas coordinates to pixel coordinates.
    tlx, brx = [canvas2px(x, w, dpi) for x in (tlx, brx)]
    tly, bry = [canvas2px(y, h, dpi) for y in (tly, bry)]
    # `chr` arrives as "name:bac_list"; only the name part is used
    # (the original unpacked an unused `bac_list` local).
    chr = chr.split(':')[0]
    coords = ",".join(str(x) for x in (tlx, tly, brx, bry))
    return ('<area shape="rect" coords="{0}" href="#{1}"'
            ' title="{1}:{2}..{3}" />').format(coords, chr,
                                               segment_start, segment_end)
|
Write out an image map area line with the coordinates passed to this
function
<area shape="rect" coords="tlx,tly,brx,bry" href="#chr7" title="chr7:100001..500001">
|
def becomeMemberOf(self, groupRole):
    """
    Instruct this (user or group) Role to become a member of a group role.

    @param groupRole: The role that this group should become a member of.
    """
    # findOrCreate makes this idempotent: an existing identical
    # RoleRelationship row is reused rather than duplicated.
    self.store.findOrCreate(RoleRelationship,
                            group=groupRole,
                            member=self)
|
Instruct this (user or group) Role to become a member of a group role.
@param groupRole: The role that this group should become a member of.
|
def RotateServerKey(cn=u"grr", keylength=4096):
    """This function creates and installs a new server key.

    Note that
    - Clients might experience intermittent connection problems after
      the server keys rotated.
    - It's not possible to go back to an earlier key. Clients that see a
      new certificate will remember the cert's serial number and refuse
      to accept any certificate with a smaller serial number from that
      point on.

    Args:
      cn: The common name for the server to use.
      keylength: Length in bits for the new server key.

    Raises:
      ValueError: There is no CA cert in the config. Probably the server
        still needs to be initialized.
    """
    ca_certificate = config.CONFIG["CA.certificate"]
    ca_private_key = config.CONFIG["PrivateKeys.ca_key"]
    if not ca_certificate or not ca_private_key:
        raise ValueError("No existing CA certificate found.")
    # Bump the serial number past the current frontend cert: clients refuse
    # certs with a smaller serial, so the new cert must strictly increase it.
    existing_cert = config.CONFIG["Frontend.certificate"]
    serial_number = existing_cert.GetSerialNumber() + 1
    EPrint("Generating new server key (%d bits, cn '%s', serial # %d)" %
           (keylength, cn, serial_number))
    server_private_key = rdf_crypto.RSAPrivateKey.GenerateKey(bits=keylength)
    # Sign the new server certificate with the existing CA key and cert.
    server_cert = key_utils.MakeCASignedCert(
        str(cn),
        server_private_key,
        ca_certificate,
        ca_private_key,
        serial_number=serial_number)
    EPrint("Updating configuration.")
    # Persist both the public certificate and the private key, then flush
    # the config to disk -- the running frontends pick it up on restart.
    config.CONFIG.Set("Frontend.certificate", server_cert.AsPEM())
    config.CONFIG.Set("PrivateKeys.server_key", server_private_key.AsPEM())
    config.CONFIG.Write()
    EPrint("Server key rotated, please restart the GRR Frontends.")
|
This function creates and installs a new server key.
Note that
- Clients might experience intermittent connection problems after
the server keys rotated.
- It's not possible to go back to an earlier key. Clients that see a
new certificate will remember the cert's serial number and refuse
to accept any certificate with a smaller serial number from that
point on.
Args:
cn: The common name for the server to use.
keylength: Length in bits for the new server key.
Raises:
ValueError: There is no CA cert in the config. Probably the server
still needs to be initialized.
|
def dynamics(start, end=None):
    """
    Apply dynamics to a sequence. If end is specified, it will crescendo or diminuendo linearly from start to end dynamics.
    You can pass any of these strings as dynamic markers: ['pppppp', 'ppppp', 'pppp', 'ppp', 'pp', 'p', 'mp', 'mf', 'f', 'ff', 'fff', 'ffff']

    Args:
        start: beginning dynamic marker, if no end is specified all notes will get this marker
        end: ending dynamic marker, if unspecified the entire sequence will get the start dynamic marker

    Example usage:
        s1 | dynamics('p') # play a sequence in piano
        s2 | dynamics('p', 'ff') # crescendo from p to ff
        s3 | dynamics('ff', 'p') # diminuendo from ff to p
    """
    def _(sequence):
        if start in _dynamic_markers_to_velocity:
            start_velocity = _dynamic_markers_to_velocity[start]
            start_marker = start
        else:
            raise ValueError("Unknown start dynamic: %s, must be in %s" % (start, _dynamic_markers_to_velocity.keys()))
        if end is None:
            end_velocity = start_velocity
            end_marker = start_marker
        elif end in _dynamic_markers_to_velocity:
            end_velocity = _dynamic_markers_to_velocity[end]
            end_marker = end
        else:
            # BUG FIX: the original interpolated `start` here, so the error
            # for an unknown *end* dynamic reported the wrong marker.
            raise ValueError("Unknown end dynamic: %s, must be in %s" % (end, _dynamic_markers_to_velocity.keys()))
        # Work on a copy of the points so the caller's sequence is untouched.
        retval = sequence.__class__([Point(point) for point in sequence._elements])
        # Linear velocity ramp from start to end across the whole sequence.
        velocity_interval = (float(end_velocity) - float(start_velocity)) / (len(retval) - 1) if len(retval) > 1 else 0
        velocities = [int(start_velocity + velocity_interval * pos) for pos in range(len(retval))]
        # Insert dynamics markers for lilypond rendering.
        # NOTE(review): assumes a non-empty sequence; retval[0] raises on empty input.
        if start_velocity > end_velocity:
            retval[0]["dynamic"] = "diminuendo"
            retval[-1]["dynamic"] = end_marker
        elif start_velocity < end_velocity:
            retval[0]["dynamic"] = "crescendo"
            retval[-1]["dynamic"] = end_marker
        else:
            retval[0]["dynamic"] = start_marker
        for point, velocity in zip(retval, velocities):
            point["velocity"] = velocity
        return retval
    return _
|
Apply dynamics to a sequence. If end is specified, it will crescendo or diminuendo linearly from start to end dynamics.
You can pass any of these strings as dynamic markers: ['pppppp', 'ppppp', 'pppp', 'ppp', 'pp', 'p', 'mp', 'mf', 'f', 'ff', 'fff', 'ffff']
Args:
start: beginning dynamic marker, if no end is specified all notes will get this marker
end: ending dynamic marker, if unspecified the entire sequence will get the start dynamic marker
Example usage:
s1 | dynamics('p') # play a sequence in piano
s2 | dynamics('p', 'ff') # crescendo from p to ff
s3 | dynamics('ff', 'p') # diminuendo from ff to p
|
def public_key(self):
    """
    :return:
        The PublicKey object for the public key this certificate contains
    """
    # Lazily extracted via the macOS Security framework on first access,
    # then cached on self._public_key for subsequent calls.
    if not self._public_key and self.sec_certificate_ref:
        # Out-parameter buffer for SecCertificateCopyPublicKey (SecKeyRef *).
        sec_public_key_ref_pointer = new(Security, 'SecKeyRef *')
        res = Security.SecCertificateCopyPublicKey(self.sec_certificate_ref, sec_public_key_ref_pointer)
        # Raises if the OSStatus result code indicates failure.
        handle_sec_error(res)
        sec_public_key_ref = unwrap(sec_public_key_ref_pointer)
        # Pair the native key handle with the parsed SPKI from the ASN.1 cert.
        self._public_key = PublicKey(sec_public_key_ref, self.asn1['tbs_certificate']['subject_public_key_info'])
    return self._public_key
|
:return:
The PublicKey object for the public key this certificate contains
|
def _get_representative_batch(merged):
    """Prepare dictionary matching batch items to a representative within a group.
    """
    representatives = {}
    for group in merged:
        # Sort so the representative choice is deterministic: the smallest
        # member of each group represents the whole group.
        members = sorted(group)
        leader = members[0]
        for member in members:
            representatives[member] = leader
    return representatives
|
Prepare dictionary matching batch items to a representative within a group.
|
def as_check_request(self, timer=datetime.utcnow):
    """Makes a ``ServicecontrolServicesCheckRequest`` from this instance.

    Args:
        timer: a callable that returns the current time.

    Returns:
        a ``ServicecontrolServicesCheckRequest``

    Raises:
        ValueError: if the fields in this instance are insufficient to
            create a valid ``ServicecontrolServicesCheckRequest``
    """
    # Required fields -- fail fast with a field-specific message.
    required = ((self.service_name, u'the service name must be set'),
                (self.operation_id, u'the operation id must be set'),
                (self.operation_name, u'the operation name must be set'))
    for value, message in required:
        if not value:
            raise ValueError(message)
    op = super(Info, self).as_operation(timer=timer)
    labels = {}
    # Optional labels, added only when the corresponding field is set.
    optional = ((_KNOWN_LABELS.SCC_ANDROID_CERT_FINGERPRINT, self.android_cert_fingerprint),
                (_KNOWN_LABELS.SCC_ANDROID_PACKAGE_NAME, self.android_package_name),
                (_KNOWN_LABELS.SCC_CALLER_IP, self.client_ip),
                (_KNOWN_LABELS.SCC_IOS_BUNDLE_ID, self.ios_bundle_id),
                (_KNOWN_LABELS.SCC_REFERER, self.referer))
    for label, value in optional:
        if value:
            labels[label.label_name] = value
    # Forcibly add system label reporting here, as the base service
    # config does not specify it as a label.
    labels[_KNOWN_LABELS.SCC_SERVICE_AGENT.label_name] = SERVICE_AGENT
    labels[_KNOWN_LABELS.SCC_USER_AGENT.label_name] = USER_AGENT
    op.labels = encoding.PyValueToMessage(
        sc_messages.Operation.LabelsValue, labels)
    return sc_messages.ServicecontrolServicesCheckRequest(
        serviceName=self.service_name,
        checkRequest=sc_messages.CheckRequest(operation=op))
|
Makes a `ServicecontrolServicesCheckRequest` from this instance
Returns:
a ``ServicecontrolServicesCheckRequest``
Raises:
ValueError: if the fields in this instance are insufficient to
to create a valid ``ServicecontrolServicesCheckRequest``
|
def try_rgb(s, default=None):
    """ Try parsing a string into an rgb value (int, int, int),
    where the ints are 0-255 inclusive.
    If a falsy value (None, empty string) is passed, default is returned.
    On failure, InvalidRgb is raised.
    """
    if not s:
        return default
    try:
        # Unpacking also raises ValueError when there are not exactly
        # three comma-separated components, so that case is caught too.
        r, g, b = (int(x.strip()) for x in s.split(','))
    except ValueError:
        raise InvalidRgb(s)
    # Each channel must fall within 0-255 inclusive.
    if not all(in_range(x, 0, 255) for x in (r, g, b)):
        raise InvalidRgb(s)
    return r, g, b
|
Try parsing a string into an rgb value (int, int, int),
where the ints are 0-255 inclusive.
If a falsy value (None, empty string) is passed, default is returned.
On failure, InvalidRgb is raised.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.