| Unnamed: 0 (int64, 0–389k) | code (string, 26–79.6k chars) | docstring (string, 1–46.9k chars) |
|---|---|---|
374,300
|
def create_api_integration(restApiId, resourcePath, httpMethod, integrationType, integrationHttpMethod,
uri, credentials, requestParameters=None, requestTemplates=None,
region=None, key=None, keyid=None, profile=None):
    try:
        credentials = _get_role_arn(credentials, region=region, key=key, keyid=keyid, profile=profile)
        resource = describe_api_resource(restApiId, resourcePath, region=region,
                                         key=key, keyid=keyid, profile=profile).get('resource')
        if resource:
            requestParameters = dict() if requestParameters is None else requestParameters
            requestTemplates = dict() if requestTemplates is None else requestTemplates
            conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
            # OPTIONS methods need no uri or credentials
            if httpMethod.lower() == 'options':
                uri = ""
                credentials = ""
            integration = conn.put_integration(restApiId=restApiId, resourceId=resource['id'], httpMethod=httpMethod,
                                               type=integrationType, integrationHttpMethod=integrationHttpMethod,
                                               uri=uri, credentials=credentials, requestParameters=requestParameters,
                                               requestTemplates=requestTemplates)
            return {'created': True, 'integration': integration}
        return {'created': False, 'error': 'no such resource'}
    except ClientError as e:
        return {'created': False, 'error': __utils__['boto3.get_error'](e)}
|
Creates an integration for a given method in a given API.
If integrationType is MOCK, uri and credential parameters will be ignored.
uri is in the form of (substitute APIGATEWAY_REGION and LAMBDA_FUNC_ARN)
"arn:aws:apigateway:APIGATEWAY_REGION:lambda:path/2015-03-31/functions/LAMBDA_FUNC_ARN/invocations"
credentials is in the form of an IAM role name or role ARN.
CLI Example:
.. code-block:: bash
salt myminion boto_apigateway.create_api_integration restApiId resourcePath httpMethod \\
integrationType integrationHttpMethod uri credentials ['{}' ['{}']]
|
374,301
|
def find_all_checks(self, **kwargs):
checks = self._check_manager.find_all_checks(**kwargs)
for check in checks:
check.set_entity(self)
return checks
|
Finds all checks for this entity with attributes matching ``**kwargs``.
This isn't very efficient: it loads the entire list then filters on
the Python side.
|
374,302
|
def setFlag(self, flag, state=True):
has_flag = self.testFlag(flag)
if has_flag and not state:
self.setFlags(self.flags() ^ flag)
elif not has_flag and state:
self.setFlags(self.flags() | flag)
|
Enables or disables the given flag.
:param flag | <XExporter.Flags>
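A minimal sketch of the same set/clear logic using plain integer flags (the flag values below are hypothetical; ``XExporter.Flags`` is assumed to behave like bit flags):
    FLAG_A, FLAG_B = 0x1, 0x2
    flags = FLAG_A | FLAG_B   # both enabled
    flags ^= FLAG_A           # clear FLAG_A (valid here because FLAG_A was set)
    flags |= FLAG_A           # enable FLAG_A again
    assert flags == FLAG_A | FLAG_B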
|
374,303
|
def _get_face2(shape=None, face_r=1.0, smile_r1=0.5, smile_r2=0.7, eye_r=0.2):
if shape is None:
shape = [32, 32]
center = (np.asarray(shape) - 1) / 2.0
r = np.min(center) * face_r
x, y = np.meshgrid(range(shape[1]), range(shape[0]))
head = (x - center[0]) ** 2 + (y - center[1]) ** 2 < r ** 2
smile = (
((x - center[0]) ** 2 + (y - center[1]) ** 2 < (r * smile_r2) ** 2)
& (y > (center[1] + 0.3 * r))
& ((x - center[0]) ** 2 + (y - center[1]) ** 2 >= (r * smile_r1) ** 2)
)
e1c = center + r * np.array([-0.35, -0.2])
e2c = center + r * np.array([0.35, -0.2])
eyes = (x - e1c[0]) ** 2 + (y - e1c[1]) ** 2 <= (r * eye_r) ** 2
eyes += (x - e2c[0]) ** 2 + (y - e1c[1]) ** 2 <= (r * eye_r) ** 2
face = head & ~smile & ~eyes
return face
|
Create a 2D binary face image.
:param shape: output image shape, e.g. [32, 32]
:param face_r: face radius as a fraction of the image half-size
:param smile_r1: inner radius of the smile arc (fraction of face radius)
:param smile_r2: outer radius of the smile arc (fraction of face radius)
:param eye_r: eye radius (fraction of face radius)
:return: boolean numpy array with the face mask
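A short usage sketch, assuming the ``_get_face2`` helper above is importable and matplotlib is available:
    import matplotlib.pyplot as plt
    face = _get_face2(shape=[64, 64])   # boolean mask, True inside the face
    plt.imshow(face, cmap="gray")
    plt.show()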
|
374,304
|
def init(self, projectname=None, description=None, **kwargs):
self.app_main(**kwargs)
experiments = self.config.experiments
experiment = self._experiment
if experiment is None and not experiments:
experiment = self.name +
elif experiment is None:
try:
experiment = utils.get_next_name(self.experiment)
except ValueError:
raise ValueError(
"Could not estimate an experiment id! Please use the "
"experiment argument to provide an id.")
self.experiment = experiment
if self.is_archived(experiment):
raise ValueError(
"The specified experiment has already been archived! Run "
"``%s -id %s unarchive`` first" % (self.name, experiment))
if projectname is None:
projectname = self.projectname
else:
self.projectname = projectname
self.logger.info("Initializing experiment %s of project %s",
experiment, projectname)
exp_dict = experiments.setdefault(experiment, OrderedDict())
if description is not None:
exp_dict[] = description
exp_dict[] = projectname
exp_dict[] = exp_dir = osp.join(, experiment)
exp_dir = osp.join(self.config.projects[projectname][], exp_dir)
exp_dict[] = OrderedDict()
if not os.path.exists(exp_dir):
self.logger.debug(" Creating experiment directory %s", exp_dir)
os.makedirs(exp_dir)
self.fix_paths(exp_dict)
return exp_dict
|
Initialize a new experiment
Parameters
----------
projectname: str
The name of the project that shall be used. If None, the last one
created will be used
description: str
A short summary of the experiment
``**kwargs``
Keyword arguments passed to the :meth:`app_main` method
Notes
-----
If the experiment is None, a new experiment will be created
|
374,305
|
def problem_id(self, value):
    if value == self._defaults['problem_id'] and 'problem_id' in self._values:
        del self._values['problem_id']
    else:
        self._values['problem_id'] = value
|
The problem_id property.
Args:
value (string): The property value.
|
374,306
|
def separation(sources, fs=22050, labels=None, alpha=0.75, ax=None, **kwargs):
ax, new_axes = __get_axes(ax=ax)
sources = np.atleast_2d(sources)
if labels is None:
        labels = ['Source {:d}'.format(_) for _ in range(len(sources))]
    kwargs.setdefault('scaling', 'spectrum')
cumspec = None
specs = []
for i, src in enumerate(sources):
freqs, times, spec = spectrogram(src, fs=fs, **kwargs)
specs.append(spec)
if cumspec is None:
cumspec = spec.copy()
else:
cumspec += spec
ref_max = cumspec.max()
ref_min = ref_max * 1e-6
color_conv = ColorConverter()
for i, spec in enumerate(specs):
        color = next(ax._get_lines.prop_cycler)['color']
color = color_conv.to_rgba(color, alpha=alpha)
cmap = LinearSegmentedColormap.from_list(labels[i],
[(1.0, 1.0, 1.0, 0.0),
color])
ax.pcolormesh(times, freqs, spec,
cmap=cmap,
norm=LogNorm(vmin=ref_min, vmax=ref_max),
                      shading='gouraud',
label=labels[i])
ax.add_patch(Rectangle((0, 0), 0, 0, color=color, label=labels[i]))
if new_axes:
        ax.axis('tight')
return ax
|
Source-separation visualization
Parameters
----------
sources : np.ndarray, shape=(nsrc, nsampl)
A list of waveform buffers corresponding to each source
fs : number > 0
The sampling rate
labels : list of strings
An optional list of descriptors corresponding to each source
alpha : float in [0, 1]
Maximum alpha (opacity) of spectrogram values.
ax : matplotlib.pyplot.axes
An axis handle on which to draw the spectrograms.
If none is provided, a new set of axes is created.
kwargs
Additional keyword arguments to ``scipy.signal.spectrogram``
Returns
-------
ax
The axis handle for this plot
|
374,307
|
def install_package_to_venv(self):
try:
self.env.install(self.name, force=True, options=["--no-deps"])
except (ve.PackageInstallationException,
ve.VirtualenvReadonlyException):
raise VirtualenvFailException(
)
self.dirs_after_install.fill(self.temp_dir + )
|
Installs package given as first argument to virtualenv without
dependencies
|
374,308
|
def product(pc, service, attrib, sku):
pc.service = service.lower()
pc.sku = sku
pc.add_attributes(attribs=attrib)
click.echo("Service Alias: {0}".format(pc.service_alias))
click.echo("URL: {0}".format(pc.service_url))
click.echo("Region: {0}".format(pc.region))
click.echo("Product Terms: {0}".format(pc.terms))
click.echo("Filtering Attributes: {0}".format(pc.attributes))
prods = pyutu.find_products(pc)
for p in prods:
click.echo("Product SKU: {0} product: {1}".format(
p, json.dumps(prods[p], indent=2, sort_keys=True))
)
click.echo("Total Products Found: {0}".format(len(prods)))
click.echo("Time: {0} secs".format(time.process_time()))
|
Get a list of a service's products.
The list will be in the given region, matching the specific terms and
any given attribute filters or a SKU.
|
374,309
|
def generate_blob(self, container_name, blob_name, permission=None,
expiry=None, start=None, id=None, ip=None, protocol=None,
cache_control=None, content_disposition=None,
content_encoding=None, content_language=None,
content_type=None):
    resource_path = container_name + '/' + blob_name
sas = _SharedAccessHelper()
sas.add_base(permission, expiry, start, ip, protocol)
sas.add_id(id)
    sas.add_resource('b')
sas.add_override_response_headers(cache_control, content_disposition,
content_encoding, content_language,
content_type)
    sas.add_resource_signature(self.account_name, self.account_key, 'blob', resource_path)
return sas.get_token()
|
Generates a shared access signature for the blob.
Use the returned signature with the sas_token parameter of any BlobService.
:param str container_name:
Name of container.
:param str blob_name:
Name of blob.
:param BlobPermissions permission:
The permissions associated with the shared access signature. The
user is restricted to operations allowed by the permissions.
Permissions must be ordered read, write, delete, list.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has been
specified in an associated stored access policy.
:param expiry:
The time at which the shared access signature becomes invalid.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has
been specified in an associated stored access policy. Azure will always
convert values to UTC. If a date is passed in without timezone info, it
is assumed to be UTC.
:type expiry: date or str
:param start:
The time at which the shared access signature becomes valid. If
omitted, start time for this call is assumed to be the time when the
storage service receives the request. Azure will always convert values
to UTC. If a date is passed in without timezone info, it is assumed to
be UTC.
:type start: date or str
:param str id:
A unique value up to 64 characters in length that correlates to a
stored access policy. To create a stored access policy, use
set_blob_service_properties.
:param str ip:
Specifies an IP address or a range of IP addresses from which to accept requests.
If the IP address from which the request originates does not match the IP address
or address range specified on the SAS token, the request is not authenticated.
For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
restricts the request to those IP addresses.
:param str protocol:
Specifies the protocol permitted for a request made. The default value
is https,http. See :class:`~azure.storage.models.Protocol` for possible values.
:param str cache_control:
Response header value for Cache-Control when resource is accessed
using this shared access signature.
:param str content_disposition:
Response header value for Content-Disposition when resource is accessed
using this shared access signature.
:param str content_encoding:
Response header value for Content-Encoding when resource is accessed
using this shared access signature.
:param str content_language:
Response header value for Content-Language when resource is accessed
using this shared access signature.
:param str content_type:
Response header value for Content-Type when resource is accessed
using this shared access signature.
|
374,310
|
def _get_svc_list(service_status):
prefix =
ret = set()
lines = glob.glob(.format(prefix))
for line in lines:
svc = _get_svc(line, service_status)
if svc is not None:
ret.add(svc)
return sorted(ret)
|
Returns all service statuses
|
374,311
|
def modify_fk_constraint(apps, schema_editor):
model = apps.get_model("message_sender", "OutboundSendFailure")
table = model._meta.db_table
with schema_editor.connection.cursor() as cursor:
constraints = schema_editor.connection.introspection.get_constraints(
cursor, table
)
[constraint] = filter(lambda c: c[1]["foreign_key"], constraints.items())
[name, _] = constraint
sql_delete_fk = (
"SET CONSTRAINTS {name} IMMEDIATE; "
"ALTER TABLE {table} DROP CONSTRAINT {name}"
).format(table=schema_editor.quote_name(table), name=schema_editor.quote_name(name))
schema_editor.execute(sql_delete_fk)
field = model.outbound.field
to_table = field.remote_field.model._meta.db_table
to_column = field.remote_field.model._meta.get_field(
field.remote_field.field_name
).column
sql_create_fk = (
"ALTER TABLE {table} ADD CONSTRAINT {name} FOREIGN KEY "
"({column}) REFERENCES {to_table} ({to_column}) "
"ON DELETE CASCADE {deferrable};"
).format(
table=schema_editor.quote_name(table),
name=schema_editor.quote_name(name),
column=schema_editor.quote_name(field.column),
to_table=schema_editor.quote_name(to_table),
to_column=schema_editor.quote_name(to_column),
deferrable=schema_editor.connection.ops.deferrable_sql(),
)
schema_editor.execute(sql_create_fk)
|
Deletes the current foreign key constraint on the outbound field, and adds
it again, but this time with an ON DELETE clause.
|
374,312
|
def inner(a,b):
if sps.issparse(a): return a.dot(b)
else: a = np.asarray(a)
if len(a.shape) == 0: return a*b
if sps.issparse(b):
if len(a.shape) == 1: return b.T.dot(a)
else: return b.T.dot(a.T).T
else: b = np.asarray(b)
if len(b.shape) == 0: return a*b
if len(a.shape) == 1 and len(b.shape) == 2: return np.dot(b.T, a)
else: return np.dot(a,b)
|
inner(a,b) yields the dot product of a and b, doing so in a fashion that respects sparse
matrices when encountered. This does not error check for bad dimensionality.
If a or b is a constant, then the result is just a*b; if a and b are both vectors or both
matrices, then the inner product is dot(a,b); if a is a vector and b is a matrix, this is
equivalent to as if a were a matrix with 1 row; and if a is a matrix and b a vector, this is
equivalent to as if b were a matrix with 1 column.
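A brief usage sketch for ``inner`` as defined above (assumes numpy and scipy.sparse are importable):
    import numpy as np
    import scipy.sparse as sps
    a = np.array([1.0, 2.0])
    M = sps.eye(2).tocsr()
    inner(2.0, a)   # scalar * vector -> array([2., 4.])
    inner(a, M)     # vector . sparse matrix, treating a as a row vector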
|
374,313
|
def train_model(params: Params,
serialization_dir: str,
file_friendly_logging: bool = False,
recover: bool = False,
force: bool = False,
cache_directory: str = None,
cache_prefix: str = None) -> Model:
prepare_environment(params)
create_serialization_dir(params, serialization_dir, recover, force)
stdout_handler = prepare_global_logging(serialization_dir, file_friendly_logging)
    cuda_device = params.params.get('trainer').get('cuda_device', -1)
check_for_gpu(cuda_device)
params.to_file(os.path.join(serialization_dir, CONFIG_NAME))
evaluate_on_test = params.pop_bool("evaluate_on_test", False)
trainer_type = params.get("trainer", {}).get("type", "default")
if trainer_type == "default":
pieces = TrainerPieces.from_params(params,
serialization_dir,
recover,
cache_directory,
cache_prefix)
trainer = Trainer.from_params(
model=pieces.model,
serialization_dir=serialization_dir,
iterator=pieces.iterator,
train_data=pieces.train_dataset,
validation_data=pieces.validation_dataset,
params=pieces.params,
validation_iterator=pieces.validation_iterator)
evaluation_iterator = pieces.validation_iterator or pieces.iterator
evaluation_dataset = pieces.test_dataset
else:
trainer = TrainerBase.from_params(params, serialization_dir, recover)
evaluation_iterator = evaluation_dataset = None
    params.assert_empty('base train command')
try:
metrics = trainer.train()
except KeyboardInterrupt:
if os.path.exists(os.path.join(serialization_dir, _DEFAULT_WEIGHTS)):
logging.info("Training interrupted by the user. Attempting to create "
"a model archive using the current best epoch weights.")
archive_model(serialization_dir, files_to_archive=params.files_to_archive)
raise
if evaluation_dataset and evaluate_on_test:
logger.info("The model will be evaluated using the best epoch weights.")
        test_metrics = evaluate(trainer.model, evaluation_dataset, evaluation_iterator,
                                cuda_device=trainer._cuda_devices[0])
        for key, value in test_metrics.items():
            metrics["test_" + key] = value
    cleanup_global_logging(stdout_handler)
archive_model(serialization_dir, files_to_archive=params.files_to_archive)
dump_metrics(os.path.join(serialization_dir, "metrics.json"), metrics, log=True)
return trainer.model
|
Trains the model specified in the given :class:`Params` object, using the data and training
parameters also specified in that object, and saves the results in ``serialization_dir``.
Parameters
----------
params : ``Params``
A parameter object specifying an AllenNLP Experiment.
serialization_dir : ``str``
The directory in which to save results and logs.
file_friendly_logging : ``bool``, optional (default=False)
If ``True``, we add newlines to tqdm output, even on an interactive terminal, and we slow
down tqdm's output to only once every 10 seconds.
recover : ``bool``, optional (default=False)
If ``True``, we will try to recover a training run from an existing serialization
directory. This is only intended for use when something actually crashed during the middle
of a run. For continuing training a model on new data, see the ``fine-tune`` command.
force : ``bool``, optional (default=False)
If ``True``, we will overwrite the serialization directory if it already exists.
cache_directory : ``str``, optional
For caching data pre-processing. See :func:`allennlp.training.util.datasets_from_params`.
cache_prefix : ``str``, optional
For caching data pre-processing. See :func:`allennlp.training.util.datasets_from_params`.
Returns
-------
best_model: ``Model``
The model with the best epoch weights.
|
374,314
|
def set_euk_hmm(self, args):
if hasattr(args, ):
pass
elif not hasattr(args, ):
setattr(args, , os.path.join(os.path.dirname(inspect.stack()[-1][1]),,, ))
else:
raise Exception()
|
Set the hmm used by graftM to cross check for euks.
|
374,315
|
def send(self, request, headers=None, content=None, **kwargs):
if headers:
request.headers.update(headers)
if not request.files and request.data is None and content is not None:
request.add_content(content)
response = None
    kwargs.setdefault('stream', True)
try:
pipeline_response = self.config.pipeline.run(request, **kwargs)
response = pipeline_response.http_response.internal_response
response._universal_http_response = pipeline_response.http_response
response.context = pipeline_response.context
return response
finally:
        self._close_local_session_if_necessary(response, kwargs['stream'])
|
Prepare and send request object according to configuration.
:param ClientRequest request: The request object to be sent.
:param dict headers: Any headers to add to the request.
:param content: Any body data to add to the request.
:param config: Any specific config overrides
|
374,316
|
def cursor_position_changed(self):
if self.bracepos is not None:
self.__highlight(self.bracepos, cancel=True)
self.bracepos = None
cursor = self.textCursor()
if cursor.position() == 0:
return
cursor.movePosition(QTextCursor.PreviousCharacter,
QTextCursor.KeepAnchor)
text = to_text_string(cursor.selectedText())
pos1 = cursor.position()
    if text in (')', ']', '}'):
        pos2 = self.find_brace_match(pos1, text, forward=False)
    elif text in ('(', '[', '{'):
        pos2 = self.find_brace_match(pos1, text, forward=True)
else:
return
if pos2 is not None:
self.bracepos = (pos1, pos2)
self.__highlight(self.bracepos, color=self.matched_p_color)
else:
self.bracepos = (pos1,)
self.__highlight(self.bracepos, color=self.unmatched_p_color)
|
Brace matching
|
374,317
|
def convertMzml(mzmlPath, outputDirectory=None):
outputDirectory = outputDirectory if outputDirectory is not None else os.path.dirname(mzmlPath)
msrunContainer = importMzml(mzmlPath)
msrunContainer.setPath(outputDirectory)
msrunContainer.save()
|
Imports an mzml file and converts it to a MsrunContainer file
:param mzmlPath: path of the mzml file
:param outputDirectory: directory where the MsrunContainer file should be written
if it is not specified, the output directory is set to the mzml file's directory.
|
374,318
|
def page(self, recurring=values.unset, trigger_by=values.unset,
usage_category=values.unset, page_token=values.unset,
page_number=values.unset, page_size=values.unset):
    params = values.of({
        'Recurring': recurring,
        'TriggerBy': trigger_by,
        'UsageCategory': usage_category,
        'PageToken': page_token,
        'Page': page_number,
        'PageSize': page_size,
    })
    response = self._version.page(
        'GET',
        self._uri,
        params=params,
    )
return TriggerPage(self._version, response, self._solution)
|
Retrieve a single page of TriggerInstance records from the API.
Request is executed immediately
:param TriggerInstance.Recurring recurring: The frequency of recurring UsageTriggers to read
:param TriggerInstance.TriggerField trigger_by: The trigger field of the UsageTriggers to read
:param TriggerInstance.UsageCategory usage_category: The usage category of the UsageTriggers to read
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of TriggerInstance
:rtype: twilio.rest.api.v2010.account.usage.trigger.TriggerPage
|
374,319
|
def sort(self, *sorting, **kwargs):
sorting_ = []
for name, desc in sorting:
field = self.meta.model._meta.fields.get(name)
if field is None:
continue
if desc:
field = field.desc()
sorting_.append(field)
if sorting_:
return self.collection.order_by(*sorting_)
return self.collection
|
Sort resources.
|
374,320
|
def get_destination(self, filepath, targetdir=None):
    dst = self.change_extension(filepath, 'css')
if targetdir:
dst = os.path.join(targetdir, dst)
return dst
|
Return destination path from given source file path.
Destination is always a file with extension ``.css``.
Args:
    filepath (str): A file path. The path is always relative to the
        sources directory. If not relative, ``targetdir`` won't be
        joined.
    targetdir (str): Directory path joined at the beginning of the
        destination file path.
Returns:
str: Destination filepath.
|
374,321
|
def copy_function(func, name=None):
code = func.__code__
newname = name or func.__name__
newcode = CodeType(
code.co_argcount,
code.co_kwonlyargcount,
code.co_nlocals,
code.co_stacksize,
code.co_flags,
code.co_code,
code.co_consts,
code.co_names,
code.co_varnames,
code.co_filename,
newname,
code.co_firstlineno,
code.co_lnotab,
code.co_freevars,
code.co_cellvars,
)
newfunc = FunctionType(
newcode,
func.__globals__,
newname,
func.__defaults__,
func.__closure__,
)
newfunc.__dict__.update(func.__dict__)
return newfunc
|
Copy a function object with different name.
Args:
func (function): Function to be copied.
name (string, optional): Name of the new function.
If not specified, the same name as `func` will be used.
Returns:
newfunc (function): New function with different name.
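A usage sketch for ``copy_function`` above; note the ``CodeType`` constructor call matches the pre-3.8 CPython signature, so this sketch assumes such an interpreter:
    def greet():
        return "hello"

    greet2 = copy_function(greet, name="greet2")
    assert greet2() == "hello"
    assert greet2.__name__ == "greet2"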
|
374,322
|
def _set_system_mode(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u: {: 0}, u: {: 1}},), is_leaf=True, yang_name="system-mode", rest_name="system-mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u: {u: u, u: u, u: None}}, namespace=, defining_module=, yang_type=, is_config=True)
except (TypeError, ValueError):
raise ValueError({
: ,
: "brocade-hardware:system-mode-type",
: ,
})
self.__system_mode = t
if hasattr(self, ):
self._set()
|
Setter method for system_mode, mapped from YANG variable /hardware/system_mode (system-mode-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_system_mode is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_system_mode() directly.
|
374,323
|
def _wpad(l, windowsize, stepsize):
if l <= windowsize:
return windowsize
nsteps = ((l // stepsize) * stepsize)
overlap = (windowsize - stepsize)
if overlap:
return nsteps + overlap
diff = (l - nsteps)
left = max(0, windowsize - diff)
return l + left if diff else l
|
Parameters
l - The length of the input array
windowsize - the size of each window of samples
stepsize - the number of samples to move the window each step
Returns
The length the input array should be so that no samples are leftover
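A worked example of the padding arithmetic above: with ``windowsize=8`` and ``stepsize=4``, windows start at 0 and 4, so an input of length 10 must be padded to 12 for the last window to be complete:
    assert _wpad(10, windowsize=8, stepsize=4) == 12
    assert _wpad(5, windowsize=8, stepsize=4) == 8   # shorter than one window -> pad to windowsize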
|
374,324
|
def read(self, gpio):
res = yield from self._pigpio_aio_command(_PI_CMD_READ, gpio, 0)
return _u2i(res)
|
Returns the GPIO level.
gpio:= 0-53.
...
yield from pi.set_mode(23, pigpio.INPUT)
yield from pi.set_pull_up_down(23, pigpio.PUD_DOWN)
print(yield from pi.read(23))
0
yield from pi.set_pull_up_down(23, pigpio.PUD_UP)
print(yield from pi.read(23))
1
...
|
374,325
|
def update_asset_browser(self, project, releasetype):
if project is None:
self.assetbrws.set_model(None)
return
assetmodel = self.create_asset_model(project, releasetype)
self.assetbrws.set_model(assetmodel)
|
update the assetbrowser to the given project
:param releasetype: the releasetype for the model
:type releasetype: :data:`djadapter.RELEASETYPES`
:param project: the project of the assets
:type project: :class:`djadapter.models.Project`
:returns: None
:rtype: None
:raises: None
|
374,326
|
def construct_graph(sakefile, settings):
verbose = settings["verbose"]
sprint = settings["sprint"]
G = nx.DiGraph()
sprint("Going to construct Graph", level="verbose")
for target in sakefile:
if target == "all":
matches = check_for_dep_in_outputs(dep, verbose, G)
if not matches:
continue
for match in matches:
sprint("Appending {} to matches".format(match), level="verbose")
connects.append(match)
if connects:
for connect in connects:
G.add_edge(connect, node[0])
return G
|
Takes the sakefile dictionary and builds a NetworkX graph
Args:
A dictionary that is the parsed Sakefile (from sake.py)
The settings dictionary
Returns:
A NetworkX graph
|
374,327
|
def report(ctx, board, done, output):
    ctx.obj['board'] = board
ts = TrelloStats(ctx.obj)
ct = cycle_time(ts, board, done)
env = get_env()
    render_functions = [target for target in globals()
                        if target.startswith("render_") and
                        target.endswith(output)]
    for render_func in render_functions:
        print(globals()[render_func](env, **dict(cycle_time=ct)))
|
Reporting mode - Daily snapshots of a board for ongoing reporting:
-> trellis report --board=87hiudhw
--spend
--revenue
--done=Done
|
374,328
|
def VerifyRow(self, parser_mediator, row):
    if row['md5'] != '0' and not self._MD5_RE.match(row['md5']):
        return False
    # columns expected to hold integer values in a mactime bodyfile row
    for column_name in (
        'uid', 'gid', 'size', 'atime', 'mtime', 'ctime', 'crtime'):
column_value = row.get(column_name, None)
if not column_value:
continue
try:
int(column_value, 10)
except (TypeError, ValueError):
return False
return True
|
Verifies if a line of the file is in the expected format.
Args:
parser_mediator (ParserMediator): mediates interactions between
parsers and other components, such as storage and dfvfs.
row (dict[str, str]): fields of a single row, as specified in COLUMNS.
Returns:
bool: True if this is the correct parser, False otherwise.
|
374,329
|
def calculate_r_matrices(fine_states, reduced_matrix_elements, q=None,
numeric=True, convention=1):
    magnetic_states = make_list_of_states(fine_states, 'magnetic', verbose=0)
aux = calculate_boundaries(fine_states, magnetic_states)
index_list_fine, index_list_hyperfine = aux
Ne = len(magnetic_states)
r = [[[0 for j in range(Ne)] for i in range(Ne)] for p in range(3)]
II = fine_states[0].i
for p in [-1, 0, 1]:
for i in range(Ne):
ei = magnetic_states[i]
ii = fine_index(i, index_list_fine)
for j in range(Ne):
ej = magnetic_states[j]
jj = fine_index(j, index_list_fine)
reduced_matrix_elementij = reduced_matrix_elements[ii][jj]
if reduced_matrix_elementij != 0:
ji = ei.j; jj = ej.j
fi = ei.f; fj = ej.f
mi = ei.m; mj = ej.m
rpij = matrix_element(ji, fi, mi, jj, fj, mj,
II, reduced_matrix_elementij, p,
numeric=numeric,
convention=convention)
if q == 1:
r[p+1][i][j] = rpij*delta_lesser(i, j)
elif q == -1:
r[p+1][i][j] = rpij*delta_greater(i, j)
else:
r[p+1][i][j] = rpij
if not numeric:
r = [Matrix(ri) for ri in r]
return r
|
Calculate the matrix elements of the electric dipole (in the helicity
basis).
We calculate all matrix elements for the D2 line in Rb 87.
>>> from sympy import symbols, pprint
>>> red = symbols("r", positive=True)
>>> reduced_matrix_elements = [[0, -red], [red, 0]]
>>> g = State("Rb", 87, 5, 0, 1/Integer(2))
>>> e = State("Rb", 87, 5, 1, 3/Integer(2))
>>> fine_levels = [g, e]
>>> r = calculate_r_matrices(fine_levels, reduced_matrix_elements,
... numeric=False)
>>> pprint(r[0][8:,:8])
    (pretty-printed sympy matrix of dipole matrix elements, each a multiple of r)
    >>> pprint(r[1][8:,:8])
    (pretty-printed sympy matrix of dipole matrix elements, each a multiple of r)
    >>> pprint(r[2][8:,:8])
    (pretty-printed sympy matrix of dipole matrix elements, each a multiple of r)
|
374,330
|
def download_file(save_path, file_url):
r = requests.get(file_url)
    with open(save_path, 'wb') as f:
f.write(r.content)
return save_path
|
Download a file from an HTTP URL and save it to ``save_path``.
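A usage sketch (requires the ``requests`` package; the URL and target path are illustrative):
    path = download_file("/tmp/example.html", "https://example.org/")
    print(path)   # "/tmp/example.html"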
|
374,331
|
def _on(on_signals, callback, max_calls=None):
if not callable(callback):
raise AssertionError()
if not isinstance(on_signals, (list, tuple)):
on_signals = [on_signals]
callback._max_calls = max_calls
for signal in on_signals:
receivers[signal].add(callback)
    if not hasattr(callback, 'responds_to'):
        callback.responds_to = partial(responds_to, callback)
    if not hasattr(callback, 'signals'):
        callback.signals = partial(signals, callback)
    if not hasattr(callback, 'disconnect'):
        callback.disconnect = partial(disconnect, callback)
    if not hasattr(callback, 'disconnect_from'):
        callback.disconnect_from = partial(disconnect_from, callback)
return callback
|
Proxy for `smokesignal.on`, which is compatible as both a function call and
a decorator. This method cannot be used as a decorator
:param signals: A single signal or list/tuple of signals that callback should respond to
:param callback: A callable that should respond to supplied signal(s)
:param max_calls: Integer maximum calls for callback. None for no limit.
|
374,332
|
def _get_model_fitting(self, mf_id):
for model_fitting in self.model_fittings:
if model_fitting.activity.id == mf_id:
return model_fitting
raise Exception("Model fitting activity with id: " + str(mf_id) +
" not found.")
|
Retrieve the model fitting with identifier 'mf_id' from the list of model
fitting objects stored in self.model_fittings.
|
374,333
|
def upload_files(self, abspaths, relpaths, remote_objects):
for relpath in relpaths:
abspath = [p for p in abspaths if p[len(self.file_root):] == relpath][0]
cloud_datetime = remote_objects[relpath] if relpath in remote_objects else None
local_datetime = datetime.datetime.utcfromtimestamp(os.stat(abspath).st_mtime)
if cloud_datetime and local_datetime < cloud_datetime:
self.skip_count += 1
if not self.quiet:
print("Skipped {0}: not modified.".format(relpath))
continue
if relpath in remote_objects:
self.update_count += 1
else:
self.create_count += 1
self.upload_file(abspath, relpath)
|
Determines files to be uploaded and call ``upload_file`` on each.
|
374,334
|
def is_BF_hypergraph(self):
for hyperedge_id in self._hyperedge_attributes:
tail = self.get_hyperedge_tail(hyperedge_id)
head = self.get_hyperedge_head(hyperedge_id)
if len(tail) > 1 and len(head) > 1:
return False
return True
|
Indicates whether the hypergraph is a BF-hypergraph.
A BF-hypergraph consists of only B-hyperedges and F-hyperedges.
See "is_B_hypergraph" or "is_F_hypergraph" for more details.
:returns: bool -- True iff the hypergraph is a BF-hypergraph.
|
374,335
|
def as_sql(self, *args, **kwargs):
CTEQuery._remove_cte_where(self.query)
return super(self.__class__, self).as_sql(*args, **kwargs)
|
Overrides the :class:`SQLUpdateCompiler` method in order to remove any
CTE-related WHERE clauses, which are not necessary for UPDATE queries,
yet may have been added if this query was cloned from a CTEQuery.
:return:
:rtype:
|
374,336
|
def diffusion_correlated(diffusion_constant=0.2, exposure_time=0.05,
samples=40, phi=0.25):
radius = 5
psfsize = np.array([2.0, 1.0, 3.0])/2
pos, rad, tile = nbody.initialize_particles(N=50, phi=phi, polydispersity=0.0)
sim = nbody.BrownianHardSphereSimulation(
pos, rad, tile, D=diffusion_constant, dt=exposure_time/samples
)
sim.dt = 1e-2
sim.relax(2000)
sim.dt = exposure_time/samples
c = ((sim.pos - sim.tile.center())**2).sum(axis=-1).argmin()
pc = sim.pos[c].copy()
sim.pos[c] = sim.pos[0]
sim.pos[0] = pc
    mask = np.zeros_like(sim.rad).astype('bool')
neigh = sim.neighbors(3*radius, 0)
for i in neigh+[0]:
mask[i] = True
img = np.zeros(sim.tile.shape)
s0 = runner.create_state(img, sim.pos, sim.rad, ignoreimage=True)
finalimage = 0*s0.get_model_image()[s0.inner]
position = 0*s0.obj.pos
for i in xrange(samples):
sim.step(1, mask=mask)
s0.obj.pos = sim.pos.copy() + s0.pad
s0.reset()
finalimage += s0.get_model_image()[s0.inner]
position += s0.obj.pos
finalimage /= float(samples)
position /= float(samples)
s = runner.create_state(img, sim.pos, sim.rad, ignoreimage=True)
s.reset()
return s, finalimage, position
|
Calculate the (perhaps) correlated diffusion effect between particles
during the exposure time of the confocal microscope. diffusion_constant is
in terms of seconds and pixel sizes exposure_time is in seconds
1 micron radius particle:
D = kT / (6 a\pi\eta)
for 80/20 g/w (60 mPas), 3600 nm^2/sec ~ 0.15 px^2/sec
for 100 % w (0.9 mPas), ~ 10.1 px^2/sec
a full 60 layer scan takes 0.1 sec, so a particle is 0.016 sec exposure
|
374,337
|
def tmpdir():
target = None
try:
with _tmpdir_extant() as target:
yield target
finally:
if target is not None:
shutil.rmtree(target, ignore_errors=True)
|
Create a tempdir context for the cwd and remove it after.
|
374,338
|
def get_workflow_status_of(brain_or_object, state_var="review_state"):
workflow = get_tool("portal_workflow")
obj = get_object(brain_or_object)
return workflow.getInfoFor(ob=obj, name=state_var)
|
Get the current workflow status of the given brain or context.
:param brain_or_object: A single catalog brain or content object
:type brain_or_object: ATContentType/DexterityContentType/CatalogBrain
:param state_var: The name of the state variable
:type state_var: string
:returns: Status
:rtype: str
|
374,339
|
def gaussian(data, mean, covariance):
dimension = float(len(data[0]))
if dimension != 1.0:
inv_variance = numpy.linalg.pinv(covariance)
else:
inv_variance = 1.0 / covariance
divider = (pi * 2.0) ** (dimension / 2.0) * numpy.sqrt(numpy.linalg.norm(covariance))
if divider != 0.0:
right_const = 1.0 / divider
else:
        right_const = float('inf')
result = []
for point in data:
mean_delta = point - mean
point_gaussian = right_const * numpy.exp( -0.5 * mean_delta.dot(inv_variance).dot(numpy.transpose(mean_delta)) )
result.append(point_gaussian)
return result
|
!
@brief Calculates gaussian for dataset using specified mean (mathematical expectation) and variance or covariance in case
multi-dimensional data.
@param[in] data (list): Data that is used for gaussian calculation.
@param[in] mean (float|numpy.array): Mathematical expectation used for calculation.
@param[in] covariance (float|numpy.array): Variance or covariance matrix for calculation.
@return (list) Value of gaussian function for each point in dataset.
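A small usage sketch for ``gaussian`` above, assuming it is importable from its module and numpy is available:
    import numpy
    data = numpy.array([[0.0, 0.0], [1.0, 1.0]])
    densities = gaussian(data, mean=numpy.array([0.0, 0.0]), covariance=numpy.eye(2))
    print(densities)   # one density value per point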
|
374,340
|
def remove_field(self, name):
field = self.get_field(name)
if field:
        predicat = lambda field: field.get('name') != name
        self.__current_descriptor['fields'] = filter(
            predicat, self.__current_descriptor['fields'])
self.__build()
return field
|
https://github.com/frictionlessdata/tableschema-py#schema
|
374,341
|
def _parse_xmatch_catalog_header(xc, xk):
catdef = []
    if xc.endswith('.gz'):
        infd = gzip.open(xc, 'rb')
    else:
        infd = open(xc, 'rb')
    # the catalog definition lines at the top of the file are prefixed with '#'
    for line in infd:
        if line.decode().startswith('#'):
            catdef.append(
                line.decode().replace('#', '').strip().rstrip('\n')
            )
        if not line.decode().startswith('#'):
            break
    if not len(catdef) > 0:
        LOGERROR("catalog definition not parseable "
                 "for catalog: %s, skipping..." % xc)
        return None
    catdef = ' '.join(catdef)
    catdefdict = json.loads(catdef)
    catdefkeys = [x['key'] for x in catdefdict['columns']]
    catdefdtypes = [x['dtype'] for x in catdefdict['columns']]
    catdefnames = [x['name'] for x in catdefdict['columns']]
    catdefunits = [x['unit'] for x in catdefdict['columns']]
catcolinds = []
catcoldtypes = []
catcolnames = []
catcolunits = []
for xkcol in xk:
if xkcol in catdefkeys:
xkcolind = catdefkeys.index(xkcol)
catcolinds.append(xkcolind)
catcoldtypes.append(catdefdtypes[xkcolind])
catcolnames.append(catdefnames[xkcolind])
catcolunits.append(catdefunits[xkcolind])
return (infd, catdefdict,
catcolinds, catcoldtypes, catcolnames, catcolunits)
|
This parses the header for a catalog file and returns it as a file object.
Parameters
----------
xc : str
The file name of an xmatch catalog prepared previously.
xk : list of str
This is a list of column names to extract from the xmatch catalog.
Returns
-------
tuple
The tuple returned is of the form::
(infd: the file object associated with the opened xmatch catalog,
catdefdict: a dict describing the catalog column definitions,
catcolinds: column number indices of the catalog,
catcoldtypes: the numpy dtypes of the catalog columns,
catcolnames: the names of each catalog column,
catcolunits: the units associated with each catalog column)
|
374,342
|
async def retract(self, mount: top_types.Mount, margin: float):
smoothie_ax = Axis.by_mount(mount).name.upper()
async with self._motion_lock:
smoothie_pos = self._backend.fast_home(smoothie_ax, margin)
self._current_position = self._deck_from_smoothie(smoothie_pos)
|
Pull the specified mount up to its home position.
Works regardless of critical point or home status.
|
374,343
|
def record_iterator(xml):
if hasattr(xml, "read"):
xml = xml.read()
dom = None
try:
dom = dhtmlparser.parseString(xml)
except UnicodeError:
dom = dhtmlparser.parseString(xml.encode("utf-8"))
for record_xml in dom.findB("record"):
yield MARCXMLRecord(record_xml)
|
Iterate over all ``<record>`` tags in `xml`.
Args:
xml (str/file): Input string with XML. UTF-8 is the preferred encoding,
unicode should be ok.
Yields:
MARCXMLRecord: For each corresponding ``<record>``.
|
374,344
|
def set(self, key, val, time=0, min_compress_len=0):
    return self._set("set", key, val, time, min_compress_len)
|
Unconditionally sets a key to a given value in the memcache.
The C{key} can optionally be a tuple, with the first element
being the server hash value and the second being the key.
If you want to avoid making this module calculate a hash value,
you may prefer, for example, to keep all of a given user's objects
on the same memcache server, so you could use the user's unique
id as the hash value.
@return: Nonzero on success.
@rtype: int
@param time: Tells memcached the time which this value should expire, either
as a delta number of seconds, or an absolute unix time-since-the-epoch
value. See the memcached protocol docs section "Storage Commands"
for more info on <exptime>. We default to 0 == cache forever.
@param min_compress_len: The threshold length to kick in auto-compression
of the value using the zlib.compress() routine. If the value being cached is
a string, then the length of the string is measured, else if the value is an
object, then the length of the pickle result is measured. If the resulting
attempt at compression yields a larger string than the input, then it is
discarded. For backwards compatibility, this parameter defaults to 0,
indicating don't ever try to compress.
|
374,345
|
def pick_frequency_line(self, filename, frequency, cumulativefield=):
if resource_exists(, filename):
with closing(resource_stream(, filename)) as b:
g = codecs.iterdecode(b, )
return self._pick_frequency_line(g, frequency, cumulativefield)
else:
with open(filename, encoding=) as g:
return self._pick_frequency_line(g, frequency, cumulativefield)
|
Given a numeric frequency, pick a line from a csv with a cumulative frequency field
|
374,346
|
def add_deviation(self, dev, td=None):
self.deviation = dev
try:
self.compute_position_log(td=td)
except:
self.position = None
return
|
Add a deviation survey to this instance, and try to compute a position
log from it.
|
374,347
|
def publish(self, value):
value = super(Float, self).publish(value)
if isinstance(value, int):
value = float(value)
return value
|
Accepts: float
Returns: float
|
374,348
|
def _set_ipv6_track(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=ipv6_track.ipv6_track, is_container=, presence=False, yang_name="ipv6-track", rest_name="track", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u: {u: u, u: u}}, namespace=, defining_module=, yang_type=, is_config=True)
except (TypeError, ValueError):
raise ValueError({
: ,
: "container",
: ,
})
self.__ipv6_track = t
if hasattr(self, ):
self._set()
|
Setter method for ipv6_track, mapped from YANG variable /rbridge_id/interface/ve/ipv6/ipv6_local_anycast_gateway/ipv6_track (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_ipv6_track is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_ipv6_track() directly.
|
374,349
|
def ctc_symbol_loss(top_out, targets, model_hparams, vocab_size, weight_fn):
del model_hparams, vocab_size
logits = top_out
with tf.name_scope("ctc_loss", values=[logits, targets]):
targets_shape = targets.get_shape().as_list()
assert len(targets_shape) == 4
assert targets_shape[2] == 1
assert targets_shape[3] == 1
targets = tf.squeeze(targets, axis=[2, 3])
logits = tf.squeeze(logits, axis=[2, 3])
targets_mask = 1 - tf.to_int32(tf.equal(targets, 0))
targets_lengths = tf.reduce_sum(targets_mask, axis=1)
sparse_targets = tf.keras.backend.ctc_label_dense_to_sparse(
targets, targets_lengths)
xent = tf.nn.ctc_loss(
sparse_targets,
logits,
targets_lengths,
time_major=False,
preprocess_collapse_repeated=False,
ctc_merge_repeated=False)
weights = weight_fn(targets)
return tf.reduce_sum(xent), tf.reduce_sum(weights)
|
Compute the CTC loss.
|
374,350
|
def addVariantFeature(self,variantFeature):
if isinstance(variantFeature, Feature):
self.features.append(variantFeature)
    else:
        raise TypeError(
            "variantFeature must be a Feature, got %s" % type(variantFeature)
        )
|
Appends one VariantFeature to variantFeatures
|
374,351
|
def child_object(self):
from . import types
child_klass = types.get(self.task_type.split()[1])
return child_klass.retrieve(self.task_id, client=self._client)
|
Get Task child object class
|
374,352
|
def Si_to_pandas_dict(S_dict):
problem = S_dict.problem
    total_order = {
        'ST': S_dict['ST'],
        'ST_conf': S_dict['ST_conf']
    }
    first_order = {
        'S1': S_dict['S1'],
        'S1_conf': S_dict['S1_conf']
    }
    idx = None
    second_order = None
    if 'S2' in S_dict:
        names = problem['names']
        idx = list(combinations(names, 2))
        second_order = {
            'S2': [S_dict['S2'][names.index(i[0]), names.index(i[1])]
                   for i in idx],
            'S2_conf': [S_dict['S2_conf'][names.index(i[0]), names.index(i[1])]
                        for i in idx]
        }
return total_order, first_order, (idx, second_order)
|
Convert Si information into Pandas DataFrame compatible dict.
Parameters
----------
S_dict : ResultDict
Sobol sensitivity indices
See Also
----------
Si_list_to_dict
Returns
----------
tuple : of total, first, and second order sensitivities.
Total and first order are dicts.
Second order sensitivities contain a tuple of parameter name
combinations for use as the DataFrame index and second order
sensitivities.
If no second order indices found, then returns tuple of (None, None)
Examples
--------
>>> X = saltelli.sample(problem, 1000)
>>> Y = Ishigami.evaluate(X)
>>> Si = sobol.analyze(problem, Y, print_to_console=True)
>>> T_Si, first_Si, (idx, second_Si) = sobol.Si_to_pandas_dict(Si, problem)
|
374,353
|
def configure_logger(glob, multi_level,
relative=False, logfile=None, syslog=False):
levels = [logging.ERROR, logging.WARNING, logging.INFO, logging.DEBUG] \
if multi_level else [logging.INFO, logging.DEBUG]
try:
verbose = min(int(glob[].verbose), 3)
except AttributeError:
verbose = 0
glob[]._debug_level = dl = levels[verbose]
logger.handlers = []
glob[] = logger
handler = logging.StreamHandler()
formatter = logging.Formatter(glob[], glob[])
handler.setFormatter(formatter)
glob[].addHandler(handler)
glob[].setLevel(dl)
if relative:
coloredlogs.ColoredFormatter = RelativeTimeColoredFormatter
coloredlogs.install(dl,
logger=glob[],
fmt=glob[],
datefmt=glob[],
milliseconds=glob[],
syslog=syslog,
stream=logfile)
|
Logger configuration function for setting either a simple debug mode or a
multi-level one.
:param glob: globals dictionary
:param multi_level: boolean telling if multi-level debug is to be considered
:param relative: use relative time for the logging messages
:param logfile: log file to be saved (None means do not log to file)
:param syslog: enable logging to /var/log/syslog
|
374,354
|
def reciprocal_rank(
model,
test_interactions,
train_interactions=None,
user_features=None,
item_features=None,
preserve_rows=False,
num_threads=1,
check_intersections=True,
):
if num_threads < 1:
raise ValueError("Number of threads must be 1 or larger.")
ranks = model.predict_rank(
test_interactions,
train_interactions=train_interactions,
user_features=user_features,
item_features=item_features,
num_threads=num_threads,
check_intersections=check_intersections,
)
ranks.data = 1.0 / (ranks.data + 1.0)
ranks = np.squeeze(np.array(ranks.max(axis=1).todense()))
if not preserve_rows:
ranks = ranks[test_interactions.getnnz(axis=1) > 0]
return ranks
|
Measure the reciprocal rank metric for a model: 1 / the rank of the highest
ranked positive example. A perfect score is 1.0.
Parameters
----------
model: LightFM instance
the fitted model to be evaluated
test_interactions: np.float32 csr_matrix of shape [n_users, n_items]
Non-zero entries representing known positives in the evaluation set.
train_interactions: np.float32 csr_matrix of shape [n_users, n_items], optional
Non-zero entries representing known positives in the train set. These
will be omitted from the score calculations to avoid re-recommending
known positives.
user_features: np.float32 csr_matrix of shape [n_users, n_user_features], optional
Each row contains that user's weights over features.
item_features: np.float32 csr_matrix of shape [n_items, n_item_features], optional
Each row contains that item's weights over features.
preserve_rows: boolean, optional
When False (default), the number of rows in the output will be equal
to the number of users with interactions in the evaluation set.
When True, the number of rows in the output will be equal to the
number of users.
num_threads: int, optional
Number of parallel computation threads to use. Should
not be higher than the number of physical cores.
check_intersections: bool, optional, True by default,
Only relevant when train_interactions are supplied.
A flag that signals whether the test and train matrices should be checked
for intersections to prevent optimistic ranks / wrong evaluation / bad data split.
Returns
-------
np.array of shape [n_users with interactions or n_users,]
Numpy array containing reciprocal rank scores for each user.
If there are no interactions for a given user the returned value will
be 0.0.
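A usage sketch with LightFM's bundled MovieLens loader (assumes the ``lightfm`` package is installed; the hyperparameters are illustrative):
    from lightfm import LightFM
    from lightfm.datasets import fetch_movielens
    from lightfm.evaluation import reciprocal_rank

    data = fetch_movielens(min_rating=4.0)
    model = LightFM(loss="warp")
    model.fit(data["train"], epochs=5, num_threads=2)
    print(reciprocal_rank(model, data["test"], train_interactions=data["train"]).mean())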
|
374,355
|
def xack(self, stream, group_name, id, *ids):
    return self.execute(b'XACK', stream, group_name, id, *ids)
|
Acknowledge a message for a given consumer group
|
374,356
|
def filter(self, value, model=None, context=None):
value = str(value)
return bleach.clean(text=value, **self.bleach_params)
|
Filter
Performs value filtering and returns filtered result.
:param value: input value
:param model: parent model being validated
:param context: object, filtering context
:return: filtered value
|
374,357
|
def shorten_duplicate_content_url(url):
    if '#' in url:
        url = url.split('#', 1)[0]
    if url.endswith('index.html'):
        return url[:-10]
    if url.endswith('index.htm'):
        return url[:-9]
return url
|
Remove anchor part and trailing index.html from URL.
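A quick check of the behaviour described above (the URL is illustrative):
    assert shorten_duplicate_content_url("https://example.org/docs/index.html#intro") == "https://example.org/docs/"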
|
374,358
|
def select_data(db_file, slab=None, facet=None):
con = sql.connect(db_file)
cur = con.cursor()
if slab and facet:
select_command = \
+str(facet)++slab+
elif slab and not facet:
select_command = \
+slab+
else:
select_command =
cur.execute(select_command)
data = cur.fetchall()
return(data)
|
Gathers relevant data from SQL database generated by CATHUB.
Parameters
----------
db_file : Path to database
slab : Which metal (slab) to select.
facet : Which facets to select.
Returns
-------
data : SQL cursor output.
|
374,359
|
def visit_dictcomp(self, node, parent):
newnode = nodes.DictComp(node.lineno, node.col_offset, parent)
newnode.postinit(
self.visit(node.key, newnode),
self.visit(node.value, newnode),
[self.visit(child, newnode) for child in node.generators],
)
return newnode
|
visit a DictComp node by returning a fresh instance of it
|
374,360
|
def pot_to_requiv_contact(pot, q, sma, compno=1):
return ConstraintParameter(pot._bundle, "pot_to_requiv_contact({}, {}, {}, {})".format(_get_expr(pot), _get_expr(q), _get_expr(sma), compno))
|
TODO: add documentation
|
374,361
|
def _get_es_version(self, config):
    try:
        data = self._get_data(config.url, config, send_sc=False)
        version = [int(p) for p in data['version']['number'].split('.')]
    except Exception as e:
        self.warning("Error while trying to get Elasticsearch version: %s" % str(e))
        version = [1, 0, 0]
    self.log.debug("Elasticsearch version is %s" % version)
    return version
|
Get the running version of elasticsearch.
|
374,362
|
def connect(self):
if (self.__ser is not None):
serial = importlib.import_module("serial")
if self.__stopbits == 0:
self.__ser.stopbits = serial.STOPBITS_ONE
elif self.__stopbits == 1:
self.__ser.stopbits = serial.STOPBITS_TWO
elif self.__stopbits == 2:
self.__ser.stopbits = serial.STOPBITS_ONE_POINT_FIVE
if self.__parity == 0:
self.__ser.parity = serial.PARITY_EVEN
elif self.__parity == 1:
self.__ser.parity = serial.PARITY_ODD
elif self.__parity == 2:
self.__ser.parity = serial.PARITY_NONE
self.__ser = serial.Serial(self.serialPort, self.__baudrate, timeout=self.__timeout, parity=self.__ser.parity, stopbits=self.__ser.stopbits, xonxoff=0, rtscts=0)
self.__ser.writeTimeout = self.__timeout
if (self.__tcpClientSocket is not None):
self.__tcpClientSocket.settimeout(5)
self.__tcpClientSocket.connect((self.__ipAddress, self.__port))
self.__connected = True
self.__thread = threading.Thread(target=self.__listen, args=())
self.__thread.start()
|
Connects to a Modbus-TCP Server or a Modbus-RTU Slave with the given Parameters
|
374,363
|
def search(self):
self.q(css=).click()
GitHubSearchResultsPage(self.browser).wait_for_page()
|
Click on the Search button and wait for the
results page to be displayed
|
374,364
|
def exact_anniversaries(frequency, anniversary, start, finish):
if frequency != DATE_FREQUENCY_MONTHLY:
        raise DateFrequencyError("Only monthly date frequency is supported - not %s" % (frequency,))
if start.day != anniversary:
return False
periods = 0
current = start
while current <= finish:
period_end = current + relativedelta(months=+1, days=-1)
if period_end <= finish:
periods += 1
else:
return False
current = current + relativedelta(months=+1)
return periods
|
Returns the number of exact anniversaries if start and finish represent an anniversary.
e.g.
exact_anniversaries(DATE_FREQUENCY_MONTHLY, 10, date(2012, 2, 10), date(2012, 3, 9)) returns 1
exact_anniversaries(DATE_FREQUENCY_MONTHLY, 10, date(2012, 2, 10), date(2012, 4, 9)) returns 2
|
374,365
|
def list_scheduled_queries(self):
url = .format(
account_id=self.account_id)
return self._api_get(url=url).get()
|
List all scheduled_queries
:return: A list of all scheduled query dicts
:rtype: list of dict
:raises: This will raise a
:class:`ServerException<logentries_api.exceptions.ServerException>`
if there is an error from Logentries
|
374,366
|
def write_records(records, output_file, split=False):
if split:
for record in records:
with open(
"{}{}.fa".format(output_file, record.id), "w"
) as record_handle:
SeqIO.write(record, record_handle, "fasta")
else:
SeqIO.write(records, output_file, "fasta")
|
Write FASTA records
Write a FASTA file from an iterable of records.
Parameters
----------
records : iterable
Input records to write.
output_file : file, str or pathlib.Path
Output FASTA file to be written into.
split : bool, optional
If True, each record is written into its own separate file. Default is
False.
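A usage sketch (requires Biopython; the record contents and file names are illustrative):
    from Bio.Seq import Seq
    from Bio.SeqRecord import SeqRecord

    records = [SeqRecord(Seq("ACGT"), id="seq1", description=""),
               SeqRecord(Seq("GGTA"), id="seq2", description="")]
    write_records(records, "out.fa")            # one multi-record FASTA file
    write_records(records, "out_", split=True)  # writes out_seq1.fa and out_seq2.fa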
|
374,367
|
def dendrogram(adata: AnnData, groupby: str,
n_pcs: Optional[int]=None,
use_rep: Optional[str]=None,
var_names: Optional[List[str]]=None,
use_raw: Optional[bool]=None,
               cor_method: Optional[str]='pearson',
               linkage_method: Optional[str]='complete',
key_added: Optional[str]=None) -> None:
    if groupby not in adata.obs_keys():
        raise ValueError('groupby has to be a valid observation. '
                         'Given value: {}, valid observations: {}'.format(groupby, adata.obs_keys()))
    if not is_categorical_dtype(adata.obs[groupby]):
        raise ValueError('groupby has to be a categorical observation. '
                         'Given value: {}, column type: {}'.format(groupby, adata.obs[groupby].dtype))
if var_names is None:
rep_df = pd.DataFrame(choose_representation(adata, use_rep=use_rep, n_pcs=n_pcs))
rep_df.set_index(adata.obs[groupby], inplace=True)
categories = rep_df.index.categories
else:
if use_raw is None and adata.raw is not None: use_raw = True
gene_names = adata.raw.var_names if use_raw else adata.var_names
from ..plotting._anndata import _prepare_dataframe
categories, rep_df = _prepare_dataframe(adata, gene_names, groupby, use_raw)
if key_added is None:
        key_added = 'dendrogram_' + groupby
    logg.info('Storing dendrogram info in `.uns[{!r}]`'.format(key_added))
mean_df = rep_df.groupby(level=0).mean()
import scipy.cluster.hierarchy as sch
corr_matrix = mean_df.T.corr(method=cor_method)
z_var = sch.linkage(corr_matrix, method=linkage_method)
dendro_info = sch.dendrogram(z_var, labels=categories, no_plot=True)
    categories_idx_ordered = dendro_info['leaves']
    adata.uns[key_added] = {'linkage': z_var,
                            'groupby': groupby,
                            'use_rep': use_rep,
                            'cor_method': cor_method,
                            'linkage_method': linkage_method,
                            'categories_idx_ordered': categories_idx_ordered,
                            'dendrogram_info': dendro_info,
                            'correlation_matrix': corr_matrix.values}
|
Computes a hierarchical clustering for the given `groupby` categories.
By default, the PCA representation is used unless `.X` has less than 50 variables.
Alternatively, a list of `var_names` (e.g. genes) can be given.
Average values of either `var_names` or components are used to compute a correlation matrix.
The hierarchical clustering can be visualized using `sc.pl.dendrogram` or multiple other
visualizations that can include a dendrogram: `matrixplot`, `heatmap`, `dotplot` and `stacked_violin`
.. note::
The computation of the hierarchical clustering is based on predefined groups and not
per cell. The correlation matrix is computed using by default pearson but other methods
are available.
Parameters
----------
adata : :class:`~anndata.AnnData`
Annotated data matrix
{n_pcs}
{use_rep}
var_names : `list of str` (default: None)
List of var_names to use for computing the hierarchical clustering. If `var_names` is given,
then `use_rep` and `n_pcs` is ignored.
use_raw : `bool`, optional (default: None)
Only when `var_names` is not None. Use `raw` attribute of `adata` if present.
cor_method : `str`, optional (default: `"pearson"`)
correlation method to use. Options are 'pearson', 'kendall', and 'spearman'
linkage_method : `str`, optional (default: `"complete"`)
linkage method to use. See https://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.linkage.html
for more information.
key_added : `str`, optional (default: `None`)
By default, the dendrogram information is added to `.uns['dendrogram_' + groupby]`. Notice
that the `groupby` information is added to the dendrogram.
Returns
-------
adata.uns['dendrogram'] (or instead of 'dendrogram' the value selected for `key_added`) is updated
with the dendrogram information
Examples
--------
>>> adata = sc.datasets.pbmc68k_reduced()
>>> sc.tl.dendrogram(adata, groupby='bulk_labels')
>>> sc.pl.dendrogram(adata)
>>> sc.pl.dotplot(adata, ['C1QA', 'PSAP', 'CD79A', 'CD79B', 'CST3', 'LYZ'],
... groupby='bulk_labels', dendrogram=True)
|
374,368
|
def conditions_list(self, conkey):
L = []
keys = [k for k in self.conditions if k.startswith(conkey)]
if not keys:
raise KeyError(conkey)
for k in keys:
if self.conditions[k] is None:
continue
raw = self.conditions[k]
L.append(raw)
return L
|
Return a (possibly empty) list of conditions based on
conkey. The conditions are returned raw, not parsed.
conkey: str
for cond<n>, startcond<n> or stopcond<n>, specify only the
prefix. The list will be filled with all matching conditions; entries whose value is None are skipped.
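As a rough usage sketch (the enclosing class is not shown here, so the example builds a minimal stand-in and the condition names are invented):

class _Demo:
    conditions_list = conditions_list        # reuse the function above as a method
    def __init__(self):
        self.conditions = {'startcond1': 'temperature > 300',
                           'startcond2': None,
                           'cond1': 'pressure < 2'}

demo = _Demo()
print(demo.conditions_list('startcond'))     # ['temperature > 300'] -- None entries are skipped
print(demo.conditions_list('cond'))          # ['pressure < 2']
demo.conditions_list('stopcond')             # raises KeyError: no key starts with 'stopcond'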
|
374,369
|
def less(x, y):
x = BigFloat._implicit_convert(x)
y = BigFloat._implicit_convert(y)
return mpfr.mpfr_less_p(x, y)
|
Return True if x < y and False otherwise.
This function returns False whenever x and/or y is a NaN.
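A small hedged usage sketch, assuming this is the module-level `less` from the `bigfloat` package (built-in Python numbers are implicitly converted):

from bigfloat import less

print(less(2, 3))               # True
print(less(3, 2))               # False
print(less(float('nan'), 2))    # False -- any comparison involving NaN is never "less"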
|
374,370
|
def remove_regex(urls, regex):
if not regex:
return urls
if not isinstance(urls, (list, set, tuple)):
urls = [urls]
try:
non_matching_urls = [url for url in urls if not re.search(regex, url)]
except TypeError:
return []
return non_matching_urls
|
Parse a list for non-matches to a regex.
Args:
urls: iterable of urls
regex: string regex to be parsed for
Returns:
list of strings not matching regex
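A brief usage sketch (the URLs are made up for illustration; `re` must be importable for the function itself):

import re  # used internally by remove_regex

urls = ['https://example.com/page',
        'https://example.com/admin/login',
        'https://example.com/blog']
print(remove_regex(urls, r'/admin/'))
# ['https://example.com/page', 'https://example.com/blog']
print(remove_regex(urls, ''))   # falsy regex -> input returned unchanged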
|
374,371
|
def result(self) -> workflow.IntervalGeneratorType:
config = cast(SentenceSegementationConfig, self.config)
index = -1
labels = None
while True:
start = -1
while True:
if labels is None:
try:
index, labels = next(self.index_labels_generator)
except StopIteration:
return
if labels[SentenceValidCharacterLabeler]:
start = index
break
labels = None
index = -1
end = -1
try:
while True:
index, labels = next(self.index_labels_generator)
if config.enable_strict_sentence_charset and \
not labels[SentenceValidCharacterLabeler] and \
not labels[WhitespaceLabeler]:
end = index
break
if self._labels_indicate_sentence_ending(labels):
while True:
index, labels = next(self.index_labels_generator)
is_ending = (self._labels_indicate_sentence_ending(labels) or
(config.extend_ending_with_delimiters and
labels[DelimitersLabeler]))
if not is_ending:
end = index
break
break
except StopIteration:
end = len(self.input_sequence)
labels = None
index = -1
yield start, end
|
Generate intervals indicating the valid sentences.
|
374,372
|
def _parse_file(self, file_obj):
byte_data = file_obj.read(self.size)
self._parse_byte_data(byte_data)
|
Directly read from file handler.
Note that this will move the file pointer.
|
374,373
|
def on_frame(self, frame_in):
if frame_in.name not in self._request:
return False
uuid = self._request[frame_in.name]
if self._response[uuid]:
self._response[uuid].append(frame_in)
else:
self._response[uuid] = [frame_in]
return True
|
On RPC Frame.
:param specification.Frame frame_in: Amqp frame.
:return: True if the frame matched a pending RPC request, otherwise False.
|
374,374
|
def isPairTag(self):
if self.isComment() or self.isNonPairTag():
return False
if self.isEndTag():
return True
if self.isOpeningTag() and self.endtag:
return True
return False
|
Returns:
bool: True if this is pair tag - ``<body> .. </body>`` for example.
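A hedged usage sketch, assuming this method belongs to the element class of the `dhtmlparser` package (as the `<body>` example above suggests):

import dhtmlparser

dom = dhtmlparser.parseString('<body><br /></body>')
print(dom.find('body')[0].isPairTag())   # True  -- <body> ... </body>
print(dom.find('br')[0].isPairTag())     # False -- <br /> is a non-pair tag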
|
374,375
|
def convert_to_equivalent(self, unit, equivalence, **kwargs):
conv_unit = Unit(unit, registry=self.units.registry)
if self.units.same_dimensions_as(conv_unit):
self.convert_to_units(conv_unit)
return
this_equiv = equivalence_registry[equivalence](in_place=True)
if self.has_equivalent(equivalence):
this_equiv.convert(self, conv_unit.dimensions, **kwargs)
self.convert_to_units(conv_unit)
else:
raise InvalidUnitEquivalence(equivalence, self.units, conv_unit)
|
Convert the array in-place to the specified units, assuming
the given equivalency. The dimensions of the specified units and the
dimensions of the original array need not match so long as there is an
appropriate conversion in the specified equivalency.
Parameters
----------
unit : string
The unit that you wish to convert to.
equivalence : string
The equivalence you wish to use. To see which equivalencies are
supported for this unitful quantity, try the
:meth:`list_equivalencies` method.
Examples
--------
>>> from unyt import K
>>> a = [10, 20, 30]*(1e7*K)
>>> a.convert_to_equivalent("keV", "thermal")
>>> a
unyt_array([ 8.6173324, 17.2346648, 25.8519972], 'keV')
|
374,376
|
def build_graph(self, regularizers=()):
key = self._hash(regularizers)
if key not in self._graphs:
        util.log('building computation graph')
for loss in self.losses:
loss.log()
for reg in regularizers:
reg.log()
outputs = {}
updates = []
for layer in self.layers:
out, upd = layer.connect(outputs)
for reg in regularizers:
reg.modify_graph(out)
outputs.update(out)
updates.extend(upd)
self._graphs[key] = outputs, updates
return self._graphs[key]
|
Connect the layers in this network to form a computation graph.
Parameters
----------
regularizers : list of :class:`theanets.regularizers.Regularizer`
A list of the regularizers to apply while building the computation
graph.
Returns
-------
outputs : list of Theano variables
A list of expressions giving the output of each layer in the graph.
updates : list of update tuples
A list of updates that should be performed by a Theano function that
computes something using this graph.
|
374,377
|
async def main(loop):
PYVLXLOG.setLevel(logging.DEBUG)
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.DEBUG)
PYVLXLOG.addHandler(stream_handler)
    pyvlx = PyVLX('pyvlx.yaml', loop=loop)  # assumed config file holding the KLF 200 host and password
await pyvlx.load_scenes()
await pyvlx.load_nodes()
await asyncio.sleep(90)
await pyvlx.disconnect()
|
Log packets from Bus.
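A minimal sketch of how this coroutine is typically driven (standard asyncio boilerplate, not part of the original snippet):

import asyncio

if __name__ == '__main__':
    LOOP = asyncio.get_event_loop()
    LOOP.run_until_complete(main(LOOP))
    LOOP.close()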
|
374,378
|
def best_four_point_to_sell(self):
result = []
if self.check_plus_bias_ratio() and \
(self.best_sell_1() or self.best_sell_2() or self.best_sell_3() or \
self.best_sell_4()):
if self.best_sell_1():
result.append(self.best_sell_1.__doc__.strip())
if self.best_sell_2():
result.append(self.best_sell_2.__doc__.strip())
if self.best_sell_3():
result.append(self.best_sell_3.__doc__.strip())
if self.best_sell_4():
result.append(self.best_sell_4.__doc__.strip())
        result = ', '.join(result)
else:
result = False
return result
|
Determine whether this is one of the four major sell points.
:rtype: str or False
|
374,379
|
def _set_scores(self):
anom_scores = {}
self._generate_SAX()
self._construct_all_SAX_chunk_dict()
length = self.time_series_length
lws = self.lag_window_size
fws = self.future_window_size
for i, timestamp in enumerate(self.time_series.timestamps):
if i < lws or i > length - fws:
anom_scores[timestamp] = 0
else:
anom_scores[timestamp] = self._compute_anom_score_between_two_windows(i)
self.anom_scores = TimeSeries(self._denoise_scores(anom_scores))
|
Compute anomaly scores for the time series by sliding both lagging window and future window.
|
374,380
|
def _evictStaleDevices(self):
while self.running:
expiredDeviceIds = [key for key, value in self.devices.items() if value.hasExpired()]
for key in expiredDeviceIds:
logger.warning("Device timeout, removing " + key)
del self.devices[key]
time.sleep(1)
logger.warning("DeviceCaretaker is now shutdown")
|
A housekeeping function which runs in a worker thread and which evicts devices that haven't sent an update for a
while.
|
374,381
|
def _httplib2_init(username, password):
obj = httplib2.Http()
if username and password:
obj.add_credentials(username, password)
return obj
|
Used to instantiate a regular HTTP request object
|
374,382
|
def does_collection_exist(self, collection_name, database_name=None):
if collection_name is None:
raise AirflowBadRequest("Collection name cannot be None.")
existing_container = list(self.get_conn().QueryContainers(
get_database_link(self.__get_database_name(database_name)), {
"query": "SELECT * FROM r WHERE r.id=@id",
"parameters": [
{"name": "@id", "value": collection_name}
]
}))
if len(existing_container) == 0:
return False
return True
|
Checks if a collection exists in CosmosDB.
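A rough usage sketch, assuming this is a method of Airflow's Azure CosmosDB hook (the connection id and names are invented for illustration):

from airflow.contrib.hooks.azure_cosmos_hook import AzureCosmosDBHook

hook = AzureCosmosDBHook(azure_cosmos_conn_id='azure_cosmos_default')
if not hook.does_collection_exist('my_collection', database_name='my_database'):
    print('collection is missing, creating it')
    hook.create_collection('my_collection', database_name='my_database')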
|
374,383
|
def p_edgesigs(self, p):
p[0] = p[1] + (p[3],)
p.set_lineno(0, p.lineno(1))
|
edgesigs : edgesigs SENS_OR edgesig
|
374,384
|
def cmd_signing_remove(self, args):
if not self.master.mavlink20():
print("You must be using MAVLink2 for signing")
return
self.master.mav.setup_signing_send(self.target_system, self.target_component, [0]*32, 0)
self.master.disable_signing()
print("Removed signing")
|
remove signing from server
|
374,385
|
def _pretty_access_flags_gen(self):
if self.is_public():
yield "public"
if self.is_final():
yield "final"
if self.is_abstract():
yield "abstract"
if self.is_interface():
if self.is_annotation():
yield "@interface"
else:
yield "interface"
if self.is_enum():
yield "enum"
|
generator of the pretty access flags
|
374,386
|
def describe_topic_rule(ruleName,
region=None, key=None, keyid=None, profile=None):
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
rule = conn.get_topic_rule(ruleName=ruleName)
        if rule and 'rule' in rule:
            rule = rule['rule']
            keys = ('ruleName', 'sql', 'description',
                    'createdAt', 'actions', 'ruleDisabled')
            return {'rule': dict([(k, rule.get(k)) for k in keys])}
        else:
            return {'rule': None}
    except ClientError as e:
        return {'error': __utils__['boto3.get_error'](e)}
|
Given a topic rule name describe its properties.
Returns a dictionary of interesting properties.
CLI Example:
.. code-block:: bash
salt myminion boto_iot.describe_topic_rule myrule
|
374,387
|
def on_open(self):
    filename, filter = QtWidgets.QFileDialog.getOpenFileName(self, 'Open')
if filename:
self.open_file(filename)
self.actionRun.setEnabled(True)
self.actionConfigure_run.setEnabled(True)
|
Shows an open file dialog and open the file if the dialog was
accepted.
|
374,388
|
def pretty_date(time=False):
from datetime import datetime
from django.utils import timezone
now = timezone.now()
if isinstance(time, int):
diff = now - datetime.fromtimestamp(time)
elif isinstance(time, datetime):
diff = now - time
elif not time:
diff = now - now
second_diff = diff.seconds
day_diff = diff.days
if day_diff < 0:
        return ''  # dates in the future are not handled
if day_diff == 0:
if second_diff < 10:
return "just now"
if second_diff < 60:
return str(second_diff) + " seconds ago"
if second_diff < 120:
return "a minute ago"
if second_diff < 3600:
return str(second_diff // 60) + " minutes ago"
if second_diff < 7200:
return "an hour ago"
if second_diff < 86400:
return str(second_diff // 3600) + " hours ago"
if day_diff == 1:
return "Yesterday"
if day_diff < 7:
return str(day_diff) + " days ago"
if day_diff < 31:
return str(day_diff // 7) + " weeks ago"
if day_diff < 365:
return str(day_diff // 30) + " months ago"
return str(day_diff // 365) + " years ago"
|
Get a datetime object or an int() epoch timestamp and return a
pretty string like 'an hour ago', 'Yesterday', '3 months ago',
'just now', etc
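A short hedged usage sketch (run inside a configured Django project, since the function relies on `django.utils.timezone`):

from datetime import timedelta
from django.utils import timezone

print(pretty_date(timezone.now() - timedelta(seconds=30)))  # "30 seconds ago"
print(pretty_date(timezone.now() - timedelta(hours=2)))     # "2 hours ago"
print(pretty_date(timezone.now() - timedelta(days=3)))      # "3 days ago"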
|
374,389
|
def package_username(repo):
    # repo is an addon repository name, e.g. 'fabsetup-theno-termdown'
    package = repo.replace('-', '_')  # assumption: dashes become underscores in the package name
    username = repo.split('-')[1]     # the second dash-separated field is the username
    return package, username
|
>>> package_username('fabsetup-theno-termdown')
(termdown, theno)
|
374,390
|
def get_dihedral(self, i: int, j: int, k: int, l: int) -> float:
v1 = self[k].coords - self[l].coords
v2 = self[j].coords - self[k].coords
v3 = self[i].coords - self[j].coords
v23 = np.cross(v2, v3)
v12 = np.cross(v1, v2)
return math.degrees(math.atan2(np.linalg.norm(v2) * np.dot(v1, v23),
np.dot(v12, v23)))
|
Returns dihedral angle specified by four sites.
Args:
i: Index of first site
j: Index of second site
k: Index of third site
l: Index of fourth site
Returns:
Dihedral angle in degrees.
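A hedged usage sketch, assuming this is the `get_dihedral` method on a pymatgen `Molecule`/`Structure` (the coordinates below are invented purely for illustration):

from pymatgen.core.structure import Molecule

# rough hydrogen-peroxide-like geometry, for illustration only
mol = Molecule(['H', 'O', 'O', 'H'],
               [[0.95, 0.00, 0.30],
                [0.00, 0.00, 0.00],
                [0.00, 0.00, 1.48],
                [-0.95, 0.30, 1.78]])
print(mol.get_dihedral(0, 1, 2, 3))   # H-O-O-H dihedral angle in degrees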
|
374,391
|
def _monitor(last_ping, stop_plugin, is_shutting_down, timeout=5):
_timeout_count = 0
_last_check = time.time()
_sleep_interval = 1
if timeout < _sleep_interval:
_sleep_interval = timeout
while True:
time.sleep(_sleep_interval)
if is_shutting_down():
return
if ((time.time() - _last_check) > timeout) and ((time.time() - last_ping()) > timeout):
_last_check = time.time()
_timeout_count += 1
LOG.warning("Missed ping health check from the framework. " +
"({} of 3)".format(_timeout_count))
if _timeout_count >= 3:
stop_plugin()
return
elif (time.time() - last_ping()) <= timeout:
_timeout_count = 0
|
Monitors health checks (pings) from the Snap framework.
If the plugin doesn't receive 3 consecutive health checks from Snap the
plugin will shut down. The default timeout is set to 5 seconds.
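A rough sketch of how such a monitor is typically started in a background thread (the callbacks here are stand-ins, not the real Snap plugin hooks):

import threading
import time

last_ping_time = {'t': time.time()}

monitor_thread = threading.Thread(
    target=_monitor,
    args=(lambda: last_ping_time['t'],        # last_ping: time of the most recent ping
          lambda: print('stopping plugin'),   # stop_plugin: shutdown callback
          lambda: False),                     # is_shutting_down: never shutting down here
    daemon=True)
monitor_thread.start()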
|
374,392
|
def write_ImageMapLine(tlx, tly, brx, bry, w, h, dpi, chr, segment_start, segment_end):
tlx, brx = [canvas2px(x, w, dpi) for x in (tlx, brx)]
tly, bry = [canvas2px(y, h, dpi) for y in (tly, bry)]
    chr, bac_list = chr.split(':')  # assumed separator: "chrN:<bac list>"
    return '<area shape="rect" coords="' + \
        ",".join(str(x) for x in (tlx, tly, brx, bry)) \
        + '" href="#' + chr + '"' \
        + ' title="' + chr + ':' + str(segment_start) + '..' + str(segment_end) + '"' \
        + '>'
|
Write out an image map area line with the coordinates passed to this
function
<area shape="rect" coords="tlx,tly,brx,bry" href="#chr7" title="chr7:100001..500001">
|
374,393
|
def becomeMemberOf(self, groupRole):
self.store.findOrCreate(RoleRelationship,
group=groupRole,
member=self)
|
Instruct this (user or group) Role to become a member of a group role.
@param groupRole: The role that this group should become a member of.
|
374,394
|
def RotateServerKey(cn=u"grr", keylength=4096):
ca_certificate = config.CONFIG["CA.certificate"]
ca_private_key = config.CONFIG["PrivateKeys.ca_key"]
if not ca_certificate or not ca_private_key:
raise ValueError("No existing CA certificate found.")
existing_cert = config.CONFIG["Frontend.certificate"]
serial_number = existing_cert.GetSerialNumber() + 1
  EPrint("Generating new server key (%d bits, cn '%s', serial number %d)" %
         (keylength, cn, serial_number))
server_private_key = rdf_crypto.RSAPrivateKey.GenerateKey(bits=keylength)
server_cert = key_utils.MakeCASignedCert(
str(cn),
server_private_key,
ca_certificate,
ca_private_key,
serial_number=serial_number)
EPrint("Updating configuration.")
config.CONFIG.Set("Frontend.certificate", server_cert.AsPEM())
config.CONFIG.Set("PrivateKeys.server_key", server_private_key.AsPEM())
config.CONFIG.Write()
EPrint("Server key rotated, please restart the GRR Frontends.")
|
This function creates and installs a new server key.
Note that
- Clients might experience intermittent connection problems after
the server key is rotated.
- It's not possible to go back to an earlier key. Clients that see a
new certificate will remember the cert's serial number and refuse
to accept any certificate with a smaller serial number from that
point on.
Args:
cn: The common name for the server to use.
keylength: Length in bits for the new server key.
Raises:
ValueError: There is no CA cert in the config. Probably the server
still needs to be initialized.
|
374,395
|
def dynamics(start, end=None):
def _(sequence):
if start in _dynamic_markers_to_velocity:
start_velocity = _dynamic_markers_to_velocity[start]
start_marker = start
else:
raise ValueError("Unknown start dynamic: %s, must be in %s" % (start, _dynamic_markers_to_velocity.keys()))
if end is None:
end_velocity = start_velocity
end_marker = start_marker
elif end in _dynamic_markers_to_velocity:
end_velocity = _dynamic_markers_to_velocity[end]
end_marker = end
else:
            raise ValueError("Unknown end dynamic: %s, must be in %s" % (end, _dynamic_markers_to_velocity.keys()))
retval = sequence.__class__([Point(point) for point in sequence._elements])
velocity_interval = (float(end_velocity) - float(start_velocity)) / (len(retval) - 1) if len(retval) > 1 else 0
velocities = [int(start_velocity + velocity_interval * pos) for pos in range(len(retval))]
if start_velocity > end_velocity:
retval[0]["dynamic"] = "diminuendo"
retval[-1]["dynamic"] = end_marker
elif start_velocity < end_velocity:
retval[0]["dynamic"] = "crescendo"
retval[-1]["dynamic"] = end_marker
else:
retval[0]["dynamic"] = start_marker
for point, velocity in zip(retval, velocities):
point["velocity"] = velocity
return retval
return _
|
Apply dynamics to a sequence. If end is specified, it will crescendo or diminuendo linearly from start to end dynamics.
You can pass any of these strings as dynamic markers: ['pppppp', 'ppppp', 'pppp', 'ppp', 'pp', 'p', 'mp', 'mf', 'f', 'ff', 'fff', 'ffff']
Args:
start: beginning dynamic marker, if no end is specified all notes will get this marker
end: ending dynamic marker, if unspecified the entire sequence will get the start dynamic marker
Example usage:
s1 | dynamics('p') # play a sequence in piano
s2 | dynamics('p', 'ff') # crescendo from p to ff
s3 | dynamics('ff', 'p') # diminuendo from ff to p
|
374,396
|
def public_key(self):
if not self._public_key and self.sec_certificate_ref:
        sec_public_key_ref_pointer = new(Security, 'SecKeyRef *')
res = Security.SecCertificateCopyPublicKey(self.sec_certificate_ref, sec_public_key_ref_pointer)
handle_sec_error(res)
sec_public_key_ref = unwrap(sec_public_key_ref_pointer)
        self._public_key = PublicKey(sec_public_key_ref, self.asn1['tbs_certificate']['subject_public_key_info'])
return self._public_key
|
:return:
The PublicKey object for the public key this certificate contains
|
374,397
|
def _get_representative_batch(merged):
out = {}
for mgroup in merged:
mgroup = sorted(list(mgroup))
for x in mgroup:
out[x] = mgroup[0]
return out
|
Prepare dictionary matching batch items to a representative within a group.
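A tiny worked example of the mapping this produces (group members invented for illustration):

merged = [{'sampleC', 'sampleA', 'sampleB'}, {'sampleZ'}]
print(_get_representative_batch(merged))
# {'sampleA': 'sampleA', 'sampleB': 'sampleA', 'sampleC': 'sampleA', 'sampleZ': 'sampleZ'}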
|
374,398
|
def as_check_request(self, timer=datetime.utcnow):
    if not self.service_name:
        raise ValueError(u'the service name must be set')
    if not self.operation_id:
        raise ValueError(u'the operation id must be set')
    if not self.operation_name:
        raise ValueError(u'the operation name must be set')
op = super(Info, self).as_operation(timer=timer)
labels = {}
if self.android_cert_fingerprint:
labels[_KNOWN_LABELS.SCC_ANDROID_CERT_FINGERPRINT.label_name] = self.android_cert_fingerprint
if self.android_package_name:
labels[_KNOWN_LABELS.SCC_ANDROID_PACKAGE_NAME.label_name] = self.android_package_name
if self.client_ip:
labels[_KNOWN_LABELS.SCC_CALLER_IP.label_name] = self.client_ip
if self.ios_bundle_id:
labels[_KNOWN_LABELS.SCC_IOS_BUNDLE_ID.label_name] = self.ios_bundle_id
if self.referer:
labels[_KNOWN_LABELS.SCC_REFERER.label_name] = self.referer
labels[_KNOWN_LABELS.SCC_SERVICE_AGENT.label_name] = SERVICE_AGENT
labels[_KNOWN_LABELS.SCC_USER_AGENT.label_name] = USER_AGENT
op.labels = encoding.PyValueToMessage(
sc_messages.Operation.LabelsValue, labels)
check_request = sc_messages.CheckRequest(operation=op)
return sc_messages.ServicecontrolServicesCheckRequest(
serviceName=self.service_name,
checkRequest=check_request)
|
Makes a `ServicecontrolServicesCheckRequest` from this instance
Returns:
a ``ServicecontrolServicesCheckRequest``
Raises:
ValueError: if the fields in this instance are insufficient to
create a valid ``ServicecontrolServicesCheckRequest``
|
374,399
|
def try_rgb(s, default=None):
if not s:
return default
try:
        r, g, b = (int(x.strip()) for x in s.split(','))  # assumes a comma-separated "r, g, b" string
except ValueError:
raise InvalidRgb(s)
if not all(in_range(x, 0, 255) for x in (r, g, b)):
raise InvalidRgb(s)
return r, g, b
|
Try parsing a string into an rgb value (int, int, int),
where the ints are 0-255 inclusive.
If None is passed, default is returned.
On failure, InvalidRgb is raised.
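A short usage sketch, assuming the comma-separated input format noted above:

print(try_rgb('255, 128, 0'))            # (255, 128, 0)
print(try_rgb('', default=(0, 0, 0)))    # (0, 0, 0) -- empty input falls back to the default
try_rgb('300, 0, 0')                     # raises InvalidRgb: components must be 0-255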
|