code stringlengths 51 2.38k | docstring stringlengths 4 15.2k |
|---|---|
def _worker(reader: DatasetReader,
            input_queue: Queue,
            output_queue: Queue,
            index: int) -> None:
    # Worker loop: consume file paths from input_queue until a None
    # sentinel arrives, streaming parsed instances onto output_queue.
    while True:
        file_path = input_queue.get()
        if file_path is None:
            # Sentinel received: report this worker's index so the
            # consumer knows this worker has finished.
            output_queue.put(index)
            break
        logger.info(f"reading instances from {file_path}")
        # Forward every instance the reader yields to the output queue.
        for instance in reader.read(file_path):
            output_queue.put(instance) | A worker that pulls filenames off the input queue, uses the dataset reader
to read them, and places the generated instances on the output queue.
When there are no filenames left on the input queue, it puts its ``index``
on the output queue and doesn't do anything else. |
def listFieldsFromWorkitem(self, copied_from, keep=False):
return self.templater.listFieldsFromWorkitem(copied_from,
keep=keep) | List all the attributes to be rendered directly from some
to-be-copied workitems
More details, please refer to
:class:`rtcclient.template.Templater.listFieldsFromWorkitem` |
def remove(self, pointer):
    # Operate on a deep copy so the stored document is never mutated.
    doc = deepcopy(self.document)
    parent, obj = None, doc
    try:
        # Walk the JSON pointer, tracking the parent container so the
        # final token can be deleted from it.
        for token in Pointer(pointer):
            parent, obj = obj, token.extract(obj, bypass_ref=True)
        if isinstance(parent, Mapping):
            del parent[token]
        if isinstance(parent, MutableSequence):
            # Sequence indices arrive as string tokens; convert to int.
            parent.pop(int(token))
    except Exception as error:
        # Re-wrap any failure in the module's Error type.
        raise Error(*error.args)
    return Target(doc) | Remove element from sequence, member from mapping.
:param pointer: the path to search in
:return: resolved document
:rtype: Target |
def shell_join(delim, it):
    'Joins an iterable of ShellQuoted with a delimiter between each two'
    # Unwrap each item via raw_shell before joining, then re-wrap the
    # result so the joined string stays marked as already shell-quoted.
    return ShellQuoted(delim.join(raw_shell(s) for s in it)) | Joins an iterable of ShellQuoted with a delimiter between each two
def import_module(self, modules, shared=False, into_spooler=False):
    # `shared` and `into_spooler` are mutually exclusive options.
    if all((shared, into_spooler)):
        raise ConfigurationError('Unable to set both `shared` and `into_spooler` flags')
    # Pick the uWSGI option name matching the requested import scope.
    if into_spooler:
        command = 'spooler-python-import'
    else:
        command = 'shared-python-import' if shared else 'python-import'
    self._set(command, modules, multi=True)
    return self._section | Imports a python module.
:param list|str|unicode modules:
:param bool shared: Import a python module in all of the processes.
This is done after fork but before request processing.
:param bool into_spooler: Import a python module in the spooler.
http://uwsgi-docs.readthedocs.io/en/latest/Spooler.html |
def validateDatetime(value, blank=False, strip=None, allowlistRegexes=None, blocklistRegexes=None,
formats=('%Y/%m/%d %H:%M:%S', '%y/%m/%d %H:%M:%S', '%m/%d/%Y %H:%M:%S', '%m/%d/%y %H:%M:%S', '%x %H:%M:%S',
'%Y/%m/%d %H:%M', '%y/%m/%d %H:%M', '%m/%d/%Y %H:%M', '%m/%d/%y %H:%M', '%x %H:%M',
'%Y/%m/%d %H:%M:%S', '%y/%m/%d %H:%M:%S', '%m/%d/%Y %H:%M:%S', '%m/%d/%y %H:%M:%S', '%x %H:%M:%S'), excMsg=None):
try:
return _validateToDateTimeFormat(value, formats, blank=blank, strip=strip, allowlistRegexes=allowlistRegexes, blocklistRegexes=blocklistRegexes)
except ValidationException:
_raiseValidationException(_('%r is not a valid date and time.') % (_errstr(value)), excMsg) | Raises ValidationException if value is not a datetime formatted in one
of the formats in `formats`. Returns a datetime.datetime object of value.
* value (str): The value being validated as a datetime.
* blank (bool): If True, a blank string will be accepted. Defaults to False.
* strip (bool, str, None): If None, whitespace is stripped from value. If a str, the characters in it are stripped from value. If False, nothing is stripped.
* allowlistRegexes (Sequence, None): A sequence of regex str that will explicitly pass validation, even if they aren't numbers.
* blocklistRegexes (Sequence, None): A sequence of regex str or (regex_str, response_str) tuples that, if matched, will explicitly fail validation.
* formats: A tuple of strings that can be passed to time.strftime, dictating the possible formats for a valid datetime.
* excMsg (str): A custom message to use in the raised ValidationException.
>>> import pysimplevalidate as pysv
>>> pysv.validateDatetime('2018/10/31 12:00:01')
datetime.datetime(2018, 10, 31, 12, 0, 1)
>>> pysv.validateDatetime('10/31/2018 12:00:01')
datetime.datetime(2018, 10, 31, 12, 0, 1)
>>> pysv.validateDatetime('10/31/2018')
Traceback (most recent call last):
...
pysimplevalidate.ValidationException: '10/31/2018' is not a valid date and time. |
def _query(self, host_object, classification=False):
template = 'http://verify.hosts-file.net/?v={}&s={}'
url = template.format(self.app_id, host_object.to_unicode())
url = url + '&class=true' if classification else url
return get(url).text | Query the client for data of given host.
:param host_object: an object representing a host value
:param classification: if True: hpHosts is queried also
for classification for given host, if listed
:returns: content of response to GET request to hpHosts
for data on the given host |
def download_handler(feed, placeholders):
    # Dispatch the download to the built-in handler ('greg') or to a
    # user-configured external command.
    import shlex
    value = feed.retrieve_config('downloadhandler', 'greg')
    if value == 'greg':
        # Avoid clobbering an existing file by appending underscores
        # until the target path is free.
        while os.path.isfile(placeholders.fullpath):
            placeholders.fullpath = placeholders.fullpath + '_'
            placeholders.filename = placeholders.filename + '_'
        urlretrieve(placeholders.link, placeholders.fullpath)
    else:
        # External handler: split shell-style, substitute placeholders
        # into each token, run it, and treat a non-zero exit as failure.
        value_list = shlex.split(value)
        instruction_list = [substitute_placeholders(part, placeholders) for
                            part in value_list]
        returncode = subprocess.call(instruction_list)
        if returncode:
            raise URLError | Parse and execute the download handler
def _table_relabel(table, substitutions, replacements=None):
    # `replacements` is reserved for future use; reject it explicitly.
    if replacements is not None:
        raise NotImplementedError
    observed = set()
    exprs = []
    for c in table.columns:
        expr = table[c]
        if c in substitutions:
            # Rename this column and note that the substitution was used.
            expr = expr.name(substitutions[c])
            observed.add(c)
        exprs.append(expr)
    # Any substitution key not matching an existing column is an error.
    for c in substitutions:
        if c not in observed:
            raise KeyError('{0!r} is not an existing column'.format(c))
    return table.projection(exprs) | Change table column names, otherwise leaving table unaltered
Parameters
----------
substitutions
Returns
-------
relabeled : TableExpr |
def _bake_script(script):
    # Only scripts referencing an external resource (a src attribute)
    # are inlined; inline scripts are left untouched.
    if "src" in script.attrs:
        if re.match("https?://", script["src"]):
            script_data = _load_url(script["src"]).read()
        else:
            script_data = _load_file(script["src"]).read()
        script.clear()
        if USING_PYTHON2:
            script.string = "\n" + script_data + "\n"
        else:
            # NOTE(review): on Python 3, str(bytes) renders "b'...'" —
            # presumably the loaders return text here; confirm upstream.
            script.string = "\n" + str(script_data) + "\n"
        # Drop the attributes that referenced the now-inlined resource.
        del script["src"]
        del script["type"] | Takes a script element and bakes it in only if it contains a remote resource
def plot_sample(self, nsims=10, plot_data=True, **kwargs):
if self.latent_variables.estimation_method not in ['BBVI', 'M-H']:
raise Exception("No latent variables estimated!")
else:
import matplotlib.pyplot as plt
import seaborn as sns
figsize = kwargs.get('figsize',(10,7))
plt.figure(figsize=figsize)
date_index = self.index[max(self.ar, self.sc):self.data_length]
mu, Y, scores = self._model(self.latent_variables.get_z_values())
draws = self.sample(nsims).T
plt.plot(date_index, draws, label='Posterior Draws', alpha=1.0)
if plot_data is True:
plt.plot(date_index, Y, label='Data', c='black', alpha=0.5, linestyle='', marker='s')
plt.title(self.data_name)
plt.show() | Plots draws from the posterior predictive density against the data
Parameters
----------
nsims : int (default : 10)
How many draws from the posterior predictive distribution
plot_data : boolean
Whether to plot the data or not |
def setWorkerInfo(self, hostname, workerAmount, origin):
scoop.logger.debug('Initialising {0}{1} worker {2} [{3}].'.format(
"local" if hostname in utils.localHostnames else "remote",
" origin" if origin else "",
self.workersLeft,
hostname,
)
)
add_args, add_kwargs = self._setWorker_args(origin)
self.workers[-1].setWorker(*add_args, **add_kwargs)
self.workers[-1].setWorkerAmount(workerAmount) | Sets the worker information for the current host. |
def delete(self, ip_dest, next_hop, **kwargs):
    # Deletion is implemented as a set operation with 'delete' forced on.
    kwargs.update({'delete': True})
    return self._set_route(ip_dest, next_hop, **kwargs) | Delete a static route
Args:
ip_dest (string): The ip address of the destination in the
form of A.B.C.D/E
next_hop (string): The next hop interface or ip address
**kwargs['next_hop_ip'] (string): The next hop address on
destination interface
**kwargs['distance'] (string): Administrative distance for this
route
**kwargs['tag'] (string): Route tag
**kwargs['route_name'] (string): Route name
Returns:
True if the operation succeeds, otherwise False. |
def purge(self):
    def partial_file(item):
        "Filter out partial files"
        # A file is partial while fewer chunks are completed than exist.
        return item.completed_chunks < item.size_chunks
    self.cull(file_filter=partial_file, attrs=["get_completed_chunks", "get_size_chunks"]) | Delete PARTIAL data files and remove torrent from client.
def get_sort_field(request):
    # The sort field (?sort=...) is only honoured when a direction
    # (?dir=...) is present in the request.
    sort_direction = request.GET.get("dir")
    field_name = (request.GET.get("sort") or "") if sort_direction else ""
    # Descending order is expressed with a leading "-" (Django convention).
    sort_sign = "-" if sort_direction == "desc" else ""
    result_field = "{sign}{field}".format(sign=sort_sign, field=field_name)
    return result_field | Retrieve field used for sorting a queryset
:param request: HTTP request
:return: the sorted field name, prefixed with "-" if ordering is descending |
def ReplaceStopTimeObject(self, stoptime, schedule=None):
if schedule is None:
schedule = self._schedule
new_secs = stoptime.GetTimeSecs()
cursor = schedule._connection.cursor()
cursor.execute("DELETE FROM stop_times WHERE trip_id=? and "
"stop_sequence=? and stop_id=?",
(self.trip_id, stoptime.stop_sequence, stoptime.stop_id))
if cursor.rowcount == 0:
raise problems_module.Error('Attempted replacement of StopTime object which does not exist')
self._AddStopTimeObjectUnordered(stoptime, schedule) | Replace a StopTime object from this trip with the given one.
The StopTime to be replaced is identified by this trip's trip_id together
with the stop_sequence and stop_id of 'stoptime'; it is replaced with 'stoptime' itself.
def _close_stdio():
    for fd in (sys.stdin, sys.stdout, sys.stderr):
        # Capture the OS-level descriptor before closing the Python
        # file object, then close the descriptor as well.
        # NOTE(review): fd.close() normally closes the underlying
        # descriptor too, so os.close() may operate on an already-closed
        # fd — confirm this double close is intended.
        file_no = fd.fileno()
        fd.flush()
        fd.close()
        os.close(file_no) | Close stdio streams to avoid output in the tty that launched pantsd.
async def update_notifications(self, on_match_open: bool = None, on_tournament_end: bool = None):
params = {}
if on_match_open is not None:
params['notify_users_when_matches_open'] = on_match_open
if on_tournament_end is not None:
params['notify_users_when_the_tournament_ends'] = on_tournament_end
assert_or_raise(len(params) > 0, ValueError, 'At least one of the notifications must be given')
await self.update(**params) | update participants notifications for this tournament
|methcoro|
Args:
on_match_open: Email registered Challonge participants when matches open up for them
on_tournament_end: Email registered Challonge participants the results when this tournament ends
Raises:
APIException |
def _drop_membership_multicast_socket(self):
    # Leave the multicast group, then clear the stored request so a
    # later join can be issued cleanly.
    self._multicast_socket.setsockopt(
        socket.IPPROTO_IP,
        socket.IP_DROP_MEMBERSHIP,
        self._membership_request
    )
    self._membership_request = None | Drop membership to multicast
:rtype: None |
def write_template_file(source, target, content):
    # Echo the target path so callers can see which file is produced.
    print(target)
    data = format_template_file(source, content)
    with open(target, 'w') as f:
        for line in data:
            # NOTE(review): encoding yields bytes, which cannot be
            # written to a text-mode file on Python 3 — presumably this
            # branch targets Python 2 unicode objects; confirm.
            if type(line) != str:
                line = line.encode('utf-8')
            f.write(line) | Write a new file from a given pystache template file and content
def generate_enum(self):
enum = self._definition['enum']
if not isinstance(enum, (list, tuple)):
raise JsonSchemaDefinitionException('enum must be an array')
with self.l('if {variable} not in {enum}:'):
enum = str(enum).replace('"', '\\"')
self.l('raise JsonSchemaException("{name} must be one of {}")', enum) | Means that only value specified in the enum is valid.
.. code-block:: python
{
'enum': ['a', 'b'],
} |
def put(self, path, data):
    # Validate inputs early; `data` defaults to an empty form payload.
    assert path is not None
    assert data is None or isinstance(data, dict)
    if data is None:
        data = {}
    response = self.conn.request_encode_body('PUT', path, data,
                                             self._get_headers(), False)
    # Remember the last HTTP status for later inspection by callers.
    self._last_status = response_status = response.status
    response_content = response.data.decode()
    return Result(status=response_status, json=response_content) | Executes a PUT.
'path' may not be None. Should include the full path to the
resoure.
'data' may be None or a dictionary.
Returns a named tuple that includes:
status: the HTTP status code
json: the returned JSON-HAL
If the key was not set, throws an APIConfigurationException. |
def get_secret(self, handle, contributor):
queryset = self.all()
if contributor is not None:
queryset = queryset.filter(contributor=contributor)
secret = queryset.get(handle=handle)
return secret.value | Retrieve an existing secret's value.
:param handle: Secret handle
:param contributor: User instance to perform contributor validation,
which means that only secrets for the given contributor will be
looked up. |
def list_default_storage_policy_of_datastore(datastore, service_instance=None):
log.trace('Listing the default storage policy of datastore \'%s\'', datastore)
target_ref = _get_proxy_target(service_instance)
ds_refs = salt.utils.vmware.get_datastores(service_instance, target_ref,
datastore_names=[datastore])
if not ds_refs:
raise VMwareObjectRetrievalError('Datastore \'{0}\' was not '
'found'.format(datastore))
profile_manager = salt.utils.pbm.get_profile_manager(service_instance)
policy = salt.utils.pbm.get_default_storage_policy_of_datastore(
profile_manager, ds_refs[0])
return _get_policy_dict(policy) | Returns the default storage policy assigned to the given datastore.
datastore
Name of the datastore to assign.
The datastore needs to be visible to the VMware entity the proxy
points to.
service_instance
Service instance (vim.ServiceInstance) of the vCenter.
Default is None.
.. code-block:: bash
salt '*' vsphere.list_default_storage_policy_of_datastore datastore=ds1 |
def eum_snippet(trace_id=None, eum_api_key=None, meta={}):
try:
eum_file = open(os.path.dirname(__file__) + '/eum.js')
eum_src = Template(eum_file.read())
ids = {}
ids['meta_kvs'] = ''
parent_span = tracer.active_span
if trace_id or parent_span:
ids['trace_id'] = trace_id or parent_span.trace_id
else:
return ''
if eum_api_key:
ids['eum_api_key'] = eum_api_key
else:
ids['eum_api_key'] = global_eum_api_key
for key, value in meta.items():
ids['meta_kvs'] += ("'ineum('meta', '%s', '%s');'" % (key, value))
return eum_src.substitute(ids)
except Exception as e:
logger.debug(e)
return '' | Return an EUM snippet for use in views, templates and layouts that reports
client side metrics to Instana that will automagically be linked to the
current trace.
@param trace_id [optional] the trace ID to insert into the EUM string
@param eum_api_key [optional] the EUM API key from your Instana dashboard
@param meta [optional] optional additional KVs you want reported with the
EUM metrics
@return string |
def show(self):
if not self._error and not self.stats:
return
self.header()
for stat in self.stats:
utils.item(stat, level=1, options=self.options) | Display indented statistics. |
def dbr(self, value=None):
if value is not None:
try:
value = float(value)
except ValueError:
raise ValueError('value {} need to be of type float '
'for field `dbr`'.format(value))
self._dbr = value | Corresponds to IDD Field `dbr` Daily temperature range for hottest
month.
[defined as mean of the difference between daily maximum
and daily minimum dry-bulb temperatures for hottest month]
Args:
value (float): value for IDD Field `dbr`
Unit: C
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value |
def get_cookie_header(queue_item):
    header = []
    path = URLHelper.get_path(queue_item.request.url)
    for cookie in queue_item.request.cookies:
        # Cookies scoped to the root (or with no path) apply everywhere.
        root_path = cookie.path == "" or cookie.path == "/"
        if path.startswith(cookie.path) or root_path:
            header.append(cookie.name + "=" + cookie.value)
    # NOTE(review): the Cookie header conventionally joins pairs with
    # "; " rather than "&" — confirm this separator is intended.
    return "&".join(header) | Convert a requests cookie jar to a HTTP request cookie header value.
Args:
queue_item (:class:`nyawc.QueueItem`): The parent queue item of the new request.
Returns:
str: The HTTP cookie header value. |
def get_participants_for_gradebook(gradebook_id, person=None):
if not valid_gradebook_id(gradebook_id):
raise InvalidGradebookID(gradebook_id)
url = "/rest/gradebook/v1/book/{}/participants".format(gradebook_id)
headers = {}
if person is not None:
headers["X-UW-Act-as"] = person.uwnetid
data = get_resource(url, headers)
participants = []
for pt in data["participants"]:
participants.append(_participant_from_json(pt))
return participants | Returns a list of gradebook participants for the passed gradebook_id and
person. |
def print_runs(query):
    # Nothing to print for an absent result set.
    if query is None:
        return
    for tup in query:
        print(("{0} @ {1} - {2} id: {3} group: {4}".format(
            tup.end, tup.experiment_name, tup.project_name,
            tup.experiment_group, tup.run_group))) | Print all rows in this result query.
def cancel(self, consumer_tag=''):
    # Reject non-string tags before issuing the RPC.
    if not compatibility.is_string(consumer_tag):
        raise AMQPInvalidArgument('consumer_tag should be a string')
    cancel_frame = specification.Basic.Cancel(consumer_tag=consumer_tag)
    result = self._channel.rpc_request(cancel_frame)
    # Forget the tag locally once the broker has processed the cancel.
    self._channel.remove_consumer_tag(consumer_tag)
    return result | Cancel a queue consumer.
:param str consumer_tag: Consumer tag
:raises AMQPInvalidArgument: Invalid Parameters
:raises AMQPChannelError: Raises if the channel encountered an error.
:raises AMQPConnectionError: Raises if the connection
encountered an error.
:rtype: dict |
def conflict(request, target=None, template_name='409.html'):
try:
template = loader.get_template(template_name)
except TemplateDoesNotExist:
template = Template(
'<h1>Conflict</h1>'
'<p>The request was unsuccessful due to a conflict. '
'The object changed during the transaction.</p>')
try:
saved = target.__class__._default_manager.get(pk=target.pk)
except target.__class__.DoesNotExist:
saved = None
ctx = {'target': target,
'saved': saved,
'request_path': request.path}
return ConflictResponse(template.render(ctx)) | 409 error handler.
:param request: Request
:param template_name: `409.html`
:param target: The model to save |
def save(self, force_insert=False):
    # Many-to-many values cannot be assigned before the instance has a
    # primary key, so defer them until after the initial save.
    delayed = {}
    for field, value in self.data.items():
        model_field = getattr(type(self.instance), field, None)
        if isinstance(model_field, ManyToManyField):
            if value is not None:
                delayed[field] = value
            continue
        setattr(self.instance, field, value)
    rv = self.instance.save(force_insert=force_insert)
    # Now that the instance exists, apply the deferred M2M assignments.
    for field, value in delayed.items():
        setattr(self.instance, field, value)
    return rv | Save the model and any related many-to-many fields.
:param force_insert: Should the save force an insert?
:return: Number of rows impacted, or False. |
def get_plist_data_from_string (data):
if has_biplist:
return biplist.readPlistFromString(data)
try:
return plistlib.readPlistFromString(data)
except Exception:
return {} | Parse plist data for a string. Tries biplist, falling back to
plistlib. |
def update_email_template(self, template_id, template_dict):
    # Thin wrapper around the generic PUT helper for email templates.
    return self._create_put_request(
        resource=EMAIL_TEMPLATES,
        billomat_id=template_id,
        send_data=template_dict
    ) | Updates a emailtemplate
:param template_id: the template id
:param template_dict: dict
:return: dict |
def python_version(self):
    v = self.get('python-version', '')
    # Fall back to the running interpreter's MAJOR.MINOR when the
    # config does not specify a version.
    if v == '':
        v = "{}.{}".format(sys.version_info.major, sys.version_info.minor)
    return v | Get the configured Python version.
return v | Get the configured Python version.
If this is not set in the config, then it defaults to the version of the current runtime.
Returns: A string of the form "MAJOR.MINOR", e.g. "3.6". |
def available(self):
disco_info = yield from self._disco_client.query_info(
self.client.local_jid.bare()
)
for item in disco_info.identities.filter(attrs={"category": "pubsub"}):
if item.type_ == "pep":
return True
return False | Check whether we have a PEP identity associated with our account. |
def _get_covered_keys_and_masks(merge, aliases):
for entry in merge.routing_table[merge.insertion_index:]:
key_mask = (entry.key, entry.mask)
keys_masks = aliases.get(key_mask, [key_mask])
for key, mask in keys_masks:
if intersect(merge.key, merge.mask, key, mask):
yield key, mask | Get keys and masks which would be covered by the entry resulting from
the merge.
Parameters
----------
aliases : {(key, mask): {(key, mask), ...}, ...}
Map of key-mask pairs to the sets of key-mask pairs that they actually
represent.
Yields
------
(key, mask)
Pairs of keys and masks which would be covered if the given `merge`
were to be applied to the routing table. |
def options(self, request, map, *args, **kwargs):
options = {}
for method, function in map.items():
options[method] = function.__doc__
return self._render(
request = request,
template = 'options',
context = {
'options': options
},
status = 200,
headers = {
'Allow': ', '.join(options.keys())
}
) | List communication options. |
def reg(name):
ret = {'name': name,
'changes': {},
'comment': '',
'result': True}
now = time.time()
if 'status' not in __reg__:
__reg__['status'] = {}
__reg__['status']['val'] = {}
for event in __events__:
if fnmatch.fnmatch(event['tag'], 'salt/beacon/*/status/*'):
idata = {'recv_time': now}
for key in event['data']['data']:
if key in ('id', 'recv_time'):
continue
idata[key] = event['data']['data'][key]
__reg__['status']['val'][event['data']['id']] = idata
ret['changes'][event['data']['id']] = True
return ret | Activate this register to turn on a minion status tracking register, this
register keeps the current status beacon data and the time that each beacon
was last checked in. |
def filter_significance(diff, significance):
    changed = diff['changed']
    # Keep only the fields within each delta whose change is significant.
    reduced = [{'key': delta['key'],
                'fields': {k: v
                           for k, v in delta['fields'].items()
                           if _is_significant(v, significance)}}
               for delta in changed]
    # Drop deltas left with no significant fields at all.
    filtered = [delta for delta in reduced if delta['fields']]
    # Shallow-copy so the caller's diff is not mutated.
    diff = diff.copy()
    diff['changed'] = filtered
    return diff | Prune any changes in the patch which are due to numeric changes less than this level of
significance. |
def annotation_spec_set_path(cls, project, annotation_spec_set):
return google.api_core.path_template.expand(
"projects/{project}/annotationSpecSets/{annotation_spec_set}",
project=project,
annotation_spec_set=annotation_spec_set,
) | Return a fully-qualified annotation_spec_set string. |
def _slice(index, n_samples, margin=None):
    # Expand the requested (before, after) window by an optional margin.
    if margin is None:
        margin = (0, 0)
    assert isinstance(n_samples, (tuple, list))
    assert len(n_samples) == 2
    before, after = n_samples
    assert isinstance(margin, (tuple, list))
    assert len(margin) == 2
    margin_before, margin_after = margin
    before += margin_before
    after += margin_after
    index = int(index)
    before = int(before)
    after = int(after)
    # Clamp the start at zero so the slice never wraps around.
    return slice(max(0, index - before), index + after, None) | Return a waveform slice.
def product(pc, service, attrib, sku):
pc.service = service.lower()
pc.sku = sku
pc.add_attributes(attribs=attrib)
click.echo("Service Alias: {0}".format(pc.service_alias))
click.echo("URL: {0}".format(pc.service_url))
click.echo("Region: {0}".format(pc.region))
click.echo("Product Terms: {0}".format(pc.terms))
click.echo("Filtering Attributes: {0}".format(pc.attributes))
prods = pyutu.find_products(pc)
for p in prods:
click.echo("Product SKU: {0} product: {1}".format(
p, json.dumps(prods[p], indent=2, sort_keys=True))
)
click.echo("Total Products Found: {0}".format(len(prods)))
click.echo("Time: {0} secs".format(time.process_time())) | Get a list of a service's products.
The list will be in the given region, matching the specific terms and
any given attribute filters or a SKU. |
def controlled(num_ptr_bits, U):
    # The gate acts on 1 + num_ptr_bits qubits: identity everywhere
    # except the final 2x2 block, which applies U.
    d = 2 ** (1 + num_ptr_bits)
    m = np.eye(d)
    m[d - 2:, d - 2:] = U
    return m | Given a one-qubit gate matrix U, construct a controlled-U on all pointer
return m | Given a one-qubit gate matrix U, construct a controlled-U on all pointer
qubits. |
def _flat_values(self):
return tuple(
np.nan if type(x) is dict else x
for x in self._cube_dict["result"]["measures"]["mean"]["data"]
) | Return tuple of mean values as found in cube response.
Mean data may include missing items represented by a dict like
{'?': -1} in the cube response. These are replaced by np.nan in the
returned value. |
def add_external_reference(self,term_id, external_ref):
if term_id in self.idx:
term_obj = Cterm(self.idx[term_id],self.type)
term_obj.add_external_reference(external_ref)
else:
print('{term_id} not in self.idx'.format(**locals())) | Adds an external reference for the given term
@type term_id: string
@param term_id: the term identifier
@type external_ref: L{CexternalReference}
@param external_ref: the external reference object |
def prepare(cls) -> None:
if cls.version is None:
cls.version = find_version(cls.name)
if cls.long_description is None:
cls.long_description = cls.parse_readme()
if cls.packages is None:
cls.packages = find_packages(cls.root_directory)
if cls.install_requires is None:
cls.install_requires = parse_requirements()
if cls.python_requires is None:
cls.python_requires = find_required_python_version(cls.classifiers) | Fill in possibly missing package metadata. |
def _normalize_string(raw_str):
    # Tokenize the unicode form of the input and rejoin the stripped
    # tokens with single spaces so a later split() behaves predictably.
    return " ".join(
        token.strip()
        for token in tokenizer.encode(text_encoder.native_to_unicode(raw_str))) | Normalizes the string using tokenizer.encode.
Args:
raw_str: the input string
Returns:
A string which is ready to be tokenized using split() |
def from_db_value(self, value, expression, connection, context):
    # NULL passes through; an empty string is treated as "no value".
    if value is None:
        return value
    if value == '':
        return None
    return iso_string_to_python_datetime(value) | Convert database value to Python value.
Called when data is loaded from the database. |
def _unpack(struct, bc, offset=0):
    # `struct` is a pre-compiled Struct; advance the offset by its size
    # so callers can chain successive unpacks over the same buffer.
    return struct.unpack_from(bc, offset), offset + struct.size | returns the unpacked data tuple, and the next offset past the
unpacked data |
def place_visual(self):
index = 0
bin_pos = string_to_array(self.bin2_body.get("pos"))
bin_size = self.bin_size
for _, obj_mjcf in self.visual_objects:
bin_x_low = bin_pos[0]
bin_y_low = bin_pos[1]
if index == 0 or index == 2:
bin_x_low -= bin_size[0] / 2
if index < 2:
bin_y_low -= bin_size[1] / 2
bin_x_high = bin_x_low + bin_size[0] / 2
bin_y_high = bin_y_low + bin_size[1] / 2
bottom_offset = obj_mjcf.get_bottom_offset()
bin_range = [bin_x_low + bin_x_high, bin_y_low + bin_y_high, 2 * bin_pos[2]]
bin_center = np.array(bin_range) / 2.0
pos = bin_center - bottom_offset
self.visual_obj_mjcf[index].set("pos", array_to_string(pos))
index += 1 | Places visual objects randomly until no collisions or max iterations hit. |
def setPololuProtocol(self):
self._compact = False
self._log and self._log.debug("Pololu protocol has been set.") | Set the pololu protocol. |
def _get_description(self):
return ", ".join([
part for part in [
"missing: {}".format(self.missing) if self.missing else "",
(
"forbidden: {}".format(self.forbidden)
if self.forbidden else ""
),
"invalid: {}:".format(self.invalid) if self.invalid else "",
(
"failed to parse: {}".format(self.failed)
if self.failed else ""
)
] if part
]) | Return human readable description error description.
This description should explain everything that went wrong during
deserialization. |
def find_and_modify(self, query=None, update=None):
    update = update or {}
    # Merge the update dict into every matching document and persist it.
    for document in self.find(query=query):
        document.update(update)
        self.update(document) | Finds documents in this collection that match a given query and updates them
def _parse_output(self, xml_response):
count = 0
xml_dict = {}
resp_message = None
xml_start_pos = []
for m in re.finditer(r"\<\?xml", xml_response):
xml_start_pos.append(m.start())
while count < len(xml_start_pos):
if (count == len(xml_start_pos) - 1):
result = xml_response[xml_start_pos[count]:]
else:
start = xml_start_pos[count]
end = xml_start_pos[count + 1]
result = xml_response[start:end]
result = result.strip()
message = etree.fromstring(result)
resp = self._validate_message(message)
if hasattr(resp, 'tag'):
xml_dict = self._elementtree_to_dict(resp)
elif resp is not None:
resp_message = resp
count = count + 1
if xml_dict:
return xml_dict
elif resp_message is not None:
return resp_message | Parse the response XML from iLO.
This function parses the output received from ILO.
As the output contains multiple XMLs, it extracts
one xml at a time and loops over till all the xmls
in the response are exhausted.
It returns the data to APIs either in dictionary
format or as the string.
It creates the dictionary only if the Ilo response
contains the data under the requested RIBCL command.
If the Ilo response contains only the string,
then the string is returned back. |
def dumps(self, data):
    # Serialize via the driver registered under this serializer's name.
    data = g_serializer_drivers[self.name]['dumps'](data)
    # On Python 3 a str result must be encoded before compression/return.
    if sys.version_info[0] == 3 and isinstance(data, str):
        data = data.encode(self._charset)
    if self._compression == 'zlib':
        data = zlib.compress(data)
    assert isinstance(data, bytes)
    return data | Serialize a python data type for transmission or storage.
:param data: The python object to serialize.
:return: The serialized representation of the object.
:rtype: bytes |
def dumps(self, separator='='):
s = six.StringIO()
self.dump(s, separator=separator)
return s.getvalue() | Convert the mapping to a text string in simple line-oriented
``.properties`` format.
If the instance was originally created from a file or string with
`PropertiesFile.load()` or `PropertiesFile.loads()`, then the output
will include the comments and whitespace from the original input, and
any keys that haven't been deleted or reassigned will retain their
original formatting and multiplicity. Key-value pairs that have been
modified or added to the mapping will be reformatted with
`join_key_value()` using the given separator. All key-value pairs are
output in the order they were defined, with new keys added to the end.
.. note::
Serializing a `PropertiesFile` instance with the :func:`dumps()`
function instead will cause all formatting information to be
ignored, as :func:`dumps()` will treat the instance like a normal
mapping.
:param separator: The string to use for separating new or modified keys
& values. Only ``" "``, ``"="``, and ``":"`` (possibly with added
whitespace) should ever be used as the separator.
:type separator: text string
:rtype: text string |
def put(self, url):
response = self.http_request(url, 'PUT', self, {'Content-Type': 'application/xml; charset=utf-8'})
if response.status != 200:
self.raise_http_error(response)
response_xml = response.read()
logging.getLogger('recurly.http.response').debug(response_xml)
self.update_from_element(ElementTree.fromstring(response_xml)) | Sends this `Resource` instance to the service with a
``PUT`` request to the given URL. |
def until_any_child_in_state(self, state, timeout=None):
return until_any(*[r.until_state(state) for r in dict.values(self.children)],
timeout=timeout) | Return a tornado Future; resolves when any client is in specified state |
def results(self) -> List[TrialResult]:
if not self._results:
job = self._update_job()
for _ in range(1000):
if job['executionStatus']['state'] in TERMINAL_STATES:
break
time.sleep(0.5)
job = self._update_job()
if job['executionStatus']['state'] != 'SUCCESS':
raise RuntimeError(
'Job %s did not succeed. It is in state %s.' % (
job['name'], job['executionStatus']['state']))
self._results = self._engine.get_job_results(
self.job_resource_name)
return self._results | Returns the job results, blocking until the job is complete. |
def create_api_method(restApiId, resourcePath, httpMethod, authorizationType,
apiKeyRequired=False, requestParameters=None, requestModels=None,
region=None, key=None, keyid=None, profile=None):
try:
resource = describe_api_resource(restApiId, resourcePath, region=region,
key=key, keyid=keyid, profile=profile).get('resource')
if resource:
requestParameters = dict() if requestParameters is None else requestParameters
requestModels = dict() if requestModels is None else requestModels
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
method = conn.put_method(restApiId=restApiId, resourceId=resource['id'], httpMethod=httpMethod,
authorizationType=str(authorizationType), apiKeyRequired=apiKeyRequired,
requestParameters=requestParameters, requestModels=requestModels)
return {'created': True, 'method': method}
return {'created': False, 'error': 'Failed to create method'}
except ClientError as e:
return {'created': False, 'error': __utils__['boto3.get_error'](e)} | Creates API method for a resource in the given API
CLI Example:
.. code-block:: bash
salt myminion boto_apigateway.create_api_method restApiId resourcePath, httpMethod, authorizationType, \\
apiKeyRequired=False, requestParameters='{"name", "value"}', requestModels='{"content-type", "value"}' |
def connect(nodes):
    """Connect a list of nodes.

    Connected nodes have an ``output`` member which is the following node
    in the line.  The last node's ``output`` is a :class:`Queue` for
    easy plumbing.

    :param nodes: ordered list of node objects; may be empty.
    """
    for upstream, downstream in zip(nodes[:-1], nodes[1:]):
        upstream.output = downstream
    # Only the final node needs a Queue; the original created (and then
    # immediately overwrote) a throwaway Queue for every adjacent pair.
    if nodes:
        nodes[-1].output = queues.Queue()
def load_fixtures():
    """Populate a database with data from fixtures.

    Refuses to run against the production document root, and asks for
    interactive confirmation before loading anything.
    """
    if local("pwd", capture=True) == PRODUCTION_DOCUMENT_ROOT:
        abort("Refusing to automatically load fixtures into production database!")
    if not confirm("Are you sure you want to load all fixtures? This could have unintended consequences if the database is not empty."):
        abort("Aborted.")
    # NOTE(review): load order presumably matters (users before eighth,
    # scaffolding before signups) -- keep the sequence as-is.
    files = [
        "fixtures/users/users.json", "fixtures/eighth/sponsors.json", "fixtures/eighth/rooms.json", "fixtures/eighth/blocks.json",
        "fixtures/eighth/activities.json", "fixtures/eighth/scheduled_activities.json", "fixtures/eighth/signups.json",
        "fixtures/announcements/announcements.json"
    ]
    for f in files:
        local("./manage.py loaddata " + f)
def add_metric(self, metric_name, measurable, config=None):
    """Add a metric to monitor an object that implements measurable.

    This metric won't be associated with any sensor; it is a way to
    expose existing values as metrics.

    Arguments:
        metric_name (MetricName): The name of the metric.
        measurable (AbstractMeasurable): The measurable that will be
            measured by this metric.
        config (MetricConfig, optional): The configuration to use when
            measuring this measurable; defaults to ``self.config``.
    """
    metric = KafkaMetric(metric_name, measurable, config or self.config)
    self.register_metric(metric)
def _make_expr_ops(self, op_list, op_dict=None, op_class=None):
    """Fill up the `self._op_expr` dict.

    :param op_list: A list of operation names.
    :param op_dict: A dictionary of operation methods; takes precedence
        when provided (even over `op_class`).
    :param op_class: Where the operation method comes from when no
        `op_dict` is given.
    """
    for o in op_list:
        if op_dict is not None:
            if o in op_dict:
                self._op_expr[o] = op_dict[o]
            else:
                # Missing operations are logged, not fatal.
                l.warning("Operation %s not in op_dict.", o)
        else:
            if hasattr(op_class, o):
                self._op_expr[o] = getattr(op_class, o)
            else:
                l.warning("Operation %s not in op_class %s.", o, op_class)
def timing(func):
    """Decorator: measure and print the elapsed time of each call to *func*.

    The wrapped function's return value is passed through unchanged.
    """
    @functools.wraps(func)
    def wrap(*args, **kw):
        # perf_counter is monotonic; time.time() (used originally) can
        # jump backwards/forwards with system clock adjustments.
        from time import perf_counter
        t0 = perf_counter()
        result = func(*args, **kw)
        t1 = perf_counter()
        print('func:%r args:[%r, %r] took: %2.4f sec' %
              (func.__name__, args, kw, t1 - t0))
        return result
    return wrap
def _adopt_notifications(self, state):
    """Part of the "monitor" restart strategy.

    The pending notifications from the descriptor of the dead agent are
    imported into our pending list.

    :param state: restart-strategy state carrying `descriptor` and `sender`.
    """
    pending = state.descriptor.pending_notifications
    # .values() replaces Python-2-only .iteritems() (the keys were unused).
    # Assumes pending_notifications is a dict-like mapping -- TODO confirm.
    to_adopt = [item for nots in pending.values() for item in nots]
    self.info("Will adopt %d pending notifications.", len(to_adopt))
    return state.sender.notify(to_adopt)
def execute(self, env, args):
    """List all valid tasks.

    `env`
        Runtime ``Environment`` instance.
    `args`
        Arguments object from arg parser.
    """
    tasks = env.task.get_list_info()
    if not tasks:
        env.io.write("No tasks found.")
    else:
        if args.verbose:
            _print_tasks(env, tasks, mark_active=True)
        else:
            if env.task.active:
                active_task = env.task.name
            else:
                active_task = None
            for task, options, blocks in tasks:
                if task == active_task:
                    # The active task is highlighted with a trailing '*'.
                    env.io.success(task + ' *')
                else:
                    if options is None and blocks is None:
                        # NOTE(review): '~' appears to flag a task with no
                        # options/blocks (likely invalid) -- confirm.
                        env.io.error(task + ' ~')
                    else:
                        env.io.write(task)
def track_event(self, name, properties=None, measurements=None):
    """Send information about a single event that has occurred in the
    context of the application.

    Args:
        name (str): the name to associate to this event; falls back to
            NULL_CONSTANT_STRING when empty/None.
        properties (dict, optional): custom properties attached to this
            data item.
        measurements (dict, optional): custom measurements attached to
            this data item.
    """
    data = channel.contracts.EventData()
    data.name = name or NULL_CONSTANT_STRING
    if properties:
        data.properties = properties
    if measurements:
        data.measurements = measurements
    self.track(data, self._context)
def iter_predict(self, X, include_init=False):
    """Yield the predictions for ``X`` at every stage of the boosting procedure.

    Args:
        X (array-like or sparse matrix of shape (n_samples, n_features)):
            The input samples.  Sparse matrices are accepted only if they
            are supported by the weak model.
        include_init (bool, default=False): If ``True`` then the prediction
            from ``init_estimator`` is also yielded first.

    Returns:
        iterator of arrays containing the predicted values at each stage.
    """
    utils.validation.check_is_fitted(self, 'init_estimator_')
    X = utils.check_array(X, accept_sparse=['csr', 'csc'], dtype=None, force_all_finite=False)
    y_pred = self.init_estimator_.predict(X)
    if include_init:
        yield y_pred
    # zip_longest: line searchers and column subsets are optional per stage.
    for estimators, line_searchers, cols in itertools.zip_longest(self.estimators_,
                                                                  self.line_searchers_,
                                                                  self.columns_):
        for i, (estimator, line_searcher) in enumerate(itertools.zip_longest(estimators,
                                                                             line_searchers or [])):
            if cols is None:
                direction = estimator.predict(X)
            else:
                # This estimator was fitted on a column subset.
                direction = estimator.predict(X[:, cols])
            if line_searcher:
                direction = line_searcher.update(direction)
            # NOTE(review): y_pred is updated in place, so the same array
            # object is yielded at every stage -- callers needing a
            # snapshot must copy it.
            y_pred[:, i] += self.learning_rate * direction
            yield y_pred
def connect(self, target, acceptor, wrapper=None):
    """Initiate a connection from the tendril manager's endpoint.

    Once the connection is completed, a TCPTendril object is created
    and passed to the given acceptor.

    :param target: The target of the connection attempt.
    :param acceptor: A callable which will initialize the state of the
        new TCPTendril object.  May raise
        ``application.RejectConnection`` to reject the connection.
    :param wrapper: A callable taking, as its first argument, a
        socket.socket object.  It must return a valid proxy for the
        socket, which is subsequently used to communicate on the
        connection.

    :returns: The new TCPTendril, or ``None`` when the acceptor
        rejected the connection.

    For passing extra arguments to the acceptor or the wrapper, see the
    ``TendrilPartial`` class; for chaining together multiple wrappers,
    see the ``WrapperChain`` class.
    """
    super(TCPTendrilManager, self).connect(target, acceptor, wrapper)
    sock = socket.socket(self.addr_family, socket.SOCK_STREAM)
    # SocketCloser closes the socket on unexpected errors; a
    # RejectConnection raised inside is "ignored", meaning execution
    # resumes AFTER the with-block, where we close and return None.
    with utils.SocketCloser(sock, ignore=[application.RejectConnection]):
        sock.bind(self.endpoint)
        sock.connect(target)
        if wrapper:
            sock = wrapper(sock)
        tend = TCPTendril(self, sock)
        tend.application = acceptor(tend)
        self._track_tendril(tend)
        tend._start()
        return tend
    # Reached only when the acceptor raised RejectConnection.
    sock.close()
    return None
def zipentry_chunk(zipfile, name, size=_BUFFERING):
    """Return a generator function which, when called, emits *size*-byte
    chunks of the named entry in the *zipfile* object."""
    def chunks():
        with zipfile.open(name) as entry:
            # iter(callable, sentinel) stops at the b'' returned on EOF.
            for piece in iter(lambda: entry.read(size), b''):
                yield piece
    return chunks
def linspace2(a, b, n, dtype=None):
    """Like numpy.linspace, but with both boundaries excluded.

    Returns the n midpoints of the n equal subintervals of [a, b]:

    >>> print linspace2(0,1,5)
    [ 0.1  0.3  0.5  0.7  0.9]
    """
    pts = linspace(a, b, n + 1, dtype=dtype)[:-1]
    if len(pts) > 1:
        # Shift every point by half a step to center it in its subinterval.
        half_step = ((pts[1] - pts[0]) / 2).astype(pts.dtype)
        pts += half_step
    return pts
def as_subbundle(cls, name=None, title=None, title_plural=None):
    """Wrap the given bundle class so it can be lazily instantiated.

    :param name: The slug for this bundle.
    :param title: The verbose name for this bundle.
    :param title_plural: The plural verbose name for this bundle.
    """
    return PromiseBundle(cls, name=name, title=title,
                         title_plural=title_plural)
def _index_impl(self):
    """Return information about the tags in each run.

    Result maps run name -> tag name -> {'displayName', 'description',
    'samples'}, where ``samples`` is the greatest number of audio clips
    that appear at any particular step for that tag (not related to
    "samples of a waveform").
    """
    runs = self._multiplexer.Runs()
    result = {run: {} for run in runs}
    mapping = self._multiplexer.PluginRunToTagToContent(metadata.PLUGIN_NAME)
    for (run, tag_to_content) in six.iteritems(mapping):
        for tag in tag_to_content:
            summary_metadata = self._multiplexer.SummaryMetadata(run, tag)
            tensor_events = self._multiplexer.Tensors(run, tag)
            # The appended [0] keeps max() from raising on an empty list.
            samples = max([self._number_of_samples(event.tensor_proto)
                           for event in tensor_events] + [0])
            result[run][tag] = {'displayName': summary_metadata.display_name,
                                'description': plugin_util.markdown_to_safe_html(
                                    summary_metadata.summary_description),
                                'samples': samples}
    return result
def generate_cutJ_genomic_CDR3_segs(self):
    """Add palindromic inserted nucleotides to germline J sequences.

    The maximum number of palindromic insertions are appended to the
    germline J segments so that delJ can index directly for the number
    of nucleotides to delete from a segment.

    Sets the attribute cutJ_genomic_CDR3_segs.
    """
    max_palindrome = self.max_delJ_palindrome
    self.cutJ_genomic_CDR3_segs = []
    # genJ entries are (name, sequence, ...); index 1 is the CDR3 segment.
    for CDR3_J_seg in [x[1] for x in self.genJ]:
        if len(CDR3_J_seg) < max_palindrome:
            # Segment shorter than the max palindrome: cap at its length.
            self.cutJ_genomic_CDR3_segs += [cutL_seq(CDR3_J_seg, 0, len(CDR3_J_seg))]
        else:
            self.cutJ_genomic_CDR3_segs += [cutL_seq(CDR3_J_seg, 0, max_palindrome)]
def __standardize_result(status, message, data=None, debug_msg=None):
    """Standardize all responses into a single dict shape.

    :param status: status flag for the response
    :param message: human-readable message
    :param data: optional payload, stored under the 'return' key
    :param debug_msg: optional debug text, included only when the
        module-level ``debug`` flag is set
    :return: the standardized response dict
    """
    result = {'status': status, 'message': message}
    if data is not None:
        result['return'] = data
    if debug_msg is not None and debug:
        result['debug'] = debug_msg
    return result
def srem(self, key, *values):
    """Emulate redis SREM: remove *values* from the set stored at *key*.

    Returns the number of members actually removed; deletes the key when
    the set becomes empty.
    """
    redis_set = self._get_set(key, 'SREM')
    if not redis_set:
        return 0
    before_count = len(redis_set)
    for value in values:
        # discard() silently ignores values that are not present.
        redis_set.discard(self._encode(value))
    after_count = len(redis_set)
    if before_count > 0 and len(redis_set) == 0:
        self.delete(key)
    return before_count - after_count
def a_not_committed(ctx):
    """Handle the "software packages not committed" reload prompt.

    Answers 'n' to the prompt, emits an error message explaining that
    reload may cause software rollback, and marks the dialog as failed.
    """
    ctx.ctrl.sendline('n')
    ctx.msg = "Some active software packages are not yet committed. Reload may cause software rollback."
    ctx.device.chain.connection.emit_message(ctx.msg, log_level=logging.ERROR)
    ctx.failed = True
    return False
def certify_required(value, required=False):
    """Certify that a value is present if required.

    :param object value:
        The value that is to be certified.
    :param bool required:
        Is the value required?
    :raises CertifierParamError:
        ``required`` is not a bool.
    :raises CertifierValueError:
        Required value is `None`.
    """
    if not isinstance(required, bool):
        raise CertifierParamError(
            'required',
            required,
        )
    if value is not None:
        # Nothing to certify; implicit None return kept for compatibility.
        return None
    if required:
        raise CertifierValueError(
            message="required value is None",
        )
    return True
def t_coords(self, t_coords):
    """Set the array to use as the texture coordinates.

    t_coords must be a (n_points, 2) numpy array with values in [0, 1].
    """
    if not isinstance(t_coords, np.ndarray):
        raise TypeError('Texture coordinates must be a numpy array')
    if t_coords.ndim != 2:
        raise AssertionError('Texture coordinates must be a 2-dimensional array')
    if t_coords.shape[0] != self.n_points:
        raise AssertionError('Number of texture coordinates ({}) must match number of points ({})'.format(t_coords.shape[0], self.n_points))
    if t_coords.shape[1] != 2:
        raise AssertionError('Texture coordinates must only have 2 components, not ({})'.format(t_coords.shape[1]))
    if np.min(t_coords) < 0.0 or np.max(t_coords) > 1.0:
        raise AssertionError('Texture coordinates must be within (0, 1) range.')
    # Hand the validated array to VTK and mark the point data dirty.
    vtkarr = numpy_to_vtk(t_coords)
    vtkarr.SetName('Texture Coordinates')
    self.GetPointData().SetTCoords(vtkarr)
    self.GetPointData().Modified()
    return
def makedirs(self, path, mode=0o700):
    """Create *path*, including leading components, under the archive root.

    Used by sos.sosreport to set up sos_* directories.
    """
    dest = os.path.join(self._archive_root, path)
    os.makedirs(dest, mode=mode)
    self.log_debug("created directory at '%s' in FileCacheArchive '%s'"
                   % (path, self._archive_root))
Used by sos.sosreport to set up sos_* directories. |
def _can(self, func_name, qualifier_id=None):
    """Test if the named function is authorized with agent and qualifier.

    Authorizations are cached in a dict keyed on agent+function+qualifier.
    This is expected to stay small (typically few qualifier/function
    combinations per agent); switch to something like cachetools if it
    becomes an issue.
    """
    function_id = self._get_function_id(func_name)
    if qualifier_id is None:
        qualifier_id = self._qualifier_id
    agent_id = self.get_effective_agent_id()
    # EAFP: hit the cache first, fall back to the authz service on miss.
    try:
        return self._authz_cache[str(agent_id) + str(function_id) + str(qualifier_id)]
    except KeyError:
        authz = self._authz_session.is_authorized(agent_id=agent_id,
                                                  function_id=function_id,
                                                  qualifier_id=qualifier_id)
        self._authz_cache[str(agent_id) + str(function_id) + str(qualifier_id)] = authz
        return authz
def select_area(self, area_uuid):
    """Update the "current area" to be the area with this UUID and persist it.

    :param str area_uuid: The RFC4122-compliant UUID of the Upload Area.
    """
    self._config.upload.current_area = area_uuid
    self.save()
def filter_spent_outputs(self, outputs):
    """Remove outputs that have been spent.

    Args:
        outputs: list of TransactionLink
    """
    links = [o.to_dict() for o in outputs]
    txs = list(query.get_spending_transactions(self.connection, links))
    # Each input's 'fulfills' link identifies an output that is spent.
    spends = {TransactionLink.from_dict(input_['fulfills'])
              for tx in txs
              for input_ in tx['inputs']}
    return [ff for ff in outputs if ff not in spends]
def Description(self):
    """Return searchable data (id + parent title) as the Description."""
    descr = " ".join((self.getId(), self.aq_parent.Title()))
    # Encoded to UTF-8 bytes -- presumably what the catalog expects.
    return safe_unicode(descr).encode('utf-8')
def div_safe(numerator, denominator):
    """Ufunc-extension that returns 0 instead of nan/inf when dividing
    numpy arrays.

    Parameters
    ----------
    numerator: array-like (a scalar raises ValueError)
    denominator: scalar or array-like that can be validly divided by the
        numerator

    Returns a numpy array, e.g. div_safe([-1, 0, 1], 0) == [0, 0, 0]
    """
    if np.isscalar(numerator):
        raise ValueError("div_safe should only be used with an array-like numerator")
    # The original wrapped this in `except ValueError as e: raise e`,
    # a no-op re-raise; removed.
    with np.errstate(divide='ignore', invalid='ignore'):
        result = np.true_divide(numerator, denominator)
        # Zero out inf/-inf (divide-by-zero) and nan (0/0) entries.
        result[~np.isfinite(result)] = 0
    return result
def _try_instantiate(self, ipopo, factory, component):
    """Try to instantiate a component from the queue; hides all exceptions.

    :param ipopo: The iPOPO service
    :param factory: Component factory
    :param component: Component name
    """
    try:
        with self.__lock:
            properties = self.__queue[factory][component]
    except KeyError:
        # Component not (or no longer) queued: nothing to do.
        return
    else:
        try:
            ipopo.instantiate(factory, component, properties)
        except TypeError:
            # Deliberately swallowed -- presumably the factory is not
            # registered yet; TODO confirm.
            pass
        except ValueError as ex:
            _logger.error("Component already running: %s", ex)
        except Exception as ex:
            _logger.exception("Error instantiating component: %s", ex)
def generate_schedule(today=None):
    """Helper function which creates an ordered list of schedule days.

    When *today* is given, only slots for that day are included.
    """
    schedule_days = {}
    seen_items = {}
    for slot in Slot.objects.all().order_by('end_time', 'start_time', 'day'):
        day = slot.get_day()
        if today and day != today:
            continue
        # Lazily create one ScheduleDay per distinct day.
        schedule_day = schedule_days.get(day)
        if schedule_day is None:
            schedule_day = schedule_days[day] = ScheduleDay(day)
        row = make_schedule_row(schedule_day, slot, seen_items)
        schedule_day.rows.append(row)
    return sorted(schedule_days.values(), key=lambda x: x.day.date)
def value_from_datadict(self, data, files, name):
    """Return the uploaded file, falling back to the serialized value
    stored in the hidden companion field."""
    upload = super(StickyUploadWidget, self).value_from_datadict(data, files, name)
    if upload is not None:
        return upload
    else:
        hidden_name = self.get_hidden_name(name)
        value = data.get(hidden_name, None)
        if value is not None:
            upload = open_stored_file(value, self.url)
            if upload is not None:
                # NOTE(review): '_seralized_location' is misspelled but kept
                # as-is -- readers elsewhere must use the identical name.
                setattr(upload, '_seralized_location', value)
        return upload
def _insert_data(self, connection, name, value, timestamp, interval, config):
    """Helper to insert one datapoint into cql; always closes the cursor."""
    cursor = connection.cursor()
    try:
        statement = self._insert_stmt(name, value, timestamp, interval, config)
        if statement:
            cursor.execute(statement)
    finally:
        cursor.close()
def add_atmost(self, lits, k, no_return=True):
    """Add a new atmost constraint (at most *k* of *lits* true) to the
    solver's internal formula.

    Only supported for Minicard; silently does nothing for other solvers.
    Returns the solver result only when *no_return* is False.
    """
    if self.minicard:
        res = pysolvers.minicard_add_am(self.minicard, lits, k)
        # A False result presumably means the formula became trivially
        # unsatisfiable -- TODO confirm against pysolvers.
        if res == False:
            self.status = False
        if not no_return:
            return res
def diff(self, sym: Symbol, n: int = 1, expand_simplify: bool = True):
    """Differentiate by scalar parameter `sym`.

    Args:
        sym: What to differentiate by (must be a Sympy symbol).
        n: How often to differentiate.
        expand_simplify: Whether to expand and simplify the result.

    Returns:
        The n-th derivative, or the class's zero element when `sym` does
        not occur among the expression's free symbols.

    Raises:
        TypeError: if `sym` is not a Sympy symbol.
    """
    if not isinstance(sym, sympy.Basic):
        raise TypeError("%s needs to be a Sympy symbol" % sym)
    if sym.free_symbols.issubset(self.free_symbols):
        deriv = QuantumDerivative.create(self, derivs={sym: n}, vals=None)
        # Skip the (potentially expensive) simplification for zero results.
        if not deriv.is_zero and expand_simplify:
            deriv = deriv.expand().simplify_scalar()
        return deriv
    else:
        return self.__class__._zero
def _StopStatusUpdateThread(self):
    """Stops the status update thread and waits for it to terminate."""
    self._status_update_active = False
    # is_alive(): the camelCase isAlive() alias was removed in Python 3.9.
    if self._status_update_thread.is_alive():
        self._status_update_thread.join()
    self._status_update_thread = None
def showPopup(self):
    """Shows the popup for this button.

    If a keyboard modifier is held while triggering, the popup is shown
    in dialog mode.
    """
    # NOTE(review): truthy when any modifier key is held -- used below as
    # the "show as dialog" flag; confirm this is the intended trigger.
    as_dialog = QApplication.keyboardModifiers()
    anchor = self.defaultAnchor()
    if anchor:
        self.popupWidget().setAnchor(anchor)
    else:
        anchor = self.popupWidget().anchor()
    # Anchor at the top or bottom edge of the button depending on the
    # popup's anchor direction.
    if ( anchor & (XPopupWidget.Anchor.BottomLeft |
                   XPopupWidget.Anchor.BottomCenter |
                   XPopupWidget.Anchor.BottomRight) ):
        pos = QPoint(self.width() / 2, 0)
    else:
        pos = QPoint(self.width() / 2, self.height())
    pos = self.mapToGlobal(pos)
    if not self.signalsBlocked():
        self.popupAboutToShow.emit()
    self._popupWidget.popup(pos)
    if as_dialog:
        self._popupWidget.setCurrentMode(XPopupWidget.Mode.Dialog)
def restart(self):
    """Restart the game from a new generation 0."""
    self.initGrid()
    self.win.clear()
    self.current_gen = 1
    # Bug fix: the original read `self.start` (a bare attribute access),
    # which never actually invoked the method.
    self.start()
async def _process_latching(self, key, latching_entry):
    """Private utility: process a latching event.

    Either reports the latched data through the entry's callback
    (awaited when the callback-type flag is set) and resets the latch,
    or stores the latched data and timestamp in ``self.latch_map``.

    :param key: Encoded pin
    :param latching_entry: a latch table entry
    """
    if latching_entry[Constants.LATCH_CALLBACK]:
        # Callback payload is [key, latched_data, timestamp]; the
        # CALLBACK_TYPE flag selects coroutine vs plain callable.
        if latching_entry[Constants.LATCH_CALLBACK_TYPE]:
            await latching_entry[Constants.LATCH_CALLBACK] \
                ([key, latching_entry[Constants.LATCHED_DATA], time.time()])
        else:
            latching_entry[Constants.LATCH_CALLBACK] \
                ([key, latching_entry[Constants.LATCHED_DATA], time.time()])
        # Reset the latch entry after the callback has fired.
        self.latch_map[key] = [0, 0, 0, 0, 0, None]
    else:
        # No callback: record the latched data for later retrieval.
        updated_latch_entry = latching_entry
        updated_latch_entry[Constants.LATCH_STATE] = \
            Constants.LATCH_LATCHED
        updated_latch_entry[Constants.LATCHED_DATA] = \
            latching_entry[Constants.LATCHED_DATA]
        updated_latch_entry[Constants.LATCHED_TIME_STAMP] = time.time()
        self.latch_map[key] = updated_latch_entry
def firmware_drivers(self):
    """Gets the FirmwareDrivers API client (lazily created, then cached).

    Returns:
        FirmwareDrivers:
    """
    if not self.__firmware_drivers:
        self.__firmware_drivers = FirmwareDrivers(self.__connection)
    return self.__firmware_drivers
def decode(self, charset='utf-8', errors='replace'):
    """Decode the URL to a tuple made out of strings.

    The charset is only used for the path, query and fragment; the
    scheme is always ASCII and the netloc has its own decoder.
    """
    return URL(
        self.scheme.decode('ascii'),
        self.decode_netloc(),
        self.path.decode(charset, errors),
        self.query.decode(charset, errors),
        self.fragment.decode(charset, errors)
    )
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.